• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /**
3  * @file definition of host message ring functionality
4  * Provides type definitions and function prototypes used to link the
5  * DHD OS, bus, and protocol modules.
6  *
7  * Copyright (C) 1999-2019, Broadcom.
8  *
9  *      Unless you and Broadcom execute a separate written software license
10  * agreement governing use of this software, this software is licensed to you
11  * under the terms of the GNU General Public License version 2 (the "GPL"),
12  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
13  * following added to such license:
14  *
15  *      As a special exception, the copyright holders of this software give you
16  * permission to link this software with independent modules, and to copy and
17  * distribute the resulting executable under terms of your choice, provided that
18  * you also meet, for each linked independent module, the terms and conditions of
19  * the license of that module.  An independent module is a module which is not
20  * derived from this software.  The special exception does not apply to any
21  * modifications of the software.
22  *
23  *      Notwithstanding the above, under no circumstances may you combine this
24  * software in any way with any other Broadcom software provided under a license
25  * other than the GPL, without Broadcom's express prior written consent.
26  *
27  *
28  * <<Broadcom-WL-IPTag/Open:>>
29  *
30  * $Id: dhd_msgbuf.c 825801 2019-06-17 10:51:10Z $
31  */
32 
33 #include <typedefs.h>
34 #include <osl.h>
35 
36 #include <bcmutils.h>
37 #include <bcmmsgbuf.h>
38 #include <bcmendian.h>
39 #include <bcmstdlib_s.h>
40 
41 #include <dngl_stats.h>
42 #include <dhd.h>
43 #include <dhd_proto.h>
44 
45 #include <dhd_bus.h>
46 
47 #include <dhd_dbg.h>
48 #include <siutils.h>
49 #include <dhd_debug.h>
50 
51 #include <dhd_flowring.h>
52 
53 #include <pcie_core.h>
54 #include <bcmpcie.h>
55 #include <dhd_pcie.h>
56 #include <dhd_config.h>
57 
58 #if defined(DHD_LB)
59 #include <linux/cpu.h>
60 #include <bcm_ring.h>
61 #define DHD_LB_WORKQ_SZ			    (8192)
62 #define DHD_LB_WORKQ_SYNC           (16)
63 #define DHD_LB_WORK_SCHED           (DHD_LB_WORKQ_SYNC * 2)
64 #endif /* DHD_LB */
65 
66 #include <etd.h>
67 #include <hnd_debug.h>
68 #include <bcmtlv.h>
69 #include <hnd_armtrap.h>
70 #include <dnglevent.h>
71 
72 #ifdef DHD_EWPR_VER2
73 #include <dhd_bitpack.h>
74 #endif /* DHD_EWPR_VER2 */
75 
76 extern char dhd_version[];
77 extern char fw_version[];
78 
79 /**
80  * Host configures a soft doorbell for d2h rings, by specifying a 32bit host
81  * address where a value must be written. Host may also interrupt coalescing
82  * on this soft doorbell.
83  * Use Case: Hosts with network processors, may register with the dongle the
84  * network processor's thread wakeup register and a value corresponding to the
85  * core/thread context. Dongle will issue a write transaction <address,value>
86  * to the PCIE RC which will need to be routed to the mapped register space, by
87  * the host.
88  */
89 /* #define DHD_D2H_SOFT_DOORBELL_SUPPORT */
90 
91 /* Dependency Check */
92 #if defined(IOCTLRESP_USE_CONSTMEM) && defined(DHD_USE_STATIC_CTRLBUF)
93 #error "DHD_USE_STATIC_CTRLBUF is NOT working with DHD_USE_OSLPKT_FOR_RESPBUF"
94 #endif /* IOCTLRESP_USE_CONSTMEM && DHD_USE_STATIC_CTRLBUF */
95 
96 #define RETRIES 2		/* # of retries to retrieve matching ioctl response */
97 
98 #define DEFAULT_RX_BUFFERS_TO_POST	256
99 #define RXBUFPOST_THRESHOLD			32
100 #define RX_BUF_BURST				32 /* Rx buffers for MSDU Data */
101 
102 #define DHD_STOP_QUEUE_THRESHOLD	200
103 #define DHD_START_QUEUE_THRESHOLD	100
104 
105 #define RX_DMA_OFFSET		8 /* Mem2mem DMA inserts an extra 8 */
106 #define IOCT_RETBUF_SIZE	(RX_DMA_OFFSET + WLC_IOCTL_MAXLEN)
107 
108 /* flags for ioctl pending status */
109 #define MSGBUF_IOCTL_ACK_PENDING	(1<<0)
110 #define MSGBUF_IOCTL_RESP_PENDING	(1<<1)
111 
112 #define DHD_IOCTL_REQ_PKTBUFSZ		2048
113 #define MSGBUF_IOCTL_MAX_RQSTLEN	(DHD_IOCTL_REQ_PKTBUFSZ - H2DRING_CTRL_SUB_ITEMSIZE)
114 
115 #define DMA_ALIGN_LEN		4
116 
117 #define DMA_D2H_SCRATCH_BUF_LEN	8
118 #define DMA_XFER_LEN_LIMIT	0x400000
119 
120 #ifdef BCM_HOST_BUF
121 #ifndef DMA_HOST_BUFFER_LEN
122 #define DMA_HOST_BUFFER_LEN	0x200000
123 #endif // endif
124 #endif /* BCM_HOST_BUF */
125 
126 #define DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ		8192
127 
128 #define DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D		1
129 #define DHD_FLOWRING_MAX_EVENTBUF_POST			32
130 #define DHD_FLOWRING_MAX_IOCTLRESPBUF_POST		8
131 #define DHD_H2D_INFORING_MAX_BUF_POST			32
132 #define DHD_MAX_TSBUF_POST			8
133 
134 #define DHD_PROT_FUNCS	43
135 
136 /* Length of buffer in host for bus throughput measurement */
137 #define DHD_BUS_TPUT_BUF_LEN 2048
138 
139 #define TXP_FLUSH_NITEMS
140 
141 /* optimization to write "n" tx items at a time to ring */
142 #define TXP_FLUSH_MAX_ITEMS_FLUSH_CNT	48
143 
144 #define RING_NAME_MAX_LENGTH		24
145 #define CTRLSUB_HOSTTS_MEESAGE_SIZE		1024
146 /* Giving room before ioctl_trans_id rollsover. */
147 #define BUFFER_BEFORE_ROLLOVER 300
148 
149 /* 512K memory + 32K registers */
150 #define SNAPSHOT_UPLOAD_BUF_SIZE	((512 + 32) * 1024)
151 
152 struct msgbuf_ring; /* ring context for common and flow rings */
153 
154 /**
155  * PCIE D2H DMA Complete Sync Modes
156  *
157  * Firmware may interrupt the host, prior to the D2H Mem2Mem DMA completes into
158  * Host system memory. A WAR using one of 3 approaches is needed:
159  * 1. Dongle places a modulo-253 seqnum in last word of each D2H message
160  * 2. XOR Checksum, with epoch# in each work item. Dongle builds an XOR checksum
161  *    writes in the last word of each work item. Each work item has a seqnum
162  *    number = sequence num % 253.
163  *
164  * 3. Read Barrier: Dongle does a host memory read access prior to posting an
165  *    interrupt, ensuring that D2H data transfer indeed completed.
166  * 4. Dongle DMA's all indices after producing items in the D2H ring, flushing
167  *    ring contents before the indices.
168  *
169  * Host does not sync for DMA to complete with option #3 or #4, and a noop sync
170  * callback (see dhd_prot_d2h_sync_none) may be bound.
171  *
172  * Dongle advertizes host side sync mechanism requirements.
173  */
174 
175 #define PCIE_D2H_SYNC_WAIT_TRIES    (512U)
176 #define PCIE_D2H_SYNC_NUM_OF_STEPS  (5U)
177 #define PCIE_D2H_SYNC_DELAY         (100UL)	/* in terms of usecs */
178 
179 #define HWA_DB_TYPE_RXPOST	(0x0050)
180 #define HWA_DB_TYPE_TXCPLT	(0x0060)
181 #define HWA_DB_TYPE_RXCPLT	(0x0170)
182 #define HWA_DB_INDEX_VALUE(val)	((uint32)(val) << 16)
183 
184 #define HWA_ENAB_BITMAP_RXPOST	(1U << 0)	/* 1A */
185 #define HWA_ENAB_BITMAP_RXCPLT	(1U << 1)	/* 2B */
186 #define HWA_ENAB_BITMAP_TXCPLT	(1U << 2)	/* 4B */
187 
188 /**
189  * Custom callback attached based upon D2H DMA Sync mode advertized by dongle.
190  *
191  * On success: return cmn_msg_hdr_t::msg_type
192  * On failure: return 0 (invalid msg_type)
193  */
194 typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
195                                 volatile cmn_msg_hdr_t *msg, int msglen);
196 
197 /**
198  * Custom callback attached based upon D2H DMA Sync mode advertized by dongle.
199  * For EDL messages.
200  *
201  * On success: return cmn_msg_hdr_t::msg_type
202  * On failure: return 0 (invalid msg_type)
203  */
204 #ifdef EWP_EDL
205 typedef int (* d2h_edl_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
206                                 volatile cmn_msg_hdr_t *msg);
207 #endif /* EWP_EDL */
208 
209 /*
210  * +----------------------------------------------------------------------------
211  *
212  * RingIds and FlowId are not equivalent as ringids include D2H rings whereas
213  * flowids do not.
214  *
215  * Dongle advertizes the max H2D rings, as max_sub_queues = 'N' which includes
216  * the H2D common rings as well as the (N-BCMPCIE_H2D_COMMON_MSGRINGS) flowrings
217  *
218  * Here is a sample mapping for (based on PCIE Full Dongle Rev5) where,
219  *  BCMPCIE_H2D_COMMON_MSGRINGS = 2, i.e. 2 H2D common rings,
220  *  BCMPCIE_COMMON_MSGRINGS     = 5, i.e. include 3 D2H common rings.
221  *
222  *  H2D Control  Submit   RingId = 0        FlowId = 0 reserved never allocated
223  *  H2D RxPost   Submit   RingId = 1        FlowId = 1 reserved never allocated
224  *
225  *  D2H Control  Complete RingId = 2
226  *  D2H Transmit Complete RingId = 3
227  *  D2H Receive  Complete RingId = 4
228  *
229  *  H2D TxPost   FLOWRING RingId = 5         FlowId = 2     (1st flowring)
230  *  H2D TxPost   FLOWRING RingId = 6         FlowId = 3     (2nd flowring)
231  *  H2D TxPost   FLOWRING RingId = 5 + (N-1) FlowId = (N-1) (Nth flowring)
232  *
233  * When TxPost FlowId(s) are allocated, the FlowIds [0..FLOWID_RESERVED) are
234  * unused, where FLOWID_RESERVED is BCMPCIE_H2D_COMMON_MSGRINGS.
235  *
236  * Example: when a system supports 4 bc/mc and 128 uc flowrings, with
237  * BCMPCIE_H2D_COMMON_MSGRINGS = 2, and BCMPCIE_H2D_COMMON_MSGRINGS = 5, and the
238  * FlowId values would be in the range [2..133] and the corresponding
239  * RingId values would be in the range [5..136].
240  *
241  * The flowId allocator, may chose to, allocate Flowids:
242  *   bc/mc (per virtual interface) in one consecutive range [2..(2+VIFS))
243  *   X# of uc flowids in consecutive ranges (per station Id), where X is the
244  *   packet's access category (e.g. 4 uc flowids per station).
245  *
246  * CAUTION:
247  * When DMA indices array feature is used, RingId=5, corresponding to the 0th
248  * FLOWRING, will actually use the FlowId as index into the H2D DMA index,
249  * since the FlowId truly represents the index in the H2D DMA indices array.
250  *
251  * Likewise, in the D2H direction, the RingId - BCMPCIE_H2D_COMMON_MSGRINGS,
252  * will represent the index in the D2H DMA indices array.
253  *
254  * +----------------------------------------------------------------------------
255  */
256 
257 /* First TxPost Flowring Id */
258 #define DHD_FLOWRING_START_FLOWID   BCMPCIE_H2D_COMMON_MSGRINGS
259 
260 /* Determine whether a ringid belongs to a TxPost flowring */
261 #define DHD_IS_FLOWRING(ringid, max_flow_rings) \
262 	((ringid) >= BCMPCIE_COMMON_MSGRINGS && \
263 	(ringid) < ((max_flow_rings) + BCMPCIE_COMMON_MSGRINGS))
264 
265 /* Convert a H2D TxPost FlowId to a MsgBuf RingId */
266 #define DHD_FLOWID_TO_RINGID(flowid) \
267 	(BCMPCIE_COMMON_MSGRINGS + ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS))
268 
269 /* Convert a MsgBuf RingId to a H2D TxPost FlowId */
270 #define DHD_RINGID_TO_FLOWID(ringid) \
271 	(BCMPCIE_H2D_COMMON_MSGRINGS + ((ringid) - BCMPCIE_COMMON_MSGRINGS))
272 
273 /* Convert a H2D MsgBuf RingId to an offset index into the H2D DMA indices array
274  * This may be used for the H2D DMA WR index array or H2D DMA RD index array or
275  * any array of H2D rings.
276  */
277 #define DHD_H2D_RING_OFFSET(ringid) \
278 	(((ringid) >= BCMPCIE_COMMON_MSGRINGS) ? DHD_RINGID_TO_FLOWID(ringid) : (ringid))
279 
280 /* Convert a H2D MsgBuf Flowring Id to an offset index into the H2D DMA indices array
281  * This may be used for IFRM.
282  */
283 #define DHD_H2D_FRM_FLOW_RING_OFFSET(ringid) \
284 	((ringid) - BCMPCIE_COMMON_MSGRINGS)
285 
286 /* Convert a D2H MsgBuf RingId to an offset index into the D2H DMA indices array
287  * This may be used for the D2H DMA WR index array or D2H DMA RD index array or
288  * any array of D2H rings.
289  * d2h debug ring is located at the end, i.e. after all the tx flow rings and h2d debug ring
290  * max_h2d_rings: total number of h2d rings
291  */
292 #define DHD_D2H_RING_OFFSET(ringid, max_h2d_rings) \
293 	((ringid) > (max_h2d_rings) ? \
294 		((ringid) - max_h2d_rings) : \
295 		((ringid) - BCMPCIE_H2D_COMMON_MSGRINGS))
296 
297 /* Convert a D2H DMA Indices Offset to a RingId */
298 #define DHD_D2H_RINGID(offset) \
299 	((offset) + BCMPCIE_H2D_COMMON_MSGRINGS)
300 
301 #define DHD_DMAH_NULL      ((void*)NULL)
302 
303 /*
304  * Pad a DMA-able buffer by an additional cachline. If the end of the DMA-able
305  * buffer does not occupy the entire cacheline, and another object is placed
306  * following the DMA-able buffer, data corruption may occur if the DMA-able
307  * buffer is used to DMAing into (e.g. D2H direction), when HW cache coherency
308  * is not available.
309  */
310 #if defined(L1_CACHE_BYTES)
311 #define DHD_DMA_PAD        (L1_CACHE_BYTES)
312 #else
313 #define DHD_DMA_PAD        (128)
314 #endif // endif
315 
316 /*
317  * +----------------------------------------------------------------------------
318  * Flowring Pool
319  *
320  * Unlike common rings, which are attached very early on (dhd_prot_attach),
321  * flowrings are dynamically instantiated. Moreover, flowrings may require a
322  * larger DMA-able buffer. To avoid issues with fragmented cache coherent
323  * DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once.
324  * The DMA-able buffers are attached to these pre-allocated msgbuf_ring.
325  *
326  * Each DMA-able buffer may be allocated independently, or may be carved out
327  * of a single large contiguous region that is registered with the protocol
328  * layer into flowrings_dma_buf. On a 64bit platform, this contiguous region
329  * may not span 0x00000000FFFFFFFF (avoid dongle side 64bit ptr arithmetic).
330  *
331  * No flowring pool action is performed in dhd_prot_attach(), as the number
332  * of h2d rings is not yet known.
333  *
334  * In dhd_prot_init(), the dongle advertized number of h2d rings is used to
335  * determine the number of flowrings required, and a pool of msgbuf_rings are
336  * allocated and a DMA-able buffer (carved or allocated) is attached.
337  * See: dhd_prot_flowrings_pool_attach()
338  *
339  * A flowring msgbuf_ring object may be fetched from this pool during flowring
340  * creation, using the flowid. Likewise, flowrings may be freed back into the
341  * pool on flowring deletion.
342  * See: dhd_prot_flowrings_pool_fetch(), dhd_prot_flowrings_pool_release()
343  *
344  * In dhd_prot_detach(), the flowring pool is detached. The DMA-able buffers
345  * are detached (returned back to the carved region or freed), and the pool of
346  * msgbuf_ring and any objects allocated against it are freed.
347  * See: dhd_prot_flowrings_pool_detach()
348  *
349  * In dhd_prot_reset(), the flowring pool is simply reset by returning it to a
350  * state as-if upon an attach. All DMA-able buffers are retained.
351  * Following a dhd_prot_reset(), in a subsequent dhd_prot_init(), the flowring
352  * pool attach will notice that the pool persists and continue to use it. This
353  * will avoid the case of a fragmented DMA-able region.
354  *
355  * +----------------------------------------------------------------------------
356  */
357 
358 /* Conversion of a flowid to a flowring pool index */
359 #define DHD_FLOWRINGS_POOL_OFFSET(flowid) \
360 	((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS)
361 
362 /* Fetch the msgbuf_ring_t from the flowring pool given a flowid */
363 #define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid) \
364 	(msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + \
365 	    DHD_FLOWRINGS_POOL_OFFSET(flowid)
366 
367 /* Traverse each flowring in the flowring pool, assigning ring and flowid */
368 #define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, total_flowrings) \
369 	for ((flowid) = DHD_FLOWRING_START_FLOWID, \
370 		(ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \
371 		 (flowid) < ((total_flowrings) + DHD_FLOWRING_START_FLOWID); \
372 		 (ring)++, (flowid)++)
373 
374 /* Used in loopback tests */
375 typedef struct dhd_dmaxfer {
376 	dhd_dma_buf_t srcmem;
377 	dhd_dma_buf_t dstmem;
378 	uint32        srcdelay;
379 	uint32        destdelay;
380 	uint32        len;
381 	bool          in_progress;
382 	uint64        start_usec;
383 	uint64        time_taken;
384 	uint32        d11_lpbk;
385 	int           status;
386 } dhd_dmaxfer_t;
387 
388 /**
389  * msgbuf_ring : This object manages the host side ring that includes a DMA-able
390  * buffer, the WR and RD indices, ring parameters such as max number of items
391  * an length of each items, and other miscellaneous runtime state.
392  * A msgbuf_ring may be used to represent a H2D or D2H common ring or a
393  * H2D TxPost ring as specified in the PCIE FullDongle Spec.
394  * Ring parameters are conveyed to the dongle, which maintains its own peer end
395  * ring state. Depending on whether the DMA Indices feature is supported, the
396  * host will update the WR/RD index in the DMA indices array in host memory or
397  * directly in dongle memory.
398  */
399 typedef struct msgbuf_ring {
400 	bool           inited;
401 	uint16         idx;       /* ring id */
402 	uint16         rd;        /* read index */
403 	uint16         curr_rd;   /* read index for debug */
404 	uint16         wr;        /* write index */
405 	uint16         max_items; /* maximum number of items in ring */
406 	uint16         item_len;  /* length of each item in the ring */
407 	sh_addr_t      base_addr; /* LITTLE ENDIAN formatted: base address */
408 	dhd_dma_buf_t  dma_buf;   /* DMA-able buffer: pa, va, len, dmah, secdma */
409 	uint32         seqnum;    /* next expected item's sequence number */
410 #ifdef TXP_FLUSH_NITEMS
411 	void           *start_addr;
412 	/* # of messages on ring not yet announced to dongle */
413 	uint16         pend_items_count;
414 #endif /* TXP_FLUSH_NITEMS */
415 
416 	uint8   ring_type;
417 	uint16  hwa_db_type;	  /* hwa type non-zero for Data path rings */
418 	uint8   n_completion_ids;
419 	bool    create_pending;
420 	uint16  create_req_id;
421 	uint8   current_phase;
422 	uint16	compeltion_ring_ids[MAX_COMPLETION_RING_IDS_ASSOCIATED];
423 	uchar		name[RING_NAME_MAX_LENGTH];
424 	uint32		ring_mem_allocated;
425 	void	*ring_lock;
426 } msgbuf_ring_t;
427 
428 #define DHD_RING_BGN_VA(ring)           ((ring)->dma_buf.va)
429 #define DHD_RING_END_VA(ring) \
430 	((uint8 *)(DHD_RING_BGN_VA((ring))) + \
431 	 (((ring)->max_items - 1) * (ring)->item_len))
432 
433 /* This can be overwritten by module parameter defined in dhd_linux.c
434  * or by dhd iovar h2d_max_txpost.
435  */
436 int h2d_max_txpost = H2DRING_TXPOST_MAX_ITEM;
437 
438 /** DHD protocol handle. Is an opaque type to other DHD software layers. */
439 typedef struct dhd_prot {
440 	osl_t *osh;		/* OSL handle */
441 	uint16 rxbufpost_sz;
442 	uint16 rxbufpost;
443 	uint16 max_rxbufpost;
444 	uint16 max_eventbufpost;
445 	uint16 max_ioctlrespbufpost;
446 	uint16 max_tsbufpost;
447 	uint16 max_infobufpost;
448 	uint16 infobufpost;
449 	uint16 cur_event_bufs_posted;
450 	uint16 cur_ioctlresp_bufs_posted;
451 	uint16 cur_ts_bufs_posted;
452 
453 	/* Flow control mechanism based on active transmits pending */
454 	osl_atomic_t active_tx_count; /* increments/decrements on every packet tx/tx_status */
455 	uint16 h2d_max_txpost;
456 	uint16 txp_threshold;  /* optimization to write "n" tx items at a time to ring */
457 
458 	/* MsgBuf Ring info: has a dhd_dma_buf that is dynamically allocated */
459 	msgbuf_ring_t h2dring_ctrl_subn; /* H2D ctrl message submission ring */
460 	msgbuf_ring_t h2dring_rxp_subn; /* H2D RxBuf post ring */
461 	msgbuf_ring_t d2hring_ctrl_cpln; /* D2H ctrl completion ring */
462 	msgbuf_ring_t d2hring_tx_cpln; /* D2H Tx complete message ring */
463 	msgbuf_ring_t d2hring_rx_cpln; /* D2H Rx complete message ring */
464 	msgbuf_ring_t *h2dring_info_subn; /* H2D info submission ring */
465 	msgbuf_ring_t *d2hring_info_cpln; /* D2H info completion ring */
466 	msgbuf_ring_t *d2hring_edl; /* D2H Enhanced Debug Lane (EDL) ring */
467 
468 	msgbuf_ring_t *h2d_flowrings_pool; /* Pool of preallocated flowings */
469 	dhd_dma_buf_t flowrings_dma_buf; /* Contiguous DMA buffer for flowrings */
470 	uint16        h2d_rings_total; /* total H2D (common rings + flowrings) */
471 
472 	uint32		rx_dataoffset;
473 
474 	dhd_mb_ring_t	mb_ring_fn;	/* called when dongle needs to be notified of new msg */
475 	dhd_mb_ring_2_t	mb_2_ring_fn;	/* called when dongle needs to be notified of new msg */
476 
477 	/* ioctl related resources */
478 	uint8 ioctl_state;
479 	int16 ioctl_status;		/* status returned from dongle */
480 	uint16 ioctl_resplen;
481 	dhd_ioctl_recieved_status_t ioctl_received;
482 	uint curr_ioctl_cmd;
483 	dhd_dma_buf_t	retbuf;		/* For holding ioctl response */
484 	dhd_dma_buf_t	ioctbuf;	/* For holding ioctl request */
485 
486 	dhd_dma_buf_t	d2h_dma_scratch_buf;	/* For holding d2h scratch */
487 
488 	/* DMA-able arrays for holding WR and RD indices */
489 	uint32          rw_index_sz; /* Size of a RD or WR index in dongle */
490 	dhd_dma_buf_t   h2d_dma_indx_wr_buf;	/* Array of H2D WR indices */
491 	dhd_dma_buf_t	h2d_dma_indx_rd_buf;	/* Array of H2D RD indices */
492 	dhd_dma_buf_t	d2h_dma_indx_wr_buf;	/* Array of D2H WR indices */
493 	dhd_dma_buf_t	d2h_dma_indx_rd_buf;	/* Array of D2H RD indices */
494 	dhd_dma_buf_t h2d_ifrm_indx_wr_buf;	/* Array of H2D WR indices for ifrm */
495 
496 	dhd_dma_buf_t	host_bus_throughput_buf; /* bus throughput measure buffer */
497 
498 	dhd_dma_buf_t   *flowring_buf;    /* pool of flow ring buf */
499 	uint32			flowring_num;
500 
501 	d2h_sync_cb_t d2h_sync_cb; /* Sync on D2H DMA done: SEQNUM or XORCSUM */
502 #ifdef EWP_EDL
503 	d2h_edl_sync_cb_t d2h_edl_sync_cb; /* Sync on EDL D2H DMA done: SEQNUM or XORCSUM */
504 #endif /* EWP_EDL */
505 	ulong d2h_sync_wait_max; /* max number of wait loops to receive one msg */
506 	ulong d2h_sync_wait_tot; /* total wait loops */
507 
508 	dhd_dmaxfer_t	dmaxfer; /* for test/DMA loopback */
509 
510 	uint16		ioctl_seq_no;
511 	uint16		data_seq_no;
512 	uint16		ioctl_trans_id;
513 	void		*pktid_ctrl_map; /* a pktid maps to a packet and its metadata */
514 	void		*pktid_rx_map;	/* pktid map for rx path */
515 	void		*pktid_tx_map;	/* pktid map for tx path */
516 	bool		metadata_dbg;
517 	void		*pktid_map_handle_ioctl;
518 #ifdef DHD_MAP_PKTID_LOGGING
519 	void		*pktid_dma_map;	/* pktid map for DMA MAP */
520 	void		*pktid_dma_unmap; /* pktid map for DMA UNMAP */
521 #endif /* DHD_MAP_PKTID_LOGGING */
522 	uint32		pktid_depleted_cnt;	/* pktid depleted count */
523 	/* netif tx queue stop count */
524 	uint8		pktid_txq_stop_cnt;
525 	/* netif tx queue start count */
526 	uint8		pktid_txq_start_cnt;
527 	uint64		ioctl_fillup_time;	/* timestamp for ioctl fillup */
528 	uint64		ioctl_ack_time;		/* timestamp for ioctl ack */
529 	uint64		ioctl_cmplt_time;	/* timestamp for ioctl completion */
530 
531 	/* Applications/utilities can read tx and rx metadata using IOVARs */
532 	uint16		rx_metadata_offset;
533 	uint16		tx_metadata_offset;
534 
535 #if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
536 	/* Host's soft doorbell configuration */
537 	bcmpcie_soft_doorbell_t soft_doorbell[BCMPCIE_D2H_COMMON_MSGRINGS];
538 #endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
539 
540 	/* Work Queues to be used by the producer and the consumer, and threshold
541 	 * when the WRITE index must be synced to consumer's workq
542 	 */
543 #if defined(DHD_LB_TXC)
544 	uint32 tx_compl_prod_sync ____cacheline_aligned;
545 	bcm_workq_t tx_compl_prod, tx_compl_cons;
546 #endif /* DHD_LB_TXC */
547 #if defined(DHD_LB_RXC)
548 	uint32 rx_compl_prod_sync ____cacheline_aligned;
549 	bcm_workq_t rx_compl_prod, rx_compl_cons;
550 #endif /* DHD_LB_RXC */
551 
552 	dhd_dma_buf_t	fw_trap_buf; /* firmware trap buffer */
553 
554 	uint32  host_ipc_version; /* Host sypported IPC rev */
555 	uint32  device_ipc_version; /* FW supported IPC rev */
556 	uint32  active_ipc_version; /* Host advertised IPC rev */
557 	dhd_dma_buf_t   hostts_req_buf; /* For holding host timestamp request buf */
558 	bool    hostts_req_buf_inuse;
559 	bool    rx_ts_log_enabled;
560 	bool    tx_ts_log_enabled;
561 	bool no_retry;
562 	bool no_aggr;
563 	bool fixed_rate;
564 	dhd_dma_buf_t	host_scb_buf;	/* scb host offload buffer */
565 #ifdef DHD_HP2P
566 	msgbuf_ring_t *d2hring_hp2p_txcpl; /* D2H HPP Tx completion ring */
567 	msgbuf_ring_t *d2hring_hp2p_rxcpl; /* D2H HPP Rx completion ring */
568 #endif /* DHD_HP2P */
569 	bool no_tx_resource;
570 } dhd_prot_t;
571 
572 #ifdef DHD_EWPR_VER2
573 #define HANG_INFO_BASE64_BUFFER_SIZE 640
574 #endif // endif
575 
576 #ifdef DHD_DUMP_PCIE_RINGS
577 static
578 int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file,
579 	const void *user_buf, unsigned long *file_posn);
580 #ifdef EWP_EDL
581 static
582 int dhd_edl_ring_hdr_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, const void *user_buf,
583 	unsigned long *file_posn);
584 #endif /* EWP_EDL */
585 #endif /* DHD_DUMP_PCIE_RINGS */
586 
587 extern bool dhd_timesync_delay_post_bufs(dhd_pub_t *dhdp);
588 extern void dhd_schedule_dmaxfer_free(dhd_pub_t* dhdp, dmaxref_mem_map_t *dmmap);
589 /* Convert a dmaaddr_t to a base_addr with htol operations */
590 static INLINE void dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa);
591 
592 /* APIs for managing a DMA-able buffer */
593 static int  dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
594 static void dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
595 
596 /* msgbuf ring management */
597 static int dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring,
598 	const char *name, uint16 max_items, uint16 len_item, uint16 ringid);
599 static void dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring);
600 static void dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring);
601 static void dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring);
602 static void dhd_prot_process_fw_timestamp(dhd_pub_t *dhd, void* buf);
603 
604 /* Pool of pre-allocated msgbuf_ring_t with DMA-able buffers for Flowrings */
605 static int  dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd);
606 static void dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd);
607 static void dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd);
608 
609 /* Fetch and Release a flowring msgbuf_ring from flowring  pool */
610 static msgbuf_ring_t *dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd,
611 	uint16 flowid);
612 /* see also dhd_prot_flowrings_pool_release() in dhd_prot.h */
613 
614 /* Producer: Allocate space in a msgbuf ring */
615 static void* dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring,
616 	uint16 nitems, uint16 *alloced, bool exactly_nitems);
617 static void* dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems,
618 	uint16 *alloced, bool exactly_nitems);
619 
620 /* Consumer: Determine the location where the next message may be consumed */
621 static uint8* dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring,
622 	uint32 *available_len);
623 
624 /* Producer (WR index update) or Consumer (RD index update) indication */
625 static void dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t *ring,
626 	void *p, uint16 len);
627 static void dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t *ring);
628 
629 static INLINE int dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
630 	dhd_dma_buf_t *dma_buf, uint32 bufsz);
631 
632 /* Set/Get a RD or WR index in the array of indices */
633 /* See also: dhd_prot_dma_indx_init() */
634 void dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type,
635 	uint16 ringid);
636 static uint16 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid);
637 
638 /* Locate a packet given a pktid */
639 static INLINE void *dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype,
640 	bool free_pktid);
641 /* Locate a packet given a PktId and free it. */
642 static INLINE void dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send);
643 
644 static int dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
645 	void *buf, uint len, uint8 action);
646 static int dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
647 	void *buf, uint len, uint8 action);
648 static int dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf);
649 static int dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd,
650 	void *buf, int ifidx);
651 
652 /* Post buffers for Rx, control ioctl response and events */
653 static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msgid, uint32 max_to_post);
654 static void dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *pub);
655 static void dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *pub);
656 static void dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid);
657 static int dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid);
658 static int dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *pub);
659 
660 static void dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt);
661 
662 /* D2H Message handling */
663 static int dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len);
664 
665 /* D2H Message handlers */
666 static void dhd_prot_noop(dhd_pub_t *dhd, void *msg);
667 static void dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg);
668 static void dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg);
669 static void dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg);
670 static void dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg);
671 static void dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg);
672 static void dhd_prot_event_process(dhd_pub_t *dhd, void *msg);
673 
674 /* Loopback test with dongle */
675 static void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dma);
676 static int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, uint srcdelay,
677 	uint destdelay, dhd_dmaxfer_t *dma);
678 static void dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg);
679 
680 /* Flowring management communication with dongle */
681 static void dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg);
682 static void dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg);
683 static void dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg);
684 static void dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg);
685 static void dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg);
686 
687 /* Monitor Mode */
688 #ifdef WL_MONITOR
689 extern bool dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx);
690 extern void dhd_rx_mon_pkt(dhd_pub_t *dhdp, host_rxbuf_cmpl_t* msg, void *pkt, int ifidx);
691 #endif /* WL_MONITOR */
692 
693 /* Configure a soft doorbell per D2H ring */
694 static void dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd);
695 static void dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg);
696 static void dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf);
697 static void dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf);
698 static void dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf);
699 static void dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf);
700 static void dhd_prot_detach_info_rings(dhd_pub_t *dhd);
701 #ifdef DHD_HP2P
702 static void dhd_prot_detach_hp2p_rings(dhd_pub_t *dhd);
703 #endif /* DHD_HP2P */
704 #ifdef EWP_EDL
705 static void dhd_prot_detach_edl_rings(dhd_pub_t *dhd);
706 #endif // endif
707 static void dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf);
708 static void dhd_prot_process_snapshot_complete(dhd_pub_t *dhd, void *buf);
709 
710 #ifdef DHD_HP2P
711 static void dhd_update_hp2p_rxstats(dhd_pub_t *dhd, host_rxbuf_cmpl_t *rxstatus);
712 static void dhd_update_hp2p_txstats(dhd_pub_t *dhd, host_txbuf_cmpl_t *txstatus);
713 static void dhd_calc_hp2p_burst(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 flowid);
714 static void dhd_update_hp2p_txdesc(dhd_pub_t *dhd, host_txbuf_post_t *txdesc);
715 #endif // endif
716 typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void *msg);
717 
718 /** callback functions for messages generated by the dongle */
719 #define MSG_TYPE_INVALID 0
720 
721 static dhd_msgbuf_func_t table_lookup[DHD_PROT_FUNCS] = {
722 	dhd_prot_noop, /* 0 is MSG_TYPE_INVALID */
723 	dhd_prot_genstatus_process, /* MSG_TYPE_GEN_STATUS */
724 	dhd_prot_ringstatus_process, /* MSG_TYPE_RING_STATUS */
725 	NULL,
726 	dhd_prot_flow_ring_create_response_process, /* MSG_TYPE_FLOW_RING_CREATE_CMPLT */
727 	NULL,
728 	dhd_prot_flow_ring_delete_response_process, /* MSG_TYPE_FLOW_RING_DELETE_CMPLT */
729 	NULL,
730 	dhd_prot_flow_ring_flush_response_process, /* MSG_TYPE_FLOW_RING_FLUSH_CMPLT */
731 	NULL,
732 	dhd_prot_ioctack_process, /* MSG_TYPE_IOCTLPTR_REQ_ACK */
733 	NULL,
734 	dhd_prot_ioctcmplt_process, /* MSG_TYPE_IOCTL_CMPLT */
735 	NULL,
736 	dhd_prot_event_process, /* MSG_TYPE_WL_EVENT */
737 	NULL,
738 	dhd_prot_txstatus_process, /* MSG_TYPE_TX_STATUS */
739 	NULL,
740 	NULL,	/* MSG_TYPE_RX_CMPLT use dedicated handler */
741 	NULL,
742 	dhd_msgbuf_dmaxfer_process, /* MSG_TYPE_LPBK_DMAXFER_CMPLT */
743 	NULL, /* MSG_TYPE_FLOW_RING_RESUME */
744 	dhd_prot_process_flow_ring_resume_response, /* MSG_TYPE_FLOW_RING_RESUME_CMPLT */
745 	NULL, /* MSG_TYPE_FLOW_RING_SUSPEND */
746 	dhd_prot_process_flow_ring_suspend_response, /* MSG_TYPE_FLOW_RING_SUSPEND_CMPLT */
747 	NULL, /* MSG_TYPE_INFO_BUF_POST */
748 	dhd_prot_process_infobuf_complete, /* MSG_TYPE_INFO_BUF_CMPLT */
749 	NULL, /* MSG_TYPE_H2D_RING_CREATE */
750 	NULL, /* MSG_TYPE_D2H_RING_CREATE */
751 	dhd_prot_process_h2d_ring_create_complete, /* MSG_TYPE_H2D_RING_CREATE_CMPLT */
752 	dhd_prot_process_d2h_ring_create_complete, /* MSG_TYPE_D2H_RING_CREATE_CMPLT */
753 	NULL, /* MSG_TYPE_H2D_RING_CONFIG */
754 	NULL, /* MSG_TYPE_D2H_RING_CONFIG */
755 	NULL, /* MSG_TYPE_H2D_RING_CONFIG_CMPLT */
756 	dhd_prot_process_d2h_ring_config_complete, /* MSG_TYPE_D2H_RING_CONFIG_CMPLT */
757 	NULL, /* MSG_TYPE_H2D_MAILBOX_DATA */
758 	dhd_prot_process_d2h_mb_data, /* MSG_TYPE_D2H_MAILBOX_DATA */
759 	NULL,	/* MSG_TYPE_TIMSTAMP_BUFPOST */
760 	NULL,	/* MSG_TYPE_HOSTTIMSTAMP */
761 	dhd_prot_process_d2h_host_ts_complete,	/* MSG_TYPE_HOSTTIMSTAMP_CMPLT */
762 	dhd_prot_process_fw_timestamp,	/* MSG_TYPE_FIRMWARE_TIMESTAMP */
763 	NULL,	/* MSG_TYPE_SNAPSHOT_UPLOAD */
764 	dhd_prot_process_snapshot_complete,	/* MSG_TYPE_SNAPSHOT_CMPLT */
765 };
766 
767 #ifdef DHD_RX_CHAINING
768 
769 #define PKT_CTF_CHAINABLE(dhd, ifidx, evh, prio, h_sa, h_da, h_prio) \
770 	(dhd_wet_chainable(dhd) && \
771 	dhd_rx_pkt_chainable((dhd), (ifidx)) && \
772 	!ETHER_ISNULLDEST(((struct ether_header *)(evh))->ether_dhost) && \
773 	!ETHER_ISMULTI(((struct ether_header *)(evh))->ether_dhost) && \
774 	!eacmp((h_da), ((struct ether_header *)(evh))->ether_dhost) && \
775 	!eacmp((h_sa), ((struct ether_header *)(evh))->ether_shost) && \
776 	((h_prio) == (prio)) && (dhd_ctf_hotbrc_check((dhd), (evh), (ifidx))) && \
777 	((((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IP)) || \
778 	(((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IPV6))))
779 
780 static INLINE void BCMFASTPATH dhd_rxchain_reset(rxchain_info_t *rxchain);
781 static void BCMFASTPATH dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx);
782 static void BCMFASTPATH dhd_rxchain_commit(dhd_pub_t *dhd);
783 
784 #define DHD_PKT_CTF_MAX_CHAIN_LEN	64
785 
786 #endif /* DHD_RX_CHAINING */
787 
788 #define DHD_LPBKDTDUMP_ON()	(dhd_msg_level & DHD_LPBKDTDUMP_VAL)
789 
790 static void dhd_prot_h2d_sync_init(dhd_pub_t *dhd);
791 
792 bool
dhd_prot_is_cmpl_ring_empty(dhd_pub_t * dhd,void * prot_info)793 dhd_prot_is_cmpl_ring_empty(dhd_pub_t *dhd, void *prot_info)
794 {
795 	msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)prot_info;
796 	uint16 rd, wr;
797 	bool ret;
798 
799 	if (dhd->dma_d2h_ring_upd_support) {
800 		wr = flow_ring->wr;
801 	} else {
802 		dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
803 	}
804 	if (dhd->dma_h2d_ring_upd_support) {
805 		rd = flow_ring->rd;
806 	} else {
807 		dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
808 	}
809 	ret = (wr == rd) ? TRUE : FALSE;
810 	return ret;
811 }
812 
813 void
dhd_prot_dump_ring_ptrs(void * prot_info)814 dhd_prot_dump_ring_ptrs(void *prot_info)
815 {
816 	msgbuf_ring_t *ring = (msgbuf_ring_t *)prot_info;
817 	DHD_ERROR(("%s curr_rd: %d rd: %d wr: %d \n", __FUNCTION__,
818 		ring->curr_rd, ring->rd, ring->wr));
819 }
820 
821 uint16
dhd_prot_get_h2d_max_txpost(dhd_pub_t * dhd)822 dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd)
823 {
824 	return (uint16)h2d_max_txpost;
825 }
826 void
dhd_prot_set_h2d_max_txpost(dhd_pub_t * dhd,uint16 max_txpost)827 dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost)
828 {
829 	h2d_max_txpost = max_txpost;
830 }
831 /**
832  * D2H DMA to completion callback handlers. Based on the mode advertised by the
833  * dongle through the PCIE shared region, the appropriate callback will be
834  * registered in the proto layer to be invoked prior to precessing any message
835  * from a D2H DMA ring. If the dongle uses a read barrier or another mode that
836  * does not require host participation, then a noop callback handler will be
837  * bound that simply returns the msg_type.
838  */
839 static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring,
840                                        uint32 tries, volatile uchar *msg, int msglen);
841 static uint8 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
842                                       volatile cmn_msg_hdr_t *msg, int msglen);
843 static uint8 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
844                                        volatile cmn_msg_hdr_t *msg, int msglen);
845 static uint8 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
846                                     volatile cmn_msg_hdr_t *msg, int msglen);
847 static void dhd_prot_d2h_sync_init(dhd_pub_t *dhd);
848 static int dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
849 	uint16 ring_type, uint32 id);
850 static int dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
851 	uint8 type, uint32 id);
852 
853 /**
854  * dhd_prot_d2h_sync_livelock - when the host determines that a DMA transfer has
855  * not completed, a livelock condition occurs. Host will avert this livelock by
856  * dropping this message and moving to the next. This dropped message can lead
857  * to a packet leak, or even something disastrous in the case the dropped
858  * message happens to be a control response.
859  * Here we will log this condition. One may choose to reboot the dongle.
860  *
861  */
862 static void
dhd_prot_d2h_sync_livelock(dhd_pub_t * dhd,uint32 msg_seqnum,msgbuf_ring_t * ring,uint32 tries,volatile uchar * msg,int msglen)863 dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring, uint32 tries,
864                            volatile uchar *msg, int msglen)
865 {
866 	uint32 ring_seqnum = ring->seqnum;
867 
868 	if (dhd_query_bus_erros(dhd)) {
869 		return;
870 	}
871 
872 	DHD_ERROR((
873 		"LIVELOCK DHD<%p> ring<%s> msg_seqnum<%u> ring_seqnum<%u:%u> tries<%u> max<%lu>"
874 		" tot<%lu> dma_buf va<%p> msg<%p> curr_rd<%d> rd<%d> wr<%d>\n",
875 		dhd, ring->name, msg_seqnum, ring_seqnum, ring_seqnum% D2H_EPOCH_MODULO, tries,
876 		dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot,
877 		ring->dma_buf.va, msg, ring->curr_rd, ring->rd, ring->wr));
878 
879 	dhd_prhex("D2H MsgBuf Failure", msg, msglen, DHD_ERROR_VAL);
880 
881 	/* Try to resume if already suspended or suspend in progress */
882 
883 	/* Skip if still in suspended or suspend in progress */
884 	if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd)) {
885 		DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
886 			__FUNCTION__, dhd->busstate, dhd->dhd_bus_busy_state));
887 		goto exit;
888 	}
889 
890 	dhd_bus_dump_console_buffer(dhd->bus);
891 	dhd_prot_debug_info_print(dhd);
892 
893 #ifdef DHD_FW_COREDUMP
894 	if (dhd->memdump_enabled) {
895 		/* collect core dump */
896 		dhd->memdump_type = DUMP_TYPE_BY_LIVELOCK;
897 		dhd_bus_mem_dump(dhd);
898 	}
899 #endif /* DHD_FW_COREDUMP */
900 
901 exit:
902 	dhd_schedule_reset(dhd);
903 
904 	dhd->livelock_occured = TRUE;
905 }
906 
907 /**
908  * dhd_prot_d2h_sync_seqnum - Sync on a D2H DMA completion using the SEQNUM
909  * mode. Sequence number is always in the last word of a message.
910  */
911 static uint8 BCMFASTPATH
dhd_prot_d2h_sync_seqnum(dhd_pub_t * dhd,msgbuf_ring_t * ring,volatile cmn_msg_hdr_t * msg,int msglen)912 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
913                          volatile cmn_msg_hdr_t *msg, int msglen)
914 {
915 	uint32 tries;
916 	uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
917 	int num_words = msglen / sizeof(uint32); /* num of 32bit words */
918 	volatile uint32 *marker = (volatile uint32 *)msg + (num_words - 1); /* last word */
919 	dhd_prot_t *prot = dhd->prot;
920 	uint32 msg_seqnum;
921 	uint32 step = 0;
922 	uint32 delay = PCIE_D2H_SYNC_DELAY;
923 	uint32 total_tries = 0;
924 
925 	ASSERT(msglen == ring->item_len);
926 
927 	BCM_REFERENCE(delay);
928 	/*
929 	 * For retries we have to make some sort of stepper algorithm.
930 	 * We see that every time when the Dongle comes out of the D3
931 	 * Cold state, the first D2H mem2mem DMA takes more time to
932 	 * complete, leading to livelock issues.
933 	 *
934 	 * Case 1 - Apart from Host CPU some other bus master is
935 	 * accessing the DDR port, probably page close to the ring
936 	 * so, PCIE does not get a change to update the memory.
937 	 * Solution - Increase the number of tries.
938 	 *
939 	 * Case 2 - The 50usec delay given by the Host CPU is not
940 	 * sufficient for the PCIe RC to start its work.
941 	 * In this case the breathing time of 50usec given by
942 	 * the Host CPU is not sufficient.
943 	 * Solution: Increase the delay in a stepper fashion.
944 	 * This is done to ensure that there are no
945 	 * unwanted extra delay introdcued in normal conditions.
946 	 */
947 	for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
948 		for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
949 			msg_seqnum = *marker;
950 			if (ltoh32(msg_seqnum) == ring_seqnum) { /* dma upto last word done */
951 				ring->seqnum++; /* next expected sequence number */
952 				/* Check for LIVELOCK induce flag, which is set by firing
953 				 * dhd iovar to induce LIVELOCK error. If flag is set,
954 				 * MSG_TYPE_INVALID is returned, which results in to LIVELOCK error.
955 				 */
956 				if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
957 					goto dma_completed;
958 				}
959 			}
960 
961 			total_tries = (uint32)(((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries);
962 
963 			if (total_tries > prot->d2h_sync_wait_max)
964 				prot->d2h_sync_wait_max = total_tries;
965 
966 			OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
967 			OSL_CPU_RELAX(); /* CPU relax for msg_seqnum  value to update */
968 			OSL_DELAY(delay * step); /* Add stepper delay */
969 
970 		} /* for PCIE_D2H_SYNC_WAIT_TRIES */
971 	} /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
972 
973 	dhd_prot_d2h_sync_livelock(dhd, msg_seqnum, ring, total_tries,
974 		(volatile uchar *) msg, msglen);
975 
976 	ring->seqnum++; /* skip this message ... leak of a pktid */
977 	return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
978 
979 dma_completed:
980 
981 	prot->d2h_sync_wait_tot += tries;
982 	return msg->msg_type;
983 }
984 
985 /**
986  * dhd_prot_d2h_sync_xorcsum - Sync on a D2H DMA completion using the XORCSUM
987  * mode. The xorcsum is placed in the last word of a message. Dongle will also
988  * place a seqnum in the epoch field of the cmn_msg_hdr.
989  */
990 static uint8 BCMFASTPATH
dhd_prot_d2h_sync_xorcsum(dhd_pub_t * dhd,msgbuf_ring_t * ring,volatile cmn_msg_hdr_t * msg,int msglen)991 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
992                           volatile cmn_msg_hdr_t *msg, int msglen)
993 {
994 	uint32 tries;
995 	uint32 prot_checksum = 0; /* computed checksum */
996 	int num_words = msglen / sizeof(uint32); /* num of 32bit words */
997 	uint8 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
998 	dhd_prot_t *prot = dhd->prot;
999 	uint32 step = 0;
1000 	uint32 delay = PCIE_D2H_SYNC_DELAY;
1001 	uint32 total_tries = 0;
1002 
1003 	ASSERT(msglen == ring->item_len);
1004 
1005 	BCM_REFERENCE(delay);
1006 	/*
1007 	 * For retries we have to make some sort of stepper algorithm.
1008 	 * We see that every time when the Dongle comes out of the D3
1009 	 * Cold state, the first D2H mem2mem DMA takes more time to
1010 	 * complete, leading to livelock issues.
1011 	 *
1012 	 * Case 1 - Apart from Host CPU some other bus master is
1013 	 * accessing the DDR port, probably page close to the ring
1014 	 * so, PCIE does not get a change to update the memory.
1015 	 * Solution - Increase the number of tries.
1016 	 *
1017 	 * Case 2 - The 50usec delay given by the Host CPU is not
1018 	 * sufficient for the PCIe RC to start its work.
1019 	 * In this case the breathing time of 50usec given by
1020 	 * the Host CPU is not sufficient.
1021 	 * Solution: Increase the delay in a stepper fashion.
1022 	 * This is done to ensure that there are no
1023 	 * unwanted extra delay introdcued in normal conditions.
1024 	 */
1025 	for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
1026 		for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
1027 			/* First verify if the seqnumber has been update,
1028 			 * if yes, then only check xorcsum.
1029 			 * Once seqnum and xorcsum is proper that means
1030 			 * complete message has arrived.
1031 			 */
1032 			if (msg->epoch == ring_seqnum) {
1033 				prot_checksum = bcm_compute_xor32((volatile uint32 *)msg,
1034 					num_words);
1035 				if (prot_checksum == 0U) { /* checksum is OK */
1036 					ring->seqnum++; /* next expected sequence number */
1037 					/* Check for LIVELOCK induce flag, which is set by firing
1038 					 * dhd iovar to induce LIVELOCK error. If flag is set,
1039 					 * MSG_TYPE_INVALID is returned, which results in to
1040 					 * LIVELOCK error.
1041 					 */
1042 					if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
1043 						goto dma_completed;
1044 					}
1045 				}
1046 			}
1047 
1048 			total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
1049 
1050 			if (total_tries > prot->d2h_sync_wait_max)
1051 				prot->d2h_sync_wait_max = total_tries;
1052 
1053 			OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
1054 			OSL_CPU_RELAX(); /* CPU relax for msg_seqnum  value to update */
1055 			OSL_DELAY(delay * step); /* Add stepper delay */
1056 
1057 		} /* for PCIE_D2H_SYNC_WAIT_TRIES */
1058 	} /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
1059 
1060 	DHD_ERROR(("%s: prot_checksum = 0x%x\n", __FUNCTION__, prot_checksum));
1061 	dhd_prot_d2h_sync_livelock(dhd, msg->epoch, ring, total_tries,
1062 		(volatile uchar *) msg, msglen);
1063 
1064 	ring->seqnum++; /* skip this message ... leak of a pktid */
1065 	return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
1066 
1067 dma_completed:
1068 
1069 	prot->d2h_sync_wait_tot += tries;
1070 	return msg->msg_type;
1071 }
1072 
1073 /**
1074  * dhd_prot_d2h_sync_none - Dongle ensure that the DMA will complete and host
1075  * need to try to sync. This noop sync handler will be bound when the dongle
1076  * advertises that neither the SEQNUM nor XORCSUM mode of DMA sync is required.
1077  */
1078 static uint8 BCMFASTPATH
dhd_prot_d2h_sync_none(dhd_pub_t * dhd,msgbuf_ring_t * ring,volatile cmn_msg_hdr_t * msg,int msglen)1079 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
1080                        volatile cmn_msg_hdr_t *msg, int msglen)
1081 {
1082 	/* Check for LIVELOCK induce flag, which is set by firing
1083 	* dhd iovar to induce LIVELOCK error. If flag is set,
1084 	* MSG_TYPE_INVALID is returned, which results in to LIVELOCK error.
1085 	*/
1086 	if (dhd->dhd_induce_error == DHD_INDUCE_LIVELOCK) {
1087 		DHD_ERROR(("%s: Inducing livelock\n", __FUNCTION__));
1088 		return MSG_TYPE_INVALID;
1089 	} else {
1090 		return msg->msg_type;
1091 	}
1092 }
1093 
1094 #ifdef EWP_EDL
1095 /**
1096  * dhd_prot_d2h_sync_edl - Sync on a D2H DMA completion by validating the cmn_msg_hdr_t
1097  * header values at both the beginning and end of the payload.
1098  * The cmn_msg_hdr_t is placed at the start and end of the payload
1099  * in each work item in the EDL ring.
1100  * Dongle will place a seqnum inside the cmn_msg_hdr_t 'epoch' field
1101  * and the length of the payload in the 'request_id' field.
1102  * Structure of each work item in the EDL ring:
1103  * | cmn_msg_hdr_t | payload (var len) | cmn_msg_hdr_t |
1104  * NOTE: - it was felt that calculating xorcsum for the entire payload (max length of 1648 bytes) is
1105  * too costly on the dongle side and might take up too many ARM cycles,
1106  * hence the xorcsum sync method is not being used for EDL ring.
1107  */
1108 static int
BCMFASTPATH(dhd_prot_d2h_sync_edl)1109 BCMFASTPATH(dhd_prot_d2h_sync_edl)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
1110                           volatile cmn_msg_hdr_t *msg)
1111 {
1112 	uint32 tries;
1113 	int msglen = 0, len = 0;
1114 	uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
1115 	dhd_prot_t *prot = dhd->prot;
1116 	uint32 step = 0;
1117 	uint32 delay = PCIE_D2H_SYNC_DELAY;
1118 	uint32 total_tries = 0;
1119 	volatile cmn_msg_hdr_t *trailer = NULL;
1120 	volatile uint8 *buf = NULL;
1121 	bool valid_msg = FALSE;
1122 
1123 	BCM_REFERENCE(delay);
1124 	/*
1125 	 * For retries we have to make some sort of stepper algorithm.
1126 	 * We see that every time when the Dongle comes out of the D3
1127 	 * Cold state, the first D2H mem2mem DMA takes more time to
1128 	 * complete, leading to livelock issues.
1129 	 *
1130 	 * Case 1 - Apart from Host CPU some other bus master is
1131 	 * accessing the DDR port, probably page close to the ring
1132 	 * so, PCIE does not get a change to update the memory.
1133 	 * Solution - Increase the number of tries.
1134 	 *
1135 	 * Case 2 - The 50usec delay given by the Host CPU is not
1136 	 * sufficient for the PCIe RC to start its work.
1137 	 * In this case the breathing time of 50usec given by
1138 	 * the Host CPU is not sufficient.
1139 	 * Solution: Increase the delay in a stepper fashion.
1140 	 * This is done to ensure that there are no
1141 	 * unwanted extra delay introdcued in normal conditions.
1142 	 */
1143 	for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
1144 		for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
1145 			/* First verify if the seqnumber has been updated,
1146 			 * if yes, only then validate the header and trailer.
1147 			 * Once seqnum, header and trailer have been validated, it means
1148 			 * that the complete message has arrived.
1149 			 */
1150 			valid_msg = FALSE;
1151 			if (msg->epoch == ring_seqnum &&
1152 				msg->msg_type == MSG_TYPE_INFO_PYLD &&
1153 				msg->request_id > 0 &&
1154 				msg->request_id <= ring->item_len) {
1155 				/* proceed to check trailer only if header is valid */
1156 				buf = (volatile uint8 *)msg;
1157 				msglen = sizeof(cmn_msg_hdr_t) + msg->request_id;
1158 				buf += msglen;
1159 				if (msglen + sizeof(cmn_msg_hdr_t) <= ring->item_len) {
1160 					trailer = (volatile cmn_msg_hdr_t *)buf;
1161 					valid_msg = (trailer->epoch == ring_seqnum) &&
1162 						(trailer->msg_type == msg->msg_type) &&
1163 						(trailer->request_id == msg->request_id);
1164 					if (!valid_msg) {
1165 						DHD_TRACE(("%s:invalid trailer! seqnum=%u;reqid=%u"
1166 						" expected, seqnum=%u; reqid=%u. Retrying... \n",
1167 						__FUNCTION__, trailer->epoch, trailer->request_id,
1168 						msg->epoch, msg->request_id));
1169 					}
1170 				} else {
1171 					DHD_TRACE(("%s: invalid payload length (%u)! Retrying.. \n",
1172 						__FUNCTION__, msg->request_id));
1173 				}
1174 
1175 				if (valid_msg) {
1176 					/* data is OK */
1177 					ring->seqnum++; /* next expected sequence number */
1178 					if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
1179 						goto dma_completed;
1180 					}
1181 				}
1182 			} else {
1183 				DHD_TRACE(("%s: wrong hdr, seqnum expected %u, got %u."
1184 					" msg_type=0x%x, request_id=%u."
1185 					" Retrying...\n",
1186 					__FUNCTION__, ring_seqnum, msg->epoch,
1187 					msg->msg_type, msg->request_id));
1188 			}
1189 
1190 			total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
1191 
1192 			if (total_tries > prot->d2h_sync_wait_max)
1193 				prot->d2h_sync_wait_max = total_tries;
1194 
1195 			OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
1196 			OSL_CPU_RELAX(); /* CPU relax for msg_seqnum  value to update */
1197 			OSL_DELAY(delay * step); /* Add stepper delay */
1198 
1199 		} /* for PCIE_D2H_SYNC_WAIT_TRIES */
1200 	} /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
1201 
1202 	DHD_ERROR(("%s: EDL header check fails !\n", __FUNCTION__));
1203 	DHD_ERROR(("%s: header: seqnum=%u; expected-seqnum=%u"
1204 		" msgtype=0x%x; expected-msgtype=0x%x"
1205 		" length=%u; expected-max-length=%u", __FUNCTION__,
1206 		msg->epoch, ring_seqnum, msg->msg_type, MSG_TYPE_INFO_PYLD,
1207 		msg->request_id, ring->item_len));
1208 	dhd_prhex("msg header bytes: ", (volatile uchar *)msg, sizeof(*msg), DHD_ERROR_VAL);
1209 	if (trailer && msglen > 0 &&
1210 			(msglen + sizeof(cmn_msg_hdr_t)) <= ring->item_len) {
1211 		DHD_ERROR(("%s: trailer: seqnum=%u; expected-seqnum=%u"
1212 			" msgtype=0x%x; expected-msgtype=0x%x"
1213 			" length=%u; expected-length=%u", __FUNCTION__,
1214 			trailer->epoch, ring_seqnum, trailer->msg_type, MSG_TYPE_INFO_PYLD,
1215 			trailer->request_id, msg->request_id));
1216 		dhd_prhex("msg trailer bytes: ", (volatile uchar *)trailer,
1217 			sizeof(*trailer), DHD_ERROR_VAL);
1218 	}
1219 
1220 	if ((msglen + sizeof(cmn_msg_hdr_t)) <= ring->item_len)
1221 		len = msglen + sizeof(cmn_msg_hdr_t);
1222 	else
1223 		len = ring->item_len;
1224 
1225 	dhd_prot_d2h_sync_livelock(dhd, msg->epoch, ring, total_tries,
1226 		(volatile uchar *) msg, len);
1227 
1228 	ring->seqnum++; /* skip this message */
1229 	return BCME_ERROR; /* invalid msg_type 0 -> noop callback */
1230 
1231 dma_completed:
1232 	DHD_TRACE(("%s: EDL header check pass, seqnum=%u; reqid=%u\n", __FUNCTION__,
1233 		msg->epoch, msg->request_id));
1234 
1235 	prot->d2h_sync_wait_tot += tries;
1236 	return BCME_OK;
1237 }
1238 
1239 /**
1240  * dhd_prot_d2h_sync_edl_none - Dongle ensure that the DMA will complete and host
1241  * need to try to sync. This noop sync handler will be bound when the dongle
1242  * advertises that neither the SEQNUM nor XORCSUM mode of DMA sync is required.
1243  */
1244 static int BCMFASTPATH
dhd_prot_d2h_sync_edl_none(dhd_pub_t * dhd,msgbuf_ring_t * ring,volatile cmn_msg_hdr_t * msg)1245 dhd_prot_d2h_sync_edl_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
1246                        volatile cmn_msg_hdr_t *msg)
1247 {
1248 	/* Check for LIVELOCK induce flag, which is set by firing
1249 	* dhd iovar to induce LIVELOCK error. If flag is set,
1250 	* MSG_TYPE_INVALID is returned, which results in to LIVELOCK error.
1251 	*/
1252 	if (dhd->dhd_induce_error == DHD_INDUCE_LIVELOCK) {
1253 		DHD_ERROR(("%s: Inducing livelock\n", __FUNCTION__));
1254 		return BCME_ERROR;
1255 	} else {
1256 		if (msg->msg_type == MSG_TYPE_INFO_PYLD)
1257 			return BCME_OK;
1258 		else
1259 			return msg->msg_type;
1260 	}
1261 }
1262 #endif /* EWP_EDL */
1263 
1264 INLINE void
dhd_wakeup_ioctl_event(dhd_pub_t * dhd,dhd_ioctl_recieved_status_t reason)1265 dhd_wakeup_ioctl_event(dhd_pub_t *dhd, dhd_ioctl_recieved_status_t reason)
1266 {
1267 	/* To synchronize with the previous memory operations call wmb() */
1268 	OSL_SMP_WMB();
1269 	dhd->prot->ioctl_received = reason;
1270 	/* Call another wmb() to make sure before waking up the other event value gets updated */
1271 	OSL_SMP_WMB();
1272 	dhd_os_ioctl_resp_wake(dhd);
1273 }
1274 
1275 /**
1276  * dhd_prot_d2h_sync_init - Setup the host side DMA sync mode based on what
1277  * dongle advertizes.
1278  */
1279 static void
dhd_prot_d2h_sync_init(dhd_pub_t * dhd)1280 dhd_prot_d2h_sync_init(dhd_pub_t *dhd)
1281 {
1282 	dhd_prot_t *prot = dhd->prot;
1283 	prot->d2h_sync_wait_max = 0UL;
1284 	prot->d2h_sync_wait_tot = 0UL;
1285 
1286 	prot->d2hring_ctrl_cpln.seqnum = D2H_EPOCH_INIT_VAL;
1287 	prot->d2hring_ctrl_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
1288 
1289 	prot->d2hring_tx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
1290 	prot->d2hring_tx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
1291 
1292 	prot->d2hring_rx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
1293 	prot->d2hring_rx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
1294 
1295 	if (HWA_ACTIVE(dhd)) {
1296 		prot->d2hring_tx_cpln.hwa_db_type =
1297 			(dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_TXCPLT) ? HWA_DB_TYPE_TXCPLT : 0;
1298 		prot->d2hring_rx_cpln.hwa_db_type =
1299 			(dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_RXCPLT) ? HWA_DB_TYPE_RXCPLT : 0;
1300 		DHD_ERROR(("%s: TXCPLT hwa_db_type:0x%x RXCPLT hwa_db_type:0x%x\n",
1301 			__FUNCTION__, prot->d2hring_tx_cpln.hwa_db_type,
1302 			prot->d2hring_rx_cpln.hwa_db_type));
1303 	}
1304 
1305 	if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) {
1306 		prot->d2h_sync_cb = dhd_prot_d2h_sync_seqnum;
1307 #ifdef EWP_EDL
1308 		prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl;
1309 #endif /* EWP_EDL */
1310 		DHD_ERROR(("%s(): D2H sync mechanism is SEQNUM \r\n", __FUNCTION__));
1311 	} else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) {
1312 		prot->d2h_sync_cb = dhd_prot_d2h_sync_xorcsum;
1313 #ifdef EWP_EDL
1314 		prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl;
1315 #endif /* EWP_EDL */
1316 		DHD_ERROR(("%s(): D2H sync mechanism is XORCSUM \r\n", __FUNCTION__));
1317 	} else {
1318 		prot->d2h_sync_cb = dhd_prot_d2h_sync_none;
1319 #ifdef EWP_EDL
1320 		prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl_none;
1321 #endif /* EWP_EDL */
1322 		DHD_ERROR(("%s(): D2H sync mechanism is NONE \r\n", __FUNCTION__));
1323 	}
1324 }
1325 
1326 /**
1327  * dhd_prot_h2d_sync_init - Per H2D common ring, setup the msgbuf ring seqnum
1328  */
1329 static void
dhd_prot_h2d_sync_init(dhd_pub_t * dhd)1330 dhd_prot_h2d_sync_init(dhd_pub_t *dhd)
1331 {
1332 	dhd_prot_t *prot = dhd->prot;
1333 	prot->h2dring_rxp_subn.seqnum = H2D_EPOCH_INIT_VAL;
1334 
1335 	if (HWA_ACTIVE(dhd)) {
1336 		prot->h2dring_rxp_subn.hwa_db_type =
1337 			(dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_RXPOST) ? HWA_DB_TYPE_RXPOST : 0;
1338 		DHD_ERROR(("%s: RXPOST hwa_db_type:0x%x\n",
1339 			__FUNCTION__, prot->d2hring_tx_cpln.hwa_db_type));
1340 	}
1341 
1342 	prot->h2dring_rxp_subn.current_phase = 0;
1343 
1344 	prot->h2dring_ctrl_subn.seqnum = H2D_EPOCH_INIT_VAL;
1345 	prot->h2dring_ctrl_subn.current_phase = 0;
1346 }
1347 
1348 /* +-----------------  End of PCIE DHD H2D DMA SYNC ------------------------+ */
1349 
1350 /*
1351  * +---------------------------------------------------------------------------+
1352  * PCIE DMA-able buffer. Sets up a dhd_dma_buf_t object, which includes the
1353  * virtual and physical address, the buffer lenght and the DMA handler.
1354  * A secdma handler is also included in the dhd_dma_buf object.
1355  * +---------------------------------------------------------------------------+
1356  */
1357 
1358 static INLINE void
dhd_base_addr_htolpa(sh_addr_t * base_addr,dmaaddr_t pa)1359 dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa)
1360 {
1361 	base_addr->low_addr = htol32(PHYSADDRLO(pa));
1362 	base_addr->high_addr = htol32(PHYSADDRHI(pa));
1363 }
1364 
1365 /**
1366  * dhd_dma_buf_audit - Any audits on a DHD DMA Buffer.
1367  */
1368 static int
dhd_dma_buf_audit(dhd_pub_t * dhd,dhd_dma_buf_t * dma_buf)1369 dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
1370 {
1371 	uint32 pa_lowaddr, end; /* dongle uses 32bit ptr arithmetic */
1372 	ASSERT(dma_buf);
1373 	pa_lowaddr = PHYSADDRLO(dma_buf->pa);
1374 	ASSERT(PHYSADDRLO(dma_buf->pa) || PHYSADDRHI(dma_buf->pa));
1375 	ASSERT(ISALIGNED(pa_lowaddr, DMA_ALIGN_LEN));
1376 	ASSERT(dma_buf->len != 0);
1377 
1378 	/* test 32bit offset arithmetic over dma buffer for loss of carry-over */
1379 	end = (pa_lowaddr + dma_buf->len); /* end address */
1380 
1381 	if ((end & 0xFFFFFFFF) < (pa_lowaddr & 0xFFFFFFFF)) { /* exclude carryover */
1382 		DHD_ERROR(("%s: dma_buf %x len %d spans dongle 32bit ptr arithmetic\n",
1383 			__FUNCTION__, pa_lowaddr, dma_buf->len));
1384 		return BCME_ERROR;
1385 	}
1386 
1387 	return BCME_OK;
1388 }
1389 
1390 /**
1391  * dhd_dma_buf_alloc - Allocate a cache coherent DMA-able buffer.
1392  * returns BCME_OK=0 on success
1393  * returns non-zero negative error value on failure.
1394  */
1395 int
dhd_dma_buf_alloc(dhd_pub_t * dhd,dhd_dma_buf_t * dma_buf,uint32 buf_len)1396 dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len)
1397 {
1398 	uint32 dma_pad = 0;
1399 	osl_t *osh = dhd->osh;
1400 	uint16 dma_align = DMA_ALIGN_LEN;
1401 	uint32 rem = 0;
1402 
1403 	ASSERT(dma_buf != NULL);
1404 	ASSERT(dma_buf->va == NULL);
1405 	ASSERT(dma_buf->len == 0);
1406 
1407 	/* Pad the buffer length to align to cacheline size. */
1408 	rem = (buf_len % DHD_DMA_PAD);
1409 	dma_pad = rem ? (DHD_DMA_PAD - rem) : 0;
1410 
1411 	dma_buf->va = DMA_ALLOC_CONSISTENT(osh, buf_len + dma_pad,
1412 		dma_align, &dma_buf->_alloced, &dma_buf->pa, &dma_buf->dmah);
1413 
1414 	if (dma_buf->va == NULL) {
1415 		DHD_ERROR(("%s: buf_len %d, no memory available\n",
1416 			__FUNCTION__, buf_len));
1417 		return BCME_NOMEM;
1418 	}
1419 
1420 	dma_buf->len = buf_len; /* not including padded len */
1421 
1422 	if (dhd_dma_buf_audit(dhd, dma_buf) != BCME_OK) { /* audit dma buf */
1423 		dhd_dma_buf_free(dhd, dma_buf);
1424 		return BCME_ERROR;
1425 	}
1426 
1427 	dhd_dma_buf_reset(dhd, dma_buf); /* zero out and cache flush */
1428 
1429 	return BCME_OK;
1430 }
1431 
1432 /**
1433  * dhd_dma_buf_reset - Reset a cache coherent DMA-able buffer.
1434  */
1435 static void
dhd_dma_buf_reset(dhd_pub_t * dhd,dhd_dma_buf_t * dma_buf)1436 dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
1437 {
1438 	if ((dma_buf == NULL) || (dma_buf->va == NULL))
1439 		return;
1440 
1441 	(void)dhd_dma_buf_audit(dhd, dma_buf);
1442 
1443 	/* Zero out the entire buffer and cache flush */
1444 	memset((void*)dma_buf->va, 0, dma_buf->len);
1445 	OSL_CACHE_FLUSH((void *)dma_buf->va, dma_buf->len);
1446 }
1447 
1448 /**
1449  * dhd_dma_buf_free - Free a DMA-able buffer that was previously allocated using
1450  * dhd_dma_buf_alloc().
1451  */
1452 void
dhd_dma_buf_free(dhd_pub_t * dhd,dhd_dma_buf_t * dma_buf)1453 dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
1454 {
1455 	osl_t *osh = dhd->osh;
1456 
1457 	ASSERT(dma_buf);
1458 
1459 	if (dma_buf->va == NULL)
1460 		return; /* Allow for free invocation, when alloc failed */
1461 
1462 	/* DEBUG: dhd_dma_buf_reset(dhd, dma_buf) */
1463 	(void)dhd_dma_buf_audit(dhd, dma_buf);
1464 
1465 	/* dma buffer may have been padded at allocation */
1466 	DMA_FREE_CONSISTENT(osh, dma_buf->va, dma_buf->_alloced,
1467 		dma_buf->pa, dma_buf->dmah);
1468 
1469 	memset(dma_buf, 0, sizeof(dhd_dma_buf_t));
1470 }
1471 
1472 /**
1473  * dhd_dma_buf_init - Initialize a dhd_dma_buf with speicifed values.
1474  * Do not use dhd_dma_buf_init to zero out a dhd_dma_buf_t object. Use memset 0.
1475  */
1476 void
dhd_dma_buf_init(dhd_pub_t * dhd,void * dhd_dma_buf,void * va,uint32 len,dmaaddr_t pa,void * dmah,void * secdma)1477 dhd_dma_buf_init(dhd_pub_t *dhd, void *dhd_dma_buf,
1478 	void *va, uint32 len, dmaaddr_t pa, void *dmah, void *secdma)
1479 {
1480 	dhd_dma_buf_t *dma_buf;
1481 	ASSERT(dhd_dma_buf);
1482 	dma_buf = (dhd_dma_buf_t *)dhd_dma_buf;
1483 	dma_buf->va = va;
1484 	dma_buf->len = len;
1485 	dma_buf->pa = pa;
1486 	dma_buf->dmah = dmah;
1487 	dma_buf->secdma = secdma;
1488 
1489 	/* Audit user defined configuration */
1490 	(void)dhd_dma_buf_audit(dhd, dma_buf);
1491 }
1492 
1493 /* +------------------  End of PCIE DHD DMA BUF ADT ------------------------+ */
1494 
1495 /*
1496  * +---------------------------------------------------------------------------+
1497  * DHD_MAP_PKTID_LOGGING
1498  * Logging the PKTID and DMA map/unmap information for the SMMU fault issue
1499  * debugging in customer platform.
1500  * +---------------------------------------------------------------------------+
1501  */
1502 
1503 #ifdef DHD_MAP_PKTID_LOGGING
1504 typedef struct dhd_pktid_log_item {
1505 	dmaaddr_t pa;		/* DMA bus address */
1506 	uint64 ts_nsec;		/* Timestamp: nsec */
1507 	uint32 size;		/* DMA map/unmap size */
1508 	uint32 pktid;		/* Packet ID */
1509 	uint8 pkttype;		/* Packet Type */
1510 	uint8 rsvd[7];		/* Reserved for future use */
1511 } dhd_pktid_log_item_t;
1512 
1513 typedef struct dhd_pktid_log {
1514 	uint32 items;		/* number of total items */
1515 	uint32 index;		/* index of pktid_log_item */
1516 	dhd_pktid_log_item_t map[0];	/* metadata storage */
1517 } dhd_pktid_log_t;
1518 
1519 typedef void * dhd_pktid_log_handle_t; /* opaque handle to pktid log */
1520 
1521 #define	MAX_PKTID_LOG				(2048)
1522 #define DHD_PKTID_LOG_ITEM_SZ			(sizeof(dhd_pktid_log_item_t))
1523 #define DHD_PKTID_LOG_SZ(items)			(uint32)((sizeof(dhd_pktid_log_t)) + \
1524 					((DHD_PKTID_LOG_ITEM_SZ) * (items)))
1525 
1526 #define DHD_PKTID_LOG_INIT(dhd, hdl)		dhd_pktid_logging_init((dhd), (hdl))
1527 #define DHD_PKTID_LOG_FINI(dhd, hdl)		dhd_pktid_logging_fini((dhd), (hdl))
1528 #define DHD_PKTID_LOG(dhd, hdl, pa, pktid, len, pkttype)	\
1529 	dhd_pktid_logging((dhd), (hdl), (pa), (pktid), (len), (pkttype))
1530 #define DHD_PKTID_LOG_DUMP(dhd)			dhd_pktid_logging_dump((dhd))
1531 
1532 static dhd_pktid_log_handle_t *
dhd_pktid_logging_init(dhd_pub_t * dhd,uint32 num_items)1533 dhd_pktid_logging_init(dhd_pub_t *dhd, uint32 num_items)
1534 {
1535 	dhd_pktid_log_t *log;
1536 	uint32 log_size;
1537 
1538 	log_size = DHD_PKTID_LOG_SZ(num_items);
1539 	log = (dhd_pktid_log_t *)MALLOCZ(dhd->osh, log_size);
1540 	if (log == NULL) {
1541 		DHD_ERROR(("%s: MALLOC failed for size %d\n",
1542 			__FUNCTION__, log_size));
1543 		return (dhd_pktid_log_handle_t *)NULL;
1544 	}
1545 
1546 	log->items = num_items;
1547 	log->index = 0;
1548 
1549 	return (dhd_pktid_log_handle_t *)log; /* opaque handle */
1550 }
1551 
1552 static void
dhd_pktid_logging_fini(dhd_pub_t * dhd,dhd_pktid_log_handle_t * handle)1553 dhd_pktid_logging_fini(dhd_pub_t *dhd, dhd_pktid_log_handle_t *handle)
1554 {
1555 	dhd_pktid_log_t *log;
1556 	uint32 log_size;
1557 
1558 	if (handle == NULL) {
1559 		DHD_ERROR(("%s: handle is NULL\n", __FUNCTION__));
1560 		return;
1561 	}
1562 
1563 	log = (dhd_pktid_log_t *)handle;
1564 	log_size = DHD_PKTID_LOG_SZ(log->items);
1565 	MFREE(dhd->osh, handle, log_size);
1566 }
1567 
1568 static void
dhd_pktid_logging(dhd_pub_t * dhd,dhd_pktid_log_handle_t * handle,dmaaddr_t pa,uint32 pktid,uint32 len,uint8 pkttype)1569 dhd_pktid_logging(dhd_pub_t *dhd, dhd_pktid_log_handle_t *handle, dmaaddr_t pa,
1570 	uint32 pktid, uint32 len, uint8 pkttype)
1571 {
1572 	dhd_pktid_log_t *log;
1573 	uint32 idx;
1574 
1575 	if (handle == NULL) {
1576 		DHD_ERROR(("%s: handle is NULL\n", __FUNCTION__));
1577 		return;
1578 	}
1579 
1580 	log = (dhd_pktid_log_t *)handle;
1581 	idx = log->index;
1582 	log->map[idx].ts_nsec = OSL_LOCALTIME_NS();
1583 	log->map[idx].pa = pa;
1584 	log->map[idx].pktid = pktid;
1585 	log->map[idx].size = len;
1586 	log->map[idx].pkttype = pkttype;
1587 	log->index = (idx + 1) % (log->items);	/* update index */
1588 }
1589 
1590 void
dhd_pktid_logging_dump(dhd_pub_t * dhd)1591 dhd_pktid_logging_dump(dhd_pub_t *dhd)
1592 {
1593 	dhd_prot_t *prot = dhd->prot;
1594 	dhd_pktid_log_t *map_log, *unmap_log;
1595 	uint64 ts_sec, ts_usec;
1596 
1597 	if (prot == NULL) {
1598 		DHD_ERROR(("%s: prot is NULL\n", __FUNCTION__));
1599 		return;
1600 	}
1601 
1602 	map_log = (dhd_pktid_log_t *)(prot->pktid_dma_map);
1603 	unmap_log = (dhd_pktid_log_t *)(prot->pktid_dma_unmap);
1604 	OSL_GET_LOCALTIME(&ts_sec, &ts_usec);
1605 	if (map_log && unmap_log) {
1606 		DHD_ERROR(("%s: map_idx=%d unmap_idx=%d "
1607 			"current time=[%5lu.%06lu]\n", __FUNCTION__,
1608 			map_log->index, unmap_log->index,
1609 			(unsigned long)ts_sec, (unsigned long)ts_usec));
1610 		DHD_ERROR(("%s: pktid_map_log(pa)=0x%llx size=%d, "
1611 			"pktid_unmap_log(pa)=0x%llx size=%d\n", __FUNCTION__,
1612 			(uint64)__virt_to_phys((ulong)(map_log->map)),
1613 			(uint32)(DHD_PKTID_LOG_ITEM_SZ * map_log->items),
1614 			(uint64)__virt_to_phys((ulong)(unmap_log->map)),
1615 			(uint32)(DHD_PKTID_LOG_ITEM_SZ * unmap_log->items)));
1616 	}
1617 }
1618 #endif /* DHD_MAP_PKTID_LOGGING */
1619 
1620 /* +-----------------  End of DHD_MAP_PKTID_LOGGING -----------------------+ */
1621 
1622 /*
1623  * +---------------------------------------------------------------------------+
1624  * PktId Map: Provides a native packet pointer to unique 32bit PktId mapping.
1625  * Main purpose is to save memory on the dongle, has other purposes as well.
1626  * The packet id map, also includes storage for some packet parameters that
1627  * may be saved. A native packet pointer along with the parameters may be saved
1628  * and a unique 32bit pkt id will be returned. Later, the saved packet pointer
1629  * and the metadata may be retrieved using the previously allocated packet id.
1630  * +---------------------------------------------------------------------------+
1631  */
1632 #define DHD_PCIE_PKTID
1633 #define MAX_CTRL_PKTID		(1024) /* Maximum number of pktids supported */
1634 #define MAX_RX_PKTID		(1024)
1635 #define MAX_TX_PKTID		(3072 * 12)
1636 
1637 /* On Router, the pktptr serves as a pktid. */
1638 
1639 #if defined(PROP_TXSTATUS) && !defined(DHD_PCIE_PKTID)
1640 #error "PKTIDMAP must be supported with PROP_TXSTATUS/WLFC"
1641 #endif // endif
1642 
1643 /* Enum for marking the buffer color based on usage */
1644 typedef enum dhd_pkttype {
1645 	PKTTYPE_DATA_TX = 0,
1646 	PKTTYPE_DATA_RX,
1647 	PKTTYPE_IOCTL_RX,
1648 	PKTTYPE_EVENT_RX,
1649 	PKTTYPE_INFO_RX,
1650 	/* dhd_prot_pkt_free no check, if pktid reserved and no space avail case */
1651 	PKTTYPE_NO_CHECK,
1652 	PKTTYPE_TSBUF_RX
1653 } dhd_pkttype_t;
1654 
1655 #define DHD_PKTID_MIN_AVAIL_COUNT		512U
1656 #define DHD_PKTID_DEPLETED_MAX_COUNT		(DHD_PKTID_MIN_AVAIL_COUNT * 2U)
1657 #define DHD_PKTID_INVALID			(0U)
1658 #define DHD_IOCTL_REQ_PKTID			(0xFFFE)
1659 #define DHD_FAKE_PKTID				(0xFACE)
1660 #define DHD_H2D_DBGRING_REQ_PKTID		0xFFFD
1661 #define DHD_D2H_DBGRING_REQ_PKTID		0xFFFC
1662 #define DHD_H2D_HOSTTS_REQ_PKTID		0xFFFB
1663 #define DHD_H2D_BTLOGRING_REQ_PKTID		0xFFFA
1664 #define DHD_D2H_BTLOGRING_REQ_PKTID		0xFFF9
1665 #define DHD_H2D_SNAPSHOT_UPLOAD_REQ_PKTID	0xFFF8
1666 #ifdef DHD_HP2P
1667 #define DHD_D2H_HPPRING_TXREQ_PKTID		0xFFF7
1668 #define DHD_D2H_HPPRING_RXREQ_PKTID		0xFFF6
1669 #endif /* DHD_HP2P */
1670 
1671 #define IS_FLOWRING(ring) \
1672 	((strncmp(ring->name, "h2dflr", sizeof("h2dflr"))) == (0))
1673 
1674 typedef void * dhd_pktid_map_handle_t; /* opaque handle to a pktid map */
1675 
1676 /* Construct a packet id mapping table, returning an opaque map handle */
1677 static dhd_pktid_map_handle_t *dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items);
1678 
1679 /* Destroy a packet id mapping table, freeing all packets active in the table */
1680 static void dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map);
1681 
1682 #define DHD_NATIVE_TO_PKTID_INIT(dhd, items) dhd_pktid_map_init((dhd), (items))
1683 #define DHD_NATIVE_TO_PKTID_RESET(dhd, map)  dhd_pktid_map_reset((dhd), (map))
1684 #define DHD_NATIVE_TO_PKTID_FINI(dhd, map)   dhd_pktid_map_fini((dhd), (map))
1685 #define DHD_NATIVE_TO_PKTID_FINI_IOCTL(osh, map)  dhd_pktid_map_fini_ioctl((osh), (map))
1686 
1687 #ifdef MACOSX_DHD
1688 #undef DHD_PCIE_PKTID
1689 #define DHD_PCIE_PKTID 1
1690 #endif /* MACOSX_DHD */
1691 
1692 #if defined(DHD_PCIE_PKTID)
1693 #if defined(MACOSX_DHD)
1694 #define IOCTLRESP_USE_CONSTMEM
1695 static void free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf);
1696 static int  alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf);
1697 #endif // endif
1698 
1699 /* Determine number of pktids that are available */
1700 static INLINE uint32 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle);
1701 
1702 /* Allocate a unique pktid against which a pkt and some metadata is saved */
1703 static INLINE uint32 dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
1704 	void *pkt, dhd_pkttype_t pkttype);
1705 static INLINE void dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
1706 	void *pkt, uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dma,
1707 	void *dmah, void *secdma, dhd_pkttype_t pkttype);
1708 static uint32 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
1709 	void *pkt, dmaaddr_t pa, uint32 len, uint8 dma,
1710 	void *dmah, void *secdma, dhd_pkttype_t pkttype);
1711 /* Return an allocated pktid, retrieving previously saved pkt and metadata */
1712 static void *dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
1713 	uint32 id, dmaaddr_t *pa, uint32 *len, void **dmah,
1714 	void **secdma, dhd_pkttype_t pkttype, bool rsv_locker);
1715 
1716 /*
1717  * DHD_PKTID_AUDIT_ENABLED: Audit of PktIds in DHD for duplicate alloc and frees
1718  *
1719  * DHD_PKTID_AUDIT_MAP: Audit the LIFO or FIFO PktIdMap allocator
1720  * DHD_PKTID_AUDIT_RING: Audit the pktid during producer/consumer ring operation
1721  *
1722  * CAUTION: When DHD_PKTID_AUDIT_ENABLED is defined,
1723  *    either DHD_PKTID_AUDIT_MAP or DHD_PKTID_AUDIT_RING may be selected.
1724  */
1725 #if defined(DHD_PKTID_AUDIT_ENABLED)
1726 #define USE_DHD_PKTID_AUDIT_LOCK 1
1727 /* Audit the pktidmap allocator */
1728 /* #define DHD_PKTID_AUDIT_MAP */
1729 
1730 /* Audit the pktid during production/consumption of workitems */
1731 #define DHD_PKTID_AUDIT_RING
1732 
1733 #if defined(DHD_PKTID_AUDIT_MAP) && defined(DHD_PKTID_AUDIT_RING)
1734 #error "May only enabled audit of MAP or RING, at a time."
1735 #endif /* DHD_PKTID_AUDIT_MAP && DHD_PKTID_AUDIT_RING */
1736 
1737 #define DHD_DUPLICATE_ALLOC     1
1738 #define DHD_DUPLICATE_FREE      2
1739 #define DHD_TEST_IS_ALLOC       3
1740 #define DHD_TEST_IS_FREE        4
1741 
1742 typedef enum dhd_pktid_map_type {
1743 	DHD_PKTID_MAP_TYPE_CTRL = 1,
1744 	DHD_PKTID_MAP_TYPE_TX,
1745 	DHD_PKTID_MAP_TYPE_RX,
1746 	DHD_PKTID_MAP_TYPE_UNKNOWN
1747 } dhd_pktid_map_type_t;
1748 
1749 #ifdef USE_DHD_PKTID_AUDIT_LOCK
1750 #define DHD_PKTID_AUDIT_LOCK_INIT(osh)          dhd_os_spin_lock_init(osh)
1751 #define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock)  dhd_os_spin_lock_deinit(osh, lock)
1752 #define DHD_PKTID_AUDIT_LOCK(lock)              dhd_os_spin_lock(lock)
1753 #define DHD_PKTID_AUDIT_UNLOCK(lock, flags)     dhd_os_spin_unlock(lock, flags)
1754 #else
1755 #define DHD_PKTID_AUDIT_LOCK_INIT(osh)          (void *)(1)
1756 #define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock)  do { /* noop */ } while (0)
1757 #define DHD_PKTID_AUDIT_LOCK(lock)              0
1758 #define DHD_PKTID_AUDIT_UNLOCK(lock, flags)     do { /* noop */ } while (0)
1759 #endif /* !USE_DHD_PKTID_AUDIT_LOCK */
1760 
1761 #endif /* DHD_PKTID_AUDIT_ENABLED */
1762 
1763 #define USE_DHD_PKTID_LOCK   1
1764 
1765 #ifdef USE_DHD_PKTID_LOCK
1766 #define DHD_PKTID_LOCK_INIT(osh)                dhd_os_spin_lock_init(osh)
1767 #define DHD_PKTID_LOCK_DEINIT(osh, lock)        dhd_os_spin_lock_deinit(osh, lock)
1768 #define DHD_PKTID_LOCK(lock, flags)             (flags) = dhd_os_spin_lock(lock)
1769 #define DHD_PKTID_UNLOCK(lock, flags)           dhd_os_spin_unlock(lock, flags)
1770 #else
1771 #define DHD_PKTID_LOCK_INIT(osh)                (void *)(1)
1772 #define DHD_PKTID_LOCK_DEINIT(osh, lock)	\
1773 	do { \
1774 		BCM_REFERENCE(osh); \
1775 		BCM_REFERENCE(lock); \
1776 	} while (0)
1777 #define DHD_PKTID_LOCK(lock)                    0
1778 #define DHD_PKTID_UNLOCK(lock, flags)           \
1779 	do { \
1780 		BCM_REFERENCE(lock); \
1781 		BCM_REFERENCE(flags); \
1782 	} while (0)
1783 #endif /* !USE_DHD_PKTID_LOCK */
1784 
1785 typedef enum dhd_locker_state {
1786 	LOCKER_IS_FREE,
1787 	LOCKER_IS_BUSY,
1788 	LOCKER_IS_RSVD
1789 } dhd_locker_state_t;
1790 
1791 /* Packet metadata saved in packet id mapper */
1792 
1793 typedef struct dhd_pktid_item {
1794 	dhd_locker_state_t state;  /* tag a locker to be free, busy or reserved */
1795 	uint8       dir;      /* dma map direction (Tx=flush or Rx=invalidate) */
1796 	dhd_pkttype_t pkttype; /* pktlists are maintained based on pkttype */
1797 	uint16      len;      /* length of mapped packet's buffer */
1798 	void        *pkt;     /* opaque native pointer to a packet */
1799 	dmaaddr_t   pa;       /* physical address of mapped packet's buffer */
1800 	void        *dmah;    /* handle to OS specific DMA map */
1801 	void		*secdma;
1802 } dhd_pktid_item_t;
1803 
1804 typedef uint32 dhd_pktid_key_t;
1805 
1806 typedef struct dhd_pktid_map {
1807 	uint32      items;    /* total items in map */
1808 	uint32      avail;    /* total available items */
1809 	int         failures; /* lockers unavailable count */
1810 	/* Spinlock to protect dhd_pktid_map in process/tasklet context */
1811 	void        *pktid_lock; /* Used when USE_DHD_PKTID_LOCK is defined */
1812 
1813 #if defined(DHD_PKTID_AUDIT_ENABLED)
1814 	void		*pktid_audit_lock;
1815 	struct bcm_mwbmap *pktid_audit; /* multi word bitmap based audit */
1816 #endif /* DHD_PKTID_AUDIT_ENABLED */
1817 	dhd_pktid_key_t	*keys; /* map_items +1 unique pkt ids */
1818 	dhd_pktid_item_t lockers[0];           /* metadata storage */
1819 } dhd_pktid_map_t;
1820 
1821 /*
1822  * PktId (Locker) #0 is never allocated and is considered invalid.
1823  *
1824  * On request for a pktid, a value DHD_PKTID_INVALID must be treated as a
1825  * depleted pktid pool and must not be used by the caller.
1826  *
1827  * Likewise, a caller must never free a pktid of value DHD_PKTID_INVALID.
1828  */
1829 
1830 #define DHD_PKTID_FREE_LOCKER           (FALSE)
1831 #define DHD_PKTID_RSV_LOCKER            (TRUE)
1832 
1833 #define DHD_PKTID_ITEM_SZ               (sizeof(dhd_pktid_item_t))
1834 #define DHD_PKIDMAP_ITEMS(items)        (items)
1835 #define DHD_PKTID_MAP_SZ(items)         (sizeof(dhd_pktid_map_t) + \
1836 	                                     (DHD_PKTID_ITEM_SZ * ((items) + 1)))
1837 #define DHD_PKTIDMAP_KEYS_SZ(items)     (sizeof(dhd_pktid_key_t) * ((items) + 1))
1838 
1839 #define DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, map)  dhd_pktid_map_reset_ioctl((dhd), (map))
1840 
1841 /* Convert a packet to a pktid, and save pkt pointer in busy locker */
1842 #define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype)    \
1843 	dhd_pktid_map_reserve((dhd), (map), (pkt), (pkttype))
1844 /* Reuse a previously reserved locker to save packet params */
1845 #define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dir, dmah, secdma, pkttype) \
1846 	dhd_pktid_map_save((dhd), (map), (void *)(pkt), (nkey), (pa), (uint32)(len), \
1847 		(uint8)(dir), (void *)(dmah), (void *)(secdma), \
1848 		(dhd_pkttype_t)(pkttype))
1849 /* Convert a packet to a pktid, and save packet params in locker */
1850 #define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dir, dmah, secdma, pkttype) \
1851 	dhd_pktid_map_alloc((dhd), (map), (void *)(pkt), (pa), (uint32)(len), \
1852 		(uint8)(dir), (void *)(dmah), (void *)(secdma), \
1853 		(dhd_pkttype_t)(pkttype))
1854 
1855 /* Convert pktid to a packet, and free the locker */
1856 #define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
1857 	dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
1858 		(dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
1859 		(void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_FREE_LOCKER)
1860 
1861 /* Convert the pktid to a packet, empty locker, but keep it reserved */
1862 #define DHD_PKTID_TO_NATIVE_RSV(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
1863 	dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
1864 	                   (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
1865 	                   (void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_RSV_LOCKER)
1866 
1867 #define DHD_PKTID_AVAIL(map)                 dhd_pktid_map_avail_cnt(map)
1868 
1869 #if defined(DHD_PKTID_AUDIT_ENABLED)
1870 
1871 static int
dhd_get_pktid_map_type(dhd_pub_t * dhd,dhd_pktid_map_t * pktid_map)1872 dhd_get_pktid_map_type(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map)
1873 {
1874 	dhd_prot_t *prot = dhd->prot;
1875 	int pktid_map_type;
1876 
1877 	if (pktid_map == prot->pktid_ctrl_map) {
1878 		pktid_map_type = DHD_PKTID_MAP_TYPE_CTRL;
1879 	} else if (pktid_map == prot->pktid_tx_map) {
1880 		pktid_map_type = DHD_PKTID_MAP_TYPE_TX;
1881 	} else if (pktid_map == prot->pktid_rx_map) {
1882 		pktid_map_type = DHD_PKTID_MAP_TYPE_RX;
1883 	} else {
1884 		pktid_map_type = DHD_PKTID_MAP_TYPE_UNKNOWN;
1885 	}
1886 
1887 	return pktid_map_type;
1888 }
1889 
1890 /**
1891 * __dhd_pktid_audit - Use the mwbmap to audit validity of a pktid.
1892 */
1893 static int
__dhd_pktid_audit(dhd_pub_t * dhd,dhd_pktid_map_t * pktid_map,uint32 pktid,const int test_for,const char * errmsg)1894 __dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
1895 	const int test_for, const char *errmsg)
1896 {
1897 #define DHD_PKT_AUDIT_STR "ERROR: %16s Host PktId Audit: "
1898 	struct bcm_mwbmap *handle;
1899 	uint32	flags;
1900 	bool ignore_audit;
1901 	int error = BCME_OK;
1902 
1903 	if (pktid_map == (dhd_pktid_map_t *)NULL) {
1904 		DHD_ERROR((DHD_PKT_AUDIT_STR "Pkt id map NULL\n", errmsg));
1905 		return BCME_OK;
1906 	}
1907 
1908 	flags = DHD_PKTID_AUDIT_LOCK(pktid_map->pktid_audit_lock);
1909 
1910 	handle = pktid_map->pktid_audit;
1911 	if (handle == (struct bcm_mwbmap *)NULL) {
1912 		DHD_ERROR((DHD_PKT_AUDIT_STR "Handle NULL\n", errmsg));
1913 		goto out;
1914 	}
1915 
1916 	/* Exclude special pktids from audit */
1917 	ignore_audit = (pktid == DHD_IOCTL_REQ_PKTID) | (pktid == DHD_FAKE_PKTID);
1918 	if (ignore_audit) {
1919 		goto out;
1920 	}
1921 
1922 	if ((pktid == DHD_PKTID_INVALID) || (pktid > pktid_map->items)) {
1923 		DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> invalid\n", errmsg, pktid));
1924 		error = BCME_ERROR;
1925 		goto out;
1926 	}
1927 
1928 	/* Perform audit */
1929 	switch (test_for) {
1930 		case DHD_DUPLICATE_ALLOC:
1931 			if (!bcm_mwbmap_isfree(handle, pktid)) {
1932 				DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> alloc duplicate\n",
1933 				           errmsg, pktid));
1934 				error = BCME_ERROR;
1935 			} else {
1936 				bcm_mwbmap_force(handle, pktid);
1937 			}
1938 			break;
1939 
1940 		case DHD_DUPLICATE_FREE:
1941 			if (bcm_mwbmap_isfree(handle, pktid)) {
1942 				DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> free duplicate\n",
1943 				           errmsg, pktid));
1944 				error = BCME_ERROR;
1945 			} else {
1946 				bcm_mwbmap_free(handle, pktid);
1947 			}
1948 			break;
1949 
1950 		case DHD_TEST_IS_ALLOC:
1951 			if (bcm_mwbmap_isfree(handle, pktid)) {
1952 				DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not allocated\n",
1953 				           errmsg, pktid));
1954 				error = BCME_ERROR;
1955 			}
1956 			break;
1957 
1958 		case DHD_TEST_IS_FREE:
1959 			if (!bcm_mwbmap_isfree(handle, pktid)) {
1960 				DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not free",
1961 				           errmsg, pktid));
1962 				error = BCME_ERROR;
1963 			}
1964 			break;
1965 
1966 		default:
1967 			DHD_ERROR(("%s: Invalid test case: %d\n", __FUNCTION__, test_for));
1968 			error = BCME_ERROR;
1969 			break;
1970 	}
1971 
1972 out:
1973 	DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
1974 
1975 	if (error != BCME_OK) {
1976 		dhd->pktid_audit_failed = TRUE;
1977 	}
1978 
1979 	return error;
1980 }
1981 
1982 static int
dhd_pktid_audit(dhd_pub_t * dhd,dhd_pktid_map_t * pktid_map,uint32 pktid,const int test_for,const char * errmsg)1983 dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
1984 	const int test_for, const char *errmsg)
1985 {
1986 	int ret = BCME_OK;
1987 	ret = __dhd_pktid_audit(dhd, pktid_map, pktid, test_for, errmsg);
1988 	if (ret == BCME_ERROR) {
1989 		DHD_ERROR(("%s: Got Pkt Id Audit failure: PKTID<%d> PKTID MAP TYPE<%d>\n",
1990 			__FUNCTION__, pktid, dhd_get_pktid_map_type(dhd, pktid_map)));
1991 		dhd_pktid_error_handler(dhd);
1992 	}
1993 
1994 	return ret;
1995 }
1996 
1997 #define DHD_PKTID_AUDIT(dhdp, map, pktid, test_for) \
1998 	dhd_pktid_audit((dhdp), (dhd_pktid_map_t *)(map), (pktid), (test_for), __FUNCTION__)
1999 
2000 static int
dhd_pktid_audit_ring_debug(dhd_pub_t * dhdp,dhd_pktid_map_t * map,uint32 pktid,const int test_for,void * msg,uint32 msg_len,const char * func)2001 dhd_pktid_audit_ring_debug(dhd_pub_t *dhdp, dhd_pktid_map_t *map, uint32 pktid,
2002 	const int test_for, void *msg, uint32 msg_len, const char *func)
2003 {
2004 	int ret = BCME_OK;
2005 
2006 	if (dhd_query_bus_erros(dhdp)) {
2007 		return BCME_ERROR;
2008 	}
2009 
2010 	ret = __dhd_pktid_audit(dhdp, map, pktid, test_for, func);
2011 	if (ret == BCME_ERROR) {
2012 		DHD_ERROR(("%s: Got Pkt Id Audit failure: PKTID<%d> PKTID MAP TYPE<%d>\n",
2013 			__FUNCTION__, pktid, dhd_get_pktid_map_type(dhdp, map)));
2014 		prhex(func, (uchar *)msg, msg_len);
2015 		dhd_pktid_error_handler(dhdp);
2016 	}
2017 	return ret;
2018 }
2019 #define DHD_PKTID_AUDIT_RING_DEBUG(dhdp, map, pktid, test_for, msg, msg_len) \
2020 	dhd_pktid_audit_ring_debug((dhdp), (dhd_pktid_map_t *)(map), \
2021 		(pktid), (test_for), msg, msg_len, __FUNCTION__)
2022 
2023 #endif /* DHD_PKTID_AUDIT_ENABLED */
2024 
2025 /**
2026  * +---------------------------------------------------------------------------+
2027  * Packet to Packet Id mapper using a <numbered_key, locker> paradigm.
2028  *
2029  * dhd_pktid_map manages a set of unique Packet Ids range[1..MAX_xxx_PKTID].
2030  *
2031  * dhd_pktid_map_alloc() may be used to save some packet metadata, and a unique
2032  * packet id is returned. This unique packet id may be used to retrieve the
2033  * previously saved packet metadata, using dhd_pktid_map_free(). On invocation
2034  * of dhd_pktid_map_free(), the unique packet id is essentially freed. A
2035  * subsequent call to dhd_pktid_map_alloc() may reuse this packet id.
2036  *
2037  * Implementation Note:
2038  * Convert this into a <key,locker> abstraction and place into bcmutils !
2039  * Locker abstraction should treat contents as opaque storage, and a
2040  * callback should be registered to handle busy lockers on destructor.
2041  *
2042  * +---------------------------------------------------------------------------+
2043  */
2044 
2045 /** Allocate and initialize a mapper of num_items <numbered_key, locker> */
2046 
2047 static dhd_pktid_map_handle_t *
dhd_pktid_map_init(dhd_pub_t * dhd,uint32 num_items)2048 dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items)
2049 {
2050 	void* osh;
2051 	uint32 nkey;
2052 	dhd_pktid_map_t *map;
2053 	uint32 dhd_pktid_map_sz;
2054 	uint32 map_items;
2055 	uint32 map_keys_sz;
2056 	osh = dhd->osh;
2057 
2058 	dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(num_items);
2059 
2060 	map = (dhd_pktid_map_t *)VMALLOC(osh, dhd_pktid_map_sz);
2061 	if (map == NULL) {
2062 		DHD_ERROR(("%s:%d: MALLOC failed for size %d\n",
2063 			__FUNCTION__, __LINE__, dhd_pktid_map_sz));
2064 		return (dhd_pktid_map_handle_t *)NULL;
2065 	}
2066 
2067 	map->items = num_items;
2068 	map->avail = num_items;
2069 
2070 	map_items = DHD_PKIDMAP_ITEMS(map->items);
2071 
2072 	map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
2073 
2074 	/* Initialize the lock that protects this structure */
2075 	map->pktid_lock = DHD_PKTID_LOCK_INIT(osh);
2076 	if (map->pktid_lock == NULL) {
2077 		DHD_ERROR(("%s:%d: Lock init failed \r\n", __FUNCTION__, __LINE__));
2078 		goto error;
2079 	}
2080 
2081 	map->keys = (dhd_pktid_key_t *)MALLOC(osh, map_keys_sz);
2082 	if (map->keys == NULL) {
2083 		DHD_ERROR(("%s:%d: MALLOC failed for map->keys size %d\n",
2084 			__FUNCTION__, __LINE__, map_keys_sz));
2085 		goto error;
2086 	}
2087 
2088 #if defined(DHD_PKTID_AUDIT_ENABLED)
2089 		/* Incarnate a hierarchical multiword bitmap for auditing pktid allocator */
2090 		map->pktid_audit = bcm_mwbmap_init(osh, map_items + 1);
2091 		if (map->pktid_audit == (struct bcm_mwbmap *)NULL) {
2092 			DHD_ERROR(("%s:%d: pktid_audit init failed\r\n", __FUNCTION__, __LINE__));
2093 			goto error;
2094 		} else {
2095 			DHD_ERROR(("%s:%d: pktid_audit init succeeded %d\n",
2096 				__FUNCTION__, __LINE__, map_items + 1));
2097 		}
2098 		map->pktid_audit_lock = DHD_PKTID_AUDIT_LOCK_INIT(osh);
2099 #endif /* DHD_PKTID_AUDIT_ENABLED */
2100 
2101 	for (nkey = 1; nkey <= map_items; nkey++) { /* locker #0 is reserved */
2102 		map->keys[nkey] = nkey; /* populate with unique keys */
2103 		map->lockers[nkey].state = LOCKER_IS_FREE;
2104 		map->lockers[nkey].pkt   = NULL; /* bzero: redundant */
2105 		map->lockers[nkey].len   = 0;
2106 	}
2107 
2108 	/* Reserve pktid #0, i.e. DHD_PKTID_INVALID to be inuse */
2109 	map->lockers[DHD_PKTID_INVALID].state = LOCKER_IS_BUSY; /* tag locker #0 as inuse */
2110 	map->lockers[DHD_PKTID_INVALID].pkt   = NULL; /* bzero: redundant */
2111 	map->lockers[DHD_PKTID_INVALID].len   = 0;
2112 
2113 #if defined(DHD_PKTID_AUDIT_ENABLED)
2114 	/* do not use dhd_pktid_audit() here, use bcm_mwbmap_force directly */
2115 	bcm_mwbmap_force(map->pktid_audit, DHD_PKTID_INVALID);
2116 #endif /* DHD_PKTID_AUDIT_ENABLED */
2117 
2118 	return (dhd_pktid_map_handle_t *)map; /* opaque handle */
2119 
2120 error:
2121 	if (map) {
2122 #if defined(DHD_PKTID_AUDIT_ENABLED)
2123 		if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
2124 			bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */
2125 			map->pktid_audit = (struct bcm_mwbmap *)NULL;
2126 			if (map->pktid_audit_lock)
2127 				DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock);
2128 		}
2129 #endif /* DHD_PKTID_AUDIT_ENABLED */
2130 
2131 		if (map->keys) {
2132 			MFREE(osh, map->keys, map_keys_sz);
2133 		}
2134 
2135 		if (map->pktid_lock) {
2136 			DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock);
2137 		}
2138 
2139 		VMFREE(osh, map, dhd_pktid_map_sz);
2140 	}
2141 	return (dhd_pktid_map_handle_t *)NULL;
2142 }
2143 
2144 /**
2145  * Retrieve all allocated keys and free all <numbered_key, locker>.
2146  * Freeing implies: unmapping the buffers and freeing the native packet
2147  * This could have been a callback registered with the pktid mapper.
2148  */
2149 static void
dhd_pktid_map_reset(dhd_pub_t * dhd,dhd_pktid_map_handle_t * handle)2150 dhd_pktid_map_reset(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
2151 {
2152 	void *osh;
2153 	uint32 nkey;
2154 	dhd_pktid_map_t *map;
2155 	dhd_pktid_item_t *locker;
2156 	uint32 map_items;
2157 	unsigned long flags;
2158 	bool data_tx = FALSE;
2159 
2160 	map = (dhd_pktid_map_t *)handle;
2161 	DHD_PKTID_LOCK(map->pktid_lock, flags);
2162 	osh = dhd->osh;
2163 
2164 	map_items = DHD_PKIDMAP_ITEMS(map->items);
2165 	/* skip reserved KEY #0, and start from 1 */
2166 
2167 	for (nkey = 1; nkey <= map_items; nkey++) {
2168 		if (map->lockers[nkey].state == LOCKER_IS_BUSY) {
2169 			locker = &map->lockers[nkey];
2170 			locker->state = LOCKER_IS_FREE;
2171 			data_tx = (locker->pkttype == PKTTYPE_DATA_TX);
2172 			if (data_tx) {
2173 				OSL_ATOMIC_DEC(dhd->osh, &dhd->prot->active_tx_count);
2174 			}
2175 
2176 #ifdef DHD_PKTID_AUDIT_RING
2177 			DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
2178 #endif /* DHD_PKTID_AUDIT_RING */
2179 #ifdef DHD_MAP_PKTID_LOGGING
2180 			DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap,
2181 				locker->pa, nkey, locker->len,
2182 				locker->pkttype);
2183 #endif /* DHD_MAP_PKTID_LOGGING */
2184 
2185 			{
2186 				if (SECURE_DMA_ENAB(dhd->osh))
2187 					SECURE_DMA_UNMAP(osh, locker->pa,
2188 						locker->len, locker->dir, 0,
2189 						locker->dmah, locker->secdma, 0);
2190 				else
2191 					DMA_UNMAP(osh, locker->pa, locker->len,
2192 						locker->dir, 0, locker->dmah);
2193 			}
2194 			dhd_prot_packet_free(dhd, (ulong*)locker->pkt,
2195 				locker->pkttype, data_tx);
2196 		}
2197 		else {
2198 #ifdef DHD_PKTID_AUDIT_RING
2199 			DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
2200 #endif /* DHD_PKTID_AUDIT_RING */
2201 		}
2202 		map->keys[nkey] = nkey; /* populate with unique keys */
2203 	}
2204 
2205 	map->avail = map_items;
2206 	memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items);
2207 	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2208 }
2209 
2210 #ifdef IOCTLRESP_USE_CONSTMEM
2211 /** Called in detach scenario. Releasing IOCTL buffers. */
2212 static void
dhd_pktid_map_reset_ioctl(dhd_pub_t * dhd,dhd_pktid_map_handle_t * handle)2213 dhd_pktid_map_reset_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
2214 {
2215 	uint32 nkey;
2216 	dhd_pktid_map_t *map;
2217 	dhd_pktid_item_t *locker;
2218 	uint32 map_items;
2219 	unsigned long flags;
2220 
2221 	map = (dhd_pktid_map_t *)handle;
2222 	DHD_PKTID_LOCK(map->pktid_lock, flags);
2223 
2224 	map_items = DHD_PKIDMAP_ITEMS(map->items);
2225 	/* skip reserved KEY #0, and start from 1 */
2226 	for (nkey = 1; nkey <= map_items; nkey++) {
2227 		if (map->lockers[nkey].state == LOCKER_IS_BUSY) {
2228 			dhd_dma_buf_t retbuf;
2229 
2230 #ifdef DHD_PKTID_AUDIT_RING
2231 			DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
2232 #endif /* DHD_PKTID_AUDIT_RING */
2233 
2234 			locker = &map->lockers[nkey];
2235 			retbuf.va = locker->pkt;
2236 			retbuf.len = locker->len;
2237 			retbuf.pa = locker->pa;
2238 			retbuf.dmah = locker->dmah;
2239 			retbuf.secdma = locker->secdma;
2240 
2241 			free_ioctl_return_buffer(dhd, &retbuf);
2242 		}
2243 		else {
2244 #ifdef DHD_PKTID_AUDIT_RING
2245 			DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
2246 #endif /* DHD_PKTID_AUDIT_RING */
2247 		}
2248 		map->keys[nkey] = nkey; /* populate with unique keys */
2249 	}
2250 
2251 	map->avail = map_items;
2252 	memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items);
2253 	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2254 }
2255 #endif /* IOCTLRESP_USE_CONSTMEM */
2256 
2257 /**
2258  * Free the pktid map.
2259  */
2260 static void
dhd_pktid_map_fini(dhd_pub_t * dhd,dhd_pktid_map_handle_t * handle)2261 dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
2262 {
2263 	dhd_pktid_map_t *map;
2264 	uint32 dhd_pktid_map_sz;
2265 	uint32 map_keys_sz;
2266 
2267 	if (handle == NULL)
2268 		return;
2269 
2270 	/* Free any pending packets */
2271 	dhd_pktid_map_reset(dhd, handle);
2272 
2273 	map = (dhd_pktid_map_t *)handle;
2274 	dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
2275 	map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
2276 
2277 	DHD_PKTID_LOCK_DEINIT(dhd->osh, map->pktid_lock);
2278 
2279 #if defined(DHD_PKTID_AUDIT_ENABLED)
2280 	if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
2281 		bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */
2282 		map->pktid_audit = (struct bcm_mwbmap *)NULL;
2283 		if (map->pktid_audit_lock) {
2284 			DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock);
2285 		}
2286 	}
2287 #endif /* DHD_PKTID_AUDIT_ENABLED */
2288 	MFREE(dhd->osh, map->keys, map_keys_sz);
2289 	VMFREE(dhd->osh, handle, dhd_pktid_map_sz);
2290 }
2291 
2292 #ifdef IOCTLRESP_USE_CONSTMEM
2293 static void
dhd_pktid_map_fini_ioctl(dhd_pub_t * dhd,dhd_pktid_map_handle_t * handle)2294 dhd_pktid_map_fini_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
2295 {
2296 	dhd_pktid_map_t *map;
2297 	uint32 dhd_pktid_map_sz;
2298 	uint32 map_keys_sz;
2299 
2300 	if (handle == NULL)
2301 		return;
2302 
2303 	/* Free any pending packets */
2304 	dhd_pktid_map_reset_ioctl(dhd, handle);
2305 
2306 	map = (dhd_pktid_map_t *)handle;
2307 	dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
2308 	map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
2309 
2310 	DHD_PKTID_LOCK_DEINIT(dhd->osh, map->pktid_lock);
2311 
2312 #if defined(DHD_PKTID_AUDIT_ENABLED)
2313 	if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
2314 		bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */
2315 		map->pktid_audit = (struct bcm_mwbmap *)NULL;
2316 		if (map->pktid_audit_lock) {
2317 			DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock);
2318 		}
2319 	}
2320 #endif /* DHD_PKTID_AUDIT_ENABLED */
2321 
2322 	MFREE(dhd->osh, map->keys, map_keys_sz);
2323 	VMFREE(dhd->osh, handle, dhd_pktid_map_sz);
2324 }
2325 #endif /* IOCTLRESP_USE_CONSTMEM */
2326 
2327 /** Get the pktid free count */
2328 static INLINE uint32 BCMFASTPATH
dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t * handle)2329 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle)
2330 {
2331 	dhd_pktid_map_t *map;
2332 	uint32	avail;
2333 	unsigned long flags;
2334 
2335 	ASSERT(handle != NULL);
2336 	map = (dhd_pktid_map_t *)handle;
2337 
2338 	DHD_PKTID_LOCK(map->pktid_lock, flags);
2339 	avail = map->avail;
2340 	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2341 
2342 	return avail;
2343 }
2344 
2345 /**
2346  * dhd_pktid_map_reserve - reserve a unique numbered key. Reserved locker is not
2347  * yet populated. Invoke the pktid save api to populate the packet parameters
2348  * into the locker. This function is not reentrant, and is the caller's
2349  * responsibility. Caller must treat a returned value DHD_PKTID_INVALID as
2350  * a failure case, implying a depleted pool of pktids.
2351  */
2352 static INLINE uint32
dhd_pktid_map_reserve(dhd_pub_t * dhd,dhd_pktid_map_handle_t * handle,void * pkt,dhd_pkttype_t pkttype)2353 dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
2354 	void *pkt, dhd_pkttype_t pkttype)
2355 {
2356 	uint32 nkey;
2357 	dhd_pktid_map_t *map;
2358 	dhd_pktid_item_t *locker;
2359 	unsigned long flags;
2360 
2361 	ASSERT(handle != NULL);
2362 	map = (dhd_pktid_map_t *)handle;
2363 
2364 	DHD_PKTID_LOCK(map->pktid_lock, flags);
2365 
2366 	if ((int)(map->avail) <= 0) { /* no more pktids to allocate */
2367 		map->failures++;
2368 		DHD_INFO(("%s:%d: failed, no free keys\n", __FUNCTION__, __LINE__));
2369 		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2370 		return DHD_PKTID_INVALID; /* failed alloc request */
2371 	}
2372 
2373 	ASSERT(map->avail <= map->items);
2374 	nkey = map->keys[map->avail]; /* fetch a free locker, pop stack */
2375 
2376 	if ((map->avail > map->items) || (nkey > map->items)) {
2377 		map->failures++;
2378 		DHD_ERROR(("%s:%d: failed to allocate a new pktid,"
2379 			" map->avail<%u>, nkey<%u>, pkttype<%u>\n",
2380 			__FUNCTION__, __LINE__, map->avail, nkey,
2381 			pkttype));
2382 		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2383 		return DHD_PKTID_INVALID; /* failed alloc request */
2384 	}
2385 
2386 	locker = &map->lockers[nkey]; /* save packet metadata in locker */
2387 	map->avail--;
2388 	locker->pkt = pkt; /* pkt is saved, other params not yet saved. */
2389 	locker->len = 0;
2390 	locker->state = LOCKER_IS_BUSY; /* reserve this locker */
2391 
2392 	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2393 
2394 	ASSERT(nkey != DHD_PKTID_INVALID);
2395 
2396 	return nkey; /* return locker's numbered key */
2397 }
2398 
2399 /*
2400  * dhd_pktid_map_save - Save a packet's parameters into a locker
2401  * corresponding to a previously reserved unique numbered key.
2402  */
2403 static INLINE void
dhd_pktid_map_save(dhd_pub_t * dhd,dhd_pktid_map_handle_t * handle,void * pkt,uint32 nkey,dmaaddr_t pa,uint32 len,uint8 dir,void * dmah,void * secdma,dhd_pkttype_t pkttype)2404 dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
2405 	uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
2406 	dhd_pkttype_t pkttype)
2407 {
2408 	dhd_pktid_map_t *map;
2409 	dhd_pktid_item_t *locker;
2410 	unsigned long flags;
2411 
2412 	ASSERT(handle != NULL);
2413 	map = (dhd_pktid_map_t *)handle;
2414 
2415 	DHD_PKTID_LOCK(map->pktid_lock, flags);
2416 
2417 	if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
2418 		DHD_ERROR(("%s:%d: Error! saving invalid pktid<%u> pkttype<%u>\n",
2419 			__FUNCTION__, __LINE__, nkey, pkttype));
2420 		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2421 #ifdef DHD_FW_COREDUMP
2422 		if (dhd->memdump_enabled) {
2423 			/* collect core dump */
2424 			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2425 			dhd_bus_mem_dump(dhd);
2426 		}
2427 #else
2428 		ASSERT(0);
2429 #endif /* DHD_FW_COREDUMP */
2430 		return;
2431 	}
2432 
2433 	locker = &map->lockers[nkey];
2434 
2435 	ASSERT(((locker->state == LOCKER_IS_BUSY) && (locker->pkt == pkt)) ||
2436 		((locker->state == LOCKER_IS_RSVD) && (locker->pkt == NULL)));
2437 
2438 	/* store contents in locker */
2439 	locker->dir = dir;
2440 	locker->pa = pa;
2441 	locker->len = (uint16)len; /* 16bit len */
2442 	locker->dmah = dmah; /* 16bit len */
2443 	locker->secdma = secdma;
2444 	locker->pkttype = pkttype;
2445 	locker->pkt = pkt;
2446 	locker->state = LOCKER_IS_BUSY; /* make this locker busy */
2447 #ifdef DHD_MAP_PKTID_LOGGING
2448 	DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_map, pa, nkey, len, pkttype);
2449 #endif /* DHD_MAP_PKTID_LOGGING */
2450 	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2451 }
2452 
2453 /**
2454  * dhd_pktid_map_alloc - Allocate a unique numbered key and save the packet
2455  * contents into the corresponding locker. Return the numbered key.
2456  */
2457 static uint32 BCMFASTPATH
dhd_pktid_map_alloc(dhd_pub_t * dhd,dhd_pktid_map_handle_t * handle,void * pkt,dmaaddr_t pa,uint32 len,uint8 dir,void * dmah,void * secdma,dhd_pkttype_t pkttype)2458 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
2459 	dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
2460 	dhd_pkttype_t pkttype)
2461 {
2462 	uint32 nkey;
2463 
2464 	nkey = dhd_pktid_map_reserve(dhd, handle, pkt, pkttype);
2465 	if (nkey != DHD_PKTID_INVALID) {
2466 		dhd_pktid_map_save(dhd, handle, pkt, nkey, pa,
2467 			len, dir, dmah, secdma, pkttype);
2468 	}
2469 
2470 	return nkey;
2471 }
2472 
2473 /**
2474  * dhd_pktid_map_free - Given a numbered key, return the locker contents.
2475  * dhd_pktid_map_free() is not reentrant, and is the caller's responsibility.
2476  * Caller may not free a pktid value DHD_PKTID_INVALID or an arbitrary pktid
2477  * value. Only a previously allocated pktid may be freed.
2478  */
2479 static void * BCMFASTPATH
dhd_pktid_map_free(dhd_pub_t * dhd,dhd_pktid_map_handle_t * handle,uint32 nkey,dmaaddr_t * pa,uint32 * len,void ** dmah,void ** secdma,dhd_pkttype_t pkttype,bool rsv_locker)2480 dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, uint32 nkey,
2481 	dmaaddr_t *pa, uint32 *len, void **dmah, void **secdma, dhd_pkttype_t pkttype,
2482 	bool rsv_locker)
2483 {
2484 	dhd_pktid_map_t *map;
2485 	dhd_pktid_item_t *locker;
2486 	void * pkt;
2487 	unsigned long long locker_addr;
2488 	unsigned long flags;
2489 
2490 	ASSERT(handle != NULL);
2491 
2492 	map = (dhd_pktid_map_t *)handle;
2493 
2494 	DHD_PKTID_LOCK(map->pktid_lock, flags);
2495 
2496 	if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
2497 		DHD_ERROR(("%s:%d: Error! Try to free invalid pktid<%u>, pkttype<%d>\n",
2498 		           __FUNCTION__, __LINE__, nkey, pkttype));
2499 		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2500 #ifdef DHD_FW_COREDUMP
2501 		if (dhd->memdump_enabled) {
2502 			/* collect core dump */
2503 			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2504 			dhd_bus_mem_dump(dhd);
2505 		}
2506 #else
2507 		ASSERT(0);
2508 #endif /* DHD_FW_COREDUMP */
2509 		return NULL;
2510 	}
2511 
2512 	locker = &map->lockers[nkey];
2513 
2514 #if defined(DHD_PKTID_AUDIT_MAP)
2515 	DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* Audit duplicate FREE */
2516 #endif /* DHD_PKTID_AUDIT_MAP */
2517 
2518 	/* Debug check for cloned numbered key */
2519 	if (locker->state == LOCKER_IS_FREE) {
2520 		DHD_ERROR(("%s:%d: Error! freeing already freed invalid pktid<%u>\n",
2521 		           __FUNCTION__, __LINE__, nkey));
2522 		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2523 #ifdef DHD_FW_COREDUMP
2524 		if (dhd->memdump_enabled) {
2525 			/* collect core dump */
2526 			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2527 			dhd_bus_mem_dump(dhd);
2528 		}
2529 #else
2530 		ASSERT(0);
2531 #endif /* DHD_FW_COREDUMP */
2532 		return NULL;
2533 	}
2534 
2535 	/* Check for the colour of the buffer i.e The buffer posted for TX,
2536 	 * should be freed for TX completion. Similarly the buffer posted for
2537 	 * IOCTL should be freed for IOCT completion etc.
2538 	 */
2539 	if ((pkttype != PKTTYPE_NO_CHECK) && (locker->pkttype != pkttype)) {
2540 
2541 		DHD_ERROR(("%s:%d: Error! Invalid Buffer Free for pktid<%u> \n",
2542 			__FUNCTION__, __LINE__, nkey));
2543 #ifdef BCMDMA64OSL
2544 		PHYSADDRTOULONG(locker->pa, locker_addr);
2545 #else
2546 		locker_addr = PHYSADDRLO(locker->pa);
2547 #endif /* BCMDMA64OSL */
2548 		DHD_ERROR(("%s:%d: locker->state <%d>, locker->pkttype <%d>,"
2549 			"pkttype <%d> locker->pa <0x%llx> \n",
2550 			__FUNCTION__, __LINE__, locker->state, locker->pkttype,
2551 			pkttype, locker_addr));
2552 		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2553 #ifdef DHD_FW_COREDUMP
2554 		if (dhd->memdump_enabled) {
2555 			/* collect core dump */
2556 			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2557 			dhd_bus_mem_dump(dhd);
2558 		}
2559 #else
2560 		ASSERT(0);
2561 #endif /* DHD_FW_COREDUMP */
2562 		return NULL;
2563 	}
2564 
2565 	if (rsv_locker == DHD_PKTID_FREE_LOCKER) {
2566 		map->avail++;
2567 		map->keys[map->avail] = nkey; /* make this numbered key available */
2568 		locker->state = LOCKER_IS_FREE; /* open and free Locker */
2569 	} else {
2570 		/* pktid will be reused, but the locker does not have a valid pkt */
2571 		locker->state = LOCKER_IS_RSVD;
2572 	}
2573 
2574 #if defined(DHD_PKTID_AUDIT_MAP)
2575 	DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
2576 #endif /* DHD_PKTID_AUDIT_MAP */
2577 #ifdef DHD_MAP_PKTID_LOGGING
2578 	DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap, locker->pa, nkey,
2579 		(uint32)locker->len, pkttype);
2580 #endif /* DHD_MAP_PKTID_LOGGING */
2581 
2582 	*pa = locker->pa; /* return contents of locker */
2583 	*len = (uint32)locker->len;
2584 	*dmah = locker->dmah;
2585 	*secdma = locker->secdma;
2586 
2587 	pkt = locker->pkt;
2588 	locker->pkt = NULL; /* Clear pkt */
2589 	locker->len = 0;
2590 
2591 	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2592 
2593 	return pkt;
2594 }
2595 
2596 #else /* ! DHD_PCIE_PKTID */
2597 
2598 typedef struct pktlist {
2599 	PKT_LIST *tx_pkt_list;		/* list for tx packets */
2600 	PKT_LIST *rx_pkt_list;		/* list for rx packets */
2601 	PKT_LIST *ctrl_pkt_list;	/* list for ioctl/event buf post */
2602 } pktlists_t;
2603 
2604 /*
2605  * Given that each workitem only uses a 32bit pktid, only 32bit hosts may avail
2606  * of a one to one mapping 32bit pktptr and a 32bit pktid.
2607  *
2608  * - When PKTIDMAP is not used, DHD_NATIVE_TO_PKTID variants will never fail.
2609  * - Neither DHD_NATIVE_TO_PKTID nor DHD_PKTID_TO_NATIVE need to be protected by
2610  *   a lock.
2611  * - Hence DHD_PKTID_INVALID is not defined when DHD_PCIE_PKTID is undefined.
2612  */
2613 #define DHD_PKTID32(pktptr32)	((uint32)(pktptr32))
2614 #define DHD_PKTPTR32(pktid32)	((void *)(pktid32))
2615 
2616 static INLINE uint32 dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
2617 	dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
2618 	dhd_pkttype_t pkttype);
2619 static INLINE void * dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
2620 	dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
2621 	dhd_pkttype_t pkttype);
2622 
2623 static dhd_pktid_map_handle_t *
dhd_pktid_map_init(dhd_pub_t * dhd,uint32 num_items)2624 dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items)
2625 {
2626 	osl_t *osh = dhd->osh;
2627 	pktlists_t *handle = NULL;
2628 
2629 	if ((handle = (pktlists_t *) MALLOCZ(osh, sizeof(pktlists_t))) == NULL) {
2630 		DHD_ERROR(("%s:%d: MALLOC failed for lists allocation, size=%d\n",
2631 		           __FUNCTION__, __LINE__, sizeof(pktlists_t)));
2632 		goto error_done;
2633 	}
2634 
2635 	if ((handle->tx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
2636 		DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
2637 		           __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
2638 		goto error;
2639 	}
2640 
2641 	if ((handle->rx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
2642 		DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
2643 		           __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
2644 		goto error;
2645 	}
2646 
2647 	if ((handle->ctrl_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
2648 		DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
2649 		           __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
2650 		goto error;
2651 	}
2652 
2653 	PKTLIST_INIT(handle->tx_pkt_list);
2654 	PKTLIST_INIT(handle->rx_pkt_list);
2655 	PKTLIST_INIT(handle->ctrl_pkt_list);
2656 
2657 	return (dhd_pktid_map_handle_t *) handle;
2658 
2659 error:
2660 	if (handle->ctrl_pkt_list) {
2661 		MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
2662 	}
2663 
2664 	if (handle->rx_pkt_list) {
2665 		MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
2666 	}
2667 
2668 	if (handle->tx_pkt_list) {
2669 		MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
2670 	}
2671 
2672 	if (handle) {
2673 		MFREE(osh, handle, sizeof(pktlists_t));
2674 	}
2675 
2676 error_done:
2677 	return (dhd_pktid_map_handle_t *)NULL;
2678 }
2679 
2680 static void
dhd_pktid_map_reset(dhd_pub_t * dhd,pktlists_t * handle)2681 dhd_pktid_map_reset(dhd_pub_t *dhd, pktlists_t *handle)
2682 {
2683 	osl_t *osh = dhd->osh;
2684 
2685 	if (handle->ctrl_pkt_list) {
2686 		PKTLIST_FINI(handle->ctrl_pkt_list);
2687 		MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
2688 	}
2689 
2690 	if (handle->rx_pkt_list) {
2691 		PKTLIST_FINI(handle->rx_pkt_list);
2692 		MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
2693 	}
2694 
2695 	if (handle->tx_pkt_list) {
2696 		PKTLIST_FINI(handle->tx_pkt_list);
2697 		MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
2698 	}
2699 }
2700 
2701 static void
dhd_pktid_map_fini(dhd_pub_t * dhd,dhd_pktid_map_handle_t * map)2702 dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map)
2703 {
2704 	osl_t *osh = dhd->osh;
2705 	pktlists_t *handle = (pktlists_t *) map;
2706 
2707 	ASSERT(handle != NULL);
2708 	if (handle == (pktlists_t *)NULL) {
2709 		return;
2710 	}
2711 
2712 	dhd_pktid_map_reset(dhd, handle);
2713 
2714 	if (handle) {
2715 		MFREE(osh, handle, sizeof(pktlists_t));
2716 	}
2717 }
2718 
2719 /** Save dma parameters into the packet's pkttag and convert a pktptr to pktid */
2720 static INLINE uint32
dhd_native_to_pktid(dhd_pktid_map_handle_t * map,void * pktptr32,dmaaddr_t pa,uint32 dma_len,void * dmah,void * secdma,dhd_pkttype_t pkttype)2721 dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
2722 	dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
2723 	dhd_pkttype_t pkttype)
2724 {
2725 	pktlists_t *handle = (pktlists_t *) map;
2726 	ASSERT(pktptr32 != NULL);
2727 	DHD_PKT_SET_DMA_LEN(pktptr32, dma_len);
2728 	DHD_PKT_SET_DMAH(pktptr32, dmah);
2729 	DHD_PKT_SET_PA(pktptr32, pa);
2730 	DHD_PKT_SET_SECDMA(pktptr32, secdma);
2731 
2732 	if (pkttype == PKTTYPE_DATA_TX) {
2733 		PKTLIST_ENQ(handle->tx_pkt_list,  pktptr32);
2734 	} else if (pkttype == PKTTYPE_DATA_RX) {
2735 		PKTLIST_ENQ(handle->rx_pkt_list,  pktptr32);
2736 	} else {
2737 		PKTLIST_ENQ(handle->ctrl_pkt_list,  pktptr32);
2738 	}
2739 
2740 	return DHD_PKTID32(pktptr32);
2741 }
2742 
2743 /** Convert a pktid to pktptr and retrieve saved dma parameters from packet */
2744 static INLINE void *
dhd_pktid_to_native(dhd_pktid_map_handle_t * map,uint32 pktid32,dmaaddr_t * pa,uint32 * dma_len,void ** dmah,void ** secdma,dhd_pkttype_t pkttype)2745 dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
2746 	dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
2747 	dhd_pkttype_t pkttype)
2748 {
2749 	pktlists_t *handle = (pktlists_t *) map;
2750 	void *pktptr32;
2751 
2752 	ASSERT(pktid32 != 0U);
2753 	pktptr32 = DHD_PKTPTR32(pktid32);
2754 	*dma_len = DHD_PKT_GET_DMA_LEN(pktptr32);
2755 	*dmah = DHD_PKT_GET_DMAH(pktptr32);
2756 	*pa = DHD_PKT_GET_PA(pktptr32);
2757 	*secdma = DHD_PKT_GET_SECDMA(pktptr32);
2758 
2759 	if (pkttype == PKTTYPE_DATA_TX) {
2760 		PKTLIST_UNLINK(handle->tx_pkt_list,  pktptr32);
2761 	} else if (pkttype == PKTTYPE_DATA_RX) {
2762 		PKTLIST_UNLINK(handle->rx_pkt_list,  pktptr32);
2763 	} else {
2764 		PKTLIST_UNLINK(handle->ctrl_pkt_list,  pktptr32);
2765 	}
2766 
2767 	return pktptr32;
2768 }
2769 
2770 #define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype)  DHD_PKTID32(pkt)
2771 
2772 #define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dma_dir, dmah, secdma, pkttype) \
2773 	({ BCM_REFERENCE(dhd); BCM_REFERENCE(nkey); BCM_REFERENCE(dma_dir); \
2774 	   dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
2775 			   (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
2776 	})
2777 
2778 #define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dma_dir, dmah, secdma, pkttype) \
2779 	({ BCM_REFERENCE(dhd); BCM_REFERENCE(dma_dir); \
2780 	   dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
2781 			   (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
2782 	})
2783 
2784 #define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
2785 	({ BCM_REFERENCE(dhd); BCM_REFERENCE(pkttype);	\
2786 		dhd_pktid_to_native((dhd_pktid_map_handle_t *) map, (uint32)(pktid), \
2787 				(dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
2788 				(void **)&secdma, (dhd_pkttype_t)(pkttype)); \
2789 	})
2790 
2791 #define DHD_PKTID_AVAIL(map)  (~0)
2792 
2793 #endif /* ! DHD_PCIE_PKTID */
2794 
2795 /* +------------------ End of PCIE DHD PKTID MAPPER  -----------------------+ */
2796 
2797 /**
2798  * The PCIE FD protocol layer is constructed in two phases:
2799  *    Phase 1. dhd_prot_attach()
2800  *    Phase 2. dhd_prot_init()
2801  *
2802  * dhd_prot_attach() - Allocates a dhd_prot_t object and resets all its fields.
2803  * All Common rings are allose attached (msgbuf_ring_t objects are allocated
2804  * with DMA-able buffers).
2805  * All dhd_dma_buf_t objects are also allocated here.
2806  *
2807  * As dhd_prot_attach is invoked prior to the pcie_shared object is read, any
2808  * initialization of objects that requires information advertized by the dongle
2809  * may not be performed here.
2810  * E.g. the number of TxPost flowrings is not know at this point, neither do
2811  * we know shich form of D2H DMA sync mechanism is advertized by the dongle, or
2812  * whether the dongle supports DMA-ing of WR/RD indices for the H2D and/or D2H
2813  * rings (common + flow).
2814  *
2815  * dhd_prot_init() is invoked after the bus layer has fetched the information
2816  * advertized by the dongle in the pcie_shared_t.
2817  */
2818 int
dhd_prot_attach(dhd_pub_t * dhd)2819 dhd_prot_attach(dhd_pub_t *dhd)
2820 {
2821 	osl_t *osh = dhd->osh;
2822 	dhd_prot_t *prot;
2823 
2824 	/* FW going to DMA extended trap data,
2825 	 * allocate buffer for the maximum extended trap data.
2826 	 */
2827 	uint32 trap_buf_len = BCMPCIE_EXT_TRAP_DATA_MAXLEN;
2828 
2829 	/* Allocate prot structure */
2830 	if (!(prot = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT,
2831 		sizeof(dhd_prot_t)))) {
2832 		DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
2833 		goto fail;
2834 	}
2835 	memset(prot, 0, sizeof(*prot));
2836 
2837 	prot->osh = osh;
2838 	dhd->prot = prot;
2839 
2840 	/* DMAing ring completes supported? FALSE by default  */
2841 	dhd->dma_d2h_ring_upd_support = FALSE;
2842 	dhd->dma_h2d_ring_upd_support = FALSE;
2843 	dhd->dma_ring_upd_overwrite = FALSE;
2844 
2845 	dhd->hwa_inited = 0;
2846 	dhd->idma_inited = 0;
2847 	dhd->ifrm_inited = 0;
2848 	dhd->dar_inited = 0;
2849 
2850 	/* Common Ring Allocations */
2851 
2852 	/* Ring  0: H2D Control Submission */
2853 	if (dhd_prot_ring_attach(dhd, &prot->h2dring_ctrl_subn, "h2dctrl",
2854 	        H2DRING_CTRL_SUB_MAX_ITEM, H2DRING_CTRL_SUB_ITEMSIZE,
2855 	        BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT) != BCME_OK) {
2856 		DHD_ERROR(("%s: dhd_prot_ring_attach H2D Ctrl Submission failed\n",
2857 			__FUNCTION__));
2858 		goto fail;
2859 	}
2860 
2861 	/* Ring  1: H2D Receive Buffer Post */
2862 	if (dhd_prot_ring_attach(dhd, &prot->h2dring_rxp_subn, "h2drxp",
2863 	        H2DRING_RXPOST_MAX_ITEM, H2DRING_RXPOST_ITEMSIZE,
2864 	        BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT) != BCME_OK) {
2865 		DHD_ERROR(("%s: dhd_prot_ring_attach H2D RxPost failed\n",
2866 			__FUNCTION__));
2867 		goto fail;
2868 	}
2869 
2870 	/* Ring  2: D2H Control Completion */
2871 	if (dhd_prot_ring_attach(dhd, &prot->d2hring_ctrl_cpln, "d2hctrl",
2872 	        D2HRING_CTRL_CMPLT_MAX_ITEM, D2HRING_CTRL_CMPLT_ITEMSIZE,
2873 	        BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE) != BCME_OK) {
2874 		DHD_ERROR(("%s: dhd_prot_ring_attach D2H Ctrl Completion failed\n",
2875 			__FUNCTION__));
2876 		goto fail;
2877 	}
2878 
2879 	/* Ring  3: D2H Transmit Complete */
2880 	if (dhd_prot_ring_attach(dhd, &prot->d2hring_tx_cpln, "d2htxcpl",
2881 	        D2HRING_TXCMPLT_MAX_ITEM, D2HRING_TXCMPLT_ITEMSIZE,
2882 	        BCMPCIE_D2H_MSGRING_TX_COMPLETE) != BCME_OK) {
2883 		DHD_ERROR(("%s: dhd_prot_ring_attach D2H Tx Completion failed\n",
2884 			__FUNCTION__));
2885 		goto fail;
2886 
2887 	}
2888 
2889 	/* Ring  4: D2H Receive Complete */
2890 	if (dhd_prot_ring_attach(dhd, &prot->d2hring_rx_cpln, "d2hrxcpl",
2891 	        D2HRING_RXCMPLT_MAX_ITEM, D2HRING_RXCMPLT_ITEMSIZE,
2892 	        BCMPCIE_D2H_MSGRING_RX_COMPLETE) != BCME_OK) {
2893 		DHD_ERROR(("%s: dhd_prot_ring_attach D2H Rx Completion failed\n",
2894 			__FUNCTION__));
2895 		goto fail;
2896 
2897 	}
2898 
2899 	/*
2900 	 * Max number of flowrings is not yet known. msgbuf_ring_t with DMA-able
2901 	 * buffers for flowrings will be instantiated, in dhd_prot_init() .
2902 	 * See dhd_prot_flowrings_pool_attach()
2903 	 */
2904 	/* ioctl response buffer */
2905 	if (dhd_dma_buf_alloc(dhd, &prot->retbuf, IOCT_RETBUF_SIZE)) {
2906 		goto fail;
2907 	}
2908 
2909 	/* IOCTL request buffer */
2910 	if (dhd_dma_buf_alloc(dhd, &prot->ioctbuf, IOCT_RETBUF_SIZE)) {
2911 		goto fail;
2912 	}
2913 
2914 	/* Host TS request buffer one buffer for now */
2915 	if (dhd_dma_buf_alloc(dhd, &prot->hostts_req_buf, CTRLSUB_HOSTTS_MEESAGE_SIZE)) {
2916 		goto fail;
2917 	}
2918 	prot->hostts_req_buf_inuse = FALSE;
2919 
2920 	/* Scratch buffer for dma rx offset */
2921 #ifdef BCM_HOST_BUF
2922 	if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf,
2923 		ROUNDUP(DMA_D2H_SCRATCH_BUF_LEN, 16) + DMA_HOST_BUFFER_LEN))
2924 #else
2925 	if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf, DMA_D2H_SCRATCH_BUF_LEN))
2926 
2927 #endif /* BCM_HOST_BUF */
2928 	{
2929 		goto fail;
2930 	}
2931 
2932 	/* scratch buffer bus throughput measurement */
2933 	if (dhd_dma_buf_alloc(dhd, &prot->host_bus_throughput_buf, DHD_BUS_TPUT_BUF_LEN)) {
2934 		goto fail;
2935 	}
2936 
2937 #ifdef DHD_RX_CHAINING
2938 	dhd_rxchain_reset(&prot->rxchain);
2939 #endif // endif
2940 
2941 	prot->pktid_ctrl_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_CTRL_PKTID);
2942 	if (prot->pktid_ctrl_map == NULL) {
2943 		goto fail;
2944 	}
2945 
2946 	prot->pktid_rx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_RX_PKTID);
2947 	if (prot->pktid_rx_map == NULL)
2948 		goto fail;
2949 
2950 	prot->pktid_tx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_TX_PKTID);
2951 	if (prot->pktid_tx_map == NULL)
2952 		goto fail;
2953 
2954 #ifdef IOCTLRESP_USE_CONSTMEM
2955 	prot->pktid_map_handle_ioctl = DHD_NATIVE_TO_PKTID_INIT(dhd,
2956 		DHD_FLOWRING_MAX_IOCTLRESPBUF_POST);
2957 	if (prot->pktid_map_handle_ioctl == NULL) {
2958 		goto fail;
2959 	}
2960 #endif /* IOCTLRESP_USE_CONSTMEM */
2961 
2962 #ifdef DHD_MAP_PKTID_LOGGING
2963 	prot->pktid_dma_map = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
2964 	if (prot->pktid_dma_map == NULL) {
2965 		DHD_ERROR(("%s: failed to allocate pktid_dma_map\n",
2966 			__FUNCTION__));
2967 	}
2968 
2969 	prot->pktid_dma_unmap = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
2970 	if (prot->pktid_dma_unmap == NULL) {
2971 		DHD_ERROR(("%s: failed to allocate pktid_dma_unmap\n",
2972 			__FUNCTION__));
2973 	}
2974 #endif /* DHD_MAP_PKTID_LOGGING */
2975 
2976 	   /* Initialize the work queues to be used by the Load Balancing logic */
2977 #if defined(DHD_LB_TXC)
2978 	{
2979 		void *buffer;
2980 		buffer = MALLOC(dhd->osh, sizeof(void*) * DHD_LB_WORKQ_SZ);
2981 		if (buffer == NULL) {
2982 			DHD_ERROR(("%s: failed to allocate RXC work buffer\n", __FUNCTION__));
2983 			goto fail;
2984 		}
2985 		bcm_workq_init(&prot->tx_compl_prod, &prot->tx_compl_cons,
2986 			buffer, DHD_LB_WORKQ_SZ);
2987 		prot->tx_compl_prod_sync = 0;
2988 		DHD_INFO(("%s: created tx_compl_workq <%p,%d>\n",
2989 			__FUNCTION__, buffer, DHD_LB_WORKQ_SZ));
2990 	   }
2991 #endif /* DHD_LB_TXC */
2992 
2993 #if defined(DHD_LB_RXC)
2994 	   {
2995 		void *buffer;
2996 		buffer = MALLOC(dhd->osh, sizeof(void*) * DHD_LB_WORKQ_SZ);
2997 		if (buffer == NULL) {
2998 			DHD_ERROR(("%s: failed to allocate RXC work buffer\n", __FUNCTION__));
2999 			goto fail;
3000 		}
3001 		bcm_workq_init(&prot->rx_compl_prod, &prot->rx_compl_cons,
3002 			buffer, DHD_LB_WORKQ_SZ);
3003 		prot->rx_compl_prod_sync = 0;
3004 		DHD_INFO(("%s: created rx_compl_workq <%p,%d>\n",
3005 			__FUNCTION__, buffer, DHD_LB_WORKQ_SZ));
3006 	   }
3007 #endif /* DHD_LB_RXC */
3008 
3009 	/* Initialize trap buffer */
3010 	if (dhd_dma_buf_alloc(dhd, &dhd->prot->fw_trap_buf, trap_buf_len)) {
3011 		DHD_ERROR(("%s: dhd_init_trap_buffer falied\n", __FUNCTION__));
3012 		goto fail;
3013 	}
3014 
3015 	return BCME_OK;
3016 
3017 fail:
3018 
3019 	if (prot) {
3020 		/* Free up all allocated memories */
3021 		dhd_prot_detach(dhd);
3022 	}
3023 
3024 	return BCME_NOMEM;
3025 } /* dhd_prot_attach */
3026 
3027 static int
dhd_alloc_host_scbs(dhd_pub_t * dhd)3028 dhd_alloc_host_scbs(dhd_pub_t *dhd)
3029 {
3030 	int ret = BCME_OK;
3031 	sh_addr_t base_addr;
3032 	dhd_prot_t *prot = dhd->prot;
3033 	uint32 host_scb_size = 0;
3034 
3035 	if (dhd->hscb_enable) {
3036 		/* read number of bytes to allocate from F/W */
3037 		dhd_bus_cmn_readshared(dhd->bus, &host_scb_size, HOST_SCB_ADDR, 0);
3038 		if (host_scb_size) {
3039 			dhd_dma_buf_free(dhd, &prot->host_scb_buf);
3040 			/* alloc array of host scbs */
3041 			ret = dhd_dma_buf_alloc(dhd, &prot->host_scb_buf, host_scb_size);
3042 			/* write host scb address to F/W */
3043 			if (ret == BCME_OK) {
3044 				dhd_base_addr_htolpa(&base_addr, prot->host_scb_buf.pa);
3045 				dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
3046 					HOST_SCB_ADDR, 0);
3047 			} else {
3048 				DHD_TRACE(("dhd_alloc_host_scbs: dhd_dma_buf_alloc error\n"));
3049 			}
3050 		} else {
3051 			DHD_TRACE(("dhd_alloc_host_scbs: host_scb_size is 0.\n"));
3052 		}
3053 	} else {
3054 		DHD_TRACE(("dhd_alloc_host_scbs: Host scb not supported in F/W.\n"));
3055 	}
3056 
3057 	return ret;
3058 }
3059 
3060 void
dhd_set_host_cap(dhd_pub_t * dhd)3061 dhd_set_host_cap(dhd_pub_t *dhd)
3062 {
3063 	uint32 data = 0;
3064 	dhd_prot_t *prot = dhd->prot;
3065 
3066 	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
3067 		if (dhd->h2d_phase_supported) {
3068 			data |= HOSTCAP_H2D_VALID_PHASE;
3069 			if (dhd->force_dongletrap_on_bad_h2d_phase)
3070 				data |= HOSTCAP_H2D_ENABLE_TRAP_ON_BADPHASE;
3071 		}
3072 		if (prot->host_ipc_version > prot->device_ipc_version)
3073 			prot->active_ipc_version = prot->device_ipc_version;
3074 		else
3075 			prot->active_ipc_version = prot->host_ipc_version;
3076 
3077 		data |= prot->active_ipc_version;
3078 
3079 		if (dhdpcie_bus_get_pcie_hostready_supported(dhd->bus)) {
3080 			DHD_INFO(("Advertise Hostready Capability\n"));
3081 			data |= HOSTCAP_H2D_ENABLE_HOSTRDY;
3082 		}
3083 		{
3084 			/* Disable DS altogether */
3085 			data |= HOSTCAP_DS_NO_OOB_DW;
3086 			dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_NONE);
3087 		}
3088 
3089 		/* Indicate support for extended trap data */
3090 		data |= HOSTCAP_EXTENDED_TRAP_DATA;
3091 
3092 		/* Indicate support for TX status metadata */
3093 		if (dhd->pcie_txs_metadata_enable != 0)
3094 			data |= HOSTCAP_TXSTATUS_METADATA;
3095 
3096 		/* Enable fast delete ring in firmware if supported */
3097 		if (dhd->fast_delete_ring_support) {
3098 			data |= HOSTCAP_FAST_DELETE_RING;
3099 		}
3100 
3101 		if (dhdpcie_bus_get_pcie_hwa_supported(dhd->bus)) {
3102 			DHD_ERROR(("HWA inited\n"));
3103 			/* TODO: Is hostcap needed? */
3104 			dhd->hwa_inited = TRUE;
3105 		}
3106 
3107 		if (dhdpcie_bus_get_pcie_idma_supported(dhd->bus)) {
3108 			DHD_ERROR(("IDMA inited\n"));
3109 			data |= HOSTCAP_H2D_IDMA;
3110 			dhd->idma_inited = TRUE;
3111 		}
3112 
3113 		if (dhdpcie_bus_get_pcie_ifrm_supported(dhd->bus)) {
3114 			DHD_ERROR(("IFRM Inited\n"));
3115 			data |= HOSTCAP_H2D_IFRM;
3116 			dhd->ifrm_inited = TRUE;
3117 			dhd->dma_h2d_ring_upd_support = FALSE;
3118 			dhd_prot_dma_indx_free(dhd);
3119 		}
3120 
3121 		if (dhdpcie_bus_get_pcie_dar_supported(dhd->bus)) {
3122 			DHD_ERROR(("DAR doorbell Use\n"));
3123 			data |= HOSTCAP_H2D_DAR;
3124 			dhd->dar_inited = TRUE;
3125 		}
3126 
3127 		data |= HOSTCAP_UR_FW_NO_TRAP;
3128 
3129 		if (dhd->hscb_enable) {
3130 			data |= HOSTCAP_HSCB;
3131 		}
3132 
3133 #ifdef EWP_EDL
3134 		if (dhd->dongle_edl_support) {
3135 			data |= HOSTCAP_EDL_RING;
3136 			DHD_ERROR(("Enable EDL host cap\n"));
3137 		} else {
3138 			DHD_ERROR(("DO NOT SET EDL host cap\n"));
3139 		}
3140 #endif /* EWP_EDL */
3141 
3142 #ifdef DHD_HP2P
3143 		if (dhd->hp2p_capable) {
3144 			data |= HOSTCAP_PKT_TIMESTAMP;
3145 			data |= HOSTCAP_PKT_HP2P;
3146 			DHD_ERROR(("Enable HP2P in host cap\n"));
3147 		} else {
3148 			DHD_ERROR(("HP2P not enabled in host cap\n"));
3149 		}
3150 #endif // endif
3151 
3152 #ifdef DHD_DB0TS
3153 		if (dhd->db0ts_capable) {
3154 			data |= HOSTCAP_DB0_TIMESTAMP;
3155 			DHD_ERROR(("Enable DB0 TS in host cap\n"));
3156 		} else {
3157 			DHD_ERROR(("DB0 TS not enabled in host cap\n"));
3158 		}
3159 #endif /* DHD_DB0TS */
3160 		if (dhd->extdtxs_in_txcpl) {
3161 			DHD_ERROR(("Enable hostcap: EXTD TXS in txcpl\n"));
3162 			data |= HOSTCAP_PKT_TXSTATUS;
3163 		}
3164 		else {
3165 			DHD_ERROR(("Enable hostcap: EXTD TXS in txcpl\n"));
3166 		}
3167 
3168 		DHD_INFO(("%s:Active Ver:%d, Host Ver:%d, FW Ver:%d\n",
3169 			__FUNCTION__,
3170 			prot->active_ipc_version, prot->host_ipc_version,
3171 			prot->device_ipc_version));
3172 
3173 		dhd_bus_cmn_writeshared(dhd->bus, &data, sizeof(uint32), HOST_API_VERSION, 0);
3174 		dhd_bus_cmn_writeshared(dhd->bus, &prot->fw_trap_buf.pa,
3175 			sizeof(prot->fw_trap_buf.pa), DNGL_TO_HOST_TRAP_ADDR, 0);
3176 	}
3177 
3178 }
3179 
3180 /**
3181  * dhd_prot_init - second stage of dhd_prot_attach. Now that the dongle has
3182  * completed it's initialization of the pcie_shared structure, we may now fetch
3183  * the dongle advertized features and adjust the protocol layer accordingly.
3184  *
3185  * dhd_prot_init() may be invoked again after a dhd_prot_reset().
3186  */
3187 int
dhd_prot_init(dhd_pub_t * dhd)3188 dhd_prot_init(dhd_pub_t *dhd)
3189 {
3190 	sh_addr_t base_addr;
3191 	dhd_prot_t *prot = dhd->prot;
3192 	int ret = 0;
3193 	uint32 idmacontrol;
3194 	uint32 waitcount = 0;
3195 
3196 #ifdef WL_MONITOR
3197 	dhd->monitor_enable = FALSE;
3198 #endif /* WL_MONITOR */
3199 
3200 	/**
3201 	 * A user defined value can be assigned to global variable h2d_max_txpost via
3202 	 * 1. DHD IOVAR h2d_max_txpost, before firmware download
3203 	 * 2. module parameter h2d_max_txpost
3204 	 * prot->h2d_max_txpost is assigned with H2DRING_TXPOST_MAX_ITEM,
3205 	 * if user has not defined any buffers by one of the above methods.
3206 	 */
3207 	prot->h2d_max_txpost = (uint16)h2d_max_txpost;
3208 
3209 	DHD_ERROR(("%s:%d: h2d_max_txpost = %d\n", __FUNCTION__, __LINE__, prot->h2d_max_txpost));
3210 
3211 	/* Read max rx packets supported by dongle */
3212 	dhd_bus_cmn_readshared(dhd->bus, &prot->max_rxbufpost, MAX_HOST_RXBUFS, 0);
3213 	if (prot->max_rxbufpost == 0) {
3214 		/* This would happen if the dongle firmware is not */
3215 		/* using the latest shared structure template */
3216 		prot->max_rxbufpost = DEFAULT_RX_BUFFERS_TO_POST;
3217 	}
3218 	DHD_ERROR(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost));
3219 
3220 	/* Initialize.  bzero() would blow away the dma pointers. */
3221 	prot->max_eventbufpost = DHD_FLOWRING_MAX_EVENTBUF_POST;
3222 	prot->max_ioctlrespbufpost = DHD_FLOWRING_MAX_IOCTLRESPBUF_POST;
3223 	prot->max_infobufpost = DHD_H2D_INFORING_MAX_BUF_POST;
3224 	prot->max_tsbufpost = DHD_MAX_TSBUF_POST;
3225 
3226 	prot->cur_ioctlresp_bufs_posted = 0;
3227 	OSL_ATOMIC_INIT(dhd->osh, &prot->active_tx_count);
3228 	prot->data_seq_no = 0;
3229 	prot->ioctl_seq_no = 0;
3230 	prot->rxbufpost = 0;
3231 	prot->cur_event_bufs_posted = 0;
3232 	prot->ioctl_state = 0;
3233 	prot->curr_ioctl_cmd = 0;
3234 	prot->cur_ts_bufs_posted = 0;
3235 	prot->infobufpost = 0;
3236 
3237 	prot->dmaxfer.srcmem.va = NULL;
3238 	prot->dmaxfer.dstmem.va = NULL;
3239 	prot->dmaxfer.in_progress = FALSE;
3240 
3241 	prot->metadata_dbg = FALSE;
3242 	prot->rx_metadata_offset = 0;
3243 	prot->tx_metadata_offset = 0;
3244 	prot->txp_threshold = TXP_FLUSH_MAX_ITEMS_FLUSH_CNT;
3245 
3246 	/* To catch any rollover issues fast, starting with higher ioctl_trans_id */
3247 	prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER;
3248 	prot->ioctl_state = 0;
3249 	prot->ioctl_status = 0;
3250 	prot->ioctl_resplen = 0;
3251 	prot->ioctl_received = IOCTL_WAIT;
3252 
3253 	/* Initialize Common MsgBuf Rings */
3254 
3255 	prot->device_ipc_version = dhd->bus->api.fw_rev;
3256 	prot->host_ipc_version = PCIE_SHARED_VERSION;
3257 	prot->no_tx_resource = FALSE;
3258 
3259 	/* Init the host API version */
3260 	dhd_set_host_cap(dhd);
3261 
3262 	/* alloc and configure scb host address for dongle */
3263 	if ((ret = dhd_alloc_host_scbs(dhd))) {
3264 		return ret;
3265 	}
3266 
3267 	/* Register the interrupt function upfront */
3268 	/* remove corerev checks in data path */
3269 	/* do this after host/fw negotiation for DAR */
3270 	prot->mb_ring_fn = dhd_bus_get_mbintr_fn(dhd->bus);
3271 	prot->mb_2_ring_fn = dhd_bus_get_mbintr_2_fn(dhd->bus);
3272 
3273 	dhd->bus->_dar_war = (dhd->bus->sih->buscorerev < 64) ? TRUE : FALSE;
3274 
3275 	dhd_prot_ring_init(dhd, &prot->h2dring_ctrl_subn);
3276 	dhd_prot_ring_init(dhd, &prot->h2dring_rxp_subn);
3277 	dhd_prot_ring_init(dhd, &prot->d2hring_ctrl_cpln);
3278 
3279 	/* Make it compatibile with pre-rev7 Firmware */
3280 	if (prot->active_ipc_version < PCIE_SHARED_VERSION_7) {
3281 		prot->d2hring_tx_cpln.item_len =
3282 			D2HRING_TXCMPLT_ITEMSIZE_PREREV7;
3283 		prot->d2hring_rx_cpln.item_len =
3284 			D2HRING_RXCMPLT_ITEMSIZE_PREREV7;
3285 	}
3286 	dhd_prot_ring_init(dhd, &prot->d2hring_tx_cpln);
3287 	dhd_prot_ring_init(dhd, &prot->d2hring_rx_cpln);
3288 
3289 	dhd_prot_d2h_sync_init(dhd);
3290 
3291 	dhd_prot_h2d_sync_init(dhd);
3292 
3293 	/* init the scratch buffer */
3294 	dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_scratch_buf.pa);
3295 	dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
3296 		D2H_DMA_SCRATCH_BUF, 0);
3297 	dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf.len,
3298 		sizeof(prot->d2h_dma_scratch_buf.len), D2H_DMA_SCRATCH_BUF_LEN, 0);
3299 
3300 	/* If supported by the host, indicate the memory block
3301 	 * for completion writes / submission reads to shared space
3302 	 */
3303 	if (dhd->dma_d2h_ring_upd_support) {
3304 		dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_wr_buf.pa);
3305 		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
3306 			D2H_DMA_INDX_WR_BUF, 0);
3307 		dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_rd_buf.pa);
3308 		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
3309 			H2D_DMA_INDX_RD_BUF, 0);
3310 	}
3311 
3312 	if (dhd->dma_h2d_ring_upd_support || IDMA_ENAB(dhd)) {
3313 		dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_wr_buf.pa);
3314 		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
3315 			H2D_DMA_INDX_WR_BUF, 0);
3316 		dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_rd_buf.pa);
3317 		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
3318 			D2H_DMA_INDX_RD_BUF, 0);
3319 	}
3320 	/* Signal to the dongle that common ring init is complete */
3321 	if (dhd->hostrdy_after_init)
3322 		dhd_bus_hostready(dhd->bus);
3323 
3324 	/*
3325 	 * If the DMA-able buffers for flowring needs to come from a specific
3326 	 * contiguous memory region, then setup prot->flowrings_dma_buf here.
3327 	 * dhd_prot_flowrings_pool_attach() will carve out DMA-able buffers from
3328 	 * this contiguous memory region, for each of the flowrings.
3329 	 */
3330 
3331 	/* Pre-allocate pool of msgbuf_ring for flowrings */
3332 	if (dhd_prot_flowrings_pool_attach(dhd) != BCME_OK) {
3333 		return BCME_ERROR;
3334 	}
3335 
3336 	/* If IFRM is enabled, wait for FW to setup the DMA channel */
3337 	if (IFRM_ENAB(dhd)) {
3338 		dhd_base_addr_htolpa(&base_addr, prot->h2d_ifrm_indx_wr_buf.pa);
3339 		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
3340 			H2D_IFRM_INDX_WR_BUF, 0);
3341 	}
3342 
3343 	/* If IDMA is enabled and initied, wait for FW to setup the IDMA descriptors
3344 	 * Waiting just before configuring doorbell
3345 	 */
3346 #define	IDMA_ENABLE_WAIT  10
3347 	if (IDMA_ACTIVE(dhd)) {
3348 		/* wait for idma_en bit in IDMAcontrol register to be set */
3349 		/* Loop till idma_en is not set */
3350 		uint buscorerev = dhd->bus->sih->buscorerev;
3351 		idmacontrol = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
3352 			IDMAControl(buscorerev), 0, 0);
3353 		while (!(idmacontrol & PCIE_IDMA_MODE_EN(buscorerev)) &&
3354 			(waitcount++ < IDMA_ENABLE_WAIT)) {
3355 
3356 			DHD_ERROR(("iDMA not enabled yet,waiting 1 ms c=%d IDMAControl = %08x\n",
3357 				waitcount, idmacontrol));
3358 			OSL_DELAY(1000); /* 1ms as its onetime only */
3359 			idmacontrol = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
3360 				IDMAControl(buscorerev), 0, 0);
3361 		}
3362 
3363 		if (waitcount < IDMA_ENABLE_WAIT) {
3364 			DHD_ERROR(("iDMA enabled PCIEControl = %08x\n", idmacontrol));
3365 		} else {
3366 			DHD_ERROR(("Error: wait for iDMA timed out wait=%d IDMAControl = %08x\n",
3367 				waitcount, idmacontrol));
3368 			return BCME_ERROR;
3369 		}
3370 		// add delay to fix bring up issue
3371 		OSL_SLEEP(1);
3372 	}
3373 
3374 	/* Host should configure soft doorbells if needed ... here */
3375 
3376 	/* Post to dongle host configured soft doorbells */
3377 	dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd);
3378 
3379 	dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
3380 	dhd_msgbuf_rxbuf_post_event_bufs(dhd);
3381 
3382 	prot->no_retry = FALSE;
3383 	prot->no_aggr = FALSE;
3384 	prot->fixed_rate = FALSE;
3385 
3386 	/*
3387 	 * Note that any communication with the Dongle should be added
3388 	 * below this point. Any other host data structure initialiation that
3389 	 * needs to be done prior to the DPC starts executing should be done
3390 	 * befor this point.
3391 	 * Because once we start sending H2D requests to Dongle, the Dongle
3392 	 * respond immediately. So the DPC context to handle this
3393 	 * D2H response could preempt the context in which dhd_prot_init is running.
3394 	 * We want to ensure that all the Host part of dhd_prot_init is
3395 	 * done before that.
3396 	 */
3397 
3398 	/* See if info rings could be created, info rings should be created
3399 	* only if dongle does not support EDL
3400 	*/
3401 #ifdef EWP_EDL
3402 	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !dhd->dongle_edl_support)
3403 #else
3404 	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
3405 #endif /* EWP_EDL */
3406 	{
3407 		if ((ret = dhd_prot_init_info_rings(dhd)) != BCME_OK) {
3408 			/* For now log and proceed, further clean up action maybe necessary
3409 			 * when we have more clarity.
3410 			 */
3411 			DHD_ERROR(("%s Info rings couldn't be created: Err Code%d",
3412 				__FUNCTION__, ret));
3413 		}
3414 	}
3415 
3416 #ifdef EWP_EDL
3417 	/* Create Enhanced Debug Lane rings (EDL) if dongle supports it */
3418 	if (dhd->dongle_edl_support) {
3419 		if ((ret = dhd_prot_init_edl_rings(dhd)) != BCME_OK) {
3420 			DHD_ERROR(("%s EDL rings couldn't be created: Err Code%d",
3421 				__FUNCTION__, ret));
3422 		}
3423 	}
3424 #endif /* EWP_EDL */
3425 
3426 #ifdef DHD_HP2P
3427 	/* create HPP txcmpl/rxcmpl rings */
3428 	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_7 && dhd->hp2p_capable) {
3429 		if ((ret = dhd_prot_init_hp2p_rings(dhd)) != BCME_OK) {
3430 			/* For now log and proceed, further clean up action maybe necessary
3431 			 * when we have more clarity.
3432 			 */
3433 			DHD_ERROR(("%s HP2P rings couldn't be created: Err Code%d",
3434 				__FUNCTION__, ret));
3435 		}
3436 	}
3437 #endif /* DHD_HP2P */
3438 
3439 	return BCME_OK;
3440 } /* dhd_prot_init */
3441 
3442 /**
3443  * dhd_prot_detach - PCIE FD protocol layer destructor.
3444  * Unlink, frees allocated protocol memory (including dhd_prot)
3445  */
dhd_prot_detach(dhd_pub_t * dhd)3446 void dhd_prot_detach(dhd_pub_t *dhd)
3447 {
3448 	dhd_prot_t *prot = dhd->prot;
3449 
3450 	/* Stop the protocol module */
3451 	if (prot) {
3452 
3453 		/* free up all DMA-able buffers allocated during prot attach/init */
3454 
3455 		dhd_dma_buf_free(dhd, &prot->d2h_dma_scratch_buf);
3456 		dhd_dma_buf_free(dhd, &prot->retbuf);
3457 		dhd_dma_buf_free(dhd, &prot->ioctbuf);
3458 		dhd_dma_buf_free(dhd, &prot->host_bus_throughput_buf);
3459 		dhd_dma_buf_free(dhd, &prot->hostts_req_buf);
3460 		dhd_dma_buf_free(dhd, &prot->fw_trap_buf);
3461 		dhd_dma_buf_free(dhd, &prot->host_scb_buf);
3462 
3463 		/* DMA-able buffers for DMAing H2D/D2H WR/RD indices */
3464 		dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
3465 		dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_rd_buf);
3466 		dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_wr_buf);
3467 		dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
3468 
3469 		dhd_dma_buf_free(dhd, &prot->h2d_ifrm_indx_wr_buf);
3470 
3471 		/* Common MsgBuf Rings */
3472 		dhd_prot_ring_detach(dhd, &prot->h2dring_ctrl_subn);
3473 		dhd_prot_ring_detach(dhd, &prot->h2dring_rxp_subn);
3474 		dhd_prot_ring_detach(dhd, &prot->d2hring_ctrl_cpln);
3475 		dhd_prot_ring_detach(dhd, &prot->d2hring_tx_cpln);
3476 		dhd_prot_ring_detach(dhd, &prot->d2hring_rx_cpln);
3477 
3478 		/* Detach each DMA-able buffer and free the pool of msgbuf_ring_t */
3479 		dhd_prot_flowrings_pool_detach(dhd);
3480 
3481 		/* detach info rings */
3482 		dhd_prot_detach_info_rings(dhd);
3483 
3484 #ifdef EWP_EDL
3485 		dhd_prot_detach_edl_rings(dhd);
3486 #endif // endif
3487 #ifdef DHD_HP2P
3488 		/* detach HPP rings */
3489 		dhd_prot_detach_hp2p_rings(dhd);
3490 #endif /* DHD_HP2P */
3491 
3492 		/* if IOCTLRESP_USE_CONSTMEM is defined IOCTL PKTs use pktid_map_handle_ioctl
3493 		 * handler and PKT memory is allocated using alloc_ioctl_return_buffer(), Otherwise
3494 		 * they will be part of pktid_ctrl_map handler and PKT memory is allocated using
3495 		 * PKTGET_STATIC (if DHD_USE_STATIC_CTRLBUF is defined) OR PKGET.
3496 		 * Similarly for freeing PKT buffers DHD_NATIVE_TO_PKTID_FINI will be used
3497 		 * which calls PKTFREE_STATIC (if DHD_USE_STATIC_CTRLBUF is defined) OR PKFREE.
3498 		 * Else if IOCTLRESP_USE_CONSTMEM is defined IOCTL PKTs will be freed using
3499 		 * DHD_NATIVE_TO_PKTID_FINI_IOCTL which calls free_ioctl_return_buffer.
3500 		 */
3501 		DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_ctrl_map);
3502 		DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_rx_map);
3503 		DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_tx_map);
3504 #ifdef IOCTLRESP_USE_CONSTMEM
3505 		DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, prot->pktid_map_handle_ioctl);
3506 #endif // endif
3507 #ifdef DHD_MAP_PKTID_LOGGING
3508 		DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_map);
3509 		DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_unmap);
3510 #endif /* DHD_MAP_PKTID_LOGGING */
3511 
3512 #if defined(DHD_LB_TXC)
3513 		if (prot->tx_compl_prod.buffer)
3514 			MFREE(dhd->osh, prot->tx_compl_prod.buffer,
3515 			      sizeof(void*) * DHD_LB_WORKQ_SZ);
3516 #endif /* DHD_LB_TXC */
3517 #if defined(DHD_LB_RXC)
3518 		if (prot->rx_compl_prod.buffer)
3519 			MFREE(dhd->osh, prot->rx_compl_prod.buffer,
3520 			      sizeof(void*) * DHD_LB_WORKQ_SZ);
3521 #endif /* DHD_LB_RXC */
3522 
3523 		DHD_OS_PREFREE(dhd, dhd->prot, sizeof(dhd_prot_t));
3524 
3525 		dhd->prot = NULL;
3526 	}
3527 } /* dhd_prot_detach */
3528 
3529 /**
3530  * dhd_prot_reset - Reset the protocol layer without freeing any objects.
3531  * This may be invoked to soft reboot the dongle, without having to
3532  * detach and attach the entire protocol layer.
3533  *
3534  * After dhd_prot_reset(), dhd_prot_init() may be invoked
3535  * without going througha dhd_prot_attach() phase.
3536  */
3537 void
dhd_prot_reset(dhd_pub_t * dhd)3538 dhd_prot_reset(dhd_pub_t *dhd)
3539 {
3540 	struct dhd_prot *prot = dhd->prot;
3541 
3542 	DHD_TRACE(("%s\n", __FUNCTION__));
3543 
3544 	if (prot == NULL) {
3545 		return;
3546 	}
3547 
3548 	dhd_prot_flowrings_pool_reset(dhd);
3549 
3550 	/* Reset Common MsgBuf Rings */
3551 	dhd_prot_ring_reset(dhd, &prot->h2dring_ctrl_subn);
3552 	dhd_prot_ring_reset(dhd, &prot->h2dring_rxp_subn);
3553 	dhd_prot_ring_reset(dhd, &prot->d2hring_ctrl_cpln);
3554 	dhd_prot_ring_reset(dhd, &prot->d2hring_tx_cpln);
3555 	dhd_prot_ring_reset(dhd, &prot->d2hring_rx_cpln);
3556 
3557 	/* Reset info rings */
3558 	if (prot->h2dring_info_subn) {
3559 		dhd_prot_ring_reset(dhd, prot->h2dring_info_subn);
3560 	}
3561 
3562 	if (prot->d2hring_info_cpln) {
3563 		dhd_prot_ring_reset(dhd, prot->d2hring_info_cpln);
3564 	}
3565 #ifdef EWP_EDL
3566 	if (prot->d2hring_edl) {
3567 		dhd_prot_ring_reset(dhd, prot->d2hring_edl);
3568 	}
3569 #endif /* EWP_EDL */
3570 
3571 	/* Reset all DMA-able buffers allocated during prot attach */
3572 	dhd_dma_buf_reset(dhd, &prot->d2h_dma_scratch_buf);
3573 	dhd_dma_buf_reset(dhd, &prot->retbuf);
3574 	dhd_dma_buf_reset(dhd, &prot->ioctbuf);
3575 	dhd_dma_buf_reset(dhd, &prot->host_bus_throughput_buf);
3576 	dhd_dma_buf_reset(dhd, &prot->hostts_req_buf);
3577 	dhd_dma_buf_reset(dhd, &prot->fw_trap_buf);
3578 	dhd_dma_buf_reset(dhd, &prot->host_scb_buf);
3579 
3580 	dhd_dma_buf_reset(dhd, &prot->h2d_ifrm_indx_wr_buf);
3581 
3582 	/* Rest all DMA-able buffers for DMAing H2D/D2H WR/RD indices */
3583 	dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_rd_buf);
3584 	dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_wr_buf);
3585 	dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_rd_buf);
3586 	dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_wr_buf);
3587 
3588 	prot->rx_metadata_offset = 0;
3589 	prot->tx_metadata_offset = 0;
3590 
3591 	prot->rxbufpost = 0;
3592 	prot->cur_event_bufs_posted = 0;
3593 	prot->cur_ioctlresp_bufs_posted = 0;
3594 
3595 	OSL_ATOMIC_INIT(dhd->osh, &prot->active_tx_count);
3596 	prot->data_seq_no = 0;
3597 	prot->ioctl_seq_no = 0;
3598 	prot->ioctl_state = 0;
3599 	prot->curr_ioctl_cmd = 0;
3600 	prot->ioctl_received = IOCTL_WAIT;
3601 	/* To catch any rollover issues fast, starting with higher ioctl_trans_id */
3602 	prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER;
3603 
3604 	/* dhd_flow_rings_init is located at dhd_bus_start,
3605 	 * so when stopping bus, flowrings shall be deleted
3606 	 */
3607 	if (dhd->flow_rings_inited) {
3608 		dhd_flow_rings_deinit(dhd);
3609 	}
3610 
3611 #ifdef DHD_HP2P
3612 	if (prot->d2hring_hp2p_txcpl) {
3613 		dhd_prot_ring_reset(dhd, prot->d2hring_hp2p_txcpl);
3614 	}
3615 	if (prot->d2hring_hp2p_rxcpl) {
3616 		dhd_prot_ring_reset(dhd, prot->d2hring_hp2p_rxcpl);
3617 	}
3618 #endif /* DHD_HP2P */
3619 
3620 	/* Reset PKTID map */
3621 	DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_ctrl_map);
3622 	DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_rx_map);
3623 	DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_tx_map);
3624 #ifdef IOCTLRESP_USE_CONSTMEM
3625 	DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, prot->pktid_map_handle_ioctl);
3626 #endif /* IOCTLRESP_USE_CONSTMEM */
3627 #ifdef DMAMAP_STATS
3628 	dhd->dma_stats.txdata = dhd->dma_stats.txdata_sz = 0;
3629 	dhd->dma_stats.rxdata = dhd->dma_stats.rxdata_sz = 0;
3630 #ifndef IOCTLRESP_USE_CONSTMEM
3631 	dhd->dma_stats.ioctl_rx = dhd->dma_stats.ioctl_rx_sz = 0;
3632 #endif /* IOCTLRESP_USE_CONSTMEM */
3633 	dhd->dma_stats.event_rx = dhd->dma_stats.event_rx_sz = 0;
3634 	dhd->dma_stats.info_rx = dhd->dma_stats.info_rx_sz = 0;
3635 	dhd->dma_stats.tsbuf_rx = dhd->dma_stats.tsbuf_rx_sz = 0;
3636 #endif /* DMAMAP_STATS */
3637 } /* dhd_prot_reset */
3638 
3639 #if defined(DHD_LB_RXP)
3640 #define DHD_LB_DISPATCH_RX_PROCESS(dhdp)	dhd_lb_dispatch_rx_process(dhdp)
3641 #else /* !DHD_LB_RXP */
3642 #define DHD_LB_DISPATCH_RX_PROCESS(dhdp)	do { /* noop */ } while (0)
3643 #endif /* !DHD_LB_RXP */
3644 
3645 #if defined(DHD_LB_RXC)
3646 #define DHD_LB_DISPATCH_RX_COMPL(dhdp)	dhd_lb_dispatch_rx_compl(dhdp)
3647 #else /* !DHD_LB_RXC */
3648 #define DHD_LB_DISPATCH_RX_COMPL(dhdp)	do { /* noop */ } while (0)
3649 #endif /* !DHD_LB_RXC */
3650 
3651 #if defined(DHD_LB_TXC)
3652 #define DHD_LB_DISPATCH_TX_COMPL(dhdp)	dhd_lb_dispatch_tx_compl(dhdp)
3653 #else /* !DHD_LB_TXC */
3654 #define DHD_LB_DISPATCH_TX_COMPL(dhdp)	do { /* noop */ } while (0)
3655 #endif /* !DHD_LB_TXC */
3656 
3657 #if defined(DHD_LB)
3658 /* DHD load balancing: deferral of work to another online CPU */
3659 /* DHD_LB_TXC DHD_LB_RXC DHD_LB_RXP dispatchers, in dhd_linux.c */
3660 extern void dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp);
3661 extern void dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp);
3662 extern void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp);
3663 extern void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx);
3664 
3665 #if defined(DHD_LB_RXP)
3666 /**
3667  * dhd_lb_dispatch_rx_process - load balance by dispatch Rx processing work
3668  * to other CPU cores
3669  */
3670 static INLINE void
dhd_lb_dispatch_rx_process(dhd_pub_t * dhdp)3671 dhd_lb_dispatch_rx_process(dhd_pub_t *dhdp)
3672 {
3673 	dhd_lb_rx_napi_dispatch(dhdp); /* dispatch rx_process_napi */
3674 }
3675 #endif /* DHD_LB_RXP */
3676 
3677 #if defined(DHD_LB_TXC)
3678 /**
3679  * dhd_lb_dispatch_tx_compl - load balance by dispatch Tx complition work
3680  * to other CPU cores
3681  */
3682 static INLINE void
dhd_lb_dispatch_tx_compl(dhd_pub_t * dhdp,uint16 ring_idx)3683 dhd_lb_dispatch_tx_compl(dhd_pub_t *dhdp, uint16 ring_idx)
3684 {
3685 	bcm_workq_prod_sync(&dhdp->prot->tx_compl_prod); /* flush WR index */
3686 	dhd_lb_tx_compl_dispatch(dhdp); /* dispatch tx_compl_tasklet */
3687 }
3688 
3689 /**
3690  * DHD load balanced tx completion tasklet handler, that will perform the
3691  * freeing of packets on the selected CPU. Packet pointers are delivered to
3692  * this tasklet via the tx complete workq.
3693  */
3694 void
dhd_lb_tx_compl_handler(unsigned long data)3695 dhd_lb_tx_compl_handler(unsigned long data)
3696 {
3697 	int elem_ix;
3698 	void *pkt, **elem;
3699 	dmaaddr_t pa;
3700 	uint32 pa_len;
3701 	dhd_pub_t *dhd = (dhd_pub_t *)data;
3702 	dhd_prot_t *prot = dhd->prot;
3703 	bcm_workq_t *workq = &prot->tx_compl_cons;
3704 	uint32 count = 0;
3705 
3706 	int curr_cpu;
3707 	curr_cpu = get_cpu();
3708 	put_cpu();
3709 
3710 	DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhd);
3711 
3712 	while (1) {
3713 		elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
3714 
3715 		if (elem_ix == BCM_RING_EMPTY) {
3716 			break;
3717 		}
3718 
3719 		elem = WORKQ_ELEMENT(void *, workq, elem_ix);
3720 		pkt = *elem;
3721 
3722 		DHD_INFO(("%s: tx_compl_cons pkt<%p>\n", __FUNCTION__, pkt));
3723 
3724 		OSL_PREFETCH(PKTTAG(pkt));
3725 		OSL_PREFETCH(pkt);
3726 
3727 		pa = DHD_PKTTAG_PA((dhd_pkttag_fr_t *)PKTTAG(pkt));
3728 		pa_len = DHD_PKTTAG_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt));
3729 
3730 		DMA_UNMAP(dhd->osh, pa, pa_len, DMA_RX, 0, 0);
3731 #if defined(BCMPCIE)
3732 		dhd_txcomplete(dhd, pkt, true);
3733 #ifdef DHD_4WAYM4_FAIL_DISCONNECT
3734 		dhd_eap_txcomplete(dhd, pkt, TRUE, txstatus->cmn_hdr.if_id);
3735 #endif /* DHD_4WAYM4_FAIL_DISCONNECT */
3736 #endif // endif
3737 
3738 		PKTFREE(dhd->osh, pkt, TRUE);
3739 		count++;
3740 	}
3741 
3742 	/* smp_wmb(); */
3743 	bcm_workq_cons_sync(workq);
3744 	DHD_LB_STATS_UPDATE_TXC_HISTO(dhd, count);
3745 }
3746 #endif /* DHD_LB_TXC */
3747 
3748 #if defined(DHD_LB_RXC)
3749 
3750 /**
3751  * dhd_lb_dispatch_rx_compl - load balance by dispatch rx complition work
3752  * to other CPU cores
3753  */
3754 static INLINE void
dhd_lb_dispatch_rx_compl(dhd_pub_t * dhdp)3755 dhd_lb_dispatch_rx_compl(dhd_pub_t *dhdp)
3756 {
3757 	dhd_prot_t *prot = dhdp->prot;
3758 	/* Schedule the takslet only if we have to */
3759 	if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) {
3760 		/* flush WR index */
3761 		bcm_workq_prod_sync(&dhdp->prot->rx_compl_prod);
3762 		dhd_lb_rx_compl_dispatch(dhdp); /* dispatch rx_compl_tasklet */
3763 	}
3764 }
3765 
3766 void
dhd_lb_rx_compl_handler(unsigned long data)3767 dhd_lb_rx_compl_handler(unsigned long data)
3768 {
3769 	dhd_pub_t *dhd = (dhd_pub_t *)data;
3770 	bcm_workq_t *workq = &dhd->prot->rx_compl_cons;
3771 
3772 	DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhd);
3773 
3774 	dhd_msgbuf_rxbuf_post(dhd, TRUE); /* re-use pktids */
3775 	bcm_workq_cons_sync(workq);
3776 }
3777 #endif /* DHD_LB_RXC */
3778 #endif /* DHD_LB */
3779 
3780 void
dhd_prot_rx_dataoffset(dhd_pub_t * dhd,uint32 rx_offset)3781 dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 rx_offset)
3782 {
3783 	dhd_prot_t *prot = dhd->prot;
3784 	prot->rx_dataoffset = rx_offset;
3785 }
3786 
3787 static int
dhd_check_create_info_rings(dhd_pub_t * dhd)3788 dhd_check_create_info_rings(dhd_pub_t *dhd)
3789 {
3790 	dhd_prot_t *prot = dhd->prot;
3791 	int ret = BCME_ERROR;
3792 	uint16 ringid;
3793 
3794 	{
3795 		/* dongle may increase max_submission_rings so keep
3796 		 * ringid at end of dynamic rings
3797 		 */
3798 		ringid = dhd->bus->max_tx_flowrings +
3799 			(dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) +
3800 			BCMPCIE_H2D_COMMON_MSGRINGS;
3801 	}
3802 
3803 	if (prot->d2hring_info_cpln) {
3804 		/* for d2hring re-entry case, clear inited flag */
3805 		prot->d2hring_info_cpln->inited = FALSE;
3806 	}
3807 
3808 	if (prot->h2dring_info_subn && prot->d2hring_info_cpln) {
3809 		return BCME_OK; /* dhd_prot_init rentry after a dhd_prot_reset */
3810 	}
3811 
3812 	if (prot->h2dring_info_subn == NULL) {
3813 		prot->h2dring_info_subn = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
3814 
3815 		if (prot->h2dring_info_subn == NULL) {
3816 			DHD_ERROR(("%s: couldn't alloc memory for h2dring_info_subn\n",
3817 				__FUNCTION__));
3818 			return BCME_NOMEM;
3819 		}
3820 
3821 		DHD_INFO(("%s: about to create debug submit ring\n", __FUNCTION__));
3822 		ret = dhd_prot_ring_attach(dhd, prot->h2dring_info_subn, "h2dinfo",
3823 			H2DRING_DYNAMIC_INFO_MAX_ITEM, H2DRING_INFO_BUFPOST_ITEMSIZE,
3824 			ringid);
3825 		if (ret != BCME_OK) {
3826 			DHD_ERROR(("%s: couldn't alloc resources for dbg submit ring\n",
3827 				__FUNCTION__));
3828 			goto err;
3829 		}
3830 	}
3831 
3832 	if (prot->d2hring_info_cpln == NULL) {
3833 		prot->d2hring_info_cpln = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
3834 
3835 		if (prot->d2hring_info_cpln == NULL) {
3836 			DHD_ERROR(("%s: couldn't alloc memory for h2dring_info_subn\n",
3837 				__FUNCTION__));
3838 			return BCME_NOMEM;
3839 		}
3840 
3841 		/* create the debug info completion ring next to debug info submit ring
3842 		* ringid = id next to debug info submit ring
3843 		*/
3844 		ringid = ringid + 1;
3845 
3846 		DHD_INFO(("%s: about to create debug cpl ring\n", __FUNCTION__));
3847 		ret = dhd_prot_ring_attach(dhd, prot->d2hring_info_cpln, "d2hinfo",
3848 			D2HRING_DYNAMIC_INFO_MAX_ITEM, D2HRING_INFO_BUFCMPLT_ITEMSIZE,
3849 			ringid);
3850 		if (ret != BCME_OK) {
3851 			DHD_ERROR(("%s: couldn't alloc resources for dbg cpl ring\n",
3852 				__FUNCTION__));
3853 			dhd_prot_ring_detach(dhd, prot->h2dring_info_subn);
3854 			goto err;
3855 		}
3856 	}
3857 
3858 	return ret;
3859 err:
3860 	MFREE(prot->osh, prot->h2dring_info_subn, sizeof(msgbuf_ring_t));
3861 	prot->h2dring_info_subn = NULL;
3862 
3863 	if (prot->d2hring_info_cpln) {
3864 		MFREE(prot->osh, prot->d2hring_info_cpln, sizeof(msgbuf_ring_t));
3865 		prot->d2hring_info_cpln = NULL;
3866 	}
3867 	return ret;
3868 } /* dhd_check_create_info_rings */
3869 
3870 int
dhd_prot_init_info_rings(dhd_pub_t * dhd)3871 dhd_prot_init_info_rings(dhd_pub_t *dhd)
3872 {
3873 	dhd_prot_t *prot = dhd->prot;
3874 	int ret = BCME_OK;
3875 
3876 	if ((ret = dhd_check_create_info_rings(dhd)) != BCME_OK) {
3877 		DHD_ERROR(("%s: info rings aren't created! \n",
3878 			__FUNCTION__));
3879 		return ret;
3880 	}
3881 
3882 	if ((prot->d2hring_info_cpln->inited) || (prot->d2hring_info_cpln->create_pending)) {
3883 		DHD_INFO(("Info completion ring was created!\n"));
3884 		return ret;
3885 	}
3886 
3887 	DHD_TRACE(("trying to send create d2h info ring: id %d\n", prot->d2hring_info_cpln->idx));
3888 	ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_info_cpln,
3889 		BCMPCIE_D2H_RING_TYPE_DBGBUF_CPL, DHD_D2H_DBGRING_REQ_PKTID);
3890 	if (ret != BCME_OK)
3891 		return ret;
3892 
3893 	prot->h2dring_info_subn->seqnum = H2D_EPOCH_INIT_VAL;
3894 	prot->h2dring_info_subn->current_phase = 0;
3895 	prot->d2hring_info_cpln->seqnum = D2H_EPOCH_INIT_VAL;
3896 	prot->d2hring_info_cpln->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
3897 
3898 	DHD_TRACE(("trying to send create h2d info ring id %d\n", prot->h2dring_info_subn->idx));
3899 	prot->h2dring_info_subn->n_completion_ids = 1;
3900 	prot->h2dring_info_subn->compeltion_ring_ids[0] = prot->d2hring_info_cpln->idx;
3901 
3902 	ret = dhd_send_h2d_ringcreate(dhd, prot->h2dring_info_subn,
3903 		BCMPCIE_H2D_RING_TYPE_DBGBUF_SUBMIT, DHD_H2D_DBGRING_REQ_PKTID);
3904 
3905 	/* Note that there is no way to delete d2h or h2d ring deletion incase either fails,
3906 	 * so can not cleanup if one ring was created while the other failed
3907 	 */
3908 	return ret;
3909 } /* dhd_prot_init_info_rings */
3910 
3911 static void
dhd_prot_detach_info_rings(dhd_pub_t * dhd)3912 dhd_prot_detach_info_rings(dhd_pub_t *dhd)
3913 {
3914 	if (dhd->prot->h2dring_info_subn) {
3915 		dhd_prot_ring_detach(dhd, dhd->prot->h2dring_info_subn);
3916 		MFREE(dhd->prot->osh, dhd->prot->h2dring_info_subn, sizeof(msgbuf_ring_t));
3917 		dhd->prot->h2dring_info_subn = NULL;
3918 	}
3919 	if (dhd->prot->d2hring_info_cpln) {
3920 		dhd_prot_ring_detach(dhd, dhd->prot->d2hring_info_cpln);
3921 		MFREE(dhd->prot->osh, dhd->prot->d2hring_info_cpln, sizeof(msgbuf_ring_t));
3922 		dhd->prot->d2hring_info_cpln = NULL;
3923 	}
3924 }
3925 
3926 #ifdef DHD_HP2P
3927 static int
dhd_check_create_hp2p_rings(dhd_pub_t * dhd)3928 dhd_check_create_hp2p_rings(dhd_pub_t *dhd)
3929 {
3930 	dhd_prot_t *prot = dhd->prot;
3931 	int ret = BCME_ERROR;
3932 	uint16 ringid;
3933 
3934 	/* Last 2 dynamic ring indices are used by hp2p rings */
3935 	ringid = dhd->bus->max_submission_rings + dhd->bus->max_completion_rings - 2;
3936 
3937 	if (prot->d2hring_hp2p_txcpl == NULL) {
3938 		prot->d2hring_hp2p_txcpl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
3939 
3940 		if (prot->d2hring_hp2p_txcpl == NULL) {
3941 			DHD_ERROR(("%s: couldn't alloc memory for d2hring_hp2p_txcpl\n",
3942 				__FUNCTION__));
3943 			return BCME_NOMEM;
3944 		}
3945 
3946 		DHD_INFO(("%s: about to create hp2p txcpl ring\n", __FUNCTION__));
3947 		ret = dhd_prot_ring_attach(dhd, prot->d2hring_hp2p_txcpl, "d2hhp2p_txcpl",
3948 			dhd_bus_get_hp2p_ring_max_size(dhd->bus, TRUE), D2HRING_TXCMPLT_ITEMSIZE,
3949 			ringid);
3950 		if (ret != BCME_OK) {
3951 			DHD_ERROR(("%s: couldn't alloc resources for hp2p txcpl ring\n",
3952 				__FUNCTION__));
3953 			goto err2;
3954 		}
3955 	} else {
3956 		/* for re-entry case, clear inited flag */
3957 		prot->d2hring_hp2p_txcpl->inited = FALSE;
3958 	}
3959 	if (prot->d2hring_hp2p_rxcpl == NULL) {
3960 		prot->d2hring_hp2p_rxcpl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
3961 
3962 		if (prot->d2hring_hp2p_rxcpl == NULL) {
3963 			DHD_ERROR(("%s: couldn't alloc memory for d2hring_hp2p_rxcpl\n",
3964 				__FUNCTION__));
3965 			return BCME_NOMEM;
3966 		}
3967 
3968 		/* create the hp2p rx completion ring next to hp2p tx compl ring
3969 		* ringid = id next to hp2p tx compl ring
3970 		*/
3971 		ringid = ringid + 1;
3972 
3973 		DHD_INFO(("%s: about to create hp2p rxcpl ring\n", __FUNCTION__));
3974 		ret = dhd_prot_ring_attach(dhd, prot->d2hring_hp2p_rxcpl, "d2hhp2p_rxcpl",
3975 			dhd_bus_get_hp2p_ring_max_size(dhd->bus, FALSE), D2HRING_RXCMPLT_ITEMSIZE,
3976 			ringid);
3977 		if (ret != BCME_OK) {
3978 			DHD_ERROR(("%s: couldn't alloc resources for hp2p rxcpl ring\n",
3979 				__FUNCTION__));
3980 			goto err1;
3981 		}
3982 	} else {
3983 		/* for re-entry case, clear inited flag */
3984 		prot->d2hring_hp2p_rxcpl->inited = FALSE;
3985 	}
3986 
3987 	return ret;
3988 err1:
3989 	MFREE(prot->osh, prot->d2hring_hp2p_rxcpl, sizeof(msgbuf_ring_t));
3990 	prot->d2hring_hp2p_rxcpl = NULL;
3991 
3992 err2:
3993 	MFREE(prot->osh, prot->d2hring_hp2p_txcpl, sizeof(msgbuf_ring_t));
3994 	prot->d2hring_hp2p_txcpl = NULL;
3995 	return ret;
3996 } /* dhd_check_create_hp2p_rings */
3997 
3998 int
dhd_prot_init_hp2p_rings(dhd_pub_t * dhd)3999 dhd_prot_init_hp2p_rings(dhd_pub_t *dhd)
4000 {
4001 	dhd_prot_t *prot = dhd->prot;
4002 	int ret = BCME_OK;
4003 
4004 	dhd->hp2p_ring_active = FALSE;
4005 
4006 	if ((ret = dhd_check_create_hp2p_rings(dhd)) != BCME_OK) {
4007 		DHD_ERROR(("%s: hp2p rings aren't created! \n",
4008 			__FUNCTION__));
4009 		return ret;
4010 	}
4011 
4012 	if ((prot->d2hring_hp2p_txcpl->inited) || (prot->d2hring_hp2p_txcpl->create_pending)) {
4013 		DHD_INFO(("hp2p tx completion ring was created!\n"));
4014 		return ret;
4015 	}
4016 
4017 	DHD_TRACE(("trying to send create d2h hp2p txcpl ring: id %d\n",
4018 		prot->d2hring_hp2p_txcpl->idx));
4019 	ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_hp2p_txcpl,
4020 		BCMPCIE_D2H_RING_TYPE_HPP_TX_CPL, DHD_D2H_HPPRING_TXREQ_PKTID);
4021 	if (ret != BCME_OK)
4022 		return ret;
4023 
4024 	prot->d2hring_hp2p_txcpl->seqnum = D2H_EPOCH_INIT_VAL;
4025 	prot->d2hring_hp2p_txcpl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4026 
4027 	if ((prot->d2hring_hp2p_rxcpl->inited) || (prot->d2hring_hp2p_rxcpl->create_pending)) {
4028 		DHD_INFO(("hp2p rx completion ring was created!\n"));
4029 		return ret;
4030 	}
4031 
4032 	DHD_TRACE(("trying to send create d2h hp2p rxcpl ring: id %d\n",
4033 		prot->d2hring_hp2p_rxcpl->idx));
4034 	ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_hp2p_rxcpl,
4035 		BCMPCIE_D2H_RING_TYPE_HPP_RX_CPL, DHD_D2H_HPPRING_RXREQ_PKTID);
4036 	if (ret != BCME_OK)
4037 		return ret;
4038 
4039 	prot->d2hring_hp2p_rxcpl->seqnum = D2H_EPOCH_INIT_VAL;
4040 	prot->d2hring_hp2p_rxcpl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4041 
4042 	/* Note that there is no way to delete d2h or h2d ring deletion incase either fails,
4043 	 * so can not cleanup if one ring was created while the other failed
4044 	 */
4045 	return BCME_OK;
4046 } /* dhd_prot_init_hp2p_rings */
4047 
4048 static void
dhd_prot_detach_hp2p_rings(dhd_pub_t * dhd)4049 dhd_prot_detach_hp2p_rings(dhd_pub_t *dhd)
4050 {
4051 	if (dhd->prot->d2hring_hp2p_txcpl) {
4052 		dhd_prot_ring_detach(dhd, dhd->prot->d2hring_hp2p_txcpl);
4053 		MFREE(dhd->prot->osh, dhd->prot->d2hring_hp2p_txcpl, sizeof(msgbuf_ring_t));
4054 		dhd->prot->d2hring_hp2p_txcpl = NULL;
4055 	}
4056 	if (dhd->prot->d2hring_hp2p_rxcpl) {
4057 		dhd_prot_ring_detach(dhd, dhd->prot->d2hring_hp2p_rxcpl);
4058 		MFREE(dhd->prot->osh, dhd->prot->d2hring_hp2p_rxcpl, sizeof(msgbuf_ring_t));
4059 		dhd->prot->d2hring_hp2p_rxcpl = NULL;
4060 	}
4061 }
4062 #endif /* DHD_HP2P */
4063 
4064 #ifdef EWP_EDL
4065 static int
dhd_check_create_edl_rings(dhd_pub_t * dhd)4066 dhd_check_create_edl_rings(dhd_pub_t *dhd)
4067 {
4068 	dhd_prot_t *prot = dhd->prot;
4069 	int ret = BCME_ERROR;
4070 	uint16 ringid;
4071 
4072 	{
4073 		/* dongle may increase max_submission_rings so keep
4074 		 * ringid at end of dynamic rings (re-use info ring cpl ring id)
4075 		 */
4076 		ringid = dhd->bus->max_tx_flowrings +
4077 			(dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) +
4078 			BCMPCIE_H2D_COMMON_MSGRINGS + 1;
4079 	}
4080 
4081 	if (prot->d2hring_edl) {
4082 		prot->d2hring_edl->inited = FALSE;
4083 		return BCME_OK; /* dhd_prot_init rentry after a dhd_prot_reset */
4084 	}
4085 
4086 	if (prot->d2hring_edl == NULL) {
4087 		prot->d2hring_edl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
4088 
4089 		if (prot->d2hring_edl == NULL) {
4090 			DHD_ERROR(("%s: couldn't alloc memory for d2hring_edl\n",
4091 				__FUNCTION__));
4092 			return BCME_NOMEM;
4093 		}
4094 
4095 		DHD_ERROR(("%s: about to create EDL ring, ringid: %u \n", __FUNCTION__,
4096 			ringid));
4097 		ret = dhd_prot_ring_attach(dhd, prot->d2hring_edl, "d2hring_edl",
4098 			D2HRING_EDL_MAX_ITEM, D2HRING_EDL_ITEMSIZE,
4099 			ringid);
4100 		if (ret != BCME_OK) {
4101 			DHD_ERROR(("%s: couldn't alloc resources for EDL ring\n",
4102 				__FUNCTION__));
4103 			goto err;
4104 		}
4105 	}
4106 
4107 	return ret;
4108 err:
4109 	MFREE(prot->osh, prot->d2hring_edl, sizeof(msgbuf_ring_t));
4110 	prot->d2hring_edl = NULL;
4111 
4112 	return ret;
4113 } /* dhd_check_create_btlog_rings */
4114 
4115 int
dhd_prot_init_edl_rings(dhd_pub_t * dhd)4116 dhd_prot_init_edl_rings(dhd_pub_t *dhd)
4117 {
4118 	dhd_prot_t *prot = dhd->prot;
4119 	int ret = BCME_ERROR;
4120 
4121 	if ((ret = dhd_check_create_edl_rings(dhd)) != BCME_OK) {
4122 		DHD_ERROR(("%s: EDL rings aren't created! \n",
4123 			__FUNCTION__));
4124 		return ret;
4125 	}
4126 
4127 	if ((prot->d2hring_edl->inited) || (prot->d2hring_edl->create_pending)) {
4128 		DHD_INFO(("EDL completion ring was created!\n"));
4129 		return ret;
4130 	}
4131 
4132 	DHD_ERROR(("trying to send create d2h edl ring: idx %d\n", prot->d2hring_edl->idx));
4133 	ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_edl,
4134 		BCMPCIE_D2H_RING_TYPE_EDL, DHD_D2H_DBGRING_REQ_PKTID);
4135 	if (ret != BCME_OK)
4136 		return ret;
4137 
4138 	prot->d2hring_edl->seqnum = D2H_EPOCH_INIT_VAL;
4139 	prot->d2hring_edl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4140 
4141 	return BCME_OK;
4142 } /* dhd_prot_init_btlog_rings */
4143 
4144 static void
dhd_prot_detach_edl_rings(dhd_pub_t * dhd)4145 dhd_prot_detach_edl_rings(dhd_pub_t *dhd)
4146 {
4147 	if (dhd->prot->d2hring_edl) {
4148 		dhd_prot_ring_detach(dhd, dhd->prot->d2hring_edl);
4149 		MFREE(dhd->prot->osh, dhd->prot->d2hring_edl, sizeof(msgbuf_ring_t));
4150 		dhd->prot->d2hring_edl = NULL;
4151 	}
4152 }
4153 #endif	/* EWP_EDL */
4154 
4155 /**
4156  * Initialize protocol: sync w/dongle state.
4157  * Sets dongle media info (iswl, drv_version, mac address).
4158  */
dhd_sync_with_dongle(dhd_pub_t * dhd)4159 int dhd_sync_with_dongle(dhd_pub_t *dhd)
4160 {
4161 	int ret = 0;
4162 	wlc_rev_info_t revinfo;
4163 	char buf[128];
4164 	dhd_prot_t *prot = dhd->prot;
4165 
4166 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4167 
4168 	dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
4169 
4170 	/* Post ts buffer after shim layer is attached */
4171 	ret = dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
4172 
4173 #ifdef DHD_FW_COREDUMP
4174 	/* Check the memdump capability */
4175 	dhd_get_memdump_info(dhd);
4176 #endif /* DHD_FW_COREDUMP */
4177 #ifdef BCMASSERT_LOG
4178 	dhd_get_assert_info(dhd);
4179 #endif /* BCMASSERT_LOG */
4180 
4181 	/* Get the device rev info */
4182 	memset(&revinfo, 0, sizeof(revinfo));
4183 	ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0);
4184 	if (ret < 0) {
4185 		DHD_ERROR(("%s: GET revinfo FAILED\n", __FUNCTION__));
4186 		goto done;
4187 	}
4188 	DHD_ERROR(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__,
4189 		revinfo.deviceid, revinfo.vendorid, revinfo.chipnum));
4190 
4191 	/* Get the RxBuf post size */
4192 	memset(buf, 0, sizeof(buf));
4193 	bcm_mkiovar("rxbufpost_sz", NULL, 0, buf, sizeof(buf));
4194 	ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0);
4195 	if (ret < 0) {
4196 		DHD_ERROR(("%s: GET RxBuf post FAILED, default to %d\n",
4197 			__FUNCTION__, DHD_FLOWRING_RX_BUFPOST_PKTSZ));
4198 		prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
4199 	} else {
4200 		memcpy_s(&(prot->rxbufpost_sz), sizeof(prot->rxbufpost_sz), buf, sizeof(uint16));
4201 		if (prot->rxbufpost_sz > DHD_FLOWRING_RX_BUFPOST_PKTSZ_MAX) {
4202 			DHD_ERROR(("%s: Invalid RxBuf post size : %d, default to %d\n",
4203 				__FUNCTION__, prot->rxbufpost_sz, DHD_FLOWRING_RX_BUFPOST_PKTSZ));
4204 			prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
4205 		} else {
4206 			DHD_ERROR(("%s: RxBuf Post : %d\n", __FUNCTION__, prot->rxbufpost_sz));
4207 		}
4208 	}
4209 
4210 	/* Post buffers for packet reception */
4211 	dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
4212 
4213 	DHD_SSSR_DUMP_INIT(dhd);
4214 
4215 	dhd_process_cid_mac(dhd, TRUE);
4216 	ret = dhd_preinit_ioctls(dhd);
4217 	dhd_process_cid_mac(dhd, FALSE);
4218 
4219 #if defined(DHD_H2D_LOG_TIME_SYNC)
4220 #ifdef DHD_HP2P
4221 	if (FW_SUPPORTED(dhd, h2dlogts) || dhd->hp2p_capable)
4222 #else
4223 	if (FW_SUPPORTED(dhd, h2dlogts))
4224 #endif // endif
4225 	{
4226 #ifdef DHD_HP2P
4227 		if (dhd->hp2p_enable) {
4228 			dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH / 40;
4229 		} else {
4230 			dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH;
4231 		}
4232 #else
4233 		dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH;
4234 #endif // endif
4235 		dhd->bus->dhd_rte_time_sync_count = OSL_SYSUPTIME_US();
4236 		/* This is during initialization. */
4237 		dhd_h2d_log_time_sync(dhd);
4238 	} else {
4239 		dhd->dhd_rte_time_sync_ms = 0;
4240 	}
4241 #endif /* DHD_H2D_LOG_TIME_SYNC || DHD_HP2P */
4242 	/* Always assumes wl for now */
4243 	dhd->iswl = TRUE;
4244 done:
4245 	return ret;
4246 } /* dhd_sync_with_dongle */
4247 
4248 #define DHD_DBG_SHOW_METADATA	0
4249 
4250 #if DHD_DBG_SHOW_METADATA
4251 static void BCMFASTPATH
dhd_prot_print_metadata(dhd_pub_t * dhd,void * ptr,int len)4252 dhd_prot_print_metadata(dhd_pub_t *dhd, void *ptr, int len)
4253 {
4254 	uint8 tlv_t;
4255 	uint8 tlv_l;
4256 	uint8 *tlv_v = (uint8 *)ptr;
4257 
4258 	if (len <= BCMPCIE_D2H_METADATA_HDRLEN)
4259 		return;
4260 
4261 	len -= BCMPCIE_D2H_METADATA_HDRLEN;
4262 	tlv_v += BCMPCIE_D2H_METADATA_HDRLEN;
4263 
4264 	while (len > TLV_HDR_LEN) {
4265 		tlv_t = tlv_v[TLV_TAG_OFF];
4266 		tlv_l = tlv_v[TLV_LEN_OFF];
4267 
4268 		len -= TLV_HDR_LEN;
4269 		tlv_v += TLV_HDR_LEN;
4270 		if (len < tlv_l)
4271 			break;
4272 		if ((tlv_t == 0) || (tlv_t == WLFC_CTL_TYPE_FILLER))
4273 			break;
4274 
4275 		switch (tlv_t) {
4276 		case WLFC_CTL_TYPE_TXSTATUS: {
4277 			uint32 txs;
4278 			memcpy(&txs, tlv_v, sizeof(uint32));
4279 			if (tlv_l < (sizeof(wl_txstatus_additional_info_t) + sizeof(uint32))) {
4280 				printf("METADATA TX_STATUS: %08x\n", txs);
4281 			} else {
4282 				wl_txstatus_additional_info_t tx_add_info;
4283 				memcpy(&tx_add_info, tlv_v + sizeof(uint32),
4284 					sizeof(wl_txstatus_additional_info_t));
4285 				printf("METADATA TX_STATUS: %08x WLFCTS[%04x | %08x - %08x - %08x]"
4286 					" rate = %08x tries = %d - %d\n", txs,
4287 					tx_add_info.seq, tx_add_info.entry_ts,
4288 					tx_add_info.enq_ts, tx_add_info.last_ts,
4289 					tx_add_info.rspec, tx_add_info.rts_cnt,
4290 					tx_add_info.tx_cnt);
4291 			}
4292 			} break;
4293 
4294 		case WLFC_CTL_TYPE_RSSI: {
4295 			if (tlv_l == 1)
4296 				printf("METADATA RX_RSSI: rssi = %d\n", *tlv_v);
4297 			else
4298 				printf("METADATA RX_RSSI[%04x]: rssi = %d snr = %d\n",
4299 					(*(tlv_v + 3) << 8) | *(tlv_v + 2),
4300 					(int8)(*tlv_v), *(tlv_v + 1));
4301 			} break;
4302 
4303 		case WLFC_CTL_TYPE_FIFO_CREDITBACK:
4304 			bcm_print_bytes("METADATA FIFO_CREDITBACK", tlv_v, tlv_l);
4305 			break;
4306 
4307 		case WLFC_CTL_TYPE_TX_ENTRY_STAMP:
4308 			bcm_print_bytes("METADATA TX_ENTRY", tlv_v, tlv_l);
4309 			break;
4310 
4311 		case WLFC_CTL_TYPE_RX_STAMP: {
4312 			struct {
4313 				uint32 rspec;
4314 				uint32 bus_time;
4315 				uint32 wlan_time;
4316 			} rx_tmstamp;
4317 			memcpy(&rx_tmstamp, tlv_v, sizeof(rx_tmstamp));
4318 			printf("METADATA RX TIMESTMAP: WLFCTS[%08x - %08x] rate = %08x\n",
4319 				rx_tmstamp.wlan_time, rx_tmstamp.bus_time, rx_tmstamp.rspec);
4320 			} break;
4321 
4322 		case WLFC_CTL_TYPE_TRANS_ID:
4323 			bcm_print_bytes("METADATA TRANS_ID", tlv_v, tlv_l);
4324 			break;
4325 
4326 		case WLFC_CTL_TYPE_COMP_TXSTATUS:
4327 			bcm_print_bytes("METADATA COMP_TXSTATUS", tlv_v, tlv_l);
4328 			break;
4329 
4330 		default:
4331 			bcm_print_bytes("METADATA UNKNOWN", tlv_v, tlv_l);
4332 			break;
4333 		}
4334 
4335 		len -= tlv_l;
4336 		tlv_v += tlv_l;
4337 	}
4338 }
4339 #endif /* DHD_DBG_SHOW_METADATA */
4340 
4341 static INLINE void BCMFASTPATH
dhd_prot_packet_free(dhd_pub_t * dhd,void * pkt,uint8 pkttype,bool send)4342 dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send)
4343 {
4344 	if (pkt) {
4345 		if (pkttype == PKTTYPE_IOCTL_RX ||
4346 			pkttype == PKTTYPE_EVENT_RX ||
4347 			pkttype == PKTTYPE_INFO_RX ||
4348 			pkttype == PKTTYPE_TSBUF_RX) {
4349 #ifdef DHD_USE_STATIC_CTRLBUF
4350 			PKTFREE_STATIC(dhd->osh, pkt, send);
4351 #else
4352 			PKTFREE(dhd->osh, pkt, send);
4353 #endif /* DHD_USE_STATIC_CTRLBUF */
4354 		} else {
4355 			PKTFREE(dhd->osh, pkt, send);
4356 		}
4357 	}
4358 }
4359 
4360 /**
4361  * dhd_prot_packet_get should be called only for items having pktid_ctrl_map handle
4362  * and all the bottom most functions like dhd_pktid_map_free hold separate DHD_PKTID_LOCK
4363  * to ensure thread safety, so no need to hold any locks for this function
4364  */
4365 static INLINE void * BCMFASTPATH
dhd_prot_packet_get(dhd_pub_t * dhd,uint32 pktid,uint8 pkttype,bool free_pktid)4366 dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype, bool free_pktid)
4367 {
4368 	void *PKTBUF;
4369 	dmaaddr_t pa;
4370 	uint32 len;
4371 	void *dmah;
4372 	void *secdma;
4373 
4374 #ifdef DHD_PCIE_PKTID
4375 	if (free_pktid) {
4376 		PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map,
4377 			pktid, pa, len, dmah, secdma, pkttype);
4378 	} else {
4379 		PKTBUF = DHD_PKTID_TO_NATIVE_RSV(dhd, dhd->prot->pktid_ctrl_map,
4380 			pktid, pa, len, dmah, secdma, pkttype);
4381 	}
4382 #else
4383 	PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map, pktid, pa,
4384 		len, dmah, secdma, pkttype);
4385 #endif /* DHD_PCIE_PKTID */
4386 	if (PKTBUF) {
4387 		{
4388 			if (SECURE_DMA_ENAB(dhd->osh))
4389 				SECURE_DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah,
4390 					secdma, 0);
4391 			else
4392 				DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
4393 #ifdef DMAMAP_STATS
4394 			switch (pkttype) {
4395 #ifndef IOCTLRESP_USE_CONSTMEM
4396 				case PKTTYPE_IOCTL_RX:
4397 					dhd->dma_stats.ioctl_rx--;
4398 					dhd->dma_stats.ioctl_rx_sz -= len;
4399 					break;
4400 #endif /* IOCTLRESP_USE_CONSTMEM */
4401 				case PKTTYPE_EVENT_RX:
4402 					dhd->dma_stats.event_rx--;
4403 					dhd->dma_stats.event_rx_sz -= len;
4404 					break;
4405 				case PKTTYPE_INFO_RX:
4406 					dhd->dma_stats.info_rx--;
4407 					dhd->dma_stats.info_rx_sz -= len;
4408 					break;
4409 				case PKTTYPE_TSBUF_RX:
4410 					dhd->dma_stats.tsbuf_rx--;
4411 					dhd->dma_stats.tsbuf_rx_sz -= len;
4412 					break;
4413 			}
4414 #endif /* DMAMAP_STATS */
4415 		}
4416 	}
4417 
4418 	return PKTBUF;
4419 }
4420 
4421 #ifdef IOCTLRESP_USE_CONSTMEM
4422 static INLINE void BCMFASTPATH
dhd_prot_ioctl_ret_buffer_get(dhd_pub_t * dhd,uint32 pktid,dhd_dma_buf_t * retbuf)4423 dhd_prot_ioctl_ret_buffer_get(dhd_pub_t *dhd, uint32 pktid, dhd_dma_buf_t *retbuf)
4424 {
4425 	memset(retbuf, 0, sizeof(dhd_dma_buf_t));
4426 	retbuf->va = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle_ioctl, pktid,
4427 		retbuf->pa, retbuf->len, retbuf->dmah, retbuf->secdma, PKTTYPE_IOCTL_RX);
4428 
4429 	return;
4430 }
4431 #endif // endif
4432 
4433 static void BCMFASTPATH
dhd_msgbuf_rxbuf_post(dhd_pub_t * dhd,bool use_rsv_pktid)4434 dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid)
4435 {
4436 	dhd_prot_t *prot = dhd->prot;
4437 	int16 fillbufs;
4438 	uint16 cnt = 256;
4439 	int retcount = 0;
4440 
4441 	fillbufs = prot->max_rxbufpost - prot->rxbufpost;
4442 	while (fillbufs >= RX_BUF_BURST) {
4443 		cnt--;
4444 		if (cnt == 0) {
4445 			/* find a better way to reschedule rx buf post if space not available */
4446 			DHD_ERROR(("h2d rx post ring not available to post host buffers \n"));
4447 			DHD_ERROR(("Current posted host buf count %d \n", prot->rxbufpost));
4448 			break;
4449 		}
4450 
4451 		/* Post in a burst of 32 buffers at a time */
4452 		fillbufs = MIN(fillbufs, RX_BUF_BURST);
4453 
4454 		/* Post buffers */
4455 		retcount = dhd_prot_rxbuf_post(dhd, fillbufs, use_rsv_pktid);
4456 
4457 		if (retcount >= 0) {
4458 			prot->rxbufpost += (uint16)retcount;
4459 #ifdef DHD_LB_RXC
4460 			/* dhd_prot_rxbuf_post returns the number of buffers posted */
4461 			DHD_LB_STATS_UPDATE_RXC_HISTO(dhd, retcount);
4462 #endif /* DHD_LB_RXC */
4463 			/* how many more to post */
4464 			fillbufs = prot->max_rxbufpost - prot->rxbufpost;
4465 		} else {
4466 			/* Make sure we don't run loop any further */
4467 			fillbufs = 0;
4468 		}
4469 	}
4470 }
4471 
4472 /** Post 'count' no of rx buffers to dongle */
4473 static int BCMFASTPATH
dhd_prot_rxbuf_post(dhd_pub_t * dhd,uint16 count,bool use_rsv_pktid)4474 dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid)
4475 {
4476 	void *p, **pktbuf;
4477 	uint8 *rxbuf_post_tmp;
4478 	host_rxbuf_post_t *rxbuf_post;
4479 	void *msg_start;
4480 	dmaaddr_t pa, *pktbuf_pa;
4481 	uint32 *pktlen;
4482 	uint16 i = 0, alloced = 0;
4483 	unsigned long flags;
4484 	uint32 pktid;
4485 	dhd_prot_t *prot = dhd->prot;
4486 	msgbuf_ring_t *ring = &prot->h2dring_rxp_subn;
4487 	void *lcl_buf;
4488 	uint16 lcl_buf_size;
4489 	uint16 pktsz = prot->rxbufpost_sz;
4490 
4491 	/* allocate a local buffer to store pkt buffer va, pa and length */
4492 	lcl_buf_size = (sizeof(void *) + sizeof(dmaaddr_t) + sizeof(uint32)) *
4493 		RX_BUF_BURST;
4494 	lcl_buf = MALLOC(dhd->osh, lcl_buf_size);
4495 	if (!lcl_buf) {
4496 		DHD_ERROR(("%s: local scratch buffer allocation failed\n", __FUNCTION__));
4497 		return 0;
4498 	}
4499 	pktbuf = lcl_buf;
4500 	pktbuf_pa = (dmaaddr_t *)((uint8 *)pktbuf + sizeof(void *) * RX_BUF_BURST);
4501 	pktlen = (uint32 *)((uint8 *)pktbuf_pa + sizeof(dmaaddr_t) * RX_BUF_BURST);
4502 
4503 	for (i = 0; i < count; i++) {
4504 		if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) {
4505 			DHD_ERROR(("%s:%d: PKTGET for rxbuf failed\n", __FUNCTION__, __LINE__));
4506 			dhd->rx_pktgetfail++;
4507 			break;
4508 		}
4509 
4510 		pktlen[i] = PKTLEN(dhd->osh, p);
4511 		if (SECURE_DMA_ENAB(dhd->osh)) {
4512 			pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen[i],
4513 				DMA_RX, p, 0, ring->dma_buf.secdma, 0);
4514 		}
4515 #ifndef BCM_SECURE_DMA
4516 		else
4517 			pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen[i], DMA_RX, p, 0);
4518 #endif /* #ifndef BCM_SECURE_DMA */
4519 
4520 		if (PHYSADDRISZERO(pa)) {
4521 			PKTFREE(dhd->osh, p, FALSE);
4522 			DHD_ERROR(("Invalid phyaddr 0\n"));
4523 			ASSERT(0);
4524 			break;
4525 		}
4526 #ifdef DMAMAP_STATS
4527 		dhd->dma_stats.rxdata++;
4528 		dhd->dma_stats.rxdata_sz += pktlen[i];
4529 #endif /* DMAMAP_STATS */
4530 
4531 		PKTPULL(dhd->osh, p, prot->rx_metadata_offset);
4532 		pktlen[i] = PKTLEN(dhd->osh, p);
4533 		pktbuf[i] = p;
4534 		pktbuf_pa[i] = pa;
4535 	}
4536 
4537 	/* only post what we have */
4538 	count = i;
4539 
4540 	/* grab the ring lock to allocate pktid and post on ring */
4541 	DHD_RING_LOCK(ring->ring_lock, flags);
4542 
4543 	/* Claim space for exactly 'count' no of messages, for mitigation purpose */
4544 	msg_start = (void *)
4545 		dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, TRUE);
4546 	if (msg_start == NULL) {
4547 		DHD_INFO(("%s:%d: Rxbufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
4548 		DHD_RING_UNLOCK(ring->ring_lock, flags);
4549 		goto cleanup;
4550 	}
4551 	/* if msg_start !=  NULL, we should have alloced space for atleast 1 item */
4552 	ASSERT(alloced > 0);
4553 
4554 	rxbuf_post_tmp = (uint8*)msg_start;
4555 
4556 	for (i = 0; i < alloced; i++) {
4557 		rxbuf_post = (host_rxbuf_post_t *)rxbuf_post_tmp;
4558 		p = pktbuf[i];
4559 		pa = pktbuf_pa[i];
4560 
4561 #if defined(DHD_LB_RXC)
4562 		if (use_rsv_pktid == TRUE) {
4563 			bcm_workq_t *workq = &prot->rx_compl_cons;
4564 			int elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
4565 
4566 			if (elem_ix == BCM_RING_EMPTY) {
4567 				DHD_INFO(("%s rx_compl_cons ring is empty\n", __FUNCTION__));
4568 				pktid = DHD_PKTID_INVALID;
4569 				goto alloc_pkt_id;
4570 			} else {
4571 				uint32 *elem = WORKQ_ELEMENT(uint32, workq, elem_ix);
4572 				pktid = *elem;
4573 			}
4574 
4575 			rxbuf_post->cmn_hdr.request_id = htol32(pktid);
4576 
4577 			/* Now populate the previous locker with valid information */
4578 			if (pktid != DHD_PKTID_INVALID) {
4579 				DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_rx_map,
4580 					p, pktid, pa, pktlen[i], DMA_RX, NULL, NULL,
4581 					PKTTYPE_DATA_RX);
4582 			}
4583 		} else
4584 #endif /* ! DHD_LB_RXC */
4585 		{
4586 #if defined(DHD_LB_RXC)
4587 alloc_pkt_id:
4588 #endif /* DHD_LB_RXC */
4589 		pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_rx_map, p, pa,
4590 			pktlen[i], DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_RX);
4591 #if defined(DHD_PCIE_PKTID)
4592 		if (pktid == DHD_PKTID_INVALID) {
4593 			break;
4594 		}
4595 #endif /* DHD_PCIE_PKTID */
4596 		}
4597 
4598 		/* Common msg header */
4599 		rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_RXBUF_POST;
4600 		rxbuf_post->cmn_hdr.if_id = 0;
4601 		rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
4602 		rxbuf_post->cmn_hdr.flags = ring->current_phase;
4603 		ring->seqnum++;
4604 		rxbuf_post->data_buf_len = htol16((uint16)pktlen[i]);
4605 		rxbuf_post->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
4606 		rxbuf_post->data_buf_addr.low_addr =
4607 			htol32(PHYSADDRLO(pa) + prot->rx_metadata_offset);
4608 
4609 		if (prot->rx_metadata_offset) {
4610 			rxbuf_post->metadata_buf_len = prot->rx_metadata_offset;
4611 			rxbuf_post->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
4612 			rxbuf_post->metadata_buf_addr.low_addr  = htol32(PHYSADDRLO(pa));
4613 		} else {
4614 			rxbuf_post->metadata_buf_len = 0;
4615 			rxbuf_post->metadata_buf_addr.high_addr = 0;
4616 			rxbuf_post->metadata_buf_addr.low_addr  = 0;
4617 		}
4618 
4619 #ifdef DHD_PKTID_AUDIT_RING
4620 		DHD_PKTID_AUDIT(dhd, prot->pktid_rx_map, pktid, DHD_DUPLICATE_ALLOC);
4621 #endif /* DHD_PKTID_AUDIT_RING */
4622 
4623 		rxbuf_post->cmn_hdr.request_id = htol32(pktid);
4624 
4625 		/* Move rxbuf_post_tmp to next item */
4626 		rxbuf_post_tmp = rxbuf_post_tmp + ring->item_len;
4627 
4628 #ifdef DHD_LBUF_AUDIT
4629 		PKTAUDIT(dhd->osh, p);
4630 #endif // endif
4631 	}
4632 
4633 	if (i < alloced) {
4634 		if (ring->wr < (alloced - i))
4635 			ring->wr = ring->max_items - (alloced - i);
4636 		else
4637 			ring->wr -= (alloced - i);
4638 
4639 		if (ring->wr == 0) {
4640 			DHD_INFO(("%s: flipping the phase now\n", ring->name));
4641 				ring->current_phase = ring->current_phase ?
4642 				0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4643 		}
4644 
4645 		alloced = i;
4646 	}
4647 
4648 	/* update ring's WR index and ring doorbell to dongle */
4649 	if (alloced > 0) {
4650 		dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
4651 	}
4652 
4653 	DHD_RING_UNLOCK(ring->ring_lock, flags);
4654 
4655 cleanup:
4656 	for (i = alloced; i < count; i++) {
4657 		p = pktbuf[i];
4658 		pa = pktbuf_pa[i];
4659 
4660 		if (SECURE_DMA_ENAB(dhd->osh))
4661 			SECURE_DMA_UNMAP(dhd->osh, pa, pktlen[i], DMA_RX, 0,
4662 				DHD_DMAH_NULL, ring->dma_buf.secdma, 0);
4663 		else
4664 			DMA_UNMAP(dhd->osh, pa, pktlen[i], DMA_RX, 0, DHD_DMAH_NULL);
4665 		PKTFREE(dhd->osh, p, FALSE);
4666 	}
4667 
4668 	MFREE(dhd->osh, lcl_buf, lcl_buf_size);
4669 
4670 	return alloced;
4671 } /* dhd_prot_rxbufpost */
4672 
4673 static int
dhd_prot_infobufpost(dhd_pub_t * dhd,msgbuf_ring_t * ring)4674 dhd_prot_infobufpost(dhd_pub_t *dhd, msgbuf_ring_t *ring)
4675 {
4676 	unsigned long flags;
4677 	uint32 pktid;
4678 	dhd_prot_t *prot = dhd->prot;
4679 	uint16 alloced = 0;
4680 	uint16 pktsz = DHD_INFOBUF_RX_BUFPOST_PKTSZ;
4681 	uint32 pktlen;
4682 	info_buf_post_msg_t *infobuf_post;
4683 	uint8 *infobuf_post_tmp;
4684 	void *p;
4685 	void* msg_start;
4686 	uint8 i = 0;
4687 	dmaaddr_t pa;
4688 	int16 count = 0;
4689 
4690 	if (ring == NULL)
4691 		return 0;
4692 
4693 	if (ring->inited != TRUE)
4694 		return 0;
4695 	if (ring == dhd->prot->h2dring_info_subn) {
4696 		if (prot->max_infobufpost == 0)
4697 			return 0;
4698 
4699 		count = prot->max_infobufpost - prot->infobufpost;
4700 	}
4701 	else {
4702 		DHD_ERROR(("Unknown ring\n"));
4703 		return 0;
4704 	}
4705 
4706 	if (count <= 0) {
4707 		DHD_INFO(("%s: Cannot post more than max info resp buffers\n",
4708 			__FUNCTION__));
4709 		return 0;
4710 	}
4711 
4712 	/* grab the ring lock to allocate pktid and post on ring */
4713 	DHD_RING_LOCK(ring->ring_lock, flags);
4714 
4715 	/* Claim space for exactly 'count' no of messages, for mitigation purpose */
4716 	msg_start = (void *) dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, FALSE);
4717 
4718 	if (msg_start == NULL) {
4719 		DHD_INFO(("%s:%d: infobufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
4720 		DHD_RING_UNLOCK(ring->ring_lock, flags);
4721 		return -1;
4722 	}
4723 
4724 	/* if msg_start !=  NULL, we should have alloced space for atleast 1 item */
4725 	ASSERT(alloced > 0);
4726 
4727 	infobuf_post_tmp = (uint8*) msg_start;
4728 
4729 	/* loop through each allocated message in the host ring */
4730 	for (i = 0; i < alloced; i++) {
4731 		infobuf_post = (info_buf_post_msg_t *) infobuf_post_tmp;
4732 		/* Create a rx buffer */
4733 #ifdef DHD_USE_STATIC_CTRLBUF
4734 		p = PKTGET_STATIC(dhd->osh, pktsz, FALSE);
4735 #else
4736 		p = PKTGET(dhd->osh, pktsz, FALSE);
4737 #endif /* DHD_USE_STATIC_CTRLBUF */
4738 		if (p == NULL) {
4739 			DHD_ERROR(("%s:%d: PKTGET for infobuf failed\n", __FUNCTION__, __LINE__));
4740 			dhd->rx_pktgetfail++;
4741 			break;
4742 		}
4743 		pktlen = PKTLEN(dhd->osh, p);
4744 		if (SECURE_DMA_ENAB(dhd->osh)) {
4745 			pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen,
4746 				DMA_RX, p, 0, ring->dma_buf.secdma, 0);
4747 		}
4748 #ifndef BCM_SECURE_DMA
4749 		else
4750 			pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
4751 #endif /* #ifndef BCM_SECURE_DMA */
4752 		if (PHYSADDRISZERO(pa)) {
4753 			if (SECURE_DMA_ENAB(dhd->osh)) {
4754 				SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
4755 					ring->dma_buf.secdma, 0);
4756 			}
4757 			else
4758 				DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
4759 #ifdef DHD_USE_STATIC_CTRLBUF
4760 			PKTFREE_STATIC(dhd->osh, p, FALSE);
4761 #else
4762 			PKTFREE(dhd->osh, p, FALSE);
4763 #endif /* DHD_USE_STATIC_CTRLBUF */
4764 			DHD_ERROR(("Invalid phyaddr 0\n"));
4765 			ASSERT(0);
4766 			break;
4767 		}
4768 #ifdef DMAMAP_STATS
4769 		dhd->dma_stats.info_rx++;
4770 		dhd->dma_stats.info_rx_sz += pktlen;
4771 #endif /* DMAMAP_STATS */
4772 		pktlen = PKTLEN(dhd->osh, p);
4773 
4774 		/* Common msg header */
4775 		infobuf_post->cmn_hdr.msg_type = MSG_TYPE_INFO_BUF_POST;
4776 		infobuf_post->cmn_hdr.if_id = 0;
4777 		infobuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
4778 		infobuf_post->cmn_hdr.flags = ring->current_phase;
4779 		ring->seqnum++;
4780 
4781 		pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_ctrl_map, p, pa,
4782 			pktlen, DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_INFO_RX);
4783 
4784 #if defined(DHD_PCIE_PKTID)
4785 		if (pktid == DHD_PKTID_INVALID) {
4786 			if (SECURE_DMA_ENAB(dhd->osh)) {
4787 				SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0,
4788 					ring->dma_buf.secdma, 0);
4789 			} else
4790 				DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0);
4791 
4792 #ifdef DHD_USE_STATIC_CTRLBUF
4793 			PKTFREE_STATIC(dhd->osh, p, FALSE);
4794 #else
4795 			PKTFREE(dhd->osh, p, FALSE);
4796 #endif /* DHD_USE_STATIC_CTRLBUF */
4797 			DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
4798 			break;
4799 		}
4800 #endif /* DHD_PCIE_PKTID */
4801 
4802 		infobuf_post->host_buf_len = htol16((uint16)pktlen);
4803 		infobuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
4804 		infobuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
4805 
4806 #ifdef DHD_PKTID_AUDIT_RING
4807 		DHD_PKTID_AUDIT(dhd, prot->pktid_ctrl_map, pktid, DHD_DUPLICATE_ALLOC);
4808 #endif /* DHD_PKTID_AUDIT_RING */
4809 
4810 		DHD_INFO(("ID %d, low_addr 0x%08x, high_addr 0x%08x\n",
4811 			infobuf_post->cmn_hdr.request_id,  infobuf_post->host_buf_addr.low_addr,
4812 			infobuf_post->host_buf_addr.high_addr));
4813 
4814 		infobuf_post->cmn_hdr.request_id = htol32(pktid);
4815 		/* Move rxbuf_post_tmp to next item */
4816 		infobuf_post_tmp = infobuf_post_tmp + ring->item_len;
4817 #ifdef DHD_LBUF_AUDIT
4818 		PKTAUDIT(dhd->osh, p);
4819 #endif // endif
4820 	}
4821 
4822 	if (i < alloced) {
4823 		if (ring->wr < (alloced - i))
4824 			ring->wr = ring->max_items - (alloced - i);
4825 		else
4826 			ring->wr -= (alloced - i);
4827 
4828 		alloced = i;
4829 		if (alloced && ring->wr == 0) {
4830 			DHD_INFO(("%s: flipping the phase now\n", ring->name));
4831 			ring->current_phase = ring->current_phase ?
4832 				0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4833 		}
4834 	}
4835 
4836 	/* Update the write pointer in TCM & ring bell */
4837 	if (alloced > 0) {
4838 		if (ring == dhd->prot->h2dring_info_subn) {
4839 			prot->infobufpost += alloced;
4840 		}
4841 		dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
4842 	}
4843 
4844 	DHD_RING_UNLOCK(ring->ring_lock, flags);
4845 
4846 	return alloced;
4847 } /* dhd_prot_infobufpost */
4848 
4849 #ifdef IOCTLRESP_USE_CONSTMEM
4850 static int
alloc_ioctl_return_buffer(dhd_pub_t * dhd,dhd_dma_buf_t * retbuf)4851 alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf)
4852 {
4853 	int err;
4854 	memset(retbuf, 0, sizeof(dhd_dma_buf_t));
4855 
4856 	if ((err = dhd_dma_buf_alloc(dhd, retbuf, IOCT_RETBUF_SIZE)) != BCME_OK) {
4857 		DHD_ERROR(("%s: dhd_dma_buf_alloc err %d\n", __FUNCTION__, err));
4858 		ASSERT(0);
4859 		return BCME_NOMEM;
4860 	}
4861 
4862 	return BCME_OK;
4863 }
4864 
4865 static void
free_ioctl_return_buffer(dhd_pub_t * dhd,dhd_dma_buf_t * retbuf)4866 free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf)
4867 {
4868 	/* retbuf (declared on stack) not fully populated ...  */
4869 	if (retbuf->va) {
4870 		uint32 dma_pad;
4871 		dma_pad = (IOCT_RETBUF_SIZE % DHD_DMA_PAD) ? DHD_DMA_PAD : 0;
4872 		retbuf->len = IOCT_RETBUF_SIZE;
4873 		retbuf->_alloced = retbuf->len + dma_pad;
4874 	}
4875 
4876 	dhd_dma_buf_free(dhd, retbuf);
4877 	return;
4878 }
4879 #endif /* IOCTLRESP_USE_CONSTMEM */
4880 
4881 static int
dhd_prot_rxbufpost_ctrl(dhd_pub_t * dhd,uint8 msg_type)4882 dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, uint8 msg_type)
4883 {
4884 	void *p;
4885 	uint16 pktsz;
4886 	ioctl_resp_evt_buf_post_msg_t *rxbuf_post;
4887 	dmaaddr_t pa;
4888 	uint32 pktlen;
4889 	dhd_prot_t *prot = dhd->prot;
4890 	uint16 alloced = 0;
4891 	unsigned long flags;
4892 	dhd_dma_buf_t retbuf;
4893 	void *dmah = NULL;
4894 	uint32 pktid;
4895 	void *map_handle;
4896 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
4897 	bool non_ioctl_resp_buf = 0;
4898 	dhd_pkttype_t buf_type;
4899 
4900 	if (dhd->busstate == DHD_BUS_DOWN) {
4901 		DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
4902 		return -1;
4903 	}
4904 	memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
4905 
4906 	if (msg_type == MSG_TYPE_IOCTLRESP_BUF_POST)
4907 		buf_type = PKTTYPE_IOCTL_RX;
4908 	else if (msg_type == MSG_TYPE_EVENT_BUF_POST)
4909 		buf_type = PKTTYPE_EVENT_RX;
4910 	else if (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST)
4911 		buf_type = PKTTYPE_TSBUF_RX;
4912 	else {
4913 		DHD_ERROR(("invalid message type to be posted to Ctrl ring %d\n", msg_type));
4914 		return -1;
4915 	}
4916 
4917 	if ((msg_type == MSG_TYPE_EVENT_BUF_POST) || (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST))
4918 		non_ioctl_resp_buf = TRUE;
4919 	else
4920 		non_ioctl_resp_buf = FALSE;
4921 
4922 	if (non_ioctl_resp_buf) {
4923 		/* Allocate packet for not ioctl resp buffer post */
4924 		pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
4925 	} else {
4926 		/* Allocate packet for ctrl/ioctl buffer post */
4927 		pktsz = DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ;
4928 	}
4929 
4930 #ifdef IOCTLRESP_USE_CONSTMEM
4931 	if (!non_ioctl_resp_buf) {
4932 		if (alloc_ioctl_return_buffer(dhd, &retbuf) != BCME_OK) {
4933 			DHD_ERROR(("Could not allocate IOCTL response buffer\n"));
4934 			return -1;
4935 		}
4936 		ASSERT(retbuf.len == IOCT_RETBUF_SIZE);
4937 		p = retbuf.va;
4938 		pktlen = retbuf.len;
4939 		pa = retbuf.pa;
4940 		dmah = retbuf.dmah;
4941 	} else
4942 #endif /* IOCTLRESP_USE_CONSTMEM */
4943 	{
4944 #ifdef DHD_USE_STATIC_CTRLBUF
4945 		p = PKTGET_STATIC(dhd->osh, pktsz, FALSE);
4946 #else
4947 		p = PKTGET(dhd->osh, pktsz, FALSE);
4948 #endif /* DHD_USE_STATIC_CTRLBUF */
4949 		if (p == NULL) {
4950 			DHD_ERROR(("%s:%d: PKTGET for %s buf failed\n",
4951 				__FUNCTION__, __LINE__, non_ioctl_resp_buf ?
4952 				"EVENT" : "IOCTL RESP"));
4953 			dhd->rx_pktgetfail++;
4954 			return -1;
4955 		}
4956 
4957 		pktlen = PKTLEN(dhd->osh, p);
4958 
4959 		if (SECURE_DMA_ENAB(dhd->osh)) {
4960 			pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen,
4961 				DMA_RX, p, 0, ring->dma_buf.secdma, 0);
4962 		}
4963 #ifndef BCM_SECURE_DMA
4964 		else
4965 			pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
4966 #endif /* #ifndef BCM_SECURE_DMA */
4967 
4968 		if (PHYSADDRISZERO(pa)) {
4969 			DHD_ERROR(("Invalid physaddr 0\n"));
4970 			ASSERT(0);
4971 			goto free_pkt_return;
4972 		}
4973 
4974 #ifdef DMAMAP_STATS
4975 		switch (buf_type) {
4976 #ifndef IOCTLRESP_USE_CONSTMEM
4977 			case PKTTYPE_IOCTL_RX:
4978 				dhd->dma_stats.ioctl_rx++;
4979 				dhd->dma_stats.ioctl_rx_sz += pktlen;
4980 				break;
4981 #endif /* !IOCTLRESP_USE_CONSTMEM */
4982 			case PKTTYPE_EVENT_RX:
4983 				dhd->dma_stats.event_rx++;
4984 				dhd->dma_stats.event_rx_sz += pktlen;
4985 				break;
4986 			case PKTTYPE_TSBUF_RX:
4987 				dhd->dma_stats.tsbuf_rx++;
4988 				dhd->dma_stats.tsbuf_rx_sz += pktlen;
4989 				break;
4990 			default:
4991 				break;
4992 		}
4993 #endif /* DMAMAP_STATS */
4994 
4995 	}
4996 
4997 	/* grab the ring lock to allocate pktid and post on ring */
4998 	DHD_RING_LOCK(ring->ring_lock, flags);
4999 
5000 	rxbuf_post = (ioctl_resp_evt_buf_post_msg_t *)
5001 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
5002 
5003 	if (rxbuf_post == NULL) {
5004 		DHD_RING_UNLOCK(ring->ring_lock, flags);
5005 		DHD_ERROR(("%s:%d: Ctrl submit Msgbuf Not available to post buffer \n",
5006 			__FUNCTION__, __LINE__));
5007 
5008 #ifdef IOCTLRESP_USE_CONSTMEM
5009 		if (non_ioctl_resp_buf)
5010 #endif /* IOCTLRESP_USE_CONSTMEM */
5011 		{
5012 			if (SECURE_DMA_ENAB(dhd->osh)) {
5013 				SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
5014 					ring->dma_buf.secdma, 0);
5015 			} else {
5016 				DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
5017 			}
5018 		}
5019 		goto free_pkt_return;
5020 	}
5021 
5022 	/* CMN msg header */
5023 	rxbuf_post->cmn_hdr.msg_type = msg_type;
5024 
5025 #ifdef IOCTLRESP_USE_CONSTMEM
5026 	if (!non_ioctl_resp_buf) {
5027 		map_handle = dhd->prot->pktid_map_handle_ioctl;
5028 		pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, p, pa, pktlen, DMA_RX, dmah,
5029 			ring->dma_buf.secdma, buf_type);
5030 	} else
5031 #endif /* IOCTLRESP_USE_CONSTMEM */
5032 	{
5033 		map_handle = dhd->prot->pktid_ctrl_map;
5034 		pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle,
5035 			p, pa, pktlen, DMA_RX, dmah, ring->dma_buf.secdma,
5036 			buf_type);
5037 	}
5038 
5039 	if (pktid == DHD_PKTID_INVALID) {
5040 		if (ring->wr == 0) {
5041 			ring->wr = ring->max_items - 1;
5042 		} else {
5043 			ring->wr--;
5044 			if (ring->wr == 0) {
5045 				ring->current_phase = ring->current_phase ? 0 :
5046 					BCMPCIE_CMNHDR_PHASE_BIT_INIT;
5047 			}
5048 		}
5049 		DHD_RING_UNLOCK(ring->ring_lock, flags);
5050 		DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
5051 		DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
5052 		goto free_pkt_return;
5053 	}
5054 
5055 #ifdef DHD_PKTID_AUDIT_RING
5056 	DHD_PKTID_AUDIT(dhd, map_handle, pktid, DHD_DUPLICATE_ALLOC);
5057 #endif /* DHD_PKTID_AUDIT_RING */
5058 
5059 	rxbuf_post->cmn_hdr.request_id = htol32(pktid);
5060 	rxbuf_post->cmn_hdr.if_id = 0;
5061 	rxbuf_post->cmn_hdr.epoch =  ring->seqnum % H2D_EPOCH_MODULO;
5062 	ring->seqnum++;
5063 	rxbuf_post->cmn_hdr.flags = ring->current_phase;
5064 
5065 #if defined(DHD_PCIE_PKTID)
5066 	if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) {
5067 		if (ring->wr == 0) {
5068 			ring->wr = ring->max_items - 1;
5069 		} else {
5070 			if (ring->wr == 0) {
5071 				ring->current_phase = ring->current_phase ? 0 :
5072 					BCMPCIE_CMNHDR_PHASE_BIT_INIT;
5073 			}
5074 		}
5075 		DHD_RING_UNLOCK(ring->ring_lock, flags);
5076 #ifdef IOCTLRESP_USE_CONSTMEM
5077 		if (non_ioctl_resp_buf)
5078 #endif /* IOCTLRESP_USE_CONSTMEM */
5079 		{
5080 			if (SECURE_DMA_ENAB(dhd->osh)) {
5081 				SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
5082 					ring->dma_buf.secdma, 0);
5083 			} else
5084 				DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
5085 		}
5086 		goto free_pkt_return;
5087 	}
5088 #endif /* DHD_PCIE_PKTID */
5089 
5090 #ifndef IOCTLRESP_USE_CONSTMEM
5091 	rxbuf_post->host_buf_len = htol16((uint16)PKTLEN(dhd->osh, p));
5092 #else
5093 	rxbuf_post->host_buf_len = htol16((uint16)pktlen);
5094 #endif /* IOCTLRESP_USE_CONSTMEM */
5095 	rxbuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
5096 	rxbuf_post->host_buf_addr.low_addr  = htol32(PHYSADDRLO(pa));
5097 
5098 #ifdef DHD_LBUF_AUDIT
5099 	if (non_ioctl_resp_buf)
5100 		PKTAUDIT(dhd->osh, p);
5101 #endif // endif
5102 
5103 	/* update ring's WR index and ring doorbell to dongle */
5104 	dhd_prot_ring_write_complete(dhd, ring, rxbuf_post, 1);
5105 
5106 	DHD_RING_UNLOCK(ring->ring_lock, flags);
5107 
5108 	return 1;
5109 
5110 free_pkt_return:
5111 	if (!non_ioctl_resp_buf) {
5112 #ifdef IOCTLRESP_USE_CONSTMEM
5113 		free_ioctl_return_buffer(dhd, &retbuf);
5114 #else
5115 		dhd_prot_packet_free(dhd, p, buf_type, FALSE);
5116 #endif /* IOCTLRESP_USE_CONSTMEM */
5117 	} else {
5118 		dhd_prot_packet_free(dhd, p, buf_type, FALSE);
5119 	}
5120 
5121 	return -1;
5122 } /* dhd_prot_rxbufpost_ctrl */
5123 
5124 static uint16
dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t * dhd,uint8 msg_type,uint32 max_to_post)5125 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msg_type, uint32 max_to_post)
5126 {
5127 	uint32 i = 0;
5128 	int32 ret_val;
5129 
5130 	DHD_INFO(("max to post %d, event %d \n", max_to_post, msg_type));
5131 
5132 	if (dhd->busstate == DHD_BUS_DOWN) {
5133 		DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
5134 		return 0;
5135 	}
5136 
5137 	while (i < max_to_post) {
5138 		ret_val  = dhd_prot_rxbufpost_ctrl(dhd, msg_type);
5139 		if (ret_val < 0)
5140 			break;
5141 		i++;
5142 	}
5143 	DHD_INFO(("posted %d buffers of type %d\n", i, msg_type));
5144 	return (uint16)i;
5145 }
5146 
5147 static void
dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t * dhd)5148 dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *dhd)
5149 {
5150 	dhd_prot_t *prot = dhd->prot;
5151 	int max_to_post;
5152 
5153 	DHD_INFO(("ioctl resp buf post\n"));
5154 	max_to_post = prot->max_ioctlrespbufpost - prot->cur_ioctlresp_bufs_posted;
5155 	if (max_to_post <= 0) {
5156 		DHD_INFO(("%s: Cannot post more than max IOCTL resp buffers\n",
5157 			__FUNCTION__));
5158 		return;
5159 	}
5160 	prot->cur_ioctlresp_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
5161 		MSG_TYPE_IOCTLRESP_BUF_POST, max_to_post);
5162 }
5163 
5164 static void
dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t * dhd)5165 dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *dhd)
5166 {
5167 	dhd_prot_t *prot = dhd->prot;
5168 	int max_to_post;
5169 
5170 	max_to_post = prot->max_eventbufpost - prot->cur_event_bufs_posted;
5171 	if (max_to_post <= 0) {
5172 		DHD_ERROR(("%s: Cannot post more than max event buffers\n",
5173 			__FUNCTION__));
5174 		return;
5175 	}
5176 	prot->cur_event_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
5177 		MSG_TYPE_EVENT_BUF_POST, max_to_post);
5178 }
5179 
5180 static int
dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t * dhd)5181 dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *dhd)
5182 {
5183 	return 0;
5184 }
5185 
5186 bool BCMFASTPATH
dhd_prot_process_msgbuf_infocpl(dhd_pub_t * dhd,uint bound)5187 dhd_prot_process_msgbuf_infocpl(dhd_pub_t *dhd, uint bound)
5188 {
5189 	dhd_prot_t *prot = dhd->prot;
5190 	bool more = TRUE;
5191 	uint n = 0;
5192 	msgbuf_ring_t *ring = prot->d2hring_info_cpln;
5193 	unsigned long flags;
5194 
5195 	if (ring == NULL)
5196 		return FALSE;
5197 	if (ring->inited != TRUE)
5198 		return FALSE;
5199 
5200 	/* Process all the messages - DTOH direction */
5201 	while (!dhd_is_device_removed(dhd)) {
5202 		uint8 *msg_addr;
5203 		uint32 msg_len;
5204 
5205 		if (dhd_query_bus_erros(dhd)) {
5206 			more = FALSE;
5207 			break;
5208 		}
5209 
5210 		if (dhd->hang_was_sent) {
5211 			more = FALSE;
5212 			break;
5213 		}
5214 
5215 		if (dhd->smmu_fault_occurred) {
5216 			more = FALSE;
5217 			break;
5218 		}
5219 
5220 		DHD_RING_LOCK(ring->ring_lock, flags);
5221 		/* Get the message from ring */
5222 		msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
5223 		DHD_RING_UNLOCK(ring->ring_lock, flags);
5224 		if (msg_addr == NULL) {
5225 			more = FALSE;
5226 			break;
5227 		}
5228 
5229 		/* Prefetch data to populate the cache */
5230 		OSL_PREFETCH(msg_addr);
5231 
5232 		if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
5233 			DHD_ERROR(("%s: Error at  process rxpl msgbuf of len %d\n",
5234 				__FUNCTION__, msg_len));
5235 		}
5236 
5237 		/* Update read pointer */
5238 		dhd_prot_upd_read_idx(dhd, ring);
5239 
5240 		/* After batch processing, check RX bound */
5241 		n += msg_len / ring->item_len;
5242 		if (n >= bound) {
5243 			break;
5244 		}
5245 	}
5246 
5247 	return more;
5248 }
5249 
5250 #ifdef EWP_EDL
5251 bool
dhd_prot_process_msgbuf_edl(dhd_pub_t * dhd)5252 dhd_prot_process_msgbuf_edl(dhd_pub_t *dhd)
5253 {
5254 	dhd_prot_t *prot = dhd->prot;
5255 	msgbuf_ring_t *ring = prot->d2hring_edl;
5256 	unsigned long flags = 0;
5257 	uint32 items = 0;
5258 	uint16 rd = 0;
5259 	uint16 depth = 0;
5260 
5261 	if (ring == NULL)
5262 		return FALSE;
5263 	if (ring->inited != TRUE)
5264 		return FALSE;
5265 	if (ring->item_len == 0) {
5266 		DHD_ERROR(("%s: Bad ring ! ringidx %d, item_len %d \n",
5267 			__FUNCTION__, ring->idx, ring->item_len));
5268 		return FALSE;
5269 	}
5270 
5271 	if (dhd_query_bus_erros(dhd)) {
5272 		return FALSE;
5273 	}
5274 
5275 	if (dhd->hang_was_sent) {
5276 		return FALSE;
5277 	}
5278 
5279 	/* in this DPC context just check if wr index has moved
5280 	 * and schedule deferred context to actually process the
5281 	 * work items.
5282 	*/
5283 	/* update the write index */
5284 	DHD_RING_LOCK(ring->ring_lock, flags);
5285 	if (dhd->dma_d2h_ring_upd_support) {
5286 		/* DMAing write/read indices supported */
5287 		ring->wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
5288 	} else {
5289 		dhd_bus_cmn_readshared(dhd->bus, &ring->wr, RING_WR_UPD, ring->idx);
5290 	}
5291 	rd = ring->rd;
5292 	DHD_RING_UNLOCK(ring->ring_lock, flags);
5293 
5294 	depth = ring->max_items;
5295 	/* check for avail space, in number of ring items */
5296 	items = READ_AVAIL_SPACE(ring->wr, rd, depth);
5297 	if (items == 0) {
5298 		/* no work items in edl ring */
5299 		return FALSE;
5300 	}
5301 	if (items > ring->max_items) {
5302 		DHD_ERROR(("\r\n======================= \r\n"));
5303 		DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n",
5304 			__FUNCTION__, ring, ring->name, ring->max_items, items));
5305 		DHD_ERROR(("wr: %d,  rd: %d,  depth: %d  \r\n",
5306 			ring->wr, ring->rd, depth));
5307 		DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n",
5308 			dhd->busstate, dhd->bus->wait_for_d3_ack));
5309 		DHD_ERROR(("\r\n======================= \r\n"));
5310 #ifdef DHD_FW_COREDUMP
5311 		if (dhd->memdump_enabled) {
5312 			/* collect core dump */
5313 			dhd->memdump_type = DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR;
5314 			dhd_bus_mem_dump(dhd);
5315 
5316 		}
5317 #endif /* DHD_FW_COREDUMP */
5318 		dhd_schedule_reset(dhd);
5319 
5320 		return FALSE;
5321 	}
5322 
5323 	if (items > D2HRING_EDL_WATERMARK) {
5324 		DHD_ERROR_RLMT(("%s: WARNING! EDL watermark hit, num items=%u;"
5325 			" rd=%u; wr=%u; depth=%u;\n", __FUNCTION__, items,
5326 			ring->rd, ring->wr, depth));
5327 	}
5328 
5329 	dhd_schedule_logtrace(dhd->info);
5330 
5331 	return FALSE;
5332 }
5333 
5334 /* This is called either from work queue context of 'event_log_dispatcher_work' or
5335 * from the kthread context of dhd_logtrace_thread
5336 */
5337 int
dhd_prot_process_edl_complete(dhd_pub_t * dhd,void * evt_decode_data)5338 dhd_prot_process_edl_complete(dhd_pub_t *dhd, void *evt_decode_data)
5339 {
5340 	dhd_prot_t *prot = NULL;
5341 	msgbuf_ring_t *ring = NULL;
5342 	int err = 0;
5343 	unsigned long flags = 0;
5344 	cmn_msg_hdr_t *msg = NULL;
5345 	uint8 *msg_addr = NULL;
5346 	uint32 max_items_to_process = 0, n = 0;
5347 	uint32 num_items = 0, new_items = 0;
5348 	uint16 depth = 0;
5349 	volatile uint16 wr = 0;
5350 
5351 	if (!dhd || !dhd->prot)
5352 		return 0;
5353 
5354 	prot = dhd->prot;
5355 	ring = prot->d2hring_edl;
5356 	if (!ring || !evt_decode_data) {
5357 		return 0;
5358 	}
5359 
5360 	if (dhd->hang_was_sent) {
5361 		return FALSE;
5362 	}
5363 
5364 	DHD_RING_LOCK(ring->ring_lock, flags);
5365 	ring->curr_rd = ring->rd;
5366 	wr = ring->wr;
5367 	depth = ring->max_items;
5368 	/* check for avail space, in number of ring items
5369 	 * Note, that this will only give the # of items
5370 	 * from rd to wr if wr>=rd, or from rd to ring end
5371 	 * if wr < rd. So in the latter case strictly speaking
5372 	 * not all the items are read. But this is OK, because
5373 	 * these will be processed in the next doorbell as rd
5374 	 * would have wrapped around. Processing in the next
5375 	 * doorbell is acceptable since EDL only contains debug data
5376 	 */
5377 	num_items = READ_AVAIL_SPACE(wr, ring->rd, depth);
5378 
5379 	if (num_items == 0) {
5380 		/* no work items in edl ring */
5381 		DHD_RING_UNLOCK(ring->ring_lock, flags);
5382 		return 0;
5383 	}
5384 
5385 	DHD_INFO(("%s: EDL work items [%u] available \n",
5386 			__FUNCTION__, num_items));
5387 
5388 	/* if space is available, calculate address to be read */
5389 	msg_addr = (char*)ring->dma_buf.va + (ring->rd * ring->item_len);
5390 
5391 	max_items_to_process = MIN(num_items, DHD_EVENT_LOGTRACE_BOUND);
5392 
5393 	DHD_RING_UNLOCK(ring->ring_lock, flags);
5394 
5395 	/* Prefetch data to populate the cache */
5396 	OSL_PREFETCH(msg_addr);
5397 
5398 	n = max_items_to_process;
5399 	while (n > 0) {
5400 		msg = (cmn_msg_hdr_t *)msg_addr;
5401 		/* wait for DMA of work item to complete */
5402 		if ((err = prot->d2h_edl_sync_cb(dhd, ring, msg)) != BCME_OK) {
5403 			DHD_ERROR(("%s: Error waiting for DMA to cmpl in EDL "
5404 				"ring; err = %d\n", __FUNCTION__, err));
5405 		}
5406 
5407 		/*
5408 		 * Update the curr_rd to the current index in the ring, from where
5409 		 * the work item is fetched. This way if the fetched work item
5410 		 * fails in LIVELOCK, we can print the exact read index in the ring
5411 		 * that shows up the corrupted work item.
5412 		 */
5413 		if ((ring->curr_rd + 1) >= ring->max_items) {
5414 			ring->curr_rd = 0;
5415 		} else {
5416 			ring->curr_rd += 1;
5417 		}
5418 
5419 		if (err != BCME_OK) {
5420 			return 0;
5421 		}
5422 
5423 		/* process the edl work item, i.e, the event log */
5424 		err = dhd_event_logtrace_process_edl(dhd, msg_addr, evt_decode_data);
5425 
5426 		/* Dummy sleep so that scheduler kicks in after processing any logprints */
5427 		OSL_SLEEP(0);
5428 
5429 		/* Prefetch data to populate the cache */
5430 		OSL_PREFETCH(msg_addr + ring->item_len);
5431 
5432 		msg_addr += ring->item_len;
5433 		--n;
5434 	}
5435 
5436 	DHD_RING_LOCK(ring->ring_lock, flags);
5437 	/* update host ring read pointer */
5438 	if ((ring->rd + max_items_to_process) >= ring->max_items)
5439 		ring->rd = 0;
5440 	else
5441 		ring->rd += max_items_to_process;
5442 	DHD_RING_UNLOCK(ring->ring_lock, flags);
5443 
5444 	/* Now after processing max_items_to_process update dongle rd index.
5445 	 * The TCM rd index is updated only if bus is not
5446 	 * in D3. Else, the rd index is updated from resume
5447 	 * context in - 'dhdpcie_bus_suspend'
5448 	 */
5449 	DHD_GENERAL_LOCK(dhd, flags);
5450 	if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd)) {
5451 		DHD_INFO(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
5452 			__FUNCTION__, dhd->busstate, dhd->dhd_bus_busy_state));
5453 		DHD_GENERAL_UNLOCK(dhd, flags);
5454 	} else {
5455 		DHD_GENERAL_UNLOCK(dhd, flags);
5456 		DHD_EDL_RING_TCM_RD_UPDATE(dhd);
5457 	}
5458 
5459 	/* if num_items > bound, then anyway we will reschedule and
5460 	 * this function runs again, so that if in between the DPC has
5461 	 * updated the wr index, then the updated wr is read. But if
5462 	 * num_items <= bound, and if DPC executes and updates the wr index
5463 	 * when the above while loop is running, then the updated 'wr' index
5464 	 * needs to be re-read from here, If we don't do so, then till
5465 	 * the next time this function is scheduled
5466 	 * the event logs will not be processed.
5467 	*/
5468 	if (num_items <= DHD_EVENT_LOGTRACE_BOUND) {
5469 		/* read the updated wr index if reqd. and update num_items */
5470 		DHD_RING_LOCK(ring->ring_lock, flags);
5471 		if (wr != (volatile uint16)ring->wr) {
5472 			wr = (volatile uint16)ring->wr;
5473 			new_items = READ_AVAIL_SPACE(wr, ring->rd, depth);
5474 			DHD_INFO(("%s: new items [%u] avail in edl\n",
5475 				__FUNCTION__, new_items));
5476 			num_items += new_items;
5477 		}
5478 		DHD_RING_UNLOCK(ring->ring_lock, flags);
5479 	}
5480 
5481 	/* if # of items processed is less than num_items, need to re-schedule
5482 	* the deferred ctx
5483 	*/
5484 	if (max_items_to_process < num_items) {
5485 		DHD_INFO(("%s: EDL bound hit / new items found, "
5486 				"items processed=%u; remaining=%u, "
5487 				"resched deferred ctx...\n",
5488 				__FUNCTION__, max_items_to_process,
5489 				num_items - max_items_to_process));
5490 		return (num_items - max_items_to_process);
5491 	}
5492 
5493 	return 0;
5494 
5495 }
5496 
5497 void
dhd_prot_edl_ring_tcm_rd_update(dhd_pub_t * dhd)5498 dhd_prot_edl_ring_tcm_rd_update(dhd_pub_t *dhd)
5499 {
5500 	dhd_prot_t *prot = NULL;
5501 	unsigned long flags = 0;
5502 	msgbuf_ring_t *ring = NULL;
5503 
5504 	if (!dhd)
5505 		return;
5506 
5507 	prot = dhd->prot;
5508 	if (!prot || !prot->d2hring_edl)
5509 		return;
5510 
5511 	ring = prot->d2hring_edl;
5512 	DHD_RING_LOCK(ring->ring_lock, flags);
5513 	dhd_prot_upd_read_idx(dhd, ring);
5514 	DHD_RING_UNLOCK(ring->ring_lock, flags);
5515 }
5516 #endif /* EWP_EDL */
5517 
5518 /* called when DHD needs to check for 'receive complete' messages from the dongle */
5519 bool BCMFASTPATH
dhd_prot_process_msgbuf_rxcpl(dhd_pub_t * dhd,uint bound,int ringtype)5520 dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound, int ringtype)
5521 {
5522 	bool more = FALSE;
5523 	uint n = 0;
5524 	dhd_prot_t *prot = dhd->prot;
5525 	msgbuf_ring_t *ring;
5526 	uint16 item_len;
5527 	host_rxbuf_cmpl_t *msg = NULL;
5528 	uint8 *msg_addr;
5529 	uint32 msg_len;
5530 	uint16 pkt_cnt, pkt_cnt_newidx;
5531 	unsigned long flags;
5532 	dmaaddr_t pa;
5533 	uint32 len;
5534 	void *dmah;
5535 	void *secdma;
5536 	int ifidx = 0, if_newidx = 0;
5537 	void *pkt, *pktqhead = NULL, *prevpkt = NULL, *pkt_newidx, *nextpkt;
5538 	uint32 pktid;
5539 	int i;
5540 	uint8 sync;
5541 	ts_timestamp_t *ts;
5542 
5543 	BCM_REFERENCE(ts);
5544 #ifdef DHD_HP2P
5545 	if (ringtype == DHD_HP2P_RING && prot->d2hring_hp2p_rxcpl)
5546 		ring = prot->d2hring_hp2p_rxcpl;
5547 	else
5548 #endif /* DHD_HP2P */
5549 		ring = &prot->d2hring_rx_cpln;
5550 	item_len = ring->item_len;
5551 	while (1) {
5552 		if (dhd_is_device_removed(dhd))
5553 			break;
5554 
5555 		if (dhd_query_bus_erros(dhd))
5556 			break;
5557 
5558 		if (dhd->hang_was_sent)
5559 			break;
5560 
5561 		if (dhd->smmu_fault_occurred) {
5562 			break;
5563 		}
5564 
5565 		pkt_cnt = 0;
5566 		pktqhead = pkt_newidx = NULL;
5567 		pkt_cnt_newidx = 0;
5568 
5569 		DHD_RING_LOCK(ring->ring_lock, flags);
5570 
5571 		/* Get the address of the next message to be read from ring */
5572 		msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
5573 		if (msg_addr == NULL) {
5574 			DHD_RING_UNLOCK(ring->ring_lock, flags);
5575 			break;
5576 		}
5577 
5578 		while (msg_len > 0) {
5579 			msg = (host_rxbuf_cmpl_t *)msg_addr;
5580 
5581 			/* Wait until DMA completes, then fetch msg_type */
5582 			sync = prot->d2h_sync_cb(dhd, ring, &msg->cmn_hdr, item_len);
5583 			/*
5584 			 * Update the curr_rd to the current index in the ring, from where
5585 			 * the work item is fetched. This way if the fetched work item
5586 			 * fails in LIVELOCK, we can print the exact read index in the ring
5587 			 * that shows up the corrupted work item.
5588 			 */
5589 			if ((ring->curr_rd + 1) >= ring->max_items) {
5590 				ring->curr_rd = 0;
5591 			} else {
5592 				ring->curr_rd += 1;
5593 			}
5594 
5595 			if (!sync) {
5596 				msg_len -= item_len;
5597 				msg_addr += item_len;
5598 				continue;
5599 			}
5600 
5601 			pktid = ltoh32(msg->cmn_hdr.request_id);
5602 
5603 #ifdef DHD_PKTID_AUDIT_RING
5604 			DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_rx_map, pktid,
5605 				DHD_DUPLICATE_FREE, msg, D2HRING_RXCMPLT_ITEMSIZE);
5606 #endif /* DHD_PKTID_AUDIT_RING */
5607 
5608 			pkt = DHD_PKTID_TO_NATIVE(dhd, prot->pktid_rx_map, pktid, pa,
5609 			        len, dmah, secdma, PKTTYPE_DATA_RX);
5610 			if (!pkt) {
5611 				msg_len -= item_len;
5612 				msg_addr += item_len;
5613 				continue;
5614 			}
5615 
5616 			if (SECURE_DMA_ENAB(dhd->osh))
5617 				SECURE_DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0,
5618 				    dmah, secdma, 0);
5619 			else
5620 				DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
5621 
5622 #ifdef DMAMAP_STATS
5623 			dhd->dma_stats.rxdata--;
5624 			dhd->dma_stats.rxdata_sz -= len;
5625 #endif /* DMAMAP_STATS */
5626 			DHD_INFO(("id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, "
5627 				"pktdata %p, metalen %d\n",
5628 				ltoh32(msg->cmn_hdr.request_id),
5629 				ltoh16(msg->data_offset),
5630 				ltoh16(msg->data_len), msg->cmn_hdr.if_id,
5631 				msg->cmn_hdr.flags, PKTDATA(dhd->osh, pkt),
5632 				ltoh16(msg->metadata_len)));
5633 
5634 			pkt_cnt++;
5635 			msg_len -= item_len;
5636 			msg_addr += item_len;
5637 
5638 #if DHD_DBG_SHOW_METADATA
5639 			if (prot->metadata_dbg && prot->rx_metadata_offset &&
5640 			        msg->metadata_len) {
5641 				uchar *ptr;
5642 				ptr = PKTDATA(dhd->osh, pkt) - (prot->rx_metadata_offset);
5643 				/* header followed by data */
5644 				bcm_print_bytes("rxmetadata", ptr, msg->metadata_len);
5645 				dhd_prot_print_metadata(dhd, ptr, msg->metadata_len);
5646 			}
5647 #endif /* DHD_DBG_SHOW_METADATA */
5648 
5649 			/* data_offset from buf start */
5650 			if (ltoh16(msg->data_offset)) {
5651 				/* data offset given from dongle after split rx */
5652 				PKTPULL(dhd->osh, pkt, ltoh16(msg->data_offset));
5653 			}
5654 			else if (prot->rx_dataoffset) {
5655 				/* DMA RX offset updated through shared area */
5656 				PKTPULL(dhd->osh, pkt, prot->rx_dataoffset);
5657 			}
5658 			/* Actual length of the packet */
5659 			PKTSETLEN(dhd->osh, pkt, ltoh16(msg->data_len));
5660 
5661 #if defined(WL_MONITOR)
5662 			if (dhd_monitor_enabled(dhd, ifidx)) {
5663 				if (msg->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11) {
5664 					dhd_rx_mon_pkt(dhd, msg, pkt, ifidx);
5665 					continue;
5666 				} else {
5667 					DHD_ERROR(("Received non 802.11 packet, "
5668 						"when monitor mode is enabled\n"));
5669 				}
5670 			}
5671 #endif /* WL_MONITOR */
5672 
5673 			if (!pktqhead) {
5674 				pktqhead = prevpkt = pkt;
5675 				ifidx = msg->cmn_hdr.if_id;
5676 			} else {
5677 				if (ifidx != msg->cmn_hdr.if_id) {
5678 					pkt_newidx = pkt;
5679 					if_newidx = msg->cmn_hdr.if_id;
5680 					pkt_cnt--;
5681 					pkt_cnt_newidx = 1;
5682 					break;
5683 				} else {
5684 					PKTSETNEXT(dhd->osh, prevpkt, pkt);
5685 					prevpkt = pkt;
5686 				}
5687 			}
5688 
5689 #ifdef DHD_HP2P
5690 			if (dhd->hp2p_capable && ring == prot->d2hring_hp2p_rxcpl) {
5691 #ifdef DHD_HP2P_DEBUG
5692 				bcm_print_bytes("Rxcpl", (uchar *)msg,  sizeof(host_rxbuf_cmpl_t));
5693 #endif /* DHD_HP2P_DEBUG */
5694 				dhd_update_hp2p_rxstats(dhd, msg);
5695 			}
5696 #endif /* DHD_HP2P */
5697 
5698 #ifdef DHD_LBUF_AUDIT
5699 			PKTAUDIT(dhd->osh, pkt);
5700 #endif // endif
5701 		}
5702 
5703 		/* roll back read pointer for unprocessed message */
5704 		if (msg_len > 0) {
5705 			if (ring->rd < msg_len / item_len)
5706 				ring->rd = ring->max_items - msg_len / item_len;
5707 			else
5708 				ring->rd -= msg_len / item_len;
5709 		}
5710 
5711 		/* Update read pointer */
5712 		dhd_prot_upd_read_idx(dhd, ring);
5713 
5714 		DHD_RING_UNLOCK(ring->ring_lock, flags);
5715 
5716 		pkt = pktqhead;
5717 		for (i = 0; pkt && i < pkt_cnt; i++, pkt = nextpkt) {
5718 			nextpkt = PKTNEXT(dhd->osh, pkt);
5719 			PKTSETNEXT(dhd->osh, pkt, NULL);
5720 #ifdef DHD_LB_RXP
5721 			dhd_lb_rx_pkt_enqueue(dhd, pkt, ifidx);
5722 #elif defined(DHD_RX_CHAINING)
5723 			dhd_rxchain_frame(dhd, pkt, ifidx);
5724 #else
5725 			dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
5726 #endif /* DHD_LB_RXP */
5727 		}
5728 
5729 		if (pkt_newidx) {
5730 #ifdef DHD_LB_RXP
5731 			dhd_lb_rx_pkt_enqueue(dhd, pkt_newidx, if_newidx);
5732 #elif defined(DHD_RX_CHAINING)
5733 			dhd_rxchain_frame(dhd, pkt_newidx, if_newidx);
5734 #else
5735 			dhd_bus_rx_frame(dhd->bus, pkt_newidx, if_newidx, 1);
5736 #endif /* DHD_LB_RXP */
5737 		}
5738 
5739 		pkt_cnt += pkt_cnt_newidx;
5740 
5741 		/* Post another set of rxbufs to the device */
5742 		dhd_prot_return_rxbuf(dhd, 0, pkt_cnt);
5743 
5744 #ifdef DHD_RX_CHAINING
5745 		dhd_rxchain_commit(dhd);
5746 #endif // endif
5747 
5748 		/* After batch processing, check RX bound */
5749 		n += pkt_cnt;
5750 		if (n >= bound) {
5751 			more = TRUE;
5752 			break;
5753 		}
5754 	}
5755 
5756 	/* Call lb_dispatch only if packets are queued */
5757 	if (n &&
5758 #ifdef WL_MONITOR
5759 	!(dhd_monitor_enabled(dhd, ifidx)) &&
5760 #endif /* WL_MONITOR */
5761 	TRUE) {
5762 		DHD_LB_DISPATCH_RX_COMPL(dhd);
5763 		DHD_LB_DISPATCH_RX_PROCESS(dhd);
5764 	}
5765 
5766 	return more;
5767 
5768 }
5769 
5770 /**
5771  * Hands transmit packets (with a caller provided flow_id) over to dongle territory (the flow ring)
5772  */
5773 void
dhd_prot_update_txflowring(dhd_pub_t * dhd,uint16 flowid,void * msgring)5774 dhd_prot_update_txflowring(dhd_pub_t *dhd, uint16 flowid, void *msgring)
5775 {
5776 	msgbuf_ring_t *ring = (msgbuf_ring_t *)msgring;
5777 
5778 	if (ring == NULL) {
5779 		DHD_ERROR(("%s: NULL txflowring. exiting...\n",  __FUNCTION__));
5780 		return;
5781 	}
5782 	/* Update read pointer */
5783 	if (dhd->dma_d2h_ring_upd_support) {
5784 		ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
5785 	}
5786 
5787 	DHD_TRACE(("ringid %d flowid %d write %d read %d \n\n",
5788 		ring->idx, flowid, ring->wr, ring->rd));
5789 
5790 	/* Need more logic here, but for now use it directly */
5791 	dhd_bus_schedule_queue(dhd->bus, flowid, TRUE); /* from queue to flowring */
5792 }
5793 
5794 /** called when DHD needs to check for 'transmit complete' messages from the dongle */
5795 bool BCMFASTPATH
dhd_prot_process_msgbuf_txcpl(dhd_pub_t * dhd,uint bound,int ringtype)5796 dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound, int ringtype)
5797 {
5798 	bool more = TRUE;
5799 	uint n = 0;
5800 	msgbuf_ring_t *ring;
5801 	unsigned long flags;
5802 
5803 #ifdef DHD_HP2P
5804 	if (ringtype == DHD_HP2P_RING && dhd->prot->d2hring_hp2p_txcpl)
5805 		ring = dhd->prot->d2hring_hp2p_txcpl;
5806 	else
5807 #endif /* DHD_HP2P */
5808 		ring = &dhd->prot->d2hring_tx_cpln;
5809 
5810 	/* Process all the messages - DTOH direction */
5811 	while (!dhd_is_device_removed(dhd)) {
5812 		uint8 *msg_addr;
5813 		uint32 msg_len;
5814 
5815 		if (dhd_query_bus_erros(dhd)) {
5816 			more = FALSE;
5817 			break;
5818 		}
5819 
5820 		if (dhd->hang_was_sent) {
5821 			more = FALSE;
5822 			break;
5823 		}
5824 
5825 		if (dhd->smmu_fault_occurred) {
5826 			more = FALSE;
5827 			break;
5828 		}
5829 
5830 		DHD_RING_LOCK(ring->ring_lock, flags);
5831 		/* Get the address of the next message to be read from ring */
5832 		msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
5833 		DHD_RING_UNLOCK(ring->ring_lock, flags);
5834 
5835 		if (msg_addr == NULL) {
5836 			more = FALSE;
5837 			break;
5838 		}
5839 
5840 		/* Prefetch data to populate the cache */
5841 		OSL_PREFETCH(msg_addr);
5842 
5843 		if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
5844 			DHD_ERROR(("%s: process %s msg addr %p len %d\n",
5845 				__FUNCTION__, ring->name, msg_addr, msg_len));
5846 		}
5847 
5848 		/* Write to dngl rd ptr */
5849 		dhd_prot_upd_read_idx(dhd, ring);
5850 
5851 		/* After batch processing, check bound */
5852 		n += msg_len / ring->item_len;
5853 		if (n >= bound) {
5854 			break;
5855 		}
5856 	}
5857 
5858 	DHD_LB_DISPATCH_TX_COMPL(dhd);
5859 
5860 	return more;
5861 }
5862 
5863 int BCMFASTPATH
dhd_prot_process_trapbuf(dhd_pub_t * dhd)5864 dhd_prot_process_trapbuf(dhd_pub_t *dhd)
5865 {
5866 	uint32 data;
5867 	dhd_dma_buf_t *trap_addr = &dhd->prot->fw_trap_buf;
5868 
5869 	/* Interrupts can come in before this struct
5870 	 *  has been initialized.
5871 	 */
5872 	if (trap_addr->va == NULL) {
5873 		DHD_ERROR(("%s: trap_addr->va is NULL\n", __FUNCTION__));
5874 		return 0;
5875 	}
5876 
5877 	OSL_CACHE_INV((void *)trap_addr->va, sizeof(uint32));
5878 	data = *(uint32 *)(trap_addr->va);
5879 
5880 	if (data & D2H_DEV_FWHALT) {
5881 		DHD_ERROR(("Firmware trapped and trap_data is 0x%04x\n", data));
5882 
5883 		if (data & D2H_DEV_EXT_TRAP_DATA)
5884 		{
5885 			if (dhd->extended_trap_data) {
5886 				OSL_CACHE_INV((void *)trap_addr->va,
5887 				       BCMPCIE_EXT_TRAP_DATA_MAXLEN);
5888 				memcpy(dhd->extended_trap_data, (uint32 *)trap_addr->va,
5889 				       BCMPCIE_EXT_TRAP_DATA_MAXLEN);
5890 			}
5891 			DHD_ERROR(("Extended trap data available\n"));
5892 		}
5893 		return data;
5894 	}
5895 	return 0;
5896 }
5897 
5898 /** called when DHD needs to check for 'ioctl complete' messages from the dongle */
5899 int BCMFASTPATH
dhd_prot_process_ctrlbuf(dhd_pub_t * dhd)5900 dhd_prot_process_ctrlbuf(dhd_pub_t *dhd)
5901 {
5902 	dhd_prot_t *prot = dhd->prot;
5903 	msgbuf_ring_t *ring = &prot->d2hring_ctrl_cpln;
5904 	unsigned long flags;
5905 
5906 	/* Process all the messages - DTOH direction */
5907 	while (!dhd_is_device_removed(dhd)) {
5908 		uint8 *msg_addr;
5909 		uint32 msg_len;
5910 
5911 		if (dhd_query_bus_erros(dhd)) {
5912 			break;
5913 		}
5914 
5915 		if (dhd->hang_was_sent) {
5916 			break;
5917 		}
5918 
5919 		if (dhd->smmu_fault_occurred) {
5920 			break;
5921 		}
5922 
5923 		DHD_RING_LOCK(ring->ring_lock, flags);
5924 		/* Get the address of the next message to be read from ring */
5925 		msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
5926 		DHD_RING_UNLOCK(ring->ring_lock, flags);
5927 
5928 		if (msg_addr == NULL) {
5929 			break;
5930 		}
5931 
5932 		/* Prefetch data to populate the cache */
5933 		OSL_PREFETCH(msg_addr);
5934 		if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
5935 			DHD_ERROR(("%s: process %s msg addr %p len %d\n",
5936 				__FUNCTION__, ring->name, msg_addr, msg_len));
5937 		}
5938 
5939 		/* Write to dngl rd ptr */
5940 		dhd_prot_upd_read_idx(dhd, ring);
5941 	}
5942 
5943 	return 0;
5944 }
5945 
5946 /**
5947  * Consume messages out of the D2H ring. Ensure that the message's DMA to host
5948  * memory has completed, before invoking the message handler via a table lookup
5949  * of the cmn_msg_hdr::msg_type.
5950  */
5951 static int BCMFASTPATH
dhd_prot_process_msgtype(dhd_pub_t * dhd,msgbuf_ring_t * ring,uint8 * buf,uint32 len)5952 dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len)
5953 {
5954 	uint32 buf_len = len;
5955 	uint16 item_len;
5956 	uint8 msg_type;
5957 	cmn_msg_hdr_t *msg = NULL;
5958 	int ret = BCME_OK;
5959 
5960 	ASSERT(ring);
5961 	item_len = ring->item_len;
5962 	if (item_len == 0) {
5963 		DHD_ERROR(("%s: ringidx %d, item_len %d buf_len %d \n",
5964 			__FUNCTION__, ring->idx, item_len, buf_len));
5965 		return BCME_ERROR;
5966 	}
5967 
5968 	while (buf_len > 0) {
5969 		if (dhd->hang_was_sent) {
5970 			ret = BCME_ERROR;
5971 			goto done;
5972 		}
5973 
5974 		if (dhd->smmu_fault_occurred) {
5975 			ret = BCME_ERROR;
5976 			goto done;
5977 		}
5978 
5979 		msg = (cmn_msg_hdr_t *)buf;
5980 
5981 		/* Wait until DMA completes, then fetch msg_type */
5982 		msg_type = dhd->prot->d2h_sync_cb(dhd, ring, msg, item_len);
5983 
5984 		/*
5985 		 * Update the curr_rd to the current index in the ring, from where
5986 		 * the work item is fetched. This way if the fetched work item
5987 		 * fails in LIVELOCK, we can print the exact read index in the ring
5988 		 * that shows up the corrupted work item.
5989 		 */
5990 		if ((ring->curr_rd + 1) >= ring->max_items) {
5991 			ring->curr_rd = 0;
5992 		} else {
5993 			ring->curr_rd += 1;
5994 		}
5995 
5996 		/* Prefetch data to populate the cache */
5997 		OSL_PREFETCH(buf + item_len);
5998 
5999 		DHD_INFO(("msg_type %d item_len %d buf_len %d\n",
6000 			msg_type, item_len, buf_len));
6001 
6002 		if (msg_type == MSG_TYPE_LOOPBACK) {
6003 			bcm_print_bytes("LPBK RESP: ", (uint8 *)msg, item_len);
6004 			DHD_ERROR((" MSG_TYPE_LOOPBACK, len %d\n", item_len));
6005 		}
6006 
6007 		ASSERT(msg_type < DHD_PROT_FUNCS);
6008 		if (msg_type >= DHD_PROT_FUNCS) {
6009 			DHD_ERROR(("%s: msg_type %d, item_len %d buf_len %d\n",
6010 				__FUNCTION__, msg_type, item_len, buf_len));
6011 			ret = BCME_ERROR;
6012 			goto done;
6013 		}
6014 
6015 		if (msg_type == MSG_TYPE_INFO_BUF_CMPLT) {
6016 			if (ring == dhd->prot->d2hring_info_cpln) {
6017 				if (!dhd->prot->infobufpost) {
6018 					DHD_ERROR(("infobuf posted are zero,"
6019 						   "but there is a completion\n"));
6020 					goto done;
6021 				}
6022 				dhd->prot->infobufpost--;
6023 				dhd_prot_infobufpost(dhd, dhd->prot->h2dring_info_subn);
6024 				dhd_prot_process_infobuf_complete(dhd, buf);
6025 			}
6026 		} else
6027 		if (table_lookup[msg_type]) {
6028 			table_lookup[msg_type](dhd, buf);
6029 		}
6030 
6031 		if (buf_len < item_len) {
6032 			ret = BCME_ERROR;
6033 			goto done;
6034 		}
6035 		buf_len = buf_len - item_len;
6036 		buf = buf + item_len;
6037 	}
6038 
6039 done:
6040 
6041 #ifdef DHD_RX_CHAINING
6042 	dhd_rxchain_commit(dhd);
6043 #endif // endif
6044 
6045 	return ret;
6046 } /* dhd_prot_process_msgtype */
6047 
6048 static void
dhd_prot_noop(dhd_pub_t * dhd,void * msg)6049 dhd_prot_noop(dhd_pub_t *dhd, void *msg)
6050 {
6051 	return;
6052 }
6053 
6054 /** called on MSG_TYPE_RING_STATUS message received from dongle */
6055 static void
dhd_prot_ringstatus_process(dhd_pub_t * dhd,void * msg)6056 dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg)
6057 {
6058 	pcie_ring_status_t *ring_status = (pcie_ring_status_t *) msg;
6059 	uint32 request_id = ltoh32(ring_status->cmn_hdr.request_id);
6060 	uint16 status = ltoh16(ring_status->compl_hdr.status);
6061 	uint16 ring_id = ltoh16(ring_status->compl_hdr.flow_ring_id);
6062 
6063 	DHD_ERROR(("ring status: request_id %d, status 0x%04x, flow ring %d, write_idx %d \n",
6064 		request_id, status, ring_id, ltoh16(ring_status->write_idx)));
6065 
6066 	if (ltoh16(ring_status->compl_hdr.ring_id) != BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT)
6067 		return;
6068 	if (status == BCMPCIE_BAD_PHASE) {
6069 		/* bad phase report from */
6070 		DHD_ERROR(("Bad phase\n"));
6071 	}
6072 	if (status != BCMPCIE_BADOPTION)
6073 		return;
6074 
6075 	if (request_id == DHD_H2D_DBGRING_REQ_PKTID) {
6076 		if (dhd->prot->h2dring_info_subn != NULL) {
6077 			if (dhd->prot->h2dring_info_subn->create_pending == TRUE) {
6078 				DHD_ERROR(("H2D ring create failed for info ring\n"));
6079 				dhd->prot->h2dring_info_subn->create_pending = FALSE;
6080 			}
6081 			else
6082 				DHD_ERROR(("ring create ID for a ring, create not pending\n"));
6083 		} else {
6084 			DHD_ERROR(("%s info submit ring doesn't exist\n", __FUNCTION__));
6085 		}
6086 	}
6087 	else if (request_id == DHD_D2H_DBGRING_REQ_PKTID) {
6088 		if (dhd->prot->d2hring_info_cpln != NULL) {
6089 			if (dhd->prot->d2hring_info_cpln->create_pending == TRUE) {
6090 				DHD_ERROR(("D2H ring create failed for info ring\n"));
6091 				dhd->prot->d2hring_info_cpln->create_pending = FALSE;
6092 			}
6093 			else
6094 				DHD_ERROR(("ring create ID for info ring, create not pending\n"));
6095 		} else {
6096 			DHD_ERROR(("%s info cpl ring doesn't exist\n", __FUNCTION__));
6097 		}
6098 	}
6099 #ifdef DHD_HP2P
6100 	else if (request_id == DHD_D2H_HPPRING_TXREQ_PKTID) {
6101 		if (dhd->prot->d2hring_hp2p_txcpl != NULL) {
6102 			if (dhd->prot->d2hring_hp2p_txcpl->create_pending == TRUE) {
6103 				DHD_ERROR(("H2D ring create failed for hp2p ring\n"));
6104 				dhd->prot->d2hring_hp2p_txcpl->create_pending = FALSE;
6105 			}
6106 			else
6107 				DHD_ERROR(("ring create ID for a ring, create not pending\n"));
6108 		} else {
6109 			DHD_ERROR(("%s hp2p txcmpl ring doesn't exist\n", __FUNCTION__));
6110 		}
6111 	}
6112 	else if (request_id == DHD_D2H_HPPRING_RXREQ_PKTID) {
6113 		if (dhd->prot->d2hring_hp2p_rxcpl != NULL) {
6114 			if (dhd->prot->d2hring_hp2p_rxcpl->create_pending == TRUE) {
6115 				DHD_ERROR(("D2H ring create failed for hp2p rxcmpl ring\n"));
6116 				dhd->prot->d2hring_hp2p_rxcpl->create_pending = FALSE;
6117 			}
6118 			else
6119 				DHD_ERROR(("ring create ID for hp2p rxcmpl ring, not pending\n"));
6120 		} else {
6121 			DHD_ERROR(("%s hp2p rxcpl ring doesn't exist\n", __FUNCTION__));
6122 		}
6123 	}
6124 #endif /* DHD_HP2P */
6125 	else {
6126 		DHD_ERROR(("don;t know how to pair with original request\n"));
6127 	}
6128 	/* How do we track this to pair it with ??? */
6129 	return;
6130 }
6131 
6132 /** called on MSG_TYPE_GEN_STATUS ('general status') message received from dongle */
6133 static void
dhd_prot_genstatus_process(dhd_pub_t * dhd,void * msg)6134 dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg)
6135 {
6136 	pcie_gen_status_t *gen_status = (pcie_gen_status_t *)msg;
6137 	DHD_ERROR(("ERROR: gen status: request_id %d, STATUS 0x%04x, flow ring %d \n",
6138 		gen_status->cmn_hdr.request_id, gen_status->compl_hdr.status,
6139 		gen_status->compl_hdr.flow_ring_id));
6140 
6141 	/* How do we track this to pair it with ??? */
6142 	return;
6143 }
6144 
6145 /**
6146  * Called on MSG_TYPE_IOCTLPTR_REQ_ACK ('ioctl ack') message received from dongle, meaning that the
6147  * dongle received the ioctl message in dongle memory.
6148  */
6149 static void
dhd_prot_ioctack_process(dhd_pub_t * dhd,void * msg)6150 dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg)
6151 {
6152 	ioctl_req_ack_msg_t *ioct_ack = (ioctl_req_ack_msg_t *)msg;
6153 	unsigned long flags;
6154 #if defined(DHD_PKTID_AUDIT_RING)
6155 	uint32 pktid = ltoh32(ioct_ack->cmn_hdr.request_id);
6156 #endif // endif
6157 
6158 #if defined(DHD_PKTID_AUDIT_RING)
6159 	/* Skip audit for ADHD_IOCTL_REQ_PKTID = 0xFFFE */
6160 	if (pktid != DHD_IOCTL_REQ_PKTID) {
6161 #ifndef IOCTLRESP_USE_CONSTMEM
6162 		DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid,
6163 			DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
6164 #else
6165 		DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_map_handle_ioctl, pktid,
6166 			DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
6167 #endif /* !IOCTLRESP_USE_CONSTMEM */
6168 	}
6169 #endif // endif
6170 
6171 	dhd->prot->ioctl_ack_time = OSL_LOCALTIME_NS();
6172 
6173 	DHD_GENERAL_LOCK(dhd, flags);
6174 	if ((dhd->prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) &&
6175 		(dhd->prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
6176 		dhd->prot->ioctl_state &= ~MSGBUF_IOCTL_ACK_PENDING;
6177 	} else {
6178 		DHD_ERROR(("%s: received ioctl ACK with state %02x trans_id = %d\n",
6179 			__FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
6180 		prhex("dhd_prot_ioctack_process:",
6181 			(uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
6182 	}
6183 	DHD_GENERAL_UNLOCK(dhd, flags);
6184 
6185 	DHD_CTL(("ioctl req ack: request_id %d, status 0x%04x, flow ring %d \n",
6186 		ioct_ack->cmn_hdr.request_id, ioct_ack->compl_hdr.status,
6187 		ioct_ack->compl_hdr.flow_ring_id));
6188 	if (ioct_ack->compl_hdr.status != 0)  {
6189 		DHD_ERROR(("got an error status for the ioctl request...need to handle that\n"));
6190 	}
6191 }
6192 
6193 /** called on MSG_TYPE_IOCTL_CMPLT message received from dongle */
6194 static void
dhd_prot_ioctcmplt_process(dhd_pub_t * dhd,void * msg)6195 dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg)
6196 {
6197 	dhd_prot_t *prot = dhd->prot;
6198 	uint32 pkt_id, xt_id;
6199 	ioctl_comp_resp_msg_t *ioct_resp = (ioctl_comp_resp_msg_t *)msg;
6200 	void *pkt;
6201 	unsigned long flags;
6202 	dhd_dma_buf_t retbuf;
6203 
6204 	/* Check for ioctl timeout induce flag, which is set by firing
6205 	 * dhd iovar to induce IOCTL timeout. If flag is set,
6206 	 * return from here, which results in to IOCTL timeout.
6207 	 */
6208 	if (dhd->dhd_induce_error == DHD_INDUCE_IOCTL_TIMEOUT) {
6209 		DHD_ERROR(("%s: Inducing resumed on timeout\n", __FUNCTION__));
6210 		return;
6211 	}
6212 
6213 	memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
6214 
6215 	pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id);
6216 
6217 #if defined(DHD_PKTID_AUDIT_RING)
6218 #ifndef IOCTLRESP_USE_CONSTMEM
6219 	DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_ctrl_map, pkt_id,
6220 		DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
6221 #else
6222 	DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_map_handle_ioctl, pkt_id,
6223 		DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
6224 #endif /* !IOCTLRESP_USE_CONSTMEM */
6225 #endif // endif
6226 
6227 	DHD_GENERAL_LOCK(dhd, flags);
6228 	if ((prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) ||
6229 		!(prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
6230 		DHD_ERROR(("%s: received ioctl response with state %02x trans_id = %d\n",
6231 			__FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
6232 		prhex("dhd_prot_ioctcmplt_process:",
6233 			(uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
6234 		DHD_GENERAL_UNLOCK(dhd, flags);
6235 		return;
6236 	}
6237 
6238 	dhd->prot->ioctl_cmplt_time = OSL_LOCALTIME_NS();
6239 
6240 	/* Clear Response pending bit */
6241 	prot->ioctl_state &= ~MSGBUF_IOCTL_RESP_PENDING;
6242 	DHD_GENERAL_UNLOCK(dhd, flags);
6243 
6244 #ifndef IOCTLRESP_USE_CONSTMEM
6245 	pkt = dhd_prot_packet_get(dhd, pkt_id, PKTTYPE_IOCTL_RX, TRUE);
6246 #else
6247 	dhd_prot_ioctl_ret_buffer_get(dhd, pkt_id, &retbuf);
6248 	pkt = retbuf.va;
6249 #endif /* !IOCTLRESP_USE_CONSTMEM */
6250 	if (!pkt) {
6251 		DHD_ERROR(("%s: received ioctl response with NULL pkt\n", __FUNCTION__));
6252 		prhex("dhd_prot_ioctcmplt_process:",
6253 			(uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
6254 		return;
6255 	}
6256 
6257 	prot->ioctl_resplen = ltoh16(ioct_resp->resp_len);
6258 	prot->ioctl_status = ltoh16(ioct_resp->compl_hdr.status);
6259 	xt_id = ltoh16(ioct_resp->trans_id);
6260 
6261 	if (xt_id != prot->ioctl_trans_id || prot->curr_ioctl_cmd != ioct_resp->cmd) {
6262 		DHD_ERROR(("%s: transaction id(%d %d) or cmd(%d %d) mismatch\n",
6263 			__FUNCTION__, xt_id, prot->ioctl_trans_id,
6264 			prot->curr_ioctl_cmd, ioct_resp->cmd));
6265 		dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_ERROR);
6266 		dhd_prot_debug_info_print(dhd);
6267 #ifdef DHD_FW_COREDUMP
6268 		if (dhd->memdump_enabled) {
6269 			/* collect core dump */
6270 			dhd->memdump_type = DUMP_TYPE_TRANS_ID_MISMATCH;
6271 			dhd_bus_mem_dump(dhd);
6272 		}
6273 #else
6274 		ASSERT(0);
6275 #endif /* DHD_FW_COREDUMP */
6276 		dhd_schedule_reset(dhd);
6277 		goto exit;
6278 	}
6279 	DHD_CTL(("IOCTL_COMPLETE: req_id %x transid %d status %x resplen %d\n",
6280 		pkt_id, xt_id, prot->ioctl_status, prot->ioctl_resplen));
6281 
6282 	if (prot->ioctl_resplen > 0) {
6283 #ifndef IOCTLRESP_USE_CONSTMEM
6284 		bcopy(PKTDATA(dhd->osh, pkt), prot->retbuf.va, prot->ioctl_resplen);
6285 #else
6286 		bcopy(pkt, prot->retbuf.va, prot->ioctl_resplen);
6287 #endif /* !IOCTLRESP_USE_CONSTMEM */
6288 	}
6289 
6290 	/* wake up any dhd_os_ioctl_resp_wait() */
6291 	dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_SUCCESS);
6292 
6293 exit:
6294 #ifndef IOCTLRESP_USE_CONSTMEM
6295 	dhd_prot_packet_free(dhd, pkt,
6296 		PKTTYPE_IOCTL_RX, FALSE);
6297 #else
6298 	free_ioctl_return_buffer(dhd, &retbuf);
6299 #endif /* !IOCTLRESP_USE_CONSTMEM */
6300 
6301 	/* Post another ioctl buf to the device */
6302 	if (prot->cur_ioctlresp_bufs_posted > 0) {
6303 		prot->cur_ioctlresp_bufs_posted--;
6304 	}
6305 
6306 	dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
6307 }
6308 
6309 int
dhd_prot_check_tx_resource(dhd_pub_t * dhd)6310 dhd_prot_check_tx_resource(dhd_pub_t *dhd)
6311 {
6312 	return dhd->prot->no_tx_resource;
6313 }
6314 
6315 void
dhd_prot_update_pktid_txq_stop_cnt(dhd_pub_t * dhd)6316 dhd_prot_update_pktid_txq_stop_cnt(dhd_pub_t *dhd)
6317 {
6318 	dhd->prot->pktid_txq_stop_cnt++;
6319 }
6320 
6321 void
dhd_prot_update_pktid_txq_start_cnt(dhd_pub_t * dhd)6322 dhd_prot_update_pktid_txq_start_cnt(dhd_pub_t *dhd)
6323 {
6324 	dhd->prot->pktid_txq_start_cnt++;
6325 }
6326 
6327 /** called on MSG_TYPE_TX_STATUS message received from dongle */
6328 static void BCMFASTPATH
dhd_prot_txstatus_process(dhd_pub_t * dhd,void * msg)6329 dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg)
6330 {
6331 	dhd_prot_t *prot = dhd->prot;
6332 	host_txbuf_cmpl_t * txstatus;
6333 	unsigned long flags;
6334 	uint32 pktid;
6335 	void *pkt;
6336 	dmaaddr_t pa;
6337 	uint32 len;
6338 	void *dmah;
6339 	void *secdma;
6340 	bool pkt_fate;
6341 	msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln;
6342 #if defined(TX_STATUS_LATENCY_STATS) || defined(DHD_HP2P)
6343 	flow_info_t *flow_info;
6344 	uint64 tx_status_latency;
6345 #endif /* TX_STATUS_LATENCY_STATS || DHD_HP2P */
6346 #if defined(TX_STATUS_LATENCY_STATS)
6347 	flow_ring_node_t *flow_ring_node;
6348 	uint16 flowid;
6349 #endif // endif
6350 	ts_timestamp_t *ts;
6351 
6352 	BCM_REFERENCE(ts);
6353 	txstatus = (host_txbuf_cmpl_t *)msg;
6354 #if defined(TX_STATUS_LATENCY_STATS)
6355 	flowid = txstatus->compl_hdr.flow_ring_id;
6356 	flow_ring_node = DHD_FLOW_RING(dhd, flowid);
6357 #endif // endif
6358 
6359 	/* locks required to protect circular buffer accesses */
6360 	DHD_RING_LOCK(ring->ring_lock, flags);
6361 	pktid = ltoh32(txstatus->cmn_hdr.request_id);
6362 	pkt_fate = TRUE;
6363 
6364 #if defined(DHD_PKTID_AUDIT_RING)
6365 	DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_tx_map, pktid,
6366 			DHD_DUPLICATE_FREE, msg, D2HRING_TXCMPLT_ITEMSIZE);
6367 #endif // endif
6368 
6369 	DHD_INFO(("txstatus for pktid 0x%04x\n", pktid));
6370 	if (OSL_ATOMIC_DEC_RETURN(dhd->osh, &prot->active_tx_count) < 0) {
6371 		DHD_ERROR(("Extra packets are freed\n"));
6372 	}
6373 	ASSERT(pktid != 0);
6374 
6375 	pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid,
6376 		pa, len, dmah, secdma, PKTTYPE_DATA_TX);
6377 	if (!pkt) {
6378 		DHD_RING_UNLOCK(ring->ring_lock, flags);
6379 		DHD_ERROR(("%s: received txstatus with NULL pkt\n", __FUNCTION__));
6380 		prhex("dhd_prot_txstatus_process:", (uchar *)msg, D2HRING_TXCMPLT_ITEMSIZE);
6381 #ifdef DHD_FW_COREDUMP
6382 		if (dhd->memdump_enabled) {
6383 			/* collect core dump */
6384 			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
6385 			dhd_bus_mem_dump(dhd);
6386 		}
6387 #else
6388 		ASSERT(0);
6389 #endif /* DHD_FW_COREDUMP */
6390 		return;
6391 	}
6392 
6393 	if (DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map) == DHD_PKTID_MIN_AVAIL_COUNT) {
6394 		dhd->prot->no_tx_resource = FALSE;
6395 		dhd_bus_start_queue(dhd->bus);
6396 	}
6397 
6398 	if (SECURE_DMA_ENAB(dhd->osh)) {
6399 		int offset = 0;
6400 		BCM_REFERENCE(offset);
6401 
6402 		if (dhd->prot->tx_metadata_offset)
6403 			offset = dhd->prot->tx_metadata_offset + ETHER_HDR_LEN;
6404 		SECURE_DMA_UNMAP(dhd->osh, (uint) pa,
6405 			(uint) dhd->prot->tx_metadata_offset, DMA_RX, 0, dmah,
6406 			secdma, offset);
6407 	} else {
6408 		DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
6409 	}
6410 
6411 #ifdef TX_STATUS_LATENCY_STATS
6412 	/* update the tx status latency for flowid */
6413 	flow_info = &flow_ring_node->flow_info;
6414 	tx_status_latency = OSL_SYSUPTIME_US() - DHD_PKT_GET_QTIME(pkt);
6415 	flow_info->cum_tx_status_latency += tx_status_latency;
6416 	flow_info->num_tx_status++;
6417 #endif /* TX_STATUS_LATENCY_STATS */
6418 #if defined(DHD_LB_TXC) && !defined(BCM_SECURE_DMA)
6419 	{
6420 		int elem_ix;
6421 		void **elem;
6422 		bcm_workq_t *workq;
6423 
6424 		workq = &prot->tx_compl_prod;
6425 		/*
6426 		 * Produce the packet into the tx_compl workq for the tx compl tasklet
6427 		 * to consume.
6428 		 */
6429 		OSL_PREFETCH(PKTTAG(pkt));
6430 
6431 		/* fetch next available slot in workq */
6432 		elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
6433 
6434 		DHD_PKTTAG_SET_PA((dhd_pkttag_fr_t *)PKTTAG(pkt), pa);
6435 		DHD_PKTTAG_SET_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt), len);
6436 
6437 		if (elem_ix == BCM_RING_FULL) {
6438 			DHD_ERROR(("tx_compl_prod BCM_RING_FULL\n"));
6439 			goto workq_ring_full;
6440 		}
6441 
6442 		elem = WORKQ_ELEMENT(void *, &prot->tx_compl_prod, elem_ix);
6443 		*elem = pkt;
6444 
6445 		smp_wmb();
6446 
6447 		/* Sync WR index to consumer if the SYNC threshold has been reached */
6448 		if (++prot->tx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) {
6449 			bcm_workq_prod_sync(workq);
6450 			prot->tx_compl_prod_sync = 0;
6451 		}
6452 
6453 		DHD_INFO(("%s: tx_compl_prod pkt<%p> sync<%d>\n",
6454 			__FUNCTION__, pkt, prot->tx_compl_prod_sync));
6455 
6456 		DHD_RING_UNLOCK(ring->ring_lock, flags);
6457 		return;
6458 	}
6459 
6460 workq_ring_full:
6461 
6462 #endif /* !DHD_LB_TXC */
6463 
6464 #ifdef DMAMAP_STATS
6465 	dhd->dma_stats.txdata--;
6466 	dhd->dma_stats.txdata_sz -= len;
6467 #endif /* DMAMAP_STATS */
6468 	pkt_fate = dhd_dbg_process_tx_status(dhd, pkt, pktid,
6469 			ltoh16(txstatus->compl_hdr.status) & WLFC_CTL_PKTFLAG_MASK);
6470 
6471 #if defined(BCMPCIE)
6472 	dhd_txcomplete(dhd, pkt, pkt_fate);
6473 #ifdef DHD_4WAYM4_FAIL_DISCONNECT
6474 	dhd_eap_txcomplete(dhd, pkt, pkt_fate, txstatus->cmn_hdr.if_id);
6475 #endif /* DHD_4WAYM4_FAIL_DISCONNECT */
6476 #endif // endif
6477 
6478 #if DHD_DBG_SHOW_METADATA
6479 	if (dhd->prot->metadata_dbg &&
6480 		dhd->prot->tx_metadata_offset && txstatus->metadata_len) {
6481 		uchar *ptr;
6482 		/* The Ethernet header of TX frame was copied and removed.
6483 		 * Here, move the data pointer forward by Ethernet header size.
6484 		 */
6485 		PKTPULL(dhd->osh, pkt, ETHER_HDR_LEN);
6486 		ptr = PKTDATA(dhd->osh, pkt)  - (dhd->prot->tx_metadata_offset);
6487 		bcm_print_bytes("txmetadata", ptr, txstatus->metadata_len);
6488 		dhd_prot_print_metadata(dhd, ptr, txstatus->metadata_len);
6489 	}
6490 #endif /* DHD_DBG_SHOW_METADATA */
6491 
6492 #ifdef DHD_HP2P
6493 	if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
6494 #ifdef DHD_HP2P_DEBUG
6495 		bcm_print_bytes("txcpl", (uint8 *)txstatus, sizeof(host_txbuf_cmpl_t));
6496 #endif /* DHD_HP2P_DEBUG */
6497 		dhd_update_hp2p_txstats(dhd, txstatus);
6498 	}
6499 #endif /* DHD_HP2P */
6500 
6501 #ifdef DHD_LBUF_AUDIT
6502 	PKTAUDIT(dhd->osh, pkt);
6503 #endif // endif
6504 
6505 	DHD_FLOWRING_TXSTATUS_CNT_UPDATE(dhd->bus, txstatus->compl_hdr.flow_ring_id,
6506 		txstatus->tx_status);
6507 	DHD_RING_UNLOCK(ring->ring_lock, flags);
6508 	PKTFREE(dhd->osh, pkt, TRUE);
6509 	return;
6510 } /* dhd_prot_txstatus_process */
6511 
6512 /** called on MSG_TYPE_WL_EVENT message received from dongle */
6513 static void
dhd_prot_event_process(dhd_pub_t * dhd,void * msg)6514 dhd_prot_event_process(dhd_pub_t *dhd, void *msg)
6515 {
6516 	wlevent_req_msg_t *evnt;
6517 	uint32 bufid;
6518 	uint16 buflen;
6519 	int ifidx = 0;
6520 	void* pkt;
6521 	dhd_prot_t *prot = dhd->prot;
6522 
6523 	/* Event complete header */
6524 	evnt = (wlevent_req_msg_t *)msg;
6525 	bufid = ltoh32(evnt->cmn_hdr.request_id);
6526 
6527 #if defined(DHD_PKTID_AUDIT_RING)
6528 	DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, bufid,
6529 			DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
6530 #endif // endif
6531 
6532 	buflen = ltoh16(evnt->event_data_len);
6533 
6534 	ifidx = BCMMSGBUF_API_IFIDX(&evnt->cmn_hdr);
6535 
6536 	/* Post another rxbuf to the device */
6537 	if (prot->cur_event_bufs_posted)
6538 		prot->cur_event_bufs_posted--;
6539 	dhd_msgbuf_rxbuf_post_event_bufs(dhd);
6540 
6541 	pkt = dhd_prot_packet_get(dhd, bufid, PKTTYPE_EVENT_RX, TRUE);
6542 
6543 	if (!pkt) {
6544 		DHD_ERROR(("%s: pkt is NULL for pktid %d\n", __FUNCTION__, bufid));
6545 		return;
6546 	}
6547 
6548 	/* DMA RX offset updated through shared area */
6549 	if (dhd->prot->rx_dataoffset)
6550 		PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
6551 
6552 	PKTSETLEN(dhd->osh, pkt, buflen);
6553 #ifdef DHD_LBUF_AUDIT
6554 	PKTAUDIT(dhd->osh, pkt);
6555 #endif // endif
6556 	dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
6557 }
6558 
6559 /** called on MSG_TYPE_INFO_BUF_CMPLT message received from dongle */
6560 static void BCMFASTPATH
dhd_prot_process_infobuf_complete(dhd_pub_t * dhd,void * buf)6561 dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf)
6562 {
6563 	info_buf_resp_t *resp;
6564 	uint32 pktid;
6565 	uint16 buflen;
6566 	void * pkt;
6567 
6568 	resp = (info_buf_resp_t *)buf;
6569 	pktid = ltoh32(resp->cmn_hdr.request_id);
6570 	buflen = ltoh16(resp->info_data_len);
6571 
6572 #ifdef DHD_PKTID_AUDIT_RING
6573 	DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid,
6574 			DHD_DUPLICATE_FREE, buf, D2HRING_INFO_BUFCMPLT_ITEMSIZE);
6575 #endif /* DHD_PKTID_AUDIT_RING */
6576 
6577 	DHD_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d, rx_dataoffset %d\n",
6578 		pktid, buflen, resp->cmn_hdr.flags, ltoh16(resp->seqnum),
6579 		dhd->prot->rx_dataoffset));
6580 
6581 	if (dhd->debug_buf_dest_support) {
6582 		if (resp->dest < DEBUG_BUF_DEST_MAX) {
6583 			dhd->debug_buf_dest_stat[resp->dest]++;
6584 		}
6585 	}
6586 
6587 	pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_INFO_RX, TRUE);
6588 	if (!pkt)
6589 		return;
6590 
6591 	/* DMA RX offset updated through shared area */
6592 	if (dhd->prot->rx_dataoffset)
6593 		PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
6594 
6595 	PKTSETLEN(dhd->osh, pkt, buflen);
6596 
6597 #ifdef DHD_LBUF_AUDIT
6598 	PKTAUDIT(dhd->osh, pkt);
6599 #endif // endif
6600 
6601 	/* info ring "debug" data, which is not a 802.3 frame, is sent/hacked with a
6602 	 * special ifidx of -1.  This is just internal to dhd to get the data to
6603 	 * dhd_linux.c:dhd_rx_frame() from here (dhd_prot_infobuf_cmplt_process).
6604 	 */
6605 	dhd_bus_rx_frame(dhd->bus, pkt, DHD_DUMMY_INFO_IF /* ifidx HACK */, 1);
6606 }
6607 
6608 /** called on MSG_TYPE_SNAPSHOT_CMPLT message received from dongle */
6609 static void BCMFASTPATH
dhd_prot_process_snapshot_complete(dhd_pub_t * dhd,void * buf)6610 dhd_prot_process_snapshot_complete(dhd_pub_t *dhd, void *buf)
6611 {
6612 }
6613 
6614 /** Stop protocol: sync w/dongle state. */
dhd_prot_stop(dhd_pub_t * dhd)6615 void dhd_prot_stop(dhd_pub_t *dhd)
6616 {
6617 	ASSERT(dhd);
6618 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
6619 
6620 }
6621 
6622 /* Add any protocol-specific data header.
6623  * Caller must reserve prot_hdrlen prepend space.
6624  */
6625 void BCMFASTPATH
dhd_prot_hdrpush(dhd_pub_t * dhd,int ifidx,void * PKTBUF)6626 dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *PKTBUF)
6627 {
6628 	return;
6629 }
6630 
6631 uint
dhd_prot_hdrlen(dhd_pub_t * dhd,void * PKTBUF)6632 dhd_prot_hdrlen(dhd_pub_t *dhd, void *PKTBUF)
6633 {
6634 	return 0;
6635 }
6636 
6637 #define MAX_MTU_SZ (1600u)
6638 
6639 #define PKTBUF pktbuf
6640 
6641 /**
6642  * Called when a tx ethernet packet has been dequeued from a flow queue, and has to be inserted in
6643  * the corresponding flow ring.
6644  */
6645 int BCMFASTPATH
dhd_prot_txdata(dhd_pub_t * dhd,void * PKTBUF,uint8 ifidx)6646 dhd_prot_txdata(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx)
6647 {
6648 	unsigned long flags;
6649 	dhd_prot_t *prot = dhd->prot;
6650 	host_txbuf_post_t *txdesc = NULL;
6651 	dmaaddr_t pa, meta_pa;
6652 	uint8 *pktdata;
6653 	uint32 pktlen;
6654 	uint32 pktid;
6655 	uint8	prio;
6656 	uint16 flowid = 0;
6657 	uint16 alloced = 0;
6658 	uint16	headroom;
6659 	msgbuf_ring_t *ring;
6660 	flow_ring_table_t *flow_ring_table;
6661 	flow_ring_node_t *flow_ring_node;
6662 
6663 	if (dhd->flow_ring_table == NULL) {
6664 		DHD_ERROR(("dhd flow_ring_table is NULL\n"));
6665 		return BCME_NORESOURCE;
6666 	}
6667 #ifdef DHD_PCIE_PKTID
6668 	if (!DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map)) {
6669 		if (dhd->prot->pktid_depleted_cnt == DHD_PKTID_DEPLETED_MAX_COUNT) {
6670 			dhd_bus_stop_queue(dhd->bus);
6671 			dhd->prot->no_tx_resource = TRUE;
6672 		}
6673 		dhd->prot->pktid_depleted_cnt++;
6674 		goto err_no_res;
6675 	} else {
6676 		dhd->prot->pktid_depleted_cnt = 0;
6677 	}
6678 #endif /* DHD_PCIE_PKTID */
6679 
6680 	flowid = DHD_PKT_GET_FLOWID(PKTBUF);
6681 	flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
6682 	flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
6683 
6684 	ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
6685 
6686 	DHD_RING_LOCK(ring->ring_lock, flags);
6687 
6688 	/* Create a unique 32-bit packet id */
6689 	pktid = DHD_NATIVE_TO_PKTID_RSV(dhd, dhd->prot->pktid_tx_map,
6690 		PKTBUF, PKTTYPE_DATA_TX);
6691 #if defined(DHD_PCIE_PKTID)
6692 	if (pktid == DHD_PKTID_INVALID) {
6693 		DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
6694 		/*
6695 		 * If we return error here, the caller would queue the packet
6696 		 * again. So we'll just free the skb allocated in DMA Zone.
6697 		 * Since we have not freed the original SKB yet the caller would
6698 		 * requeue the same.
6699 		 */
6700 		goto err_no_res_pktfree;
6701 	}
6702 #endif /* DHD_PCIE_PKTID */
6703 
6704 	/* Reserve space in the circular buffer */
6705 	txdesc = (host_txbuf_post_t *)
6706 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
6707 	if (txdesc == NULL) {
6708 		DHD_INFO(("%s:%d: HTOD Msgbuf Not available TxCount = %d\n",
6709 			__FUNCTION__, __LINE__, OSL_ATOMIC_READ(dhd->osh, &prot->active_tx_count)));
6710 		goto err_free_pktid;
6711 	}
6712 
6713 	/* Extract the data pointer and length information */
6714 	pktdata = PKTDATA(dhd->osh, PKTBUF);
6715 	pktlen  = PKTLEN(dhd->osh, PKTBUF);
6716 
6717 	DHD_DBG_PKT_MON_TX(dhd, PKTBUF, pktid);
6718 
6719 	/* Ethernet header: Copy before we cache flush packet using DMA_MAP */
6720 	bcopy(pktdata, txdesc->txhdr, ETHER_HDR_LEN);
6721 
6722 	/* Extract the ethernet header and adjust the data pointer and length */
6723 	pktdata = PKTPULL(dhd->osh, PKTBUF, ETHER_HDR_LEN);
6724 	pktlen -= ETHER_HDR_LEN;
6725 
6726 	/* Map the data pointer to a DMA-able address */
6727 	if (SECURE_DMA_ENAB(dhd->osh)) {
6728 		int offset = 0;
6729 		BCM_REFERENCE(offset);
6730 
6731 		if (prot->tx_metadata_offset)
6732 			offset = prot->tx_metadata_offset + ETHER_HDR_LEN;
6733 
6734 		pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen,
6735 			DMA_TX, PKTBUF, 0, ring->dma_buf.secdma, offset);
6736 	}
6737 #ifndef BCM_SECURE_DMA
6738 	else
6739 		pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, DMA_TX, PKTBUF, 0);
6740 #endif /* #ifndef BCM_SECURE_DMA */
6741 
6742 	if (PHYSADDRISZERO(pa)) {
6743 		DHD_ERROR(("%s: Something really bad, unless 0 is "
6744 			"a valid phyaddr for pa\n", __FUNCTION__));
6745 		ASSERT(0);
6746 		goto err_rollback_idx;
6747 	}
6748 
6749 #ifdef DMAMAP_STATS
6750 	dhd->dma_stats.txdata++;
6751 	dhd->dma_stats.txdata_sz += pktlen;
6752 #endif /* DMAMAP_STATS */
6753 	/* No need to lock. Save the rest of the packet's metadata */
6754 	DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_tx_map, PKTBUF, pktid,
6755 	    pa, pktlen, DMA_TX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_TX);
6756 
6757 #ifdef TXP_FLUSH_NITEMS
6758 	if (ring->pend_items_count == 0)
6759 		ring->start_addr = (void *)txdesc;
6760 	ring->pend_items_count++;
6761 #endif // endif
6762 
6763 	/* Form the Tx descriptor message buffer */
6764 
6765 	/* Common message hdr */
6766 	txdesc->cmn_hdr.msg_type = MSG_TYPE_TX_POST;
6767 	txdesc->cmn_hdr.if_id = ifidx;
6768 	txdesc->cmn_hdr.flags = ring->current_phase;
6769 
6770 	txdesc->flags = BCMPCIE_PKT_FLAGS_FRAME_802_3;
6771 	prio = (uint8)PKTPRIO(PKTBUF);
6772 
6773 	txdesc->flags |= (prio & 0x7) << BCMPCIE_PKT_FLAGS_PRIO_SHIFT;
6774 	txdesc->seg_cnt = 1;
6775 
6776 	txdesc->data_len = htol16((uint16) pktlen);
6777 	txdesc->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
6778 	txdesc->data_buf_addr.low_addr  = htol32(PHYSADDRLO(pa));
6779 
6780 	/* Move data pointer to keep ether header in local PKTBUF for later reference */
6781 	PKTPUSH(dhd->osh, PKTBUF, ETHER_HDR_LEN);
6782 
6783 	/* Handle Tx metadata */
6784 	headroom = (uint16)PKTHEADROOM(dhd->osh, PKTBUF);
6785 	if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset))
6786 		DHD_ERROR(("No headroom for Metadata tx %d %d\n",
6787 		prot->tx_metadata_offset, headroom));
6788 
6789 	if (prot->tx_metadata_offset && (headroom >= prot->tx_metadata_offset)) {
6790 		DHD_TRACE(("Metadata in tx %d\n", prot->tx_metadata_offset));
6791 
6792 		/* Adjust the data pointer to account for meta data in DMA_MAP */
6793 		PKTPUSH(dhd->osh, PKTBUF, prot->tx_metadata_offset);
6794 
6795 		if (SECURE_DMA_ENAB(dhd->osh)) {
6796 			meta_pa = SECURE_DMA_MAP_TXMETA(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
6797 				prot->tx_metadata_offset + ETHER_HDR_LEN, DMA_RX, PKTBUF,
6798 				0, ring->dma_buf.secdma);
6799 		}
6800 #ifndef BCM_SECURE_DMA
6801 		else
6802 			meta_pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
6803 				prot->tx_metadata_offset, DMA_RX, PKTBUF, 0);
6804 #endif /* #ifndef BCM_SECURE_DMA */
6805 
6806 		if (PHYSADDRISZERO(meta_pa)) {
6807 			/* Unmap the data pointer to a DMA-able address */
6808 			if (SECURE_DMA_ENAB(dhd->osh)) {
6809 				int offset = 0;
6810 				BCM_REFERENCE(offset);
6811 
6812 				if (prot->tx_metadata_offset) {
6813 					offset = prot->tx_metadata_offset + ETHER_HDR_LEN;
6814 				}
6815 
6816 				SECURE_DMA_UNMAP(dhd->osh, pa, pktlen,
6817 					DMA_TX, 0, DHD_DMAH_NULL, ring->dma_buf.secdma, offset);
6818 			}
6819 #ifndef BCM_SECURE_DMA
6820 			else {
6821 				DMA_UNMAP(dhd->osh, pa, pktlen, DMA_TX, 0, DHD_DMAH_NULL);
6822 			}
6823 #endif /* #ifndef BCM_SECURE_DMA */
6824 #ifdef TXP_FLUSH_NITEMS
6825 			/* update pend_items_count */
6826 			ring->pend_items_count--;
6827 #endif /* TXP_FLUSH_NITEMS */
6828 
6829 			DHD_ERROR(("%s: Something really bad, unless 0 is "
6830 				"a valid phyaddr for meta_pa\n", __FUNCTION__));
6831 			ASSERT(0);
6832 			goto err_rollback_idx;
6833 		}
6834 
6835 		/* Adjust the data pointer back to original value */
6836 		PKTPULL(dhd->osh, PKTBUF, prot->tx_metadata_offset);
6837 
6838 		txdesc->metadata_buf_len = prot->tx_metadata_offset;
6839 		txdesc->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(meta_pa));
6840 		txdesc->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(meta_pa));
6841 	} else {
6842 #ifdef DHD_HP2P
6843 		if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
6844 			dhd_update_hp2p_txdesc(dhd, txdesc);
6845 		} else
6846 #endif /* DHD_HP2P */
6847 	if (1)
6848 	{
6849 			txdesc->metadata_buf_len = htol16(0);
6850 			txdesc->metadata_buf_addr.high_addr = 0;
6851 			txdesc->metadata_buf_addr.low_addr = 0;
6852 		}
6853 	}
6854 
6855 #ifdef DHD_PKTID_AUDIT_RING
6856 	DHD_PKTID_AUDIT(dhd, prot->pktid_tx_map, pktid, DHD_DUPLICATE_ALLOC);
6857 #endif /* DHD_PKTID_AUDIT_RING */
6858 
6859 	txdesc->cmn_hdr.request_id = htol32(pktid);
6860 
6861 	DHD_TRACE(("txpost: data_len %d, pktid 0x%04x\n", txdesc->data_len,
6862 		txdesc->cmn_hdr.request_id));
6863 
6864 #ifdef DHD_LBUF_AUDIT
6865 	PKTAUDIT(dhd->osh, PKTBUF);
6866 #endif // endif
6867 
6868 	if (pktlen > MAX_MTU_SZ) {
6869 		DHD_ERROR(("%s: ######## pktlen(%d) > MAX_MTU_SZ(%d) #######\n",
6870 			__FUNCTION__, pktlen, MAX_MTU_SZ));
6871 		dhd_prhex("txringitem", (volatile uchar*)txdesc,
6872 			sizeof(host_txbuf_post_t), DHD_ERROR_VAL);
6873 	}
6874 
6875 	/* Update the write pointer in TCM & ring bell */
6876 #if defined(DHD_HP2P) && defined(TXP_FLUSH_NITEMS)
6877 	if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
6878 		dhd_calc_hp2p_burst(dhd, ring, flowid);
6879 	} else {
6880 		if ((ring->pend_items_count == prot->txp_threshold) ||
6881 			((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring))) {
6882 			dhd_prot_txdata_write_flush(dhd, flowid);
6883 		}
6884 	}
6885 #elif defined(TXP_FLUSH_NITEMS)
6886 	/* Flush if we have either hit the txp_threshold or if this msg is */
6887 	/* occupying the last slot in the flow_ring - before wrap around.  */
6888 	if ((ring->pend_items_count == prot->txp_threshold) ||
6889 		((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring))) {
6890 		dhd_prot_txdata_write_flush(dhd, flowid);
6891 	}
6892 #else
6893 	/* update ring's WR index and ring doorbell to dongle */
6894 	dhd_prot_ring_write_complete(dhd, ring, txdesc, 1);
6895 #endif /* DHD_HP2P && TXP_FLUSH_NITEMS */
6896 
6897 #if defined(TX_STATUS_LATENCY_STATS)
6898 	/* set the time when pkt is queued to flowring */
6899 	DHD_PKT_SET_QTIME(PKTBUF, OSL_SYSUPTIME_US());
6900 #endif // endif
6901 
6902 	OSL_ATOMIC_INC(dhd->osh, &prot->active_tx_count);
6903 	/*
6904 	 * Take a wake lock, do not sleep if we have atleast one packet
6905 	 * to finish.
6906 	 */
6907 	DHD_TXFL_WAKE_LOCK_TIMEOUT(dhd, MAX_TX_TIMEOUT);
6908 
6909 	DHD_RING_UNLOCK(ring->ring_lock, flags);
6910 
6911 #ifdef TX_STATUS_LATENCY_STATS
6912 	flow_ring_node->flow_info.num_tx_pkts++;
6913 #endif /* TX_STATUS_LATENCY_STATS */
6914 	return BCME_OK;
6915 
6916 err_rollback_idx:
6917 	/* roll back write pointer for unprocessed message */
6918 	if (ring->wr == 0) {
6919 		ring->wr = ring->max_items - 1;
6920 	} else {
6921 		ring->wr--;
6922 		if (ring->wr == 0) {
6923 			DHD_INFO(("%s: flipping the phase now\n", ring->name));
6924 			ring->current_phase = ring->current_phase ?
6925 				0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
6926 		}
6927 	}
6928 
6929 err_free_pktid:
6930 #if defined(DHD_PCIE_PKTID)
6931 	{
6932 		void *dmah;
6933 		void *secdma;
6934 		/* Free up the PKTID. physaddr and pktlen will be garbage. */
6935 		DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid,
6936 			pa, pktlen, dmah, secdma, PKTTYPE_NO_CHECK);
6937 	}
6938 
6939 err_no_res_pktfree:
6940 #endif /* DHD_PCIE_PKTID */
6941 
6942 	DHD_RING_UNLOCK(ring->ring_lock, flags);
6943 err_no_res:
6944 	return BCME_NORESOURCE;
6945 } /* dhd_prot_txdata */
6946 
6947 /* called with a ring_lock */
6948 /** optimization to write "n" tx items at a time to ring */
6949 void BCMFASTPATH
dhd_prot_txdata_write_flush(dhd_pub_t * dhd,uint16 flowid)6950 dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flowid)
6951 {
6952 #ifdef TXP_FLUSH_NITEMS
6953 	flow_ring_table_t *flow_ring_table;
6954 	flow_ring_node_t *flow_ring_node;
6955 	msgbuf_ring_t *ring;
6956 
6957 	if (dhd->flow_ring_table == NULL) {
6958 		return;
6959 	}
6960 
6961 	flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
6962 	flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
6963 	ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
6964 
6965 	if (ring->pend_items_count) {
6966 		/* update ring's WR index and ring doorbell to dongle */
6967 		dhd_prot_ring_write_complete(dhd, ring, ring->start_addr,
6968 			ring->pend_items_count);
6969 		ring->pend_items_count = 0;
6970 		ring->start_addr = NULL;
6971 	}
6972 #endif /* TXP_FLUSH_NITEMS */
6973 }
6974 
6975 #undef PKTBUF	/* Only defined in the above routine */
6976 
6977 int BCMFASTPATH
dhd_prot_hdrpull(dhd_pub_t * dhd,int * ifidx,void * pkt,uchar * buf,uint * len)6978 dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pkt, uchar *buf, uint *len)
6979 {
6980 	return 0;
6981 }
6982 
6983 /** post a set of receive buffers to the dongle */
6984 static void BCMFASTPATH
dhd_prot_return_rxbuf(dhd_pub_t * dhd,uint32 pktid,uint32 rxcnt)6985 dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt)
6986 {
6987 	dhd_prot_t *prot = dhd->prot;
6988 #if defined(DHD_LB_RXC)
6989 	int elem_ix;
6990 	uint32 *elem;
6991 	bcm_workq_t *workq;
6992 
6993 	workq = &prot->rx_compl_prod;
6994 
6995 	/* Produce the work item */
6996 	elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
6997 	if (elem_ix == BCM_RING_FULL) {
6998 		DHD_ERROR(("%s LB RxCompl workQ is full\n", __FUNCTION__));
6999 		ASSERT(0);
7000 		return;
7001 	}
7002 
7003 	elem = WORKQ_ELEMENT(uint32, workq, elem_ix);
7004 	*elem = pktid;
7005 
7006 	smp_wmb();
7007 
7008 	/* Sync WR index to consumer if the SYNC threshold has been reached */
7009 	if (++prot->rx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) {
7010 		bcm_workq_prod_sync(workq);
7011 		prot->rx_compl_prod_sync = 0;
7012 	}
7013 
7014 	DHD_INFO(("%s: rx_compl_prod pktid<%u> sync<%d>\n",
7015 		__FUNCTION__, pktid, prot->rx_compl_prod_sync));
7016 
7017 #endif /* DHD_LB_RXC */
7018 
7019 	if (prot->rxbufpost >= rxcnt) {
7020 		prot->rxbufpost -= (uint16)rxcnt;
7021 	} else {
7022 		/* ASSERT(0); */
7023 		prot->rxbufpost = 0;
7024 	}
7025 
7026 #if !defined(DHD_LB_RXC)
7027 	if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD))
7028 		dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
7029 #endif /* !DHD_LB_RXC */
7030 	return;
7031 }
7032 
7033 /* called before an ioctl is sent to the dongle */
7034 static void
dhd_prot_wlioctl_intercept(dhd_pub_t * dhd,wl_ioctl_t * ioc,void * buf)7035 dhd_prot_wlioctl_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf)
7036 {
7037 	dhd_prot_t *prot = dhd->prot;
7038 	int slen = 0;
7039 
7040 	if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "pcie_bus_tput")) {
7041 		pcie_bus_tput_params_t *tput_params;
7042 
7043 		slen = strlen("pcie_bus_tput") + 1;
7044 		tput_params = (pcie_bus_tput_params_t*)((char *)buf + slen);
7045 		bcopy(&prot->host_bus_throughput_buf.pa, &tput_params->host_buf_addr,
7046 			sizeof(tput_params->host_buf_addr));
7047 		tput_params->host_buf_len = DHD_BUS_TPUT_BUF_LEN;
7048 	}
7049 
7050 }
7051 
7052 /* called after an ioctl returns from dongle */
7053 static void
dhd_prot_wl_ioctl_ret_intercept(dhd_pub_t * dhd,wl_ioctl_t * ioc,void * buf,int ifidx,int ret,int len)7054 dhd_prot_wl_ioctl_ret_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf,
7055 	int ifidx, int ret, int len)
7056 {
7057 
7058 	if (!ret && ioc->cmd == WLC_SET_VAR && buf != NULL) {
7059 		/* Intercept the wme_dp ioctl here */
7060 		if (!strcmp(buf, "wme_dp")) {
7061 			int slen, val = 0;
7062 
7063 			slen = strlen("wme_dp") + 1;
7064 			if (len >= (int)(slen + sizeof(int)))
7065 				bcopy(((char *)buf + slen), &val, sizeof(int));
7066 			dhd->wme_dp = (uint8) ltoh32(val);
7067 		}
7068 
7069 	}
7070 
7071 }
7072 
7073 #ifdef DHD_PM_CONTROL_FROM_FILE
7074 extern bool g_pm_control;
7075 #endif /* DHD_PM_CONTROL_FROM_FILE */
7076 
7077 /** Use protocol to issue ioctl to dongle. Only one ioctl may be in transit. */
dhd_prot_ioctl(dhd_pub_t * dhd,int ifidx,wl_ioctl_t * ioc,void * buf,int len)7078 int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len)
7079 {
7080 	int ret = -1;
7081 	uint8 action;
7082 
7083 	if (dhd->bus->is_linkdown) {
7084 		DHD_ERROR_RLMT(("%s : PCIe link is down. we have nothing to do\n", __FUNCTION__));
7085 		goto done;
7086 	}
7087 
7088 	if (dhd_query_bus_erros(dhd)) {
7089 		DHD_ERROR_RLMT(("%s : some BUS error. we have nothing to do\n", __FUNCTION__));
7090 		goto done;
7091 	}
7092 
7093 	if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) {
7094 		DHD_ERROR_RLMT(("%s : bus is down. we have nothing to do -"
7095 			" bus state: %d, sent hang: %d\n", __FUNCTION__,
7096 			dhd->busstate, dhd->hang_was_sent));
7097 		goto done;
7098 	}
7099 
7100 	if (dhd->busstate == DHD_BUS_SUSPEND) {
7101 		DHD_ERROR(("%s : bus is suspended\n", __FUNCTION__));
7102 		goto done;
7103 	}
7104 
7105 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7106 
7107 	if (ioc->cmd == WLC_SET_PM) {
7108 #ifdef DHD_PM_CONTROL_FROM_FILE
7109 		if (g_pm_control == TRUE) {
7110 			DHD_ERROR(("%s: SET PM ignored!(Requested:%d)\n",
7111 				__FUNCTION__, buf ? *(char *)buf : 0));
7112 			goto done;
7113 		}
7114 #endif /* DHD_PM_CONTROL_FROM_FILE */
7115 		DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__, buf ? *(char *)buf : 0));
7116 	}
7117 
7118 	ASSERT(len <= WLC_IOCTL_MAXLEN);
7119 
7120 	if (len > WLC_IOCTL_MAXLEN)
7121 		goto done;
7122 
7123 	action = ioc->set;
7124 
7125 	dhd_prot_wlioctl_intercept(dhd, ioc, buf);
7126 
7127 	if (action & WL_IOCTL_ACTION_SET) {
7128 		ret = dhd_msgbuf_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
7129 	} else {
7130 		ret = dhd_msgbuf_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
7131 		if (ret > 0)
7132 			ioc->used = ret;
7133 	}
7134 
7135 	/* Too many programs assume ioctl() returns 0 on success */
7136 	if (ret >= 0) {
7137 		ret = 0;
7138 	} else {
7139 		DHD_INFO(("%s: status ret value is %d \n", __FUNCTION__, ret));
7140 		dhd->dongle_error = ret;
7141 	}
7142 
7143 	dhd_prot_wl_ioctl_ret_intercept(dhd, ioc, buf, ifidx, ret, len);
7144 
7145 done:
7146 	return ret;
7147 
7148 } /* dhd_prot_ioctl */
7149 
7150 /** test / loopback */
7151 
7152 int
dhdmsgbuf_lpbk_req(dhd_pub_t * dhd,uint len)7153 dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len)
7154 {
7155 	unsigned long flags;
7156 	dhd_prot_t *prot = dhd->prot;
7157 	uint16 alloced = 0;
7158 
7159 	ioct_reqst_hdr_t *ioct_rqst;
7160 
7161 	uint16 hdrlen = sizeof(ioct_reqst_hdr_t);
7162 	uint16 msglen = len + hdrlen;
7163 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
7164 
7165 	msglen = ALIGN_SIZE(msglen, DMA_ALIGN_LEN);
7166 	msglen = LIMIT_TO_MAX(msglen, MSGBUF_MAX_MSG_SIZE);
7167 
7168 	DHD_RING_LOCK(ring->ring_lock, flags);
7169 
7170 	ioct_rqst = (ioct_reqst_hdr_t *)
7171 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
7172 
7173 	if (ioct_rqst == NULL) {
7174 		DHD_RING_UNLOCK(ring->ring_lock, flags);
7175 		return 0;
7176 	}
7177 
7178 	{
7179 		uint8 *ptr;
7180 		uint16 i;
7181 
7182 		ptr = (uint8 *)ioct_rqst;
7183 		for (i = 0; i < msglen; i++) {
7184 			ptr[i] = i % 256;
7185 		}
7186 	}
7187 
7188 	/* Common msg buf hdr */
7189 	ioct_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
7190 	ring->seqnum++;
7191 
7192 	ioct_rqst->msg.msg_type = MSG_TYPE_LOOPBACK;
7193 	ioct_rqst->msg.if_id = 0;
7194 	ioct_rqst->msg.flags = ring->current_phase;
7195 
7196 	bcm_print_bytes("LPBK REQ: ", (uint8 *)ioct_rqst, msglen);
7197 
7198 	/* update ring's WR index and ring doorbell to dongle */
7199 	dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
7200 
7201 	DHD_RING_UNLOCK(ring->ring_lock, flags);
7202 
7203 	return 0;
7204 }
7205 
7206 /** test / loopback */
dmaxfer_free_dmaaddr(dhd_pub_t * dhd,dhd_dmaxfer_t * dmaxfer)7207 void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dmaxfer)
7208 {
7209 	if (dmaxfer == NULL)
7210 		return;
7211 
7212 	dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
7213 	dhd_dma_buf_free(dhd, &dmaxfer->dstmem);
7214 }
7215 
7216 /** test / loopback */
7217 int
dhd_prepare_schedule_dmaxfer_free(dhd_pub_t * dhdp)7218 dhd_prepare_schedule_dmaxfer_free(dhd_pub_t *dhdp)
7219 {
7220 	dhd_prot_t *prot = dhdp->prot;
7221 	dhd_dmaxfer_t *dmaxfer = &prot->dmaxfer;
7222 	dmaxref_mem_map_t *dmap = NULL;
7223 
7224 	dmap = MALLOCZ(dhdp->osh, sizeof(dmaxref_mem_map_t));
7225 	if (!dmap) {
7226 		DHD_ERROR(("%s: dmap alloc failed\n", __FUNCTION__));
7227 		goto mem_alloc_fail;
7228 	}
7229 	dmap->srcmem = &(dmaxfer->srcmem);
7230 	dmap->dstmem = &(dmaxfer->dstmem);
7231 
7232 	DMAXFER_FREE(dhdp, dmap);
7233 	return BCME_OK;
7234 
7235 mem_alloc_fail:
7236 	if (dmap) {
7237 		MFREE(dhdp->osh, dmap, sizeof(dmaxref_mem_map_t));
7238 		dmap = NULL;
7239 	}
7240 	return BCME_NOMEM;
7241 } /* dhd_prepare_schedule_dmaxfer_free */
7242 
7243 /** test / loopback */
7244 void
dmaxfer_free_prev_dmaaddr(dhd_pub_t * dhdp,dmaxref_mem_map_t * dmmap)7245 dmaxfer_free_prev_dmaaddr(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap)
7246 {
7247 
7248 	dhd_dma_buf_free(dhdp, dmmap->srcmem);
7249 	dhd_dma_buf_free(dhdp, dmmap->dstmem);
7250 
7251 	MFREE(dhdp->osh, dmmap, sizeof(dmaxref_mem_map_t));
7252 
7253 	dhdp->bus->dmaxfer_complete = TRUE;
7254 	dhd_os_dmaxfer_wake(dhdp);
7255 
7256 	dmmap = NULL;
7257 
7258 } /* dmaxfer_free_prev_dmaaddr */
7259 
7260 /** test / loopback */
dmaxfer_prepare_dmaaddr(dhd_pub_t * dhd,uint len,uint srcdelay,uint destdelay,dhd_dmaxfer_t * dmaxfer)7261 int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len,
7262 	uint srcdelay, uint destdelay, dhd_dmaxfer_t *dmaxfer)
7263 {
7264 	uint i = 0, j = 0;
7265 	if (!dmaxfer)
7266 		return BCME_ERROR;
7267 
7268 	/* First free up existing buffers */
7269 	dmaxfer_free_dmaaddr(dhd, dmaxfer);
7270 
7271 	if (dhd_dma_buf_alloc(dhd, &dmaxfer->srcmem, len)) {
7272 		return BCME_NOMEM;
7273 	}
7274 
7275 	if (dhd_dma_buf_alloc(dhd, &dmaxfer->dstmem, len + 8)) {
7276 		dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
7277 		return BCME_NOMEM;
7278 	}
7279 
7280 	dmaxfer->len = len;
7281 
7282 	/* Populate source with a pattern like below
7283 	 * 0x00000000
7284 	 * 0x01010101
7285 	 * 0x02020202
7286 	 * 0x03030303
7287 	 * 0x04040404
7288 	 * 0x05050505
7289 	 * ...
7290 	 * 0xFFFFFFFF
7291 	 */
7292 	while (i < dmaxfer->len) {
7293 		((uint8*)dmaxfer->srcmem.va)[i] = j % 256;
7294 		i++;
7295 		if (i % 4 == 0) {
7296 			j++;
7297 		}
7298 	}
7299 
7300 	OSL_CACHE_FLUSH(dmaxfer->srcmem.va, dmaxfer->len);
7301 
7302 	dmaxfer->srcdelay = srcdelay;
7303 	dmaxfer->destdelay = destdelay;
7304 
7305 	return BCME_OK;
7306 } /* dmaxfer_prepare_dmaaddr */
7307 
7308 static void
dhd_msgbuf_dmaxfer_process(dhd_pub_t * dhd,void * msg)7309 dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg)
7310 {
7311 	dhd_prot_t *prot = dhd->prot;
7312 	uint64 end_usec;
7313 	pcie_dmaxfer_cmplt_t *cmplt = (pcie_dmaxfer_cmplt_t *)msg;
7314 	int buf_free_scheduled;
7315 
7316 	BCM_REFERENCE(cmplt);
7317 	end_usec = OSL_SYSUPTIME_US();
7318 
7319 	DHD_ERROR(("DMA loopback status: %d\n", cmplt->compl_hdr.status));
7320 	prot->dmaxfer.status = cmplt->compl_hdr.status;
7321 	OSL_CACHE_INV(prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
7322 	if (prot->dmaxfer.srcmem.va && prot->dmaxfer.dstmem.va) {
7323 		if (memcmp(prot->dmaxfer.srcmem.va,
7324 		        prot->dmaxfer.dstmem.va, prot->dmaxfer.len) ||
7325 		        cmplt->compl_hdr.status != BCME_OK) {
7326 		        DHD_ERROR(("DMA loopback failed\n"));
7327 			/* it is observed that some times the completion
7328 			 * header status is set as OK, but the memcmp fails
7329 			 * hence always explicitly set the dmaxfer status
7330 			 * as error if this happens.
7331 			 */
7332 			prot->dmaxfer.status = BCME_ERROR;
7333 			prhex("XFER SRC: ",
7334 			    prot->dmaxfer.srcmem.va, prot->dmaxfer.len);
7335 			prhex("XFER DST: ",
7336 			    prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
7337 		}
7338 		else {
7339 			switch (prot->dmaxfer.d11_lpbk) {
7340 			case M2M_DMA_LPBK: {
7341 				DHD_ERROR(("DMA successful pcie m2m DMA loopback\n"));
7342 				} break;
7343 			case D11_LPBK: {
7344 				DHD_ERROR(("DMA successful with d11 loopback\n"));
7345 				} break;
7346 			case BMC_LPBK: {
7347 				DHD_ERROR(("DMA successful with bmc loopback\n"));
7348 				} break;
7349 			case M2M_NON_DMA_LPBK: {
7350 				DHD_ERROR(("DMA successful pcie m2m NON DMA loopback\n"));
7351 				} break;
7352 			case D11_HOST_MEM_LPBK: {
7353 				DHD_ERROR(("DMA successful d11 host mem loopback\n"));
7354 				} break;
7355 			case BMC_HOST_MEM_LPBK: {
7356 				DHD_ERROR(("DMA successful bmc host mem loopback\n"));
7357 				} break;
7358 			default: {
7359 				DHD_ERROR(("Invalid loopback option\n"));
7360 				} break;
7361 			}
7362 
7363 			if (DHD_LPBKDTDUMP_ON()) {
7364 				/* debug info print of the Tx and Rx buffers */
7365 				dhd_prhex("XFER SRC: ", prot->dmaxfer.srcmem.va,
7366 					prot->dmaxfer.len, DHD_INFO_VAL);
7367 				dhd_prhex("XFER DST: ", prot->dmaxfer.dstmem.va,
7368 					prot->dmaxfer.len, DHD_INFO_VAL);
7369 			}
7370 		}
7371 	}
7372 
7373 	buf_free_scheduled = dhd_prepare_schedule_dmaxfer_free(dhd);
7374 	end_usec -= prot->dmaxfer.start_usec;
7375 	if (end_usec) {
7376 		prot->dmaxfer.time_taken = end_usec;
7377 		DHD_ERROR(("DMA loopback %d bytes in %lu usec, %u kBps\n",
7378 			prot->dmaxfer.len, (unsigned long)end_usec,
7379 			(prot->dmaxfer.len * (1000 * 1000 / 1024) / (uint32)end_usec)));
7380 	}
7381 	dhd->prot->dmaxfer.in_progress = FALSE;
7382 
7383 	if (buf_free_scheduled != BCME_OK) {
7384 		dhd->bus->dmaxfer_complete = TRUE;
7385 		dhd_os_dmaxfer_wake(dhd);
7386 	}
7387 }
7388 
7389 /** Test functionality.
7390  * Transfers bytes from host to dongle and to host again using DMA
7391  * This function is not reentrant, as prot->dmaxfer.in_progress is not protected
7392  * by a spinlock.
7393  */
7394 int
dhdmsgbuf_dmaxfer_req(dhd_pub_t * dhd,uint len,uint srcdelay,uint destdelay,uint d11_lpbk,uint core_num)7395 dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay,
7396 	uint d11_lpbk, uint core_num)
7397 {
7398 	unsigned long flags;
7399 	int ret = BCME_OK;
7400 	dhd_prot_t *prot = dhd->prot;
7401 	pcie_dma_xfer_params_t *dmap;
7402 	uint32 xferlen = LIMIT_TO_MAX(len, DMA_XFER_LEN_LIMIT);
7403 	uint16 alloced = 0;
7404 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
7405 
7406 	if (prot->dmaxfer.in_progress) {
7407 		DHD_ERROR(("DMA is in progress...\n"));
7408 		return BCME_ERROR;
7409 	}
7410 
7411 	if (d11_lpbk >= MAX_LPBK) {
7412 		DHD_ERROR(("loopback mode should be either"
7413 			" 0-PCIE_M2M_DMA, 1-D11, 2-BMC or 3-PCIE_M2M_NonDMA\n"));
7414 		return BCME_ERROR;
7415 	}
7416 
7417 	DHD_RING_LOCK(ring->ring_lock, flags);
7418 
7419 	prot->dmaxfer.in_progress = TRUE;
7420 	if ((ret = dmaxfer_prepare_dmaaddr(dhd, xferlen, srcdelay, destdelay,
7421 	        &prot->dmaxfer)) != BCME_OK) {
7422 		prot->dmaxfer.in_progress = FALSE;
7423 		DHD_RING_UNLOCK(ring->ring_lock, flags);
7424 		return ret;
7425 	}
7426 
7427 	dmap = (pcie_dma_xfer_params_t *)
7428 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
7429 
7430 	if (dmap == NULL) {
7431 		dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
7432 		prot->dmaxfer.in_progress = FALSE;
7433 		DHD_RING_UNLOCK(ring->ring_lock, flags);
7434 		return BCME_NOMEM;
7435 	}
7436 
7437 	/* Common msg buf hdr */
7438 	dmap->cmn_hdr.msg_type = MSG_TYPE_LPBK_DMAXFER;
7439 	dmap->cmn_hdr.request_id = htol32(DHD_FAKE_PKTID);
7440 	dmap->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
7441 	dmap->cmn_hdr.flags = ring->current_phase;
7442 	ring->seqnum++;
7443 
7444 	dmap->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.srcmem.pa));
7445 	dmap->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.srcmem.pa));
7446 	dmap->host_ouput_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.dstmem.pa));
7447 	dmap->host_ouput_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.dstmem.pa));
7448 	dmap->xfer_len = htol32(prot->dmaxfer.len);
7449 	dmap->srcdelay = htol32(prot->dmaxfer.srcdelay);
7450 	dmap->destdelay = htol32(prot->dmaxfer.destdelay);
7451 	prot->dmaxfer.d11_lpbk = d11_lpbk;
7452 	dmap->flags = (((core_num & PCIE_DMA_XFER_FLG_CORE_NUMBER_MASK)
7453 			<< PCIE_DMA_XFER_FLG_CORE_NUMBER_SHIFT) |
7454 			((prot->dmaxfer.d11_lpbk & PCIE_DMA_XFER_FLG_D11_LPBK_MASK)
7455 			 << PCIE_DMA_XFER_FLG_D11_LPBK_SHIFT));
7456 	prot->dmaxfer.start_usec = OSL_SYSUPTIME_US();
7457 
7458 	/* update ring's WR index and ring doorbell to dongle */
7459 	dhd_prot_ring_write_complete(dhd, ring, dmap, 1);
7460 
7461 	DHD_RING_UNLOCK(ring->ring_lock, flags);
7462 
7463 	DHD_ERROR(("DMA loopback Started...\n"));
7464 
7465 	return BCME_OK;
7466 } /* dhdmsgbuf_dmaxfer_req */
7467 
7468 int
dhdmsgbuf_dmaxfer_status(dhd_pub_t * dhd,dma_xfer_info_t * result)7469 dhdmsgbuf_dmaxfer_status(dhd_pub_t *dhd, dma_xfer_info_t *result)
7470 {
7471 	dhd_prot_t *prot = dhd->prot;
7472 
7473 	if (prot->dmaxfer.in_progress)
7474 		result->status = DMA_XFER_IN_PROGRESS;
7475 	else if (prot->dmaxfer.status == 0)
7476 		result->status = DMA_XFER_SUCCESS;
7477 	else
7478 		result->status = DMA_XFER_FAILED;
7479 
7480 	result->type = prot->dmaxfer.d11_lpbk;
7481 	result->error_code = prot->dmaxfer.status;
7482 	result->num_bytes = prot->dmaxfer.len;
7483 	result->time_taken = prot->dmaxfer.time_taken;
7484 	if (prot->dmaxfer.time_taken) {
7485 		/* throughput in kBps */
7486 		result->tput =
7487 			(prot->dmaxfer.len * (1000 * 1000 / 1024)) /
7488 			(uint32)prot->dmaxfer.time_taken;
7489 	}
7490 
7491 	return BCME_OK;
7492 }
7493 
7494 /** Called in the process of submitting an ioctl to the dongle */
7495 static int
dhd_msgbuf_query_ioctl(dhd_pub_t * dhd,int ifidx,uint cmd,void * buf,uint len,uint8 action)7496 dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
7497 {
7498 	int ret = 0;
7499 	uint copylen = 0;
7500 
7501 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7502 
7503 	if (dhd->bus->is_linkdown) {
7504 		DHD_ERROR(("%s : PCIe link is down. we have nothing to do\n",
7505 			__FUNCTION__));
7506 		return -EIO;
7507 	}
7508 
7509 	if (dhd->busstate == DHD_BUS_DOWN) {
7510 		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
7511 		return -EIO;
7512 	}
7513 
7514 	/* don't talk to the dongle if fw is about to be reloaded */
7515 	if (dhd->hang_was_sent) {
7516 		DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
7517 			__FUNCTION__));
7518 		return -EIO;
7519 	}
7520 
7521 	if (cmd == WLC_GET_VAR && buf)
7522 	{
7523 		if (!len || !*(uint8 *)buf) {
7524 			DHD_ERROR(("%s(): Zero length bailing\n", __FUNCTION__));
7525 			ret = BCME_BADARG;
7526 			goto done;
7527 		}
7528 
7529 		/* Respond "bcmerror" and "bcmerrorstr" with local cache */
7530 		copylen = MIN(len, BCME_STRLEN);
7531 
7532 		if ((len >= strlen("bcmerrorstr")) &&
7533 			(!strcmp((char *)buf, "bcmerrorstr"))) {
7534 			strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), copylen);
7535 			*(uint8 *)((uint8 *)buf + (copylen - 1)) = '\0';
7536 			goto done;
7537 		} else if ((len >= strlen("bcmerror")) &&
7538 			!strcmp((char *)buf, "bcmerror")) {
7539 			*(uint32 *)(uint32 *)buf = dhd->dongle_error;
7540 			goto done;
7541 		}
7542 	}
7543 
7544 	DHD_CTL(("query_ioctl: ACTION %d ifdix %d cmd %d len %d \n",
7545 	    action, ifidx, cmd, len));
7546 
7547 	ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
7548 
7549 	if (ret < 0) {
7550 		DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
7551 		goto done;
7552 	}
7553 
7554 	/* wait for IOCTL completion message from dongle and get first fragment */
7555 	ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
7556 
7557 done:
7558 	return ret;
7559 }
7560 
7561 void
dhd_msgbuf_iovar_timeout_dump(dhd_pub_t * dhd)7562 dhd_msgbuf_iovar_timeout_dump(dhd_pub_t *dhd)
7563 {
7564 	uint32 intstatus;
7565 	dhd_prot_t *prot = dhd->prot;
7566 	dhd->rxcnt_timeout++;
7567 	dhd->rx_ctlerrs++;
7568 	dhd->iovar_timeout_occured = TRUE;
7569 	DHD_ERROR(("%s: resumed on timeout rxcnt_timeout%s %d ioctl_cmd %d "
7570 		"trans_id %d state %d busstate=%d ioctl_received=%d\n", __FUNCTION__,
7571 		dhd->is_sched_error ? " due to scheduling problem" : "",
7572 		dhd->rxcnt_timeout, prot->curr_ioctl_cmd, prot->ioctl_trans_id,
7573 		prot->ioctl_state, dhd->busstate, prot->ioctl_received));
7574 #if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
7575 		if (dhd->is_sched_error && dhd->memdump_enabled == DUMP_MEMFILE_BUGON) {
7576 			/* change g_assert_type to trigger Kernel panic */
7577 			g_assert_type = 2;
7578 			/* use ASSERT() to trigger panic */
7579 			ASSERT(0);
7580 		}
7581 #endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */
7582 
7583 	if (prot->curr_ioctl_cmd == WLC_SET_VAR ||
7584 			prot->curr_ioctl_cmd == WLC_GET_VAR) {
7585 		char iovbuf[32];
7586 		int dump_size = 128;
7587 		uint8 *ioctl_buf = (uint8 *)prot->ioctbuf.va;
7588 		memset(iovbuf, 0, sizeof(iovbuf));
7589 		strncpy(iovbuf, ioctl_buf, sizeof(iovbuf) - 1);
7590 		iovbuf[sizeof(iovbuf) - 1] = '\0';
7591 		DHD_ERROR(("Current IOVAR (%s): %s\n",
7592 			prot->curr_ioctl_cmd == WLC_SET_VAR ?
7593 			"WLC_SET_VAR" : "WLC_GET_VAR", iovbuf));
7594 		DHD_ERROR(("========== START IOCTL REQBUF DUMP ==========\n"));
7595 		prhex("ioctl_buf", (const u8 *) ioctl_buf, dump_size);
7596 		DHD_ERROR(("\n========== END IOCTL REQBUF DUMP ==========\n"));
7597 	}
7598 
7599 	/* Check the PCIe link status by reading intstatus register */
7600 	intstatus = si_corereg(dhd->bus->sih,
7601 		dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0);
7602 	if (intstatus == (uint32)-1) {
7603 		DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
7604 		dhd->bus->is_linkdown = TRUE;
7605 	}
7606 
7607 	dhd_bus_dump_console_buffer(dhd->bus);
7608 	dhd_prot_debug_info_print(dhd);
7609 }
7610 
7611 /**
7612  * Waits for IOCTL completion message from the dongle, copies this into caller
7613  * provided parameter 'buf'.
7614  */
7615 static int
dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t * dhd,uint32 len,void * buf)7616 dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf)
7617 {
7618 	dhd_prot_t *prot = dhd->prot;
7619 	int timeleft;
7620 	unsigned long flags;
7621 	int ret = 0;
7622 	static uint cnt = 0;
7623 
7624 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7625 
7626 	if (dhd_query_bus_erros(dhd)) {
7627 		ret = -EIO;
7628 		goto out;
7629 	}
7630 
7631 	timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
7632 
7633 #ifdef DHD_RECOVER_TIMEOUT
7634 	if (prot->ioctl_received == 0) {
7635 		uint32 intstatus = si_corereg(dhd->bus->sih,
7636 			dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0);
7637 		int host_irq_disbled = dhdpcie_irq_disabled(dhd->bus);
7638 		if ((intstatus) && (intstatus != (uint32)-1) &&
7639 			(timeleft == 0) && (!dhd_query_bus_erros(dhd))) {
7640 			DHD_ERROR(("%s: iovar timeout trying again intstatus=%x"
7641 				" host_irq_disabled=%d\n",
7642 				__FUNCTION__, intstatus, host_irq_disbled));
7643 			dhd_pcie_intr_count_dump(dhd);
7644 			dhd_print_tasklet_status(dhd);
7645 			dhd_prot_process_ctrlbuf(dhd);
7646 			timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
7647 			/* Clear Interrupts */
7648 			dhdpcie_bus_clear_intstatus(dhd->bus);
7649 		}
7650 	}
7651 #endif /* DHD_RECOVER_TIMEOUT */
7652 
7653 	if (dhd->conf->ctrl_resched > 0 && timeleft == 0 && (!dhd_query_bus_erros(dhd))) {
7654 		cnt++;
7655 		if (cnt <= dhd->conf->ctrl_resched) {
7656 			uint buscorerev = dhd->bus->sih->buscorerev;
7657 			uint32 intstatus = 0, intmask = 0;
7658 			intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxInt(buscorerev), 0, 0);
7659 			intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxMask(buscorerev), 0, 0);
7660 			if (intstatus) {
7661 				DHD_ERROR(("%s: reschedule dhd_dpc, cnt=%d, intstatus=0x%x, intmask=0x%x\n",
7662 					__FUNCTION__, cnt, intstatus, intmask));
7663 				dhd->bus->intstatus = intstatus;
7664 				dhd->bus->ipend = TRUE;
7665 				dhd->bus->dpc_sched = TRUE;
7666 				dhd_sched_dpc(dhd);
7667 				timeleft = dhd_os_ioctl_resp_wait(dhd, &prot->ioctl_received);
7668 			}
7669 		}
7670 	} else {
7671 		cnt = 0;
7672 	}
7673 
7674 	if (timeleft == 0 && (!dhd_query_bus_erros(dhd))) {
7675 		/* check if resumed on time out related to scheduling issue */
7676 		dhd->is_sched_error = FALSE;
7677 		if (dhd->bus->isr_entry_time > prot->ioctl_fillup_time) {
7678 			dhd->is_sched_error = dhd_bus_query_dpc_sched_errors(dhd);
7679 		}
7680 
7681 		dhd_msgbuf_iovar_timeout_dump(dhd);
7682 
7683 #ifdef DHD_FW_COREDUMP
7684 		/* Collect socram dump */
7685 		if (dhd->memdump_enabled) {
7686 			/* collect core dump */
7687 			dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT;
7688 			dhd_bus_mem_dump(dhd);
7689 		}
7690 #endif /* DHD_FW_COREDUMP */
7691 
7692 		ret = -ETIMEDOUT;
7693 		goto out;
7694 	} else {
7695 		if (prot->ioctl_received != IOCTL_RETURN_ON_SUCCESS) {
7696 			DHD_ERROR(("%s: IOCTL failure due to ioctl_received = %d\n",
7697 				__FUNCTION__, prot->ioctl_received));
7698 			ret = -EINVAL;
7699 			goto out;
7700 		}
7701 		dhd->rxcnt_timeout = 0;
7702 		dhd->rx_ctlpkts++;
7703 		DHD_CTL(("%s: ioctl resp resumed, got %d\n",
7704 			__FUNCTION__, prot->ioctl_resplen));
7705 	}
7706 
7707 	if (dhd->prot->ioctl_resplen > len)
7708 		dhd->prot->ioctl_resplen = (uint16)len;
7709 	if (buf)
7710 		bcopy(dhd->prot->retbuf.va, buf, dhd->prot->ioctl_resplen);
7711 
7712 	ret = (int)(dhd->prot->ioctl_status);
7713 
7714 out:
7715 	DHD_GENERAL_LOCK(dhd, flags);
7716 	dhd->prot->ioctl_state = 0;
7717 	dhd->prot->ioctl_resplen = 0;
7718 	dhd->prot->ioctl_received = IOCTL_WAIT;
7719 	dhd->prot->curr_ioctl_cmd = 0;
7720 	DHD_GENERAL_UNLOCK(dhd, flags);
7721 
7722 	return ret;
7723 } /* dhd_msgbuf_wait_ioctl_cmplt */
7724 
7725 static int
dhd_msgbuf_set_ioctl(dhd_pub_t * dhd,int ifidx,uint cmd,void * buf,uint len,uint8 action)7726 dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
7727 {
7728 	int ret = 0;
7729 
7730 	DHD_TRACE(("%s: Enter \n", __FUNCTION__));
7731 
7732 	if (dhd->bus->is_linkdown) {
7733 		DHD_ERROR(("%s : PCIe link is down. we have nothing to do\n",
7734 			__FUNCTION__));
7735 		return -EIO;
7736 	}
7737 
7738 	if (dhd->busstate == DHD_BUS_DOWN) {
7739 		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
7740 		return -EIO;
7741 	}
7742 
7743 	/* don't talk to the dongle if fw is about to be reloaded */
7744 	if (dhd->hang_was_sent) {
7745 		DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
7746 			__FUNCTION__));
7747 		return -EIO;
7748 	}
7749 
7750 	DHD_CTL(("ACTION %d ifdix %d cmd %d len %d \n",
7751 		action, ifidx, cmd, len));
7752 
7753 	/* Fill up msgbuf for ioctl req */
7754 	ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
7755 
7756 	if (ret < 0) {
7757 		DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
7758 		goto done;
7759 	}
7760 
7761 	ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
7762 
7763 done:
7764 	return ret;
7765 }
7766 
7767 /** Called by upper DHD layer. Handles a protocol control response asynchronously. */
dhd_prot_ctl_complete(dhd_pub_t * dhd)7768 int dhd_prot_ctl_complete(dhd_pub_t *dhd)
7769 {
7770 	return 0;
7771 }
7772 
7773 /** Called by upper DHD layer. Check for and handle local prot-specific iovar commands */
dhd_prot_iovar_op(dhd_pub_t * dhd,const char * name,void * params,int plen,void * arg,int len,bool set)7774 int dhd_prot_iovar_op(dhd_pub_t *dhd, const char *name,
7775                              void *params, int plen, void *arg, int len, bool set)
7776 {
7777 	return BCME_UNSUPPORTED;
7778 }
7779 
7780 #ifdef DHD_DUMP_PCIE_RINGS
dhd_d2h_h2d_ring_dump(dhd_pub_t * dhd,void * file,const void * user_buf,unsigned long * file_posn,bool file_write)7781 int dhd_d2h_h2d_ring_dump(dhd_pub_t *dhd, void *file, const void *user_buf,
7782 	unsigned long *file_posn, bool file_write)
7783 {
7784 	dhd_prot_t *prot;
7785 	msgbuf_ring_t *ring;
7786 	int ret = 0;
7787 	uint16 h2d_flowrings_total;
7788 	uint16 flowid;
7789 
7790 	if (!(dhd) || !(dhd->prot)) {
7791 		goto exit;
7792 	}
7793 	prot = dhd->prot;
7794 
7795 	/* Below is the same ring dump sequence followed in parser as well. */
7796 	ring = &prot->h2dring_ctrl_subn;
7797 	if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
7798 		goto exit;
7799 
7800 	ring = &prot->h2dring_rxp_subn;
7801 	if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
7802 		goto exit;
7803 
7804 	ring = &prot->d2hring_ctrl_cpln;
7805 	if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
7806 		goto exit;
7807 
7808 	ring = &prot->d2hring_tx_cpln;
7809 	if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
7810 		goto exit;
7811 
7812 	ring = &prot->d2hring_rx_cpln;
7813 	if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
7814 		goto exit;
7815 
7816 	h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
7817 	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
7818 		if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0) {
7819 			goto exit;
7820 		}
7821 	}
7822 
7823 #ifdef EWP_EDL
7824 	if (dhd->dongle_edl_support) {
7825 		ring = prot->d2hring_edl;
7826 		if ((ret = dhd_edl_ring_hdr_write(dhd, ring, file, user_buf, file_posn)) < 0)
7827 			goto exit;
7828 	}
7829 	else if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !dhd->dongle_edl_support)
7830 #else
7831 	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
7832 #endif /* EWP_EDL */
7833 	{
7834 		ring = prot->h2dring_info_subn;
7835 		if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
7836 			goto exit;
7837 
7838 		ring = prot->d2hring_info_cpln;
7839 		if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
7840 			goto exit;
7841 	}
7842 
7843 exit :
7844 	return ret;
7845 }
7846 
7847 /* Write to file */
7848 static
dhd_ring_write(dhd_pub_t * dhd,msgbuf_ring_t * ring,void * file,const void * user_buf,unsigned long * file_posn)7849 int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file,
7850 	const void *user_buf, unsigned long *file_posn)
7851 {
7852 	int ret = 0;
7853 
7854 	if (ring == NULL) {
7855 		DHD_ERROR(("%s: Ring not initialised, failed to dump ring contents\n",
7856 			__FUNCTION__));
7857 		return BCME_ERROR;
7858 	}
7859 	if (file) {
7860 		ret = dhd_os_write_file_posn(file, file_posn, (char *)(ring->dma_buf.va),
7861 				((unsigned long)(ring->max_items) * (ring->item_len)));
7862 		if (ret < 0) {
7863 			DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
7864 			ret = BCME_ERROR;
7865 		}
7866 	} else if (user_buf) {
7867 		ret = dhd_export_debug_data((char *)(ring->dma_buf.va), NULL, user_buf,
7868 			((unsigned long)(ring->max_items) * (ring->item_len)), (int *)file_posn);
7869 	}
7870 	return ret;
7871 }
7872 #endif /* DHD_DUMP_PCIE_RINGS */
7873 
7874 #ifdef EWP_EDL
7875 /* Write to file */
7876 static
dhd_edl_ring_hdr_write(dhd_pub_t * dhd,msgbuf_ring_t * ring,void * file,const void * user_buf,unsigned long * file_posn)7877 int dhd_edl_ring_hdr_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, const void *user_buf,
7878 	unsigned long *file_posn)
7879 {
7880 	int ret = 0, nitems = 0;
7881 	char *buf = NULL, *ptr = NULL;
7882 	uint8 *msg_addr = NULL;
7883 	uint16	rd = 0;
7884 
7885 	if (ring == NULL) {
7886 		DHD_ERROR(("%s: Ring not initialised, failed to dump ring contents\n",
7887 			__FUNCTION__));
7888 		ret = BCME_ERROR;
7889 		goto done;
7890 	}
7891 
7892 	buf = MALLOCZ(dhd->osh, (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_HDR_SIZE));
7893 	if (buf == NULL) {
7894 		DHD_ERROR(("%s: buffer allocation failed\n", __FUNCTION__));
7895 		ret = BCME_ERROR;
7896 		goto done;
7897 	}
7898 	ptr = buf;
7899 
7900 	for (; nitems < D2HRING_EDL_MAX_ITEM; nitems++, rd++) {
7901 		msg_addr = (uint8 *)ring->dma_buf.va + (rd * ring->item_len);
7902 		memcpy(ptr, (char *)msg_addr, D2HRING_EDL_HDR_SIZE);
7903 		ptr += D2HRING_EDL_HDR_SIZE;
7904 	}
7905 	if (file) {
7906 		ret = dhd_os_write_file_posn(file, file_posn, buf,
7907 				(D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM));
7908 		if (ret < 0) {
7909 			DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
7910 			goto done;
7911 		}
7912 	}
7913 	else {
7914 		ret = dhd_export_debug_data(buf, NULL, user_buf,
7915 			(D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM), file_posn);
7916 	}
7917 
7918 done:
7919 	if (buf) {
7920 		MFREE(dhd->osh, buf, (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_HDR_SIZE));
7921 	}
7922 	return ret;
7923 }
7924 #endif /* EWP_EDL */
7925 
7926 /** Add prot dump output to a buffer */
dhd_prot_dump(dhd_pub_t * dhd,struct bcmstrbuf * b)7927 void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
7928 {
7929 
7930 	if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM)
7931 		bcm_bprintf(b, "\nd2h_sync: SEQNUM:");
7932 	else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM)
7933 		bcm_bprintf(b, "\nd2h_sync: XORCSUM:");
7934 	else
7935 		bcm_bprintf(b, "\nd2h_sync: NONE:");
7936 	bcm_bprintf(b, " d2h_sync_wait max<%lu> tot<%lu>\n",
7937 		dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot);
7938 
7939 	bcm_bprintf(b, "\nDongle DMA Indices: h2d %d  d2h %d index size %d bytes\n",
7940 		dhd->dma_h2d_ring_upd_support,
7941 		dhd->dma_d2h_ring_upd_support,
7942 		dhd->prot->rw_index_sz);
7943 	bcm_bprintf(b, "h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
7944 		h2d_max_txpost, dhd->prot->h2d_max_txpost);
7945 	bcm_bprintf(b, "pktid_txq_start_cnt: %d\n", dhd->prot->pktid_txq_start_cnt);
7946 	bcm_bprintf(b, "pktid_txq_stop_cnt: %d\n", dhd->prot->pktid_txq_stop_cnt);
7947 	bcm_bprintf(b, "pktid_depleted_cnt: %d\n", dhd->prot->pktid_depleted_cnt);
7948 }
7949 
7950 /* Update local copy of dongle statistics */
dhd_prot_dstats(dhd_pub_t * dhd)7951 void dhd_prot_dstats(dhd_pub_t *dhd)
7952 {
7953 	return;
7954 }
7955 
7956 /** Called by upper DHD layer */
dhd_process_pkt_reorder_info(dhd_pub_t * dhd,uchar * reorder_info_buf,uint reorder_info_len,void ** pkt,uint32 * free_buf_count)7957 int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf,
7958 	uint reorder_info_len, void **pkt, uint32 *free_buf_count)
7959 {
7960 	return 0;
7961 }
7962 
7963 /** Debug related, post a dummy message to interrupt dongle. Used to process cons commands. */
7964 int
dhd_post_dummy_msg(dhd_pub_t * dhd)7965 dhd_post_dummy_msg(dhd_pub_t *dhd)
7966 {
7967 	unsigned long flags;
7968 	hostevent_hdr_t *hevent = NULL;
7969 	uint16 alloced = 0;
7970 
7971 	dhd_prot_t *prot = dhd->prot;
7972 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
7973 
7974 	DHD_RING_LOCK(ring->ring_lock, flags);
7975 
7976 	hevent = (hostevent_hdr_t *)
7977 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
7978 
7979 	if (hevent == NULL) {
7980 		DHD_RING_UNLOCK(ring->ring_lock, flags);
7981 		return -1;
7982 	}
7983 
7984 	/* CMN msg header */
7985 	hevent->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
7986 	ring->seqnum++;
7987 	hevent->msg.msg_type = MSG_TYPE_HOST_EVNT;
7988 	hevent->msg.if_id = 0;
7989 	hevent->msg.flags = ring->current_phase;
7990 
7991 	/* Event payload */
7992 	hevent->evnt_pyld = htol32(HOST_EVENT_CONS_CMD);
7993 
7994 	/* Since, we are filling the data directly into the bufptr obtained
7995 	 * from the msgbuf, we can directly call the write_complete
7996 	 */
7997 	dhd_prot_ring_write_complete(dhd, ring, hevent, 1);
7998 
7999 	DHD_RING_UNLOCK(ring->ring_lock, flags);
8000 
8001 	return 0;
8002 }
8003 
8004 /**
8005  * If exactly_nitems is true, this function will allocate space for nitems or fail
8006  * If exactly_nitems is false, this function will allocate space for nitems or less
8007  */
8008 static void * BCMFASTPATH
dhd_prot_alloc_ring_space(dhd_pub_t * dhd,msgbuf_ring_t * ring,uint16 nitems,uint16 * alloced,bool exactly_nitems)8009 dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring,
8010 	uint16 nitems, uint16 * alloced, bool exactly_nitems)
8011 {
8012 	void * ret_buf;
8013 
8014 	/* Alloc space for nitems in the ring */
8015 	ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
8016 
8017 	if (ret_buf == NULL) {
8018 		/* HWA TODO, need to get RD pointer from different array
8019 		 * which HWA will directly write into host memory
8020 		 */
8021 		/* if alloc failed , invalidate cached read ptr */
8022 		if (dhd->dma_d2h_ring_upd_support) {
8023 			ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
8024 		} else {
8025 			dhd_bus_cmn_readshared(dhd->bus, &(ring->rd), RING_RD_UPD, ring->idx);
8026 		}
8027 
8028 		/* Try allocating once more */
8029 		ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
8030 
8031 		if (ret_buf == NULL) {
8032 			DHD_INFO(("%s: Ring space not available  \n", ring->name));
8033 			return NULL;
8034 		}
8035 	}
8036 
8037 	if (ret_buf == HOST_RING_BASE(ring)) {
8038 		DHD_INFO(("%s: setting the phase now\n", ring->name));
8039 		ring->current_phase = ring->current_phase ? 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
8040 	}
8041 
8042 	/* Return alloced space */
8043 	return ret_buf;
8044 }
8045 
8046 /**
8047  * Non inline ioct request.
8048  * Form a ioctl request first as per ioctptr_reqst_hdr_t header in the circular buffer
8049  * Form a separate request buffer where a 4 byte cmn header is added in the front
8050  * buf contents from parent function is copied to remaining section of this buffer
8051  */
8052 static int
dhd_fillup_ioct_reqst(dhd_pub_t * dhd,uint16 len,uint cmd,void * buf,int ifidx)8053 dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf, int ifidx)
8054 {
8055 	dhd_prot_t *prot = dhd->prot;
8056 	ioctl_req_msg_t *ioct_rqst;
8057 	void * ioct_buf;	/* For ioctl payload */
8058 	uint16  rqstlen, resplen;
8059 	unsigned long flags;
8060 	uint16 alloced = 0;
8061 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
8062 
8063 	if (dhd_query_bus_erros(dhd)) {
8064 		return -EIO;
8065 	}
8066 
8067 	rqstlen = len;
8068 	resplen = len;
8069 
8070 	/* Limit ioct request to MSGBUF_MAX_MSG_SIZE bytes including hdrs */
8071 	/* 8K allocation of dongle buffer fails */
8072 	/* dhd doesnt give separate input & output buf lens */
8073 	/* so making the assumption that input length can never be more than 2k */
8074 	rqstlen = MIN(rqstlen, MSGBUF_IOCTL_MAX_RQSTLEN);
8075 
8076 	DHD_RING_LOCK(ring->ring_lock, flags);
8077 
8078 	if (prot->ioctl_state) {
8079 		DHD_ERROR(("%s: pending ioctl %02x\n", __FUNCTION__, prot->ioctl_state));
8080 		DHD_RING_UNLOCK(ring->ring_lock, flags);
8081 		return BCME_BUSY;
8082 	} else {
8083 		prot->ioctl_state = MSGBUF_IOCTL_ACK_PENDING | MSGBUF_IOCTL_RESP_PENDING;
8084 	}
8085 
8086 	/* Request for cbuf space */
8087 	ioct_rqst = (ioctl_req_msg_t*)
8088 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
8089 	if (ioct_rqst == NULL) {
8090 		DHD_ERROR(("couldn't allocate space on msgring to send ioctl request\n"));
8091 		prot->ioctl_state = 0;
8092 		prot->curr_ioctl_cmd = 0;
8093 		prot->ioctl_received = IOCTL_WAIT;
8094 		DHD_RING_UNLOCK(ring->ring_lock, flags);
8095 		return -1;
8096 	}
8097 
8098 	/* Common msg buf hdr */
8099 	ioct_rqst->cmn_hdr.msg_type = MSG_TYPE_IOCTLPTR_REQ;
8100 	ioct_rqst->cmn_hdr.if_id = (uint8)ifidx;
8101 	ioct_rqst->cmn_hdr.flags = ring->current_phase;
8102 	ioct_rqst->cmn_hdr.request_id = htol32(DHD_IOCTL_REQ_PKTID);
8103 	ioct_rqst->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
8104 	ring->seqnum++;
8105 
8106 	ioct_rqst->cmd = htol32(cmd);
8107 	prot->curr_ioctl_cmd = cmd;
8108 	ioct_rqst->output_buf_len = htol16(resplen);
8109 	prot->ioctl_trans_id++;
8110 	ioct_rqst->trans_id = prot->ioctl_trans_id;
8111 
8112 	/* populate ioctl buffer info */
8113 	ioct_rqst->input_buf_len = htol16(rqstlen);
8114 	ioct_rqst->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->ioctbuf.pa));
8115 	ioct_rqst->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->ioctbuf.pa));
8116 	/* copy ioct payload */
8117 	ioct_buf = (void *) prot->ioctbuf.va;
8118 
8119 	prot->ioctl_fillup_time = OSL_LOCALTIME_NS();
8120 
8121 	if (buf)
8122 		memcpy(ioct_buf, buf, len);
8123 
8124 	OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, len);
8125 
8126 	if (!ISALIGNED(ioct_buf, DMA_ALIGN_LEN))
8127 		DHD_ERROR(("host ioct address unaligned !!!!! \n"));
8128 
8129 	DHD_CTL(("submitted IOCTL request request_id %d, cmd %d, output_buf_len %d, tx_id %d\n",
8130 		ioct_rqst->cmn_hdr.request_id, cmd, ioct_rqst->output_buf_len,
8131 		ioct_rqst->trans_id));
8132 
8133 	/* update ring's WR index and ring doorbell to dongle */
8134 	dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
8135 
8136 	DHD_RING_UNLOCK(ring->ring_lock, flags);
8137 
8138 	return 0;
8139 } /* dhd_fillup_ioct_reqst */
8140 
8141 /**
8142  * dhd_prot_ring_attach - Initialize the msgbuf_ring object and attach a
8143  * DMA-able buffer to it. The ring is NOT tagged as inited until all the ring
8144  * information is posted to the dongle.
8145  *
8146  * Invoked in dhd_prot_attach for the common rings, and in dhd_prot_init for
8147  * each flowring in pool of flowrings.
8148  *
8149  * returns BCME_OK=0 on success
8150  * returns non-zero negative error value on failure.
8151  */
8152 static int
dhd_prot_ring_attach(dhd_pub_t * dhd,msgbuf_ring_t * ring,const char * name,uint16 max_items,uint16 item_len,uint16 ringid)8153 dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring, const char *name,
8154 	uint16 max_items, uint16 item_len, uint16 ringid)
8155 {
8156 	int dma_buf_alloced = BCME_NOMEM;
8157 	uint32 dma_buf_len = max_items * item_len;
8158 	dhd_prot_t *prot = dhd->prot;
8159 	uint16 max_flowrings = dhd->bus->max_tx_flowrings;
8160 	dhd_dma_buf_t *dma_buf = NULL;
8161 
8162 	ASSERT(ring);
8163 	ASSERT(name);
8164 	ASSERT((max_items < 0xFFFF) && (item_len < 0xFFFF) && (ringid < 0xFFFF));
8165 
8166 	/* Init name */
8167 	strncpy(ring->name, name, RING_NAME_MAX_LENGTH);
8168 	ring->name[RING_NAME_MAX_LENGTH - 1] = '\0';
8169 
8170 	ring->idx = ringid;
8171 
8172 	ring->max_items = max_items;
8173 	ring->item_len = item_len;
8174 
8175 	/* A contiguous space may be reserved for all flowrings */
8176 	if (DHD_IS_FLOWRING(ringid, max_flowrings) && (prot->flowrings_dma_buf.va)) {
8177 		/* Carve out from the contiguous DMA-able flowring buffer */
8178 		uint16 flowid;
8179 		uint32 base_offset;
8180 
8181 		dhd_dma_buf_t *rsv_buf = &prot->flowrings_dma_buf;
8182 		dma_buf = &ring->dma_buf;
8183 
8184 		flowid = DHD_RINGID_TO_FLOWID(ringid);
8185 		base_offset = (flowid - BCMPCIE_H2D_COMMON_MSGRINGS) * dma_buf_len;
8186 
8187 		ASSERT(base_offset + dma_buf_len <= rsv_buf->len);
8188 
8189 		dma_buf->len = dma_buf_len;
8190 		dma_buf->va = (void *)((uintptr)rsv_buf->va + base_offset);
8191 		PHYSADDRHISET(dma_buf->pa, PHYSADDRHI(rsv_buf->pa));
8192 		PHYSADDRLOSET(dma_buf->pa, PHYSADDRLO(rsv_buf->pa) + base_offset);
8193 
8194 		/* On 64bit, contiguous space may not span across 0x00000000FFFFFFFF */
8195 		ASSERT(PHYSADDRLO(dma_buf->pa) >= PHYSADDRLO(rsv_buf->pa));
8196 
8197 		dma_buf->dmah   = rsv_buf->dmah;
8198 		dma_buf->secdma = rsv_buf->secdma;
8199 
8200 		(void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
8201 	} else {
8202 #ifdef EWP_EDL
8203 		if (ring == dhd->prot->d2hring_edl) {
8204 			/* For EDL ring, memory is alloced during attach,
8205 			* so just need to copy the dma_buf to the ring's dma_buf
8206 			*/
8207 			memcpy(&ring->dma_buf, &dhd->edl_ring_mem, sizeof(ring->dma_buf));
8208 			dma_buf = &ring->dma_buf;
8209 			if (dma_buf->va == NULL) {
8210 				return BCME_NOMEM;
8211 			}
8212 		} else
8213 #endif /* EWP_EDL */
8214 		{
8215 			/* Allocate a dhd_dma_buf */
8216 			dma_buf_alloced = dhd_dma_buf_alloc(dhd, &ring->dma_buf, dma_buf_len);
8217 			if (dma_buf_alloced != BCME_OK) {
8218 				return BCME_NOMEM;
8219 			}
8220 		}
8221 	}
8222 
8223 	/* CAUTION: Save ring::base_addr in little endian format! */
8224 	dhd_base_addr_htolpa(&ring->base_addr, ring->dma_buf.pa);
8225 
8226 #ifdef BCM_SECURE_DMA
8227 	if (SECURE_DMA_ENAB(prot->osh)) {
8228 		ring->dma_buf.secdma = MALLOCZ(prot->osh, sizeof(sec_cma_info_t));
8229 		if (ring->dma_buf.secdma == NULL) {
8230 			goto free_dma_buf;
8231 		}
8232 	}
8233 #endif /* BCM_SECURE_DMA */
8234 
8235 	ring->ring_lock = dhd_os_spin_lock_init(dhd->osh);
8236 
8237 	DHD_INFO(("RING_ATTACH : %s Max item %d len item %d total size %d "
8238 		"ring start %p buf phys addr  %x:%x \n",
8239 		ring->name, ring->max_items, ring->item_len,
8240 		dma_buf_len, ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
8241 		ltoh32(ring->base_addr.low_addr)));
8242 
8243 	return BCME_OK;
8244 
8245 #ifdef BCM_SECURE_DMA
8246 free_dma_buf:
8247 	if (dma_buf_alloced == BCME_OK) {
8248 		dhd_dma_buf_free(dhd, &ring->dma_buf);
8249 	}
8250 #endif /* BCM_SECURE_DMA */
8251 
8252 	return BCME_NOMEM;
8253 
8254 } /* dhd_prot_ring_attach */
8255 
8256 /**
8257  * dhd_prot_ring_init - Post the common ring information to dongle.
8258  *
8259  * Used only for common rings.
8260  *
8261  * The flowrings information is passed via the create flowring control message
8262  * (tx_flowring_create_request_t) sent over the H2D control submission common
8263  * ring.
8264  */
8265 static void
dhd_prot_ring_init(dhd_pub_t * dhd,msgbuf_ring_t * ring)8266 dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring)
8267 {
8268 	ring->wr = 0;
8269 	ring->rd = 0;
8270 	ring->curr_rd = 0;
8271 	/* Reset hwa_db_type for all rings,
8272 	 * for data path rings, it will be assigned separately post init
8273 	 * from dhd_prot_d2h_sync_init and dhd_prot_h2d_sync_init
8274 	 */
8275 	ring->hwa_db_type = 0;
8276 
8277 	/* CAUTION: ring::base_addr already in Little Endian */
8278 	dhd_bus_cmn_writeshared(dhd->bus, &ring->base_addr,
8279 		sizeof(sh_addr_t), RING_BUF_ADDR, ring->idx);
8280 	dhd_bus_cmn_writeshared(dhd->bus, &ring->max_items,
8281 		sizeof(uint16), RING_MAX_ITEMS, ring->idx);
8282 	dhd_bus_cmn_writeshared(dhd->bus, &ring->item_len,
8283 		sizeof(uint16), RING_ITEM_LEN, ring->idx);
8284 
8285 	dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
8286 		sizeof(uint16), RING_WR_UPD, ring->idx);
8287 	dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
8288 		sizeof(uint16), RING_RD_UPD, ring->idx);
8289 
8290 	/* ring inited */
8291 	ring->inited = TRUE;
8292 
8293 } /* dhd_prot_ring_init */
8294 
8295 /**
8296  * dhd_prot_ring_reset - bzero a ring's DMA-ble buffer and cache flush
8297  * Reset WR and RD indices to 0.
8298  */
8299 static void
dhd_prot_ring_reset(dhd_pub_t * dhd,msgbuf_ring_t * ring)8300 dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring)
8301 {
8302 	DHD_TRACE(("%s\n", __FUNCTION__));
8303 
8304 	dhd_dma_buf_reset(dhd, &ring->dma_buf);
8305 
8306 	ring->rd = ring->wr = 0;
8307 	ring->curr_rd = 0;
8308 	ring->inited = FALSE;
8309 	ring->create_pending = FALSE;
8310 }
8311 
8312 /**
8313  * dhd_prot_ring_detach - Detach the DMA-able buffer and any other objects
8314  * hanging off the msgbuf_ring.
8315  */
8316 static void
dhd_prot_ring_detach(dhd_pub_t * dhd,msgbuf_ring_t * ring)8317 dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring)
8318 {
8319 	dhd_prot_t *prot = dhd->prot;
8320 	uint16 max_flowrings = dhd->bus->max_tx_flowrings;
8321 	ASSERT(ring);
8322 
8323 	ring->inited = FALSE;
8324 	/* rd = ~0, wr = ring->rd - 1, max_items = 0, len_item = ~0 */
8325 
8326 #ifdef BCM_SECURE_DMA
8327 	if (SECURE_DMA_ENAB(prot->osh)) {
8328 		if (ring->dma_buf.secdma) {
8329 			SECURE_DMA_UNMAP_ALL(prot->osh, ring->dma_buf.secdma);
8330 			MFREE(prot->osh, ring->dma_buf.secdma, sizeof(sec_cma_info_t));
8331 			ring->dma_buf.secdma = NULL;
8332 		}
8333 	}
8334 #endif /* BCM_SECURE_DMA */
8335 
8336 	/* If the DMA-able buffer was carved out of a pre-reserved contiguous
8337 	 * memory, then simply stop using it.
8338 	 */
8339 	if (DHD_IS_FLOWRING(ring->idx, max_flowrings) && (prot->flowrings_dma_buf.va)) {
8340 		(void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
8341 		memset(&ring->dma_buf, 0, sizeof(dhd_dma_buf_t));
8342 	} else {
8343 		dhd_dma_buf_free(dhd, &ring->dma_buf);
8344 	}
8345 
8346 	dhd_os_spin_lock_deinit(dhd->osh, ring->ring_lock);
8347 
8348 } /* dhd_prot_ring_detach */
8349 
8350 /* Fetch number of H2D flowrings given the total number of h2d rings */
8351 uint16
dhd_get_max_flow_rings(dhd_pub_t * dhd)8352 dhd_get_max_flow_rings(dhd_pub_t *dhd)
8353 {
8354 	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
8355 		return dhd->bus->max_tx_flowrings;
8356 	else
8357 		return (dhd->bus->max_tx_flowrings - BCMPCIE_H2D_COMMON_MSGRINGS);
8358 }
8359 
8360 /**
8361  * dhd_prot_flowrings_pool_attach - Initialize a pool of flowring msgbuf_ring_t.
8362  *
8363  * Allocate a pool of msgbuf_ring along with DMA-able buffers for flowrings.
8364  * Dongle includes common rings when it advertizes the number of H2D rings.
8365  * Allocates a pool of msgbuf_ring_t and invokes dhd_prot_ring_attach to
8366  * allocate the DMA-able buffer and initialize each msgbuf_ring_t object.
8367  *
8368  * dhd_prot_ring_attach is invoked to perform the actual initialization and
8369  * attaching the DMA-able buffer.
8370  *
8371  * Later dhd_prot_flowrings_pool_fetch() may be used to fetch a preallocated and
8372  * initialized msgbuf_ring_t object.
8373  *
8374  * returns BCME_OK=0 on success
8375  * returns non-zero negative error value on failure.
8376  */
8377 static int
dhd_prot_flowrings_pool_attach(dhd_pub_t * dhd)8378 dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd)
8379 {
8380 	uint16 flowid;
8381 	msgbuf_ring_t *ring;
8382 	uint16 h2d_flowrings_total; /* exclude H2D common rings */
8383 	dhd_prot_t *prot = dhd->prot;
8384 	char ring_name[RING_NAME_MAX_LENGTH];
8385 
8386 	if (prot->h2d_flowrings_pool != NULL)
8387 		return BCME_OK; /* dhd_prot_init rentry after a dhd_prot_reset */
8388 
8389 	ASSERT(prot->h2d_rings_total == 0);
8390 
8391 	/* h2d_rings_total includes H2D common rings: ctrl and rxbuf subn */
8392 	prot->h2d_rings_total = (uint16)dhd_bus_max_h2d_queues(dhd->bus);
8393 
8394 	if (prot->h2d_rings_total < BCMPCIE_H2D_COMMON_MSGRINGS) {
8395 		DHD_ERROR(("%s: h2d_rings_total advertized as %u\n",
8396 			__FUNCTION__, prot->h2d_rings_total));
8397 		return BCME_ERROR;
8398 	}
8399 
8400 	/* Subtract number of H2D common rings, to determine number of flowrings */
8401 	h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
8402 
8403 	DHD_ERROR(("Attach flowrings pool for %d rings\n", h2d_flowrings_total));
8404 
8405 	/* Allocate pool of msgbuf_ring_t objects for all flowrings */
8406 	prot->h2d_flowrings_pool = (msgbuf_ring_t *)MALLOCZ(prot->osh,
8407 		(h2d_flowrings_total * sizeof(msgbuf_ring_t)));
8408 
8409 	if (prot->h2d_flowrings_pool == NULL) {
8410 		DHD_ERROR(("%s: flowrings pool for %d flowrings, alloc failure\n",
8411 			__FUNCTION__, h2d_flowrings_total));
8412 		goto fail;
8413 	}
8414 
8415 	/* Setup & Attach a DMA-able buffer to each flowring in the flowring pool */
8416 	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
8417 		snprintf(ring_name, sizeof(ring_name), "h2dflr_%03u", flowid);
8418 		if (dhd_prot_ring_attach(dhd, ring, ring_name,
8419 		        prot->h2d_max_txpost, H2DRING_TXPOST_ITEMSIZE,
8420 		        DHD_FLOWID_TO_RINGID(flowid)) != BCME_OK) {
8421 			goto attach_fail;
8422 		}
8423 		/*
8424 		 * TOD0 - Currently flowrings hwa is disabled and can be enabled like below
8425 		 * (dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_TXPOSTS) ? HWA_DB_TYPE_TXPOSTS : 0;
8426 		 */
8427 		ring->hwa_db_type = 0;
8428 	}
8429 
8430 	return BCME_OK;
8431 
8432 attach_fail:
8433 	dhd_prot_flowrings_pool_detach(dhd); /* Free entire pool of flowrings */
8434 
8435 fail:
8436 	prot->h2d_rings_total = 0;
8437 	return BCME_NOMEM;
8438 
8439 } /* dhd_prot_flowrings_pool_attach */
8440 
8441 /**
8442  * dhd_prot_flowrings_pool_reset - Reset all msgbuf_ring_t objects in the pool.
8443  * Invokes dhd_prot_ring_reset to perform the actual reset.
8444  *
8445  * The DMA-able buffer is not freed during reset and neither is the flowring
8446  * pool freed.
8447  *
8448  * dhd_prot_flowrings_pool_reset will be invoked in dhd_prot_reset. Following
8449  * the dhd_prot_reset, dhd_prot_init will be re-invoked, and the flowring pool
8450  * from a previous flowring pool instantiation will be reused.
8451  *
8452  * This will avoid a fragmented DMA-able memory condition, if multiple
8453  * dhd_prot_reset were invoked to reboot the dongle without a full detach/attach
8454  * cycle.
8455  */
8456 static void
dhd_prot_flowrings_pool_reset(dhd_pub_t * dhd)8457 dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd)
8458 {
8459 	uint16 flowid, h2d_flowrings_total;
8460 	msgbuf_ring_t *ring;
8461 	dhd_prot_t *prot = dhd->prot;
8462 
8463 	if (prot->h2d_flowrings_pool == NULL) {
8464 		ASSERT(prot->h2d_rings_total == 0);
8465 		return;
8466 	}
8467 	h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
8468 	/* Reset each flowring in the flowring pool */
8469 	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
8470 		dhd_prot_ring_reset(dhd, ring);
8471 		ring->inited = FALSE;
8472 	}
8473 
8474 	/* Flowring pool state must be as-if dhd_prot_flowrings_pool_attach */
8475 }
8476 
8477 /**
8478  * dhd_prot_flowrings_pool_detach - Free pool of msgbuf_ring along with
8479  * DMA-able buffers for flowrings.
8480  * dhd_prot_ring_detach is invoked to free the DMA-able buffer and perform any
8481  * de-initialization of each msgbuf_ring_t.
8482  */
8483 static void
dhd_prot_flowrings_pool_detach(dhd_pub_t * dhd)8484 dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd)
8485 {
8486 	int flowid;
8487 	msgbuf_ring_t *ring;
8488 	uint16 h2d_flowrings_total; /* exclude H2D common rings */
8489 	dhd_prot_t *prot = dhd->prot;
8490 
8491 	if (prot->h2d_flowrings_pool == NULL) {
8492 		ASSERT(prot->h2d_rings_total == 0);
8493 		return;
8494 	}
8495 
8496 	h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
8497 	/* Detach the DMA-able buffer for each flowring in the flowring pool */
8498 	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
8499 		dhd_prot_ring_detach(dhd, ring);
8500 	}
8501 
8502 	MFREE(prot->osh, prot->h2d_flowrings_pool,
8503 		(h2d_flowrings_total * sizeof(msgbuf_ring_t)));
8504 
8505 	prot->h2d_flowrings_pool = (msgbuf_ring_t*)NULL;
8506 	prot->h2d_rings_total = 0;
8507 
8508 } /* dhd_prot_flowrings_pool_detach */
8509 
8510 /**
8511  * dhd_prot_flowrings_pool_fetch - Fetch a preallocated and initialized
8512  * msgbuf_ring from the flowring pool, and assign it.
8513  *
8514  * Unlike common rings, which uses a dhd_prot_ring_init() to pass the common
8515  * ring information to the dongle, a flowring's information is passed via a
8516  * flowring create control message.
8517  *
8518  * Only the ring state (WR, RD) index are initialized.
8519  */
8520 static msgbuf_ring_t *
dhd_prot_flowrings_pool_fetch(dhd_pub_t * dhd,uint16 flowid)8521 dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd, uint16 flowid)
8522 {
8523 	msgbuf_ring_t *ring;
8524 	dhd_prot_t *prot = dhd->prot;
8525 
8526 	ASSERT(flowid >= DHD_FLOWRING_START_FLOWID);
8527 	ASSERT(flowid < prot->h2d_rings_total);
8528 	ASSERT(prot->h2d_flowrings_pool != NULL);
8529 
8530 	ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
8531 
8532 	/* ASSERT flow_ring->inited == FALSE */
8533 
8534 	ring->wr = 0;
8535 	ring->rd = 0;
8536 	ring->curr_rd = 0;
8537 	ring->inited = TRUE;
8538 	/**
8539 	 * Every time a flowring starts dynamically, initialize current_phase with 0
8540 	 * then flip to BCMPCIE_CMNHDR_PHASE_BIT_INIT
8541 	 */
8542 	ring->current_phase = 0;
8543 	return ring;
8544 }
8545 
8546 /**
8547  * dhd_prot_flowrings_pool_release - release a previously fetched flowring's
8548  * msgbuf_ring back to the flow_ring pool.
8549  */
8550 void
dhd_prot_flowrings_pool_release(dhd_pub_t * dhd,uint16 flowid,void * flow_ring)8551 dhd_prot_flowrings_pool_release(dhd_pub_t *dhd, uint16 flowid, void *flow_ring)
8552 {
8553 	msgbuf_ring_t *ring;
8554 	dhd_prot_t *prot = dhd->prot;
8555 
8556 	ASSERT(flowid >= DHD_FLOWRING_START_FLOWID);
8557 	ASSERT(flowid < prot->h2d_rings_total);
8558 	ASSERT(prot->h2d_flowrings_pool != NULL);
8559 
8560 	ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
8561 
8562 	ASSERT(ring == (msgbuf_ring_t*)flow_ring);
8563 	/* ASSERT flow_ring->inited == TRUE */
8564 
8565 	(void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
8566 
8567 	ring->wr = 0;
8568 	ring->rd = 0;
8569 	ring->inited = FALSE;
8570 
8571 	ring->curr_rd = 0;
8572 }
8573 
8574 /* Assumes only one index is updated at a time */
8575 /* If exactly_nitems is true, this function will allocate space for nitems or fail */
8576 /*    Exception: when wrap around is encountered, to prevent hangup (last nitems of ring buffer) */
8577 /* If exactly_nitems is false, this function will allocate space for nitems or less */
8578 static void *BCMFASTPATH
dhd_prot_get_ring_space(msgbuf_ring_t * ring,uint16 nitems,uint16 * alloced,bool exactly_nitems)8579 dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced,
8580 	bool exactly_nitems)
8581 {
8582 	void *ret_ptr = NULL;
8583 	uint16 ring_avail_cnt;
8584 
8585 	ASSERT(nitems <= ring->max_items);
8586 
8587 	ring_avail_cnt = CHECK_WRITE_SPACE(ring->rd, ring->wr, ring->max_items);
8588 
8589 	if ((ring_avail_cnt == 0) ||
8590 	       (exactly_nitems && (ring_avail_cnt < nitems) &&
8591 	       ((ring->max_items - ring->wr) >= nitems))) {
8592 		DHD_INFO(("Space not available: ring %s items %d write %d read %d\n",
8593 			ring->name, nitems, ring->wr, ring->rd));
8594 		return NULL;
8595 	}
8596 	*alloced = MIN(nitems, ring_avail_cnt);
8597 
8598 	/* Return next available space */
8599 	ret_ptr = (char *)DHD_RING_BGN_VA(ring) + (ring->wr * ring->item_len);
8600 
8601 	/* Update write index */
8602 	if ((ring->wr + *alloced) == ring->max_items)
8603 		ring->wr = 0;
8604 	else if ((ring->wr + *alloced) < ring->max_items)
8605 		ring->wr += *alloced;
8606 	else {
8607 		/* Should never hit this */
8608 		ASSERT(0);
8609 		return NULL;
8610 	}
8611 
8612 	return ret_ptr;
8613 } /* dhd_prot_get_ring_space */
8614 
8615 /**
8616  * dhd_prot_ring_write_complete - Host updates the new WR index on producing
8617  * new messages in a H2D ring. The messages are flushed from cache prior to
8618  * posting the new WR index. The new WR index will be updated in the DMA index
8619  * array or directly in the dongle's ring state memory.
8620  * A PCIE doorbell will be generated to wake up the dongle.
8621  * This is a non-atomic function, make sure the callers
8622  * always hold appropriate locks.
8623  */
8624 static void BCMFASTPATH
__dhd_prot_ring_write_complete(dhd_pub_t * dhd,msgbuf_ring_t * ring,void * p,uint16 nitems)8625 __dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
8626 	uint16 nitems)
8627 {
8628 	dhd_prot_t *prot = dhd->prot;
8629 	uint32 db_index;
8630 	uint16 max_flowrings = dhd->bus->max_tx_flowrings;
8631 	uint corerev;
8632 
8633 	/* cache flush */
8634 	OSL_CACHE_FLUSH(p, ring->item_len * nitems);
8635 
8636 	/* For HWA, update db_index and ring mb2 DB and return */
8637 	if (HWA_ACTIVE(dhd) && ring->hwa_db_type) {
8638 		db_index = HWA_DB_INDEX_VALUE(ring->wr) | ring->hwa_db_type;
8639 		DHD_TRACE(("%s: ring(%s) wr(%d) hwa_db_type(0x%x) db_index(0x%x)\n",
8640 			__FUNCTION__, ring->name, ring->wr, ring->hwa_db_type, db_index));
8641 		prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
8642 		return;
8643 	}
8644 
8645 	if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
8646 			dhd_prot_dma_indx_set(dhd, ring->wr,
8647 			                      H2D_DMA_INDX_WR_UPD, ring->idx);
8648 	} else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings)) {
8649 			dhd_prot_dma_indx_set(dhd, ring->wr,
8650 			H2D_IFRM_INDX_WR_UPD, ring->idx);
8651 	} else {
8652 			dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
8653 				sizeof(uint16), RING_WR_UPD, ring->idx);
8654 	}
8655 
8656 	/* raise h2d interrupt */
8657 	if (IDMA_ACTIVE(dhd) ||
8658 		(IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings))) {
8659 		db_index = IDMA_IDX0;
8660 		/* this api is called in wl down path..in that case sih is freed already */
8661 		if (dhd->bus->sih) {
8662 			corerev = dhd->bus->sih->buscorerev;
8663 			/* We need to explictly configure the type of DMA for core rev >= 24 */
8664 			if (corerev >= 24) {
8665 				db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
8666 			}
8667 		}
8668 		prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
8669 	} else {
8670 		prot->mb_ring_fn(dhd->bus, ring->wr);
8671 	}
8672 }
8673 
8674 static void BCMFASTPATH
dhd_prot_ring_write_complete(dhd_pub_t * dhd,msgbuf_ring_t * ring,void * p,uint16 nitems)8675 dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
8676 	uint16 nitems)
8677 {
8678 	unsigned long flags_bus;
8679 	DHD_BUS_LOCK(dhd->bus->bus_lock, flags_bus);
8680 	__dhd_prot_ring_write_complete(dhd, ring, p, nitems);
8681 	DHD_BUS_UNLOCK(dhd->bus->bus_lock, flags_bus);
8682 }
8683 
8684 /**
8685  * dhd_prot_ring_write_complete_mbdata - will be called from dhd_prot_h2d_mbdata_send_ctrlmsg,
8686  * which will hold DHD_BUS_LOCK to update WR pointer, Ring DB and also update bus_low_power_state
8687  * to indicate D3_INFORM sent in the same BUS_LOCK.
8688  */
8689 static void BCMFASTPATH
dhd_prot_ring_write_complete_mbdata(dhd_pub_t * dhd,msgbuf_ring_t * ring,void * p,uint16 nitems,uint32 mb_data)8690 dhd_prot_ring_write_complete_mbdata(dhd_pub_t *dhd, msgbuf_ring_t * ring, void *p,
8691 	uint16 nitems, uint32 mb_data)
8692 {
8693 	unsigned long flags_bus;
8694 
8695 	DHD_BUS_LOCK(dhd->bus->bus_lock, flags_bus);
8696 
8697 	__dhd_prot_ring_write_complete(dhd, ring, p, nitems);
8698 
8699 	/* Mark D3_INFORM in the same context to skip ringing H2D DB after D3_INFORM */
8700 	if (mb_data == H2D_HOST_D3_INFORM) {
8701 		dhd->bus->bus_low_power_state = DHD_BUS_D3_INFORM_SENT;
8702 	}
8703 
8704 	DHD_BUS_UNLOCK(dhd->bus->bus_lock, flags_bus);
8705 }
8706 
8707 /**
8708  * dhd_prot_upd_read_idx - Host updates the new RD index on consuming messages
8709  * from a D2H ring. The new RD index will be updated in the DMA Index array or
8710  * directly in dongle's ring state memory.
8711  */
8712 static void
dhd_prot_upd_read_idx(dhd_pub_t * dhd,msgbuf_ring_t * ring)8713 dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring)
8714 {
8715 	dhd_prot_t *prot = dhd->prot;
8716 	uint32 db_index;
8717 	uint corerev;
8718 
8719 	/* For HWA, update db_index and ring mb2 DB and return */
8720 	if (HWA_ACTIVE(dhd) && ring->hwa_db_type) {
8721 		db_index = HWA_DB_INDEX_VALUE(ring->rd) | ring->hwa_db_type;
8722 		DHD_TRACE(("%s: ring(%s) rd(0x%x) hwa_db_type(0x%x) db_index(0x%x)\n",
8723 			__FUNCTION__, ring->name, ring->rd, ring->hwa_db_type, db_index));
8724 		prot->mb_2_ring_fn(dhd->bus, db_index, FALSE);
8725 		return;
8726 	}
8727 
8728 	/* update read index */
8729 	/* If dma'ing h2d indices supported
8730 	 * update the r -indices in the
8731 	 * host memory o/w in TCM
8732 	 */
8733 	if (IDMA_ACTIVE(dhd)) {
8734 		dhd_prot_dma_indx_set(dhd, ring->rd,
8735 			D2H_DMA_INDX_RD_UPD, ring->idx);
8736 		db_index = IDMA_IDX1;
8737 		if (dhd->bus->sih) {
8738 			corerev = dhd->bus->sih->buscorerev;
8739 			/* We need to explictly configure the type of DMA for core rev >= 24 */
8740 			if (corerev >= 24) {
8741 				db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
8742 			}
8743 		}
8744 		prot->mb_2_ring_fn(dhd->bus, db_index, FALSE);
8745 	} else if (dhd->dma_h2d_ring_upd_support) {
8746 		dhd_prot_dma_indx_set(dhd, ring->rd,
8747 		                      D2H_DMA_INDX_RD_UPD, ring->idx);
8748 	} else {
8749 		dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
8750 			sizeof(uint16), RING_RD_UPD, ring->idx);
8751 	}
8752 }
8753 
8754 static int
dhd_send_d2h_ringcreate(dhd_pub_t * dhd,msgbuf_ring_t * ring_to_create,uint16 ring_type,uint32 req_id)8755 dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
8756 	uint16 ring_type, uint32 req_id)
8757 {
8758 	unsigned long flags;
8759 	d2h_ring_create_req_t  *d2h_ring;
8760 	uint16 alloced = 0;
8761 	int ret = BCME_OK;
8762 	uint16 max_h2d_rings = dhd->bus->max_submission_rings;
8763 	msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
8764 
8765 	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
8766 
8767 	DHD_TRACE(("%s trying to send D2H ring create Req\n", __FUNCTION__));
8768 
8769 	if (ring_to_create == NULL) {
8770 		DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__));
8771 		ret = BCME_ERROR;
8772 		goto err;
8773 	}
8774 
8775 	/* Request for ring buffer space */
8776 	d2h_ring = (d2h_ring_create_req_t *) dhd_prot_alloc_ring_space(dhd,
8777 		ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
8778 		&alloced, FALSE);
8779 
8780 	if (d2h_ring == NULL) {
8781 		DHD_ERROR(("%s: FATAL: No space in control ring to send D2H ring create\n",
8782 			__FUNCTION__));
8783 		ret = BCME_NOMEM;
8784 		goto err;
8785 	}
8786 	ring_to_create->create_req_id = (uint16)req_id;
8787 	ring_to_create->create_pending = TRUE;
8788 
8789 	/* Common msg buf hdr */
8790 	d2h_ring->msg.msg_type = MSG_TYPE_D2H_RING_CREATE;
8791 	d2h_ring->msg.if_id = 0;
8792 	d2h_ring->msg.flags = ctrl_ring->current_phase;
8793 	d2h_ring->msg.request_id = htol32(ring_to_create->create_req_id);
8794 	d2h_ring->ring_id = htol16(DHD_D2H_RING_OFFSET(ring_to_create->idx, max_h2d_rings));
8795 	DHD_ERROR(("%s ringid: %d idx: %d max_h2d: %d\n", __FUNCTION__, d2h_ring->ring_id,
8796 			ring_to_create->idx, max_h2d_rings));
8797 
8798 	d2h_ring->ring_type = ring_type;
8799 	d2h_ring->max_items = htol16(ring_to_create->max_items);
8800 	d2h_ring->len_item = htol16(ring_to_create->item_len);
8801 	d2h_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr;
8802 	d2h_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr;
8803 
8804 	d2h_ring->flags = 0;
8805 	d2h_ring->msg.epoch =
8806 		ctrl_ring->seqnum % H2D_EPOCH_MODULO;
8807 	ctrl_ring->seqnum++;
8808 #ifdef EWP_EDL
8809 	if (ring_type == BCMPCIE_D2H_RING_TYPE_EDL) {
8810 		DHD_ERROR(("%s: sending d2h EDL ring create: "
8811 			"\n max items=%u; len_item=%u; ring_id=%u; low_addr=0x%x; high_addr=0x%x\n",
8812 			__FUNCTION__, ltoh16(d2h_ring->max_items),
8813 			ltoh16(d2h_ring->len_item),
8814 			ltoh16(d2h_ring->ring_id),
8815 			d2h_ring->ring_ptr.low_addr,
8816 			d2h_ring->ring_ptr.high_addr));
8817 	}
8818 #endif /* EWP_EDL */
8819 
8820 	/* Update the flow_ring's WRITE index */
8821 	dhd_prot_ring_write_complete(dhd, ctrl_ring, d2h_ring,
8822 		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
8823 
8824 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
8825 
8826 	return ret;
8827 err:
8828 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
8829 
8830 	return ret;
8831 }
8832 
8833 static int
dhd_send_h2d_ringcreate(dhd_pub_t * dhd,msgbuf_ring_t * ring_to_create,uint8 ring_type,uint32 id)8834 dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create, uint8 ring_type, uint32 id)
8835 {
8836 	unsigned long flags;
8837 	h2d_ring_create_req_t  *h2d_ring;
8838 	uint16 alloced = 0;
8839 	uint8 i = 0;
8840 	int ret = BCME_OK;
8841 	msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
8842 
8843 	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
8844 
8845 	DHD_TRACE(("%s trying to send H2D ring create Req\n", __FUNCTION__));
8846 
8847 	if (ring_to_create == NULL) {
8848 		DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__));
8849 		ret = BCME_ERROR;
8850 		goto err;
8851 	}
8852 
8853 	/* Request for ring buffer space */
8854 	h2d_ring = (h2d_ring_create_req_t *)dhd_prot_alloc_ring_space(dhd,
8855 		ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
8856 		&alloced, FALSE);
8857 
8858 	if (h2d_ring == NULL) {
8859 		DHD_ERROR(("%s: FATAL: No space in control ring to send H2D ring create\n",
8860 			__FUNCTION__));
8861 		ret = BCME_NOMEM;
8862 		goto err;
8863 	}
8864 	ring_to_create->create_req_id = (uint16)id;
8865 	ring_to_create->create_pending = TRUE;
8866 
8867 	/* Common msg buf hdr */
8868 	h2d_ring->msg.msg_type = MSG_TYPE_H2D_RING_CREATE;
8869 	h2d_ring->msg.if_id = 0;
8870 	h2d_ring->msg.request_id = htol32(ring_to_create->create_req_id);
8871 	h2d_ring->msg.flags = ctrl_ring->current_phase;
8872 	h2d_ring->ring_id = htol16(DHD_H2D_RING_OFFSET(ring_to_create->idx));
8873 	h2d_ring->ring_type = ring_type;
8874 	h2d_ring->max_items = htol16(H2DRING_DYNAMIC_INFO_MAX_ITEM);
8875 	h2d_ring->n_completion_ids = ring_to_create->n_completion_ids;
8876 	h2d_ring->len_item = htol16(H2DRING_INFO_BUFPOST_ITEMSIZE);
8877 	h2d_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr;
8878 	h2d_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr;
8879 
8880 	for (i = 0; i < ring_to_create->n_completion_ids; i++) {
8881 		h2d_ring->completion_ring_ids[i] = htol16(ring_to_create->compeltion_ring_ids[i]);
8882 	}
8883 
8884 	h2d_ring->flags = 0;
8885 	h2d_ring->msg.epoch =
8886 		ctrl_ring->seqnum % H2D_EPOCH_MODULO;
8887 	ctrl_ring->seqnum++;
8888 
8889 	/* Update the flow_ring's WRITE index */
8890 	dhd_prot_ring_write_complete(dhd, ctrl_ring, h2d_ring,
8891 		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
8892 
8893 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
8894 
8895 	return ret;
8896 err:
8897 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
8898 
8899 	return ret;
8900 }
8901 
8902 /**
8903  * dhd_prot_dma_indx_set - set a new WR or RD index in the DMA index array.
8904  * Dongle will DMA the entire array (if DMA_INDX feature is enabled).
8905  * See dhd_prot_dma_indx_init()
8906  */
8907 void
dhd_prot_dma_indx_set(dhd_pub_t * dhd,uint16 new_index,uint8 type,uint16 ringid)8908 dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type, uint16 ringid)
8909 {
8910 	uint8 *ptr;
8911 	uint16 offset;
8912 	dhd_prot_t *prot = dhd->prot;
8913 	uint16 max_h2d_rings = dhd->bus->max_submission_rings;
8914 
8915 	switch (type) {
8916 		case H2D_DMA_INDX_WR_UPD:
8917 			ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
8918 			offset = DHD_H2D_RING_OFFSET(ringid);
8919 			break;
8920 
8921 		case D2H_DMA_INDX_RD_UPD:
8922 			ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
8923 			offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
8924 			break;
8925 
8926 		case H2D_IFRM_INDX_WR_UPD:
8927 			ptr = (uint8 *)(prot->h2d_ifrm_indx_wr_buf.va);
8928 			offset = DHD_H2D_FRM_FLOW_RING_OFFSET(ringid);
8929 			break;
8930 
8931 		default:
8932 			DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
8933 				__FUNCTION__));
8934 			return;
8935 	}
8936 
8937 	ASSERT(prot->rw_index_sz != 0);
8938 	ptr += offset * prot->rw_index_sz;
8939 
8940 	*(uint16*)ptr = htol16(new_index);
8941 
8942 	OSL_CACHE_FLUSH((void *)ptr, prot->rw_index_sz);
8943 
8944 	DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n",
8945 		__FUNCTION__, new_index, type, ringid, ptr, offset));
8946 
8947 } /* dhd_prot_dma_indx_set */
8948 
8949 /**
8950  * dhd_prot_dma_indx_get - Fetch a WR or RD index from the dongle DMA-ed index
8951  * array.
8952  * Dongle DMAes an entire array to host memory (if the feature is enabled).
8953  * See dhd_prot_dma_indx_init()
8954  */
8955 static uint16
dhd_prot_dma_indx_get(dhd_pub_t * dhd,uint8 type,uint16 ringid)8956 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid)
8957 {
8958 	uint8 *ptr;
8959 	uint16 data;
8960 	uint16 offset;
8961 	dhd_prot_t *prot = dhd->prot;
8962 	uint16 max_h2d_rings = dhd->bus->max_submission_rings;
8963 
8964 	switch (type) {
8965 		case H2D_DMA_INDX_WR_UPD:
8966 			ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
8967 			offset = DHD_H2D_RING_OFFSET(ringid);
8968 			break;
8969 
8970 		case H2D_DMA_INDX_RD_UPD:
8971 			ptr = (uint8 *)(prot->h2d_dma_indx_rd_buf.va);
8972 			offset = DHD_H2D_RING_OFFSET(ringid);
8973 			break;
8974 
8975 		case D2H_DMA_INDX_WR_UPD:
8976 			ptr = (uint8 *)(prot->d2h_dma_indx_wr_buf.va);
8977 			offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
8978 			break;
8979 
8980 		case D2H_DMA_INDX_RD_UPD:
8981 			ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
8982 			offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
8983 			break;
8984 
8985 		default:
8986 			DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
8987 				__FUNCTION__));
8988 			return 0;
8989 	}
8990 
8991 	ASSERT(prot->rw_index_sz != 0);
8992 	ptr += offset * prot->rw_index_sz;
8993 
8994 	OSL_CACHE_INV((void *)ptr, prot->rw_index_sz);
8995 
8996 	data = LTOH16(*((uint16*)ptr));
8997 
8998 	DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n",
8999 		__FUNCTION__, data, type, ringid, ptr, offset));
9000 
9001 	return (data);
9002 
9003 } /* dhd_prot_dma_indx_get */
9004 
9005 /**
9006  * An array of DMA read/write indices, containing information about host rings, can be maintained
9007  * either in host memory or in device memory, dependent on preprocessor options. This function is,
9008  * dependent on these options, called during driver initialization. It reserves and initializes
9009  * blocks of DMA'able host memory containing an array of DMA read or DMA write indices. The physical
9010  * address of these host memory blocks are communicated to the dongle later on. By reading this host
9011  * memory, the dongle learns about the state of the host rings.
9012  */
9013 
9014 static INLINE int
dhd_prot_dma_indx_alloc(dhd_pub_t * dhd,uint8 type,dhd_dma_buf_t * dma_buf,uint32 bufsz)9015 dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
9016 	dhd_dma_buf_t *dma_buf, uint32 bufsz)
9017 {
9018 	int rc;
9019 
9020 	if ((dma_buf->len == bufsz) || (dma_buf->va != NULL))
9021 		return BCME_OK;
9022 
9023 	rc = dhd_dma_buf_alloc(dhd, dma_buf, bufsz);
9024 
9025 	return rc;
9026 }
9027 
9028 int
dhd_prot_dma_indx_init(dhd_pub_t * dhd,uint32 rw_index_sz,uint8 type,uint32 length)9029 dhd_prot_dma_indx_init(dhd_pub_t *dhd, uint32 rw_index_sz, uint8 type, uint32 length)
9030 {
9031 	uint32 bufsz;
9032 	dhd_prot_t *prot = dhd->prot;
9033 	dhd_dma_buf_t *dma_buf;
9034 
9035 	if (prot == NULL) {
9036 		DHD_ERROR(("prot is not inited\n"));
9037 		return BCME_ERROR;
9038 	}
9039 
9040 	/* Dongle advertizes 2B or 4B RW index size */
9041 	ASSERT(rw_index_sz != 0);
9042 	prot->rw_index_sz = rw_index_sz;
9043 
9044 	bufsz = rw_index_sz * length;
9045 
9046 	switch (type) {
9047 		case H2D_DMA_INDX_WR_BUF:
9048 			dma_buf = &prot->h2d_dma_indx_wr_buf;
9049 			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
9050 				goto ret_no_mem;
9051 			DHD_ERROR(("H2D DMA WR INDX : array size %d = %d * %d\n",
9052 				dma_buf->len, rw_index_sz, length));
9053 			break;
9054 
9055 		case H2D_DMA_INDX_RD_BUF:
9056 			dma_buf = &prot->h2d_dma_indx_rd_buf;
9057 			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
9058 				goto ret_no_mem;
9059 			DHD_ERROR(("H2D DMA RD INDX : array size %d = %d * %d\n",
9060 				dma_buf->len, rw_index_sz, length));
9061 			break;
9062 
9063 		case D2H_DMA_INDX_WR_BUF:
9064 			dma_buf = &prot->d2h_dma_indx_wr_buf;
9065 			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
9066 				goto ret_no_mem;
9067 			DHD_ERROR(("D2H DMA WR INDX : array size %d = %d * %d\n",
9068 				dma_buf->len, rw_index_sz, length));
9069 			break;
9070 
9071 		case D2H_DMA_INDX_RD_BUF:
9072 			dma_buf = &prot->d2h_dma_indx_rd_buf;
9073 			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
9074 				goto ret_no_mem;
9075 			DHD_ERROR(("D2H DMA RD INDX : array size %d = %d * %d\n",
9076 				dma_buf->len, rw_index_sz, length));
9077 			break;
9078 
9079 		case H2D_IFRM_INDX_WR_BUF:
9080 			dma_buf = &prot->h2d_ifrm_indx_wr_buf;
9081 			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
9082 				goto ret_no_mem;
9083 			DHD_ERROR(("H2D IFRM WR INDX : array size %d = %d * %d\n",
9084 				dma_buf->len, rw_index_sz, length));
9085 			break;
9086 
9087 		default:
9088 			DHD_ERROR(("%s: Unexpected option\n", __FUNCTION__));
9089 			return BCME_BADOPTION;
9090 	}
9091 
9092 	return BCME_OK;
9093 
9094 ret_no_mem:
9095 	DHD_ERROR(("%s: dhd_prot_dma_indx_alloc type %d buf_sz %d failure\n",
9096 		__FUNCTION__, type, bufsz));
9097 	return BCME_NOMEM;
9098 
9099 } /* dhd_prot_dma_indx_init */
9100 
9101 /**
9102  * Called on checking for 'completion' messages from the dongle. Returns next host buffer to read
9103  * from, or NULL if there are no more messages to read.
9104  */
9105 static uint8*
dhd_prot_get_read_addr(dhd_pub_t * dhd,msgbuf_ring_t * ring,uint32 * available_len)9106 dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 *available_len)
9107 {
9108 	uint16 wr;
9109 	uint16 rd;
9110 	uint16 depth;
9111 	uint16 items;
9112 	void  *read_addr = NULL; /* address of next msg to be read in ring */
9113 	uint16 d2h_wr = 0;
9114 
9115 	DHD_TRACE(("%s: d2h_dma_indx_rd_buf %p, d2h_dma_indx_wr_buf %p\n",
9116 		__FUNCTION__, (uint32 *)(dhd->prot->d2h_dma_indx_rd_buf.va),
9117 		(uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va)));
9118 
9119 	/* Remember the read index in a variable.
9120 	 * This is becuase ring->rd gets updated in the end of this function
9121 	 * So if we have to print the exact read index from which the
9122 	 * message is read its not possible.
9123 	 */
9124 	ring->curr_rd = ring->rd;
9125 
9126 	/* update write pointer */
9127 	if (dhd->dma_d2h_ring_upd_support) {
9128 		/* DMAing write/read indices supported */
9129 		d2h_wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
9130 		ring->wr = d2h_wr;
9131 	} else {
9132 		dhd_bus_cmn_readshared(dhd->bus, &(ring->wr), RING_WR_UPD, ring->idx);
9133 	}
9134 
9135 	wr = ring->wr;
9136 	rd = ring->rd;
9137 	depth = ring->max_items;
9138 
9139 	/* check for avail space, in number of ring items */
9140 	items = READ_AVAIL_SPACE(wr, rd, depth);
9141 	if (items == 0)
9142 		return NULL;
9143 
9144 	/*
9145 	 * Note that there are builds where Assert translates to just printk
9146 	 * so, even if we had hit this condition we would never halt. Now
9147 	 * dhd_prot_process_msgtype can get into an big loop if this
9148 	 * happens.
9149 	 */
9150 	if (items > ring->max_items) {
9151 		DHD_ERROR(("\r\n======================= \r\n"));
9152 		DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n",
9153 			__FUNCTION__, ring, ring->name, ring->max_items, items));
9154 		DHD_ERROR(("wr: %d,  rd: %d,  depth: %d  \r\n", wr, rd, depth));
9155 		DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n",
9156 			dhd->busstate, dhd->bus->wait_for_d3_ack));
9157 		DHD_ERROR(("\r\n======================= \r\n"));
9158 #ifdef DHD_FW_COREDUMP
9159 		if (dhd->memdump_enabled) {
9160 			/* collect core dump */
9161 			dhd->memdump_type = DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR;
9162 			dhd_bus_mem_dump(dhd);
9163 
9164 		}
9165 #endif /* DHD_FW_COREDUMP */
9166 
9167 		*available_len = 0;
9168 		dhd_schedule_reset(dhd);
9169 
9170 		return NULL;
9171 	}
9172 
9173 	/* if space is available, calculate address to be read */
9174 	read_addr = (char*)ring->dma_buf.va + (rd * ring->item_len);
9175 
9176 	/* update read pointer */
9177 	if ((ring->rd + items) >= ring->max_items)
9178 		ring->rd = 0;
9179 	else
9180 		ring->rd += items;
9181 
9182 	ASSERT(ring->rd < ring->max_items);
9183 
9184 	/* convert items to bytes : available_len must be 32bits */
9185 	*available_len = (uint32)(items * ring->item_len);
9186 
9187 	OSL_CACHE_INV(read_addr, *available_len);
9188 
9189 	/* return read address */
9190 	return read_addr;
9191 
9192 } /* dhd_prot_get_read_addr */
9193 
9194 /**
9195  * dhd_prot_h2d_mbdata_send_ctrlmsg is a non-atomic function,
9196  * make sure the callers always hold appropriate locks.
9197  */
dhd_prot_h2d_mbdata_send_ctrlmsg(dhd_pub_t * dhd,uint32 mb_data)9198 int dhd_prot_h2d_mbdata_send_ctrlmsg(dhd_pub_t *dhd, uint32 mb_data)
9199 {
9200 	h2d_mailbox_data_t *h2d_mb_data;
9201 	uint16 alloced = 0;
9202 	msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
9203 	unsigned long flags;
9204 	int num_post = 1;
9205 	int i;
9206 
9207 	DHD_INFO(("%s Sending H2D MB data Req data 0x%04x\n",
9208 		__FUNCTION__, mb_data));
9209 	if (!ctrl_ring->inited) {
9210 		DHD_ERROR(("%s: Ctrl Submit Ring: not inited\n", __FUNCTION__));
9211 		return BCME_ERROR;
9212 	}
9213 
9214 	for (i = 0; i < num_post; i ++) {
9215 		DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
9216 		/* Request for ring buffer space */
9217 		h2d_mb_data = (h2d_mailbox_data_t *)dhd_prot_alloc_ring_space(dhd,
9218 			ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
9219 			&alloced, FALSE);
9220 
9221 		if (h2d_mb_data == NULL) {
9222 			DHD_ERROR(("%s: FATAL: No space in control ring to send H2D Mb data\n",
9223 				__FUNCTION__));
9224 			DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
9225 			return BCME_NOMEM;
9226 		}
9227 
9228 		memset(h2d_mb_data, 0, sizeof(h2d_mailbox_data_t));
9229 		/* Common msg buf hdr */
9230 		h2d_mb_data->msg.msg_type = MSG_TYPE_H2D_MAILBOX_DATA;
9231 		h2d_mb_data->msg.flags = ctrl_ring->current_phase;
9232 
9233 		h2d_mb_data->msg.epoch =
9234 			ctrl_ring->seqnum % H2D_EPOCH_MODULO;
9235 		ctrl_ring->seqnum++;
9236 
9237 		/* Update flow create message */
9238 		h2d_mb_data->mail_box_data = htol32(mb_data);
9239 		{
9240 			h2d_mb_data->mail_box_data = htol32(mb_data);
9241 		}
9242 
9243 		DHD_INFO(("%s Send H2D MB data Req data 0x%04x\n", __FUNCTION__, mb_data));
9244 
9245 		/* upd wrt ptr and raise interrupt */
9246 		dhd_prot_ring_write_complete_mbdata(dhd, ctrl_ring, h2d_mb_data,
9247 			DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, mb_data);
9248 
9249 		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
9250 
9251 	}
9252 	return 0;
9253 }
9254 
9255 /** Creates a flow ring and informs dongle of this event */
9256 int
dhd_prot_flow_ring_create(dhd_pub_t * dhd,flow_ring_node_t * flow_ring_node)9257 dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
9258 {
9259 	tx_flowring_create_request_t *flow_create_rqst;
9260 	msgbuf_ring_t *flow_ring;
9261 	dhd_prot_t *prot = dhd->prot;
9262 	unsigned long flags;
9263 	uint16 alloced = 0;
9264 	msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
9265 	uint16 max_flowrings = dhd->bus->max_tx_flowrings;
9266 
9267 	/* Fetch a pre-initialized msgbuf_ring from the flowring pool */
9268 	flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
9269 	if (flow_ring == NULL) {
9270 		DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n",
9271 			__FUNCTION__, flow_ring_node->flowid));
9272 		return BCME_NOMEM;
9273 	}
9274 
9275 	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
9276 
9277 	/* Request for ctrl_ring buffer space */
9278 	flow_create_rqst = (tx_flowring_create_request_t *)
9279 		dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE);
9280 
9281 	if (flow_create_rqst == NULL) {
9282 		dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
9283 		DHD_ERROR(("%s: Flow Create Req flowid %d - failure ring space\n",
9284 			__FUNCTION__, flow_ring_node->flowid));
9285 		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
9286 		return BCME_NOMEM;
9287 	}
9288 
9289 	flow_ring_node->prot_info = (void *)flow_ring;
9290 
9291 	/* Common msg buf hdr */
9292 	flow_create_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_CREATE;
9293 	flow_create_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
9294 	flow_create_rqst->msg.request_id = htol32(0); /* TBD */
9295 	flow_create_rqst->msg.flags = ctrl_ring->current_phase;
9296 
9297 	flow_create_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
9298 	ctrl_ring->seqnum++;
9299 
9300 	/* Update flow create message */
9301 	flow_create_rqst->tid = flow_ring_node->flow_info.tid;
9302 	flow_create_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
9303 	memcpy(flow_create_rqst->sa, flow_ring_node->flow_info.sa, sizeof(flow_create_rqst->sa));
9304 	memcpy(flow_create_rqst->da, flow_ring_node->flow_info.da, sizeof(flow_create_rqst->da));
9305 	/* CAUTION: ring::base_addr already in Little Endian */
9306 	flow_create_rqst->flow_ring_ptr.low_addr = flow_ring->base_addr.low_addr;
9307 	flow_create_rqst->flow_ring_ptr.high_addr = flow_ring->base_addr.high_addr;
9308 	flow_create_rqst->max_items = htol16(prot->h2d_max_txpost);
9309 	flow_create_rqst->len_item = htol16(H2DRING_TXPOST_ITEMSIZE);
9310 	flow_create_rqst->if_flags = 0;
9311 
9312 #ifdef DHD_HP2P
9313 	/* Create HPP flow ring if HP2P is enabled and TID=7  and AWDL interface */
9314 	/* and traffic is not multicast */
9315 	/* Allow infra interface only if user enabled hp2p_infra_enable thru iovar */
9316 	/* Allow only one HP2P Flow active at a time */
9317 	if (dhd->hp2p_capable && !dhd->hp2p_ring_active &&
9318 		flow_ring_node->flow_info.tid == HP2P_PRIO &&
9319 		(dhd->hp2p_infra_enable || flow_create_rqst->msg.if_id) &&
9320 		!ETHER_ISMULTI(flow_create_rqst->da)) {
9321 		flow_create_rqst->if_flags |= BCMPCIE_FLOW_RING_INTF_HP2P;
9322 		flow_ring_node->hp2p_ring = TRUE;
9323 		dhd->hp2p_ring_active = TRUE;
9324 
9325 		DHD_ERROR(("%s: flow ring for HP2P tid = %d flowid = %d\n",
9326 				__FUNCTION__, flow_ring_node->flow_info.tid,
9327 				flow_ring_node->flowid));
9328 	}
9329 #endif /* DHD_HP2P */
9330 
9331 	/* definition for ifrm mask : bit0:d11ac core, bit1:d11ad core
9332 	 * currently it is not used for priority. so uses solely for ifrm mask
9333 	 */
9334 	if (IFRM_ACTIVE(dhd))
9335 		flow_create_rqst->priority_ifrmmask = (1 << IFRM_DEV_0);
9336 
9337 	DHD_ERROR(("%s: Send Flow Create Req flow ID %d for peer " MACDBG
9338 		" prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid,
9339 		MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid,
9340 		flow_ring_node->flow_info.ifindex));
9341 
9342 	/* Update the flow_ring's WRITE index */
9343 	if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
9344 		dhd_prot_dma_indx_set(dhd, flow_ring->wr,
9345 		                      H2D_DMA_INDX_WR_UPD, flow_ring->idx);
9346 	} else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(flow_ring->idx, max_flowrings)) {
9347 		dhd_prot_dma_indx_set(dhd, flow_ring->wr,
9348 			H2D_IFRM_INDX_WR_UPD, flow_ring->idx);
9349 	} else {
9350 		dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
9351 			sizeof(uint16), RING_WR_UPD, flow_ring->idx);
9352 	}
9353 
9354 	/* update control subn ring's WR index and ring doorbell to dongle */
9355 	dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_create_rqst, 1);
9356 
9357 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
9358 
9359 	return BCME_OK;
9360 } /* dhd_prot_flow_ring_create */
9361 
9362 /** called on receiving MSG_TYPE_FLOW_RING_CREATE_CMPLT message from dongle */
9363 static void
dhd_prot_flow_ring_create_response_process(dhd_pub_t * dhd,void * msg)9364 dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg)
9365 {
9366 	tx_flowring_create_response_t *flow_create_resp = (tx_flowring_create_response_t *)msg;
9367 
9368 	DHD_ERROR(("%s: Flow Create Response status = %d Flow %d\n", __FUNCTION__,
9369 		ltoh16(flow_create_resp->cmplt.status),
9370 		ltoh16(flow_create_resp->cmplt.flow_ring_id)));
9371 
9372 	dhd_bus_flow_ring_create_response(dhd->bus,
9373 		ltoh16(flow_create_resp->cmplt.flow_ring_id),
9374 		ltoh16(flow_create_resp->cmplt.status));
9375 }
9376 
9377 static void
dhd_prot_process_h2d_ring_create_complete(dhd_pub_t * dhd,void * buf)9378 dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf)
9379 {
9380 	h2d_ring_create_response_t *resp = (h2d_ring_create_response_t *)buf;
9381 	DHD_INFO(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__,
9382 		ltoh16(resp->cmplt.status),
9383 		ltoh16(resp->cmplt.ring_id),
9384 		ltoh32(resp->cmn_hdr.request_id)));
9385 	if ((ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_DBGRING_REQ_PKTID) &&
9386 		(ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_BTLOGRING_REQ_PKTID)) {
9387 		DHD_ERROR(("invalid request ID with h2d ring create complete\n"));
9388 		return;
9389 	}
9390 	if (dhd->prot->h2dring_info_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id) &&
9391 		!dhd->prot->h2dring_info_subn->create_pending) {
9392 		DHD_ERROR(("info ring create status for not pending submit ring\n"));
9393 	}
9394 
9395 	if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
9396 		DHD_ERROR(("info/btlog ring create failed with status %d\n",
9397 			ltoh16(resp->cmplt.status)));
9398 		return;
9399 	}
9400 	if (dhd->prot->h2dring_info_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id)) {
9401 		dhd->prot->h2dring_info_subn->create_pending = FALSE;
9402 		dhd->prot->h2dring_info_subn->inited = TRUE;
9403 		DHD_ERROR(("info buffer post after ring create\n"));
9404 		dhd_prot_infobufpost(dhd, dhd->prot->h2dring_info_subn);
9405 	}
9406 }
9407 
9408 static void
dhd_prot_process_d2h_ring_create_complete(dhd_pub_t * dhd,void * buf)9409 dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf)
9410 {
9411 	d2h_ring_create_response_t *resp = (d2h_ring_create_response_t *)buf;
9412 	DHD_INFO(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__,
9413 		ltoh16(resp->cmplt.status),
9414 		ltoh16(resp->cmplt.ring_id),
9415 		ltoh32(resp->cmn_hdr.request_id)));
9416 	if ((ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_DBGRING_REQ_PKTID) &&
9417 		(ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_BTLOGRING_REQ_PKTID) &&
9418 #ifdef DHD_HP2P
9419 		(ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_HPPRING_TXREQ_PKTID) &&
9420 		(ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_HPPRING_RXREQ_PKTID) &&
9421 #endif /* DHD_HP2P */
9422 		TRUE) {
9423 		DHD_ERROR(("invalid request ID with d2h ring create complete\n"));
9424 		return;
9425 	}
9426 	if (ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_DBGRING_REQ_PKTID) {
9427 #ifdef EWP_EDL
9428 		if (!dhd->dongle_edl_support)
9429 #endif // endif
9430 		{
9431 			if (!dhd->prot->d2hring_info_cpln->create_pending) {
9432 				DHD_ERROR(("info ring create status for not pending cpl ring\n"));
9433 				return;
9434 			}
9435 
9436 			if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
9437 				DHD_ERROR(("info cpl ring create failed with status %d\n",
9438 					ltoh16(resp->cmplt.status)));
9439 				return;
9440 			}
9441 			dhd->prot->d2hring_info_cpln->create_pending = FALSE;
9442 			dhd->prot->d2hring_info_cpln->inited = TRUE;
9443 		}
9444 #ifdef EWP_EDL
9445 		else {
9446 			if (!dhd->prot->d2hring_edl->create_pending) {
9447 				DHD_ERROR(("edl ring create status for not pending cpl ring\n"));
9448 				return;
9449 			}
9450 
9451 			if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
9452 				DHD_ERROR(("edl cpl ring create failed with status %d\n",
9453 					ltoh16(resp->cmplt.status)));
9454 				return;
9455 			}
9456 			dhd->prot->d2hring_edl->create_pending = FALSE;
9457 			dhd->prot->d2hring_edl->inited = TRUE;
9458 		}
9459 #endif /* EWP_EDL */
9460 	}
9461 
9462 #ifdef DHD_HP2P
9463 	if (dhd->prot->d2hring_hp2p_txcpl &&
9464 		ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_HPPRING_TXREQ_PKTID) {
9465 		if (!dhd->prot->d2hring_hp2p_txcpl->create_pending) {
9466 			DHD_ERROR(("HPP tx ring create status for not pending cpl ring\n"));
9467 			return;
9468 		}
9469 
9470 		if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
9471 			DHD_ERROR(("HPP tx cpl ring create failed with status %d\n",
9472 				ltoh16(resp->cmplt.status)));
9473 			return;
9474 		}
9475 		dhd->prot->d2hring_hp2p_txcpl->create_pending = FALSE;
9476 		dhd->prot->d2hring_hp2p_txcpl->inited = TRUE;
9477 	}
9478 	if (dhd->prot->d2hring_hp2p_rxcpl &&
9479 		ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_HPPRING_RXREQ_PKTID) {
9480 		if (!dhd->prot->d2hring_hp2p_rxcpl->create_pending) {
9481 			DHD_ERROR(("HPP rx ring create status for not pending cpl ring\n"));
9482 			return;
9483 		}
9484 
9485 		if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
9486 			DHD_ERROR(("HPP rx cpl ring create failed with status %d\n",
9487 				ltoh16(resp->cmplt.status)));
9488 			return;
9489 		}
9490 		dhd->prot->d2hring_hp2p_rxcpl->create_pending = FALSE;
9491 		dhd->prot->d2hring_hp2p_rxcpl->inited = TRUE;
9492 	}
9493 #endif /* DHD_HP2P */
9494 }
9495 
9496 static void
dhd_prot_process_d2h_mb_data(dhd_pub_t * dhd,void * buf)9497 dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf)
9498 {
9499 	d2h_mailbox_data_t *d2h_data;
9500 
9501 	d2h_data = (d2h_mailbox_data_t *)buf;
9502 	DHD_INFO(("%s dhd_prot_process_d2h_mb_data, 0x%04x\n", __FUNCTION__,
9503 		d2h_data->d2h_mailbox_data));
9504 	dhd_bus_handle_mb_data(dhd->bus, d2h_data->d2h_mailbox_data);
9505 }
9506 
9507 static void
dhd_prot_process_d2h_host_ts_complete(dhd_pub_t * dhd,void * buf)9508 dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf)
9509 {
9510 	DHD_ERROR(("Timesunc feature not compiled in but GOT HOST_TS_COMPLETE\n"));
9511 
9512 }
9513 
9514 /** called on e.g. flow ring delete */
dhd_prot_clean_flow_ring(dhd_pub_t * dhd,void * msgbuf_flow_info)9515 void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info)
9516 {
9517 	msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
9518 	dhd_prot_ring_detach(dhd, flow_ring);
9519 	DHD_INFO(("%s Cleaning up Flow \n", __FUNCTION__));
9520 }
9521 
dhd_prot_print_flow_ring(dhd_pub_t * dhd,void * msgbuf_flow_info,struct bcmstrbuf * strbuf,const char * fmt)9522 void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info,
9523 	struct bcmstrbuf *strbuf, const char * fmt)
9524 {
9525 	const char *default_fmt =
9526 		"RD %d WR %d BASE(VA) %p BASE(PA) %x:%x SIZE %d "
9527 		"WORK_ITEM_SIZE %d MAX_WORK_ITEMS %d TOTAL_SIZE %d\n";
9528 	msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
9529 	uint16 rd, wr;
9530 	uint32 dma_buf_len = flow_ring->max_items * flow_ring->item_len;
9531 
9532 	if (fmt == NULL) {
9533 		fmt = default_fmt;
9534 	}
9535 
9536 	if (dhd->bus->is_linkdown) {
9537 		DHD_ERROR(("%s: Skip dumping flowring due to Link down\n", __FUNCTION__));
9538 		return;
9539 	}
9540 
9541 	dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
9542 	dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
9543 	bcm_bprintf(strbuf, fmt, rd, wr, flow_ring->dma_buf.va,
9544 		ltoh32(flow_ring->base_addr.high_addr),
9545 		ltoh32(flow_ring->base_addr.low_addr),
9546 		flow_ring->item_len, flow_ring->max_items,
9547 		dma_buf_len);
9548 }
9549 
dhd_prot_print_info(dhd_pub_t * dhd,struct bcmstrbuf * strbuf)9550 void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
9551 {
9552 	dhd_prot_t *prot = dhd->prot;
9553 	bcm_bprintf(strbuf, "IPCrevs: Dev %d, \t Host %d, \tactive %d\n",
9554 		dhd->prot->device_ipc_version,
9555 		dhd->prot->host_ipc_version,
9556 		dhd->prot->active_ipc_version);
9557 
9558 	bcm_bprintf(strbuf, "max Host TS bufs to post: %d, \t posted %d \n",
9559 		dhd->prot->max_tsbufpost, dhd->prot->cur_ts_bufs_posted);
9560 	bcm_bprintf(strbuf, "max INFO bufs to post: %d, \t posted %d \n",
9561 		dhd->prot->max_infobufpost, dhd->prot->infobufpost);
9562 	bcm_bprintf(strbuf, "max event bufs to post: %d, \t posted %d \n",
9563 		dhd->prot->max_eventbufpost, dhd->prot->cur_event_bufs_posted);
9564 	bcm_bprintf(strbuf, "max ioctlresp bufs to post: %d, \t posted %d \n",
9565 		dhd->prot->max_ioctlrespbufpost, dhd->prot->cur_ioctlresp_bufs_posted);
9566 	bcm_bprintf(strbuf, "max RX bufs to post: %d, \t posted %d \n",
9567 		dhd->prot->max_rxbufpost, dhd->prot->rxbufpost);
9568 
9569 	bcm_bprintf(strbuf,
9570 		"%14s %5s %5s %17s %17s %14s %14s %10s\n",
9571 		"Type", "RD", "WR", "BASE(VA)", "BASE(PA)",
9572 		"WORK_ITEM_SIZE", "MAX_WORK_ITEMS", "TOTAL_SIZE");
9573 	bcm_bprintf(strbuf, "%14s", "H2DCtrlPost");
9574 	dhd_prot_print_flow_ring(dhd, &prot->h2dring_ctrl_subn, strbuf,
9575 		" %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9576 	bcm_bprintf(strbuf, "%14s", "D2HCtrlCpl");
9577 	dhd_prot_print_flow_ring(dhd, &prot->d2hring_ctrl_cpln, strbuf,
9578 		" %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9579 	bcm_bprintf(strbuf, "%14s", "H2DRxPost", prot->rxbufpost);
9580 	dhd_prot_print_flow_ring(dhd, &prot->h2dring_rxp_subn, strbuf,
9581 		" %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9582 	bcm_bprintf(strbuf, "%14s", "D2HRxCpl");
9583 	dhd_prot_print_flow_ring(dhd, &prot->d2hring_rx_cpln, strbuf,
9584 		" %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9585 	bcm_bprintf(strbuf, "%14s", "D2HTxCpl");
9586 	dhd_prot_print_flow_ring(dhd, &prot->d2hring_tx_cpln, strbuf,
9587 		" %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9588 	if (dhd->prot->h2dring_info_subn != NULL && dhd->prot->d2hring_info_cpln != NULL) {
9589 		bcm_bprintf(strbuf, "%14s", "H2DRingInfoSub");
9590 		dhd_prot_print_flow_ring(dhd, prot->h2dring_info_subn, strbuf,
9591 			" %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9592 		bcm_bprintf(strbuf, "%14s", "D2HRingInfoCpl");
9593 		dhd_prot_print_flow_ring(dhd, prot->d2hring_info_cpln, strbuf,
9594 			" %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9595 	}
9596 	if (dhd->prot->d2hring_edl != NULL) {
9597 		bcm_bprintf(strbuf, "%14s", "D2HRingEDL");
9598 		dhd_prot_print_flow_ring(dhd, prot->d2hring_edl, strbuf,
9599 			" %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9600 	}
9601 
9602 	bcm_bprintf(strbuf, "active_tx_count %d	 pktidmap_avail(ctrl/rx/tx) %d %d %d\n",
9603 		OSL_ATOMIC_READ(dhd->osh, &dhd->prot->active_tx_count),
9604 		DHD_PKTID_AVAIL(dhd->prot->pktid_ctrl_map),
9605 		DHD_PKTID_AVAIL(dhd->prot->pktid_rx_map),
9606 		DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map));
9607 
9608 }
9609 
9610 int
dhd_prot_flow_ring_delete(dhd_pub_t * dhd,flow_ring_node_t * flow_ring_node)9611 dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
9612 {
9613 	tx_flowring_delete_request_t *flow_delete_rqst;
9614 	dhd_prot_t *prot = dhd->prot;
9615 	unsigned long flags;
9616 	uint16 alloced = 0;
9617 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
9618 
9619 	DHD_RING_LOCK(ring->ring_lock, flags);
9620 
9621 	/* Request for ring buffer space */
9622 	flow_delete_rqst = (tx_flowring_delete_request_t *)
9623 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
9624 
9625 	if (flow_delete_rqst == NULL) {
9626 		DHD_RING_UNLOCK(ring->ring_lock, flags);
9627 		DHD_ERROR(("%s: Flow Delete Req - failure ring space\n", __FUNCTION__));
9628 		return BCME_NOMEM;
9629 	}
9630 
9631 	/* Common msg buf hdr */
9632 	flow_delete_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_DELETE;
9633 	flow_delete_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
9634 	flow_delete_rqst->msg.request_id = htol32(0); /* TBD */
9635 	flow_delete_rqst->msg.flags = ring->current_phase;
9636 
9637 	flow_delete_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
9638 	ring->seqnum++;
9639 
9640 	/* Update Delete info */
9641 	flow_delete_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
9642 	flow_delete_rqst->reason = htol16(BCME_OK);
9643 
9644 	DHD_ERROR(("%s: Send Flow Delete Req RING ID %d for peer %pM"
9645 		" prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid,
9646 		flow_ring_node->flow_info.da, flow_ring_node->flow_info.tid,
9647 		flow_ring_node->flow_info.ifindex));
9648 
9649 	/* update ring's WR index and ring doorbell to dongle */
9650 	dhd_prot_ring_write_complete(dhd, ring, flow_delete_rqst, 1);
9651 
9652 	DHD_RING_UNLOCK(ring->ring_lock, flags);
9653 
9654 	return BCME_OK;
9655 }
9656 
9657 static void BCMFASTPATH
dhd_prot_flow_ring_fastdelete(dhd_pub_t * dhd,uint16 flowid,uint16 rd_idx)9658 dhd_prot_flow_ring_fastdelete(dhd_pub_t *dhd, uint16 flowid, uint16 rd_idx)
9659 {
9660 	flow_ring_node_t *flow_ring_node = DHD_FLOW_RING(dhd, flowid);
9661 	msgbuf_ring_t *ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
9662 	host_txbuf_cmpl_t txstatus;
9663 	host_txbuf_post_t *txdesc;
9664 	uint16 wr_idx;
9665 
9666 	DHD_INFO(("%s: FAST delete ring, flowid=%d, rd_idx=%d, wr_idx=%d\n",
9667 		__FUNCTION__, flowid, rd_idx, ring->wr));
9668 
9669 	memset(&txstatus, 0, sizeof(txstatus));
9670 	txstatus.compl_hdr.flow_ring_id = flowid;
9671 	txstatus.cmn_hdr.if_id = flow_ring_node->flow_info.ifindex;
9672 	wr_idx = ring->wr;
9673 
9674 	while (wr_idx != rd_idx) {
9675 		if (wr_idx)
9676 			wr_idx--;
9677 		else
9678 			wr_idx = ring->max_items - 1;
9679 		txdesc = (host_txbuf_post_t *)((char *)DHD_RING_BGN_VA(ring) +
9680 			(wr_idx * ring->item_len));
9681 		txstatus.cmn_hdr.request_id = txdesc->cmn_hdr.request_id;
9682 		dhd_prot_txstatus_process(dhd, &txstatus);
9683 	}
9684 }
9685 
9686 static void
dhd_prot_flow_ring_delete_response_process(dhd_pub_t * dhd,void * msg)9687 dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg)
9688 {
9689 	tx_flowring_delete_response_t *flow_delete_resp = (tx_flowring_delete_response_t *)msg;
9690 
9691 	DHD_ERROR(("%s: Flow Delete Response status = %d Flow %d\n", __FUNCTION__,
9692 		flow_delete_resp->cmplt.status, flow_delete_resp->cmplt.flow_ring_id));
9693 
9694 	if (dhd->fast_delete_ring_support) {
9695 		dhd_prot_flow_ring_fastdelete(dhd, flow_delete_resp->cmplt.flow_ring_id,
9696 			flow_delete_resp->read_idx);
9697 	}
9698 	dhd_bus_flow_ring_delete_response(dhd->bus, flow_delete_resp->cmplt.flow_ring_id,
9699 		flow_delete_resp->cmplt.status);
9700 }
9701 
9702 static void
dhd_prot_process_flow_ring_resume_response(dhd_pub_t * dhd,void * msg)9703 dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg)
9704 {
9705 #ifdef IDLE_TX_FLOW_MGMT
9706 	tx_idle_flowring_resume_response_t	*flow_resume_resp =
9707 		(tx_idle_flowring_resume_response_t *)msg;
9708 
9709 	DHD_ERROR(("%s Flow resume Response status = %d Flow %d\n", __FUNCTION__,
9710 		flow_resume_resp->cmplt.status, flow_resume_resp->cmplt.flow_ring_id));
9711 
9712 	dhd_bus_flow_ring_resume_response(dhd->bus, flow_resume_resp->cmplt.flow_ring_id,
9713 		flow_resume_resp->cmplt.status);
9714 #endif /* IDLE_TX_FLOW_MGMT */
9715 }
9716 
9717 static void
dhd_prot_process_flow_ring_suspend_response(dhd_pub_t * dhd,void * msg)9718 dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg)
9719 {
9720 #ifdef IDLE_TX_FLOW_MGMT
9721 	int16 status;
9722 	tx_idle_flowring_suspend_response_t	*flow_suspend_resp =
9723 		(tx_idle_flowring_suspend_response_t *)msg;
9724 	status = flow_suspend_resp->cmplt.status;
9725 
9726 	DHD_ERROR(("%s Flow id %d suspend Response status = %d\n",
9727 		__FUNCTION__, flow_suspend_resp->cmplt.flow_ring_id,
9728 		status));
9729 
9730 	if (status != BCME_OK) {
9731 
9732 		DHD_ERROR(("%s Error in Suspending Flow rings!!"
9733 			"Dongle will still be polling idle rings!!Status = %d \n",
9734 			__FUNCTION__, status));
9735 	}
9736 #endif /* IDLE_TX_FLOW_MGMT */
9737 }
9738 
9739 int
dhd_prot_flow_ring_flush(dhd_pub_t * dhd,flow_ring_node_t * flow_ring_node)9740 dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
9741 {
9742 	tx_flowring_flush_request_t *flow_flush_rqst;
9743 	dhd_prot_t *prot = dhd->prot;
9744 	unsigned long flags;
9745 	uint16 alloced = 0;
9746 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
9747 
9748 	DHD_RING_LOCK(ring->ring_lock, flags);
9749 
9750 	/* Request for ring buffer space */
9751 	flow_flush_rqst = (tx_flowring_flush_request_t *)
9752 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
9753 	if (flow_flush_rqst == NULL) {
9754 		DHD_RING_UNLOCK(ring->ring_lock, flags);
9755 		DHD_ERROR(("%s: Flow Flush Req - failure ring space\n", __FUNCTION__));
9756 		return BCME_NOMEM;
9757 	}
9758 
9759 	/* Common msg buf hdr */
9760 	flow_flush_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_FLUSH;
9761 	flow_flush_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
9762 	flow_flush_rqst->msg.request_id = htol32(0); /* TBD */
9763 	flow_flush_rqst->msg.flags = ring->current_phase;
9764 	flow_flush_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
9765 	ring->seqnum++;
9766 
9767 	flow_flush_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
9768 	flow_flush_rqst->reason = htol16(BCME_OK);
9769 
9770 	DHD_INFO(("%s: Send Flow Flush Req\n", __FUNCTION__));
9771 
9772 	/* update ring's WR index and ring doorbell to dongle */
9773 	dhd_prot_ring_write_complete(dhd, ring, flow_flush_rqst, 1);
9774 
9775 	DHD_RING_UNLOCK(ring->ring_lock, flags);
9776 
9777 	return BCME_OK;
9778 } /* dhd_prot_flow_ring_flush */
9779 
9780 static void
dhd_prot_flow_ring_flush_response_process(dhd_pub_t * dhd,void * msg)9781 dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg)
9782 {
9783 	tx_flowring_flush_response_t *flow_flush_resp = (tx_flowring_flush_response_t *)msg;
9784 
9785 	DHD_INFO(("%s: Flow Flush Response status = %d\n", __FUNCTION__,
9786 		flow_flush_resp->cmplt.status));
9787 
9788 	dhd_bus_flow_ring_flush_response(dhd->bus, flow_flush_resp->cmplt.flow_ring_id,
9789 		flow_flush_resp->cmplt.status);
9790 }
9791 
9792 /**
9793  * Request dongle to configure soft doorbells for D2H rings. Host populated soft
9794  * doorbell information is transferred to dongle via the d2h ring config control
9795  * message.
9796  */
9797 void
dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t * dhd)9798 dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd)
9799 {
9800 #if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
9801 	uint16 ring_idx;
9802 	uint8 *msg_next;
9803 	void *msg_start;
9804 	uint16 alloced = 0;
9805 	unsigned long flags;
9806 	dhd_prot_t *prot = dhd->prot;
9807 	ring_config_req_t *ring_config_req;
9808 	bcmpcie_soft_doorbell_t *soft_doorbell;
9809 	msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
9810 	const uint16 d2h_rings = BCMPCIE_D2H_COMMON_MSGRINGS;
9811 
9812 	/* Claim space for d2h_ring number of d2h_ring_config_req_t messages */
9813 	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
9814 	msg_start = dhd_prot_alloc_ring_space(dhd, ctrl_ring, d2h_rings, &alloced, TRUE);
9815 
9816 	if (msg_start == NULL) {
9817 		DHD_ERROR(("%s Msgbuf no space for %d D2H ring config soft doorbells\n",
9818 			__FUNCTION__, d2h_rings));
9819 		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
9820 		return;
9821 	}
9822 
9823 	msg_next = (uint8*)msg_start;
9824 
9825 	for (ring_idx = 0; ring_idx < d2h_rings; ring_idx++) {
9826 
9827 		/* position the ring_config_req into the ctrl subm ring */
9828 		ring_config_req = (ring_config_req_t *)msg_next;
9829 
9830 		/* Common msg header */
9831 		ring_config_req->msg.msg_type = MSG_TYPE_D2H_RING_CONFIG;
9832 		ring_config_req->msg.if_id = 0;
9833 		ring_config_req->msg.flags = 0;
9834 
9835 		ring_config_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
9836 		ctrl_ring->seqnum++;
9837 
9838 		ring_config_req->msg.request_id = htol32(DHD_FAKE_PKTID); /* unused */
9839 
9840 		/* Ring Config subtype and d2h ring_id */
9841 		ring_config_req->subtype = htol16(D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL);
9842 		ring_config_req->ring_id = htol16(DHD_D2H_RINGID(ring_idx));
9843 
9844 		/* Host soft doorbell configuration */
9845 		soft_doorbell = &prot->soft_doorbell[ring_idx];
9846 
9847 		ring_config_req->soft_doorbell.value = htol32(soft_doorbell->value);
9848 		ring_config_req->soft_doorbell.haddr.high =
9849 			htol32(soft_doorbell->haddr.high);
9850 		ring_config_req->soft_doorbell.haddr.low =
9851 			htol32(soft_doorbell->haddr.low);
9852 		ring_config_req->soft_doorbell.items = htol16(soft_doorbell->items);
9853 		ring_config_req->soft_doorbell.msecs = htol16(soft_doorbell->msecs);
9854 
9855 		DHD_INFO(("%s: Soft doorbell haddr 0x%08x 0x%08x value 0x%08x\n",
9856 			__FUNCTION__, ring_config_req->soft_doorbell.haddr.high,
9857 			ring_config_req->soft_doorbell.haddr.low,
9858 			ring_config_req->soft_doorbell.value));
9859 
9860 		msg_next = msg_next + ctrl_ring->item_len;
9861 	}
9862 
9863 	/* update control subn ring's WR index and ring doorbell to dongle */
9864 	dhd_prot_ring_write_complete(dhd, ctrl_ring, msg_start, d2h_rings);
9865 
9866 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
9867 
9868 #endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
9869 }
9870 
9871 static void
dhd_prot_process_d2h_ring_config_complete(dhd_pub_t * dhd,void * msg)9872 dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg)
9873 {
9874 	DHD_INFO(("%s: Ring Config Response - status %d ringid %d\n",
9875 		__FUNCTION__, ltoh16(((ring_config_resp_t *)msg)->compl_hdr.status),
9876 		ltoh16(((ring_config_resp_t *)msg)->compl_hdr.flow_ring_id)));
9877 }
9878 
9879 int
dhd_prot_debug_info_print(dhd_pub_t * dhd)9880 dhd_prot_debug_info_print(dhd_pub_t *dhd)
9881 {
9882 	dhd_prot_t *prot = dhd->prot;
9883 	msgbuf_ring_t *ring;
9884 	uint16 rd, wr;
9885 	uint32 dma_buf_len;
9886 	uint64 current_time;
9887 	ulong ring_tcm_rd_addr; /* dongle address */
9888 	ulong ring_tcm_wr_addr; /* dongle address */
9889 
9890 	DHD_ERROR(("\n ------- DUMPING VERSION INFORMATION ------- \r\n"));
9891 	DHD_ERROR(("DHD: %s\n", dhd_version));
9892 	DHD_ERROR(("Firmware: %s\n", fw_version));
9893 
9894 #ifdef DHD_FW_COREDUMP
9895 	DHD_ERROR(("\n ------- DUMPING CONFIGURATION INFORMATION ------ \r\n"));
9896 	DHD_ERROR(("memdump mode: %d\n", dhd->memdump_enabled));
9897 #endif /* DHD_FW_COREDUMP */
9898 
9899 	DHD_ERROR(("\n ------- DUMPING PROTOCOL INFORMATION ------- \r\n"));
9900 	DHD_ERROR(("ICPrevs: Dev %d, Host %d, active %d\n",
9901 		prot->device_ipc_version,
9902 		prot->host_ipc_version,
9903 		prot->active_ipc_version));
9904 	DHD_ERROR(("d2h_intr_method -> %s\n",
9905 			dhd->bus->d2h_intr_method ? "PCIE_MSI" : "PCIE_INTX"));
9906 	DHD_ERROR(("max Host TS bufs to post: %d, posted %d\n",
9907 		prot->max_tsbufpost, prot->cur_ts_bufs_posted));
9908 	DHD_ERROR(("max INFO bufs to post: %d, posted %d\n",
9909 		prot->max_infobufpost, prot->infobufpost));
9910 	DHD_ERROR(("max event bufs to post: %d, posted %d\n",
9911 		prot->max_eventbufpost, prot->cur_event_bufs_posted));
9912 	DHD_ERROR(("max ioctlresp bufs to post: %d, posted %d\n",
9913 		prot->max_ioctlrespbufpost, prot->cur_ioctlresp_bufs_posted));
9914 	DHD_ERROR(("max RX bufs to post: %d, posted %d\n",
9915 		prot->max_rxbufpost, prot->rxbufpost));
9916 	DHD_ERROR(("h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
9917 		h2d_max_txpost, prot->h2d_max_txpost));
9918 
9919 	current_time = OSL_LOCALTIME_NS();
9920 	DHD_ERROR(("current_time="SEC_USEC_FMT"\n", GET_SEC_USEC(current_time)));
9921 	DHD_ERROR(("ioctl_fillup_time="SEC_USEC_FMT
9922 		" ioctl_ack_time="SEC_USEC_FMT
9923 		" ioctl_cmplt_time="SEC_USEC_FMT"\n",
9924 		GET_SEC_USEC(prot->ioctl_fillup_time),
9925 		GET_SEC_USEC(prot->ioctl_ack_time),
9926 		GET_SEC_USEC(prot->ioctl_cmplt_time)));
9927 
9928 	/* Check PCIe INT registers */
9929 	if (!dhd_pcie_dump_int_regs(dhd)) {
9930 		DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
9931 		dhd->bus->is_linkdown = TRUE;
9932 	}
9933 
9934 	DHD_ERROR(("\n ------- DUMPING IOCTL RING RD WR Pointers ------- \r\n"));
9935 
9936 	ring = &prot->h2dring_ctrl_subn;
9937 	dma_buf_len = ring->max_items * ring->item_len;
9938 	ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
9939 	ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
9940 	DHD_ERROR(("CtrlPost: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
9941 		"SIZE %d \r\n",
9942 		ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
9943 		ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, dma_buf_len));
9944 	DHD_ERROR(("CtrlPost: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
9945 	if (dhd->bus->is_linkdown) {
9946 		DHD_ERROR(("CtrlPost: From Shared Mem: RD and WR are invalid"
9947 			" due to PCIe link down\r\n"));
9948 	} else {
9949 		dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
9950 		dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
9951 		DHD_ERROR(("CtrlPost: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
9952 	}
9953 	DHD_ERROR(("CtrlPost: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
9954 
9955 	ring = &prot->d2hring_ctrl_cpln;
9956 	dma_buf_len = ring->max_items * ring->item_len;
9957 	ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
9958 	ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
9959 	DHD_ERROR(("CtrlCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
9960 		"SIZE %d \r\n",
9961 		ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
9962 		ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, dma_buf_len));
9963 	DHD_ERROR(("CtrlCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
9964 	if (dhd->bus->is_linkdown) {
9965 		DHD_ERROR(("CtrlCpl: From Shared Mem: RD and WR are invalid"
9966 			" due to PCIe link down\r\n"));
9967 	} else {
9968 		dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
9969 		dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
9970 		DHD_ERROR(("CtrlCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
9971 	}
9972 	DHD_ERROR(("CtrlCpl: Expected seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
9973 
9974 	ring = prot->h2dring_info_subn;
9975 	if (ring) {
9976 		dma_buf_len = ring->max_items * ring->item_len;
9977 		ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
9978 		ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
9979 		DHD_ERROR(("InfoSub: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
9980 			"SIZE %d \r\n",
9981 			ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
9982 			ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
9983 			dma_buf_len));
9984 		DHD_ERROR(("InfoSub: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
9985 		if (dhd->bus->is_linkdown) {
9986 			DHD_ERROR(("InfoSub: From Shared Mem: RD and WR are invalid"
9987 				" due to PCIe link down\r\n"));
9988 		} else {
9989 			dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
9990 			dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
9991 			DHD_ERROR(("InfoSub: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
9992 		}
9993 		DHD_ERROR(("InfoSub: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
9994 	}
9995 	ring = prot->d2hring_info_cpln;
9996 	if (ring) {
9997 		dma_buf_len = ring->max_items * ring->item_len;
9998 		ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
9999 		ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
10000 		DHD_ERROR(("InfoCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
10001 			"SIZE %d \r\n",
10002 			ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
10003 			ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
10004 			dma_buf_len));
10005 		DHD_ERROR(("InfoCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
10006 		if (dhd->bus->is_linkdown) {
10007 			DHD_ERROR(("InfoCpl: From Shared Mem: RD and WR are invalid"
10008 				" due to PCIe link down\r\n"));
10009 		} else {
10010 			dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
10011 			dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
10012 			DHD_ERROR(("InfoCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
10013 		}
10014 		DHD_ERROR(("InfoCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
10015 	}
10016 
10017 	ring = &prot->d2hring_tx_cpln;
10018 	if (ring) {
10019 		ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
10020 		ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
10021 		dma_buf_len = ring->max_items * ring->item_len;
10022 		DHD_ERROR(("TxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
10023 			"SIZE %d \r\n",
10024 			ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
10025 			ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
10026 			dma_buf_len));
10027 		DHD_ERROR(("TxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
10028 		if (dhd->bus->is_linkdown) {
10029 			DHD_ERROR(("TxCpl: From Shared Mem: RD and WR are invalid"
10030 				" due to PCIe link down\r\n"));
10031 		} else {
10032 			dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
10033 			dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
10034 			DHD_ERROR(("TxCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
10035 		}
10036 		DHD_ERROR(("TxCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
10037 	}
10038 
10039 	ring = &prot->d2hring_rx_cpln;
10040 	if (ring) {
10041 		ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
10042 		ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
10043 		dma_buf_len = ring->max_items * ring->item_len;
10044 		DHD_ERROR(("RxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
10045 			"SIZE %d \r\n",
10046 			ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
10047 			ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
10048 			dma_buf_len));
10049 		DHD_ERROR(("RxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
10050 		if (dhd->bus->is_linkdown) {
10051 			DHD_ERROR(("RxCpl: From Shared Mem: RD and WR are invalid"
10052 				" due to PCIe link down\r\n"));
10053 		} else {
10054 			dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
10055 			dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
10056 			DHD_ERROR(("RxCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
10057 		}
10058 		DHD_ERROR(("RxCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
10059 	}
10060 #ifdef EWP_EDL
10061 	ring = prot->d2hring_edl;
10062 	if (ring) {
10063 		ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
10064 		ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
10065 		dma_buf_len = ring->max_items * ring->item_len;
10066 		DHD_ERROR(("EdlRing: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
10067 			"SIZE %d \r\n",
10068 			ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
10069 			ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
10070 			dma_buf_len));
10071 		DHD_ERROR(("EdlRing: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
10072 		if (dhd->bus->is_linkdown) {
10073 			DHD_ERROR(("EdlRing: From Shared Mem: RD and WR are invalid"
10074 				" due to PCIe link down\r\n"));
10075 		} else {
10076 			dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
10077 			dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
10078 			DHD_ERROR(("EdlRing: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
10079 		}
10080 		DHD_ERROR(("EdlRing: Expected seq num: %d \r\n",
10081 			ring->seqnum % D2H_EPOCH_MODULO));
10082 	}
10083 #endif /* EWP_EDL */
10084 
10085 	DHD_ERROR(("%s: cur_ioctlresp_bufs_posted %d cur_event_bufs_posted %d\n",
10086 		__FUNCTION__, prot->cur_ioctlresp_bufs_posted, prot->cur_event_bufs_posted));
10087 #ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
10088 	DHD_ERROR(("%s: multi_client_flow_rings:%d max_multi_client_flow_rings:%d\n",
10089 		__FUNCTION__, dhd->multi_client_flow_rings, dhd->max_multi_client_flow_rings));
10090 #endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */
10091 
10092 	DHD_ERROR(("pktid_txq_start_cnt: %d\n", prot->pktid_txq_start_cnt));
10093 	DHD_ERROR(("pktid_txq_stop_cnt: %d\n", prot->pktid_txq_stop_cnt));
10094 	DHD_ERROR(("pktid_depleted_cnt: %d\n", prot->pktid_depleted_cnt));
10095 
10096 	dhd_pcie_debug_info_dump(dhd);
10097 
10098 	return 0;
10099 }
10100 
10101 int
dhd_prot_ringupd_dump(dhd_pub_t * dhd,struct bcmstrbuf * b)10102 dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
10103 {
10104 	uint32 *ptr;
10105 	uint32 value;
10106 
10107 	if (dhd->prot->d2h_dma_indx_wr_buf.va) {
10108 		uint32 i;
10109 		uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus);
10110 
10111 		OSL_CACHE_INV((void *)dhd->prot->d2h_dma_indx_wr_buf.va,
10112 			dhd->prot->d2h_dma_indx_wr_buf.len);
10113 
10114 		ptr = (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va);
10115 
10116 		bcm_bprintf(b, "\n max_tx_queues %d\n", max_h2d_queues);
10117 
10118 		bcm_bprintf(b, "\nRPTR block H2D common rings, 0x%04x\n", ptr);
10119 		value = ltoh32(*ptr);
10120 		bcm_bprintf(b, "\tH2D CTRL: value 0x%04x\n", value);
10121 		ptr++;
10122 		value = ltoh32(*ptr);
10123 		bcm_bprintf(b, "\tH2D RXPOST: value 0x%04x\n", value);
10124 
10125 		ptr++;
10126 		bcm_bprintf(b, "RPTR block Flow rings , 0x%04x\n", ptr);
10127 		for (i = BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) {
10128 			value = ltoh32(*ptr);
10129 			bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value);
10130 			ptr++;
10131 		}
10132 	}
10133 
10134 	if (dhd->prot->h2d_dma_indx_rd_buf.va) {
10135 		OSL_CACHE_INV((void *)dhd->prot->h2d_dma_indx_rd_buf.va,
10136 			dhd->prot->h2d_dma_indx_rd_buf.len);
10137 
10138 		ptr = (uint32 *)(dhd->prot->h2d_dma_indx_rd_buf.va);
10139 
10140 		bcm_bprintf(b, "\nWPTR block D2H common rings, 0x%04x\n", ptr);
10141 		value = ltoh32(*ptr);
10142 		bcm_bprintf(b, "\tD2H CTRLCPLT: value 0x%04x\n", value);
10143 		ptr++;
10144 		value = ltoh32(*ptr);
10145 		bcm_bprintf(b, "\tD2H TXCPLT: value 0x%04x\n", value);
10146 		ptr++;
10147 		value = ltoh32(*ptr);
10148 		bcm_bprintf(b, "\tD2H RXCPLT: value 0x%04x\n", value);
10149 	}
10150 
10151 	return 0;
10152 }
10153 
10154 uint32
dhd_prot_metadata_dbg_set(dhd_pub_t * dhd,bool val)10155 dhd_prot_metadata_dbg_set(dhd_pub_t *dhd, bool val)
10156 {
10157 	dhd_prot_t *prot = dhd->prot;
10158 #if DHD_DBG_SHOW_METADATA
10159 	prot->metadata_dbg = val;
10160 #endif // endif
10161 	return (uint32)prot->metadata_dbg;
10162 }
10163 
10164 uint32
dhd_prot_metadata_dbg_get(dhd_pub_t * dhd)10165 dhd_prot_metadata_dbg_get(dhd_pub_t *dhd)
10166 {
10167 	dhd_prot_t *prot = dhd->prot;
10168 	return (uint32)prot->metadata_dbg;
10169 }
10170 
10171 uint32
dhd_prot_metadatalen_set(dhd_pub_t * dhd,uint32 val,bool rx)10172 dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx)
10173 {
10174 	dhd_prot_t *prot = dhd->prot;
10175 	if (rx)
10176 		prot->rx_metadata_offset = (uint16)val;
10177 	else
10178 		prot->tx_metadata_offset = (uint16)val;
10179 	return dhd_prot_metadatalen_get(dhd, rx);
10180 }
10181 
10182 uint32
dhd_prot_metadatalen_get(dhd_pub_t * dhd,bool rx)10183 dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx)
10184 {
10185 	dhd_prot_t *prot = dhd->prot;
10186 	if (rx)
10187 		return prot->rx_metadata_offset;
10188 	else
10189 		return prot->tx_metadata_offset;
10190 }
10191 
10192 /** optimization to write "n" tx items at a time to ring */
10193 uint32
dhd_prot_txp_threshold(dhd_pub_t * dhd,bool set,uint32 val)10194 dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val)
10195 {
10196 	dhd_prot_t *prot = dhd->prot;
10197 	if (set)
10198 		prot->txp_threshold = (uint16)val;
10199 	val = prot->txp_threshold;
10200 	return val;
10201 }
10202 
10203 #ifdef DHD_RX_CHAINING
10204 
10205 static INLINE void BCMFASTPATH
dhd_rxchain_reset(rxchain_info_t * rxchain)10206 dhd_rxchain_reset(rxchain_info_t *rxchain)
10207 {
10208 	rxchain->pkt_count = 0;
10209 }
10210 
10211 static void BCMFASTPATH
dhd_rxchain_frame(dhd_pub_t * dhd,void * pkt,uint ifidx)10212 dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx)
10213 {
10214 	uint8 *eh;
10215 	uint8 prio;
10216 	dhd_prot_t *prot = dhd->prot;
10217 	rxchain_info_t *rxchain = &prot->rxchain;
10218 
10219 	ASSERT(!PKTISCHAINED(pkt));
10220 	ASSERT(PKTCLINK(pkt) == NULL);
10221 	ASSERT(PKTCGETATTR(pkt) == 0);
10222 
10223 	eh = PKTDATA(dhd->osh, pkt);
10224 	prio = IP_TOS46(eh + ETHER_HDR_LEN) >> IPV4_TOS_PREC_SHIFT;
10225 
10226 	if (rxchain->pkt_count && !(PKT_CTF_CHAINABLE(dhd, ifidx, eh, prio, rxchain->h_sa,
10227 		rxchain->h_da, rxchain->h_prio))) {
10228 		/* Different flow - First release the existing chain */
10229 		dhd_rxchain_commit(dhd);
10230 	}
10231 
10232 	/* For routers, with HNDCTF, link the packets using PKTSETCLINK, */
10233 	/* so that the chain can be handed off to CTF bridge as is. */
10234 	if (rxchain->pkt_count == 0) {
10235 		/* First packet in chain */
10236 		rxchain->pkthead = rxchain->pkttail = pkt;
10237 
10238 		/* Keep a copy of ptr to ether_da, ether_sa and prio */
10239 		rxchain->h_da = ((struct ether_header *)eh)->ether_dhost;
10240 		rxchain->h_sa = ((struct ether_header *)eh)->ether_shost;
10241 		rxchain->h_prio = prio;
10242 		rxchain->ifidx = ifidx;
10243 		rxchain->pkt_count++;
10244 	} else {
10245 		/* Same flow - keep chaining */
10246 		PKTSETCLINK(rxchain->pkttail, pkt);
10247 		rxchain->pkttail = pkt;
10248 		rxchain->pkt_count++;
10249 	}
10250 
10251 	if ((dhd_rx_pkt_chainable(dhd, ifidx)) && (!ETHER_ISMULTI(rxchain->h_da)) &&
10252 		((((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IP)) ||
10253 		(((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IPV6)))) {
10254 		PKTSETCHAINED(dhd->osh, pkt);
10255 		PKTCINCRCNT(rxchain->pkthead);
10256 		PKTCADDLEN(rxchain->pkthead, PKTLEN(dhd->osh, pkt));
10257 	} else {
10258 		dhd_rxchain_commit(dhd);
10259 		return;
10260 	}
10261 
10262 	/* If we have hit the max chain length, dispatch the chain and reset */
10263 	if (rxchain->pkt_count >= DHD_PKT_CTF_MAX_CHAIN_LEN) {
10264 		dhd_rxchain_commit(dhd);
10265 	}
10266 }
10267 
10268 static void BCMFASTPATH
dhd_rxchain_commit(dhd_pub_t * dhd)10269 dhd_rxchain_commit(dhd_pub_t *dhd)
10270 {
10271 	dhd_prot_t *prot = dhd->prot;
10272 	rxchain_info_t *rxchain = &prot->rxchain;
10273 
10274 	if (rxchain->pkt_count == 0)
10275 		return;
10276 
10277 	/* Release the packets to dhd_linux */
10278 	dhd_bus_rx_frame(dhd->bus, rxchain->pkthead, rxchain->ifidx, rxchain->pkt_count);
10279 
10280 	/* Reset the chain */
10281 	dhd_rxchain_reset(rxchain);
10282 }
10283 
10284 #endif /* DHD_RX_CHAINING */
10285 
10286 #ifdef IDLE_TX_FLOW_MGMT
10287 int
dhd_prot_flow_ring_resume(dhd_pub_t * dhd,flow_ring_node_t * flow_ring_node)10288 dhd_prot_flow_ring_resume(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
10289 {
10290 	tx_idle_flowring_resume_request_t *flow_resume_rqst;
10291 	msgbuf_ring_t *flow_ring;
10292 	dhd_prot_t *prot = dhd->prot;
10293 	unsigned long flags;
10294 	uint16 alloced = 0;
10295 	msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
10296 
10297 	/* Fetch a pre-initialized msgbuf_ring from the flowring pool */
10298 	flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
10299 	if (flow_ring == NULL) {
10300 		DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n",
10301 			__FUNCTION__, flow_ring_node->flowid));
10302 		return BCME_NOMEM;
10303 	}
10304 
10305 	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
10306 
10307 	/* Request for ctrl_ring buffer space */
10308 	flow_resume_rqst = (tx_idle_flowring_resume_request_t *)
10309 		dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE);
10310 
10311 	if (flow_resume_rqst == NULL) {
10312 		dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
10313 		DHD_ERROR(("%s: Flow resume Req flowid %d - failure ring space\n",
10314 			__FUNCTION__, flow_ring_node->flowid));
10315 		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
10316 		return BCME_NOMEM;
10317 	}
10318 
10319 	flow_ring_node->prot_info = (void *)flow_ring;
10320 
10321 	/* Common msg buf hdr */
10322 	flow_resume_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_RESUME;
10323 	flow_resume_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
10324 	flow_resume_rqst->msg.request_id = htol32(0); /* TBD */
10325 
10326 	flow_resume_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
10327 	ctrl_ring->seqnum++;
10328 
10329 	flow_resume_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
10330 	DHD_ERROR(("%s Send Flow resume Req flow ID %d\n",
10331 		__FUNCTION__, flow_ring_node->flowid));
10332 
10333 	/* Update the flow_ring's WRITE index */
10334 	if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
10335 		dhd_prot_dma_indx_set(dhd, flow_ring->wr,
10336 		                      H2D_DMA_INDX_WR_UPD, flow_ring->idx);
10337 	} else if (IFRM_ACTIVE(dhd) && (flow_ring->idx >= BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START)) {
10338 		dhd_prot_dma_indx_set(dhd, flow_ring->wr,
10339 			H2D_IFRM_INDX_WR_UPD,
10340 			(flow_ring->idx - BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START));
10341 	} else {
10342 		dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
10343 			sizeof(uint16), RING_WR_UPD, flow_ring->idx);
10344 	}
10345 
10346 	/* update control subn ring's WR index and ring doorbell to dongle */
10347 	dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_resume_rqst, 1);
10348 
10349 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
10350 
10351 	return BCME_OK;
10352 } /* dhd_prot_flow_ring_create */
10353 
10354 int
dhd_prot_flow_ring_batch_suspend_request(dhd_pub_t * dhd,uint16 * ringid,uint16 count)10355 dhd_prot_flow_ring_batch_suspend_request(dhd_pub_t *dhd, uint16 *ringid, uint16 count)
10356 {
10357 	tx_idle_flowring_suspend_request_t *flow_suspend_rqst;
10358 	dhd_prot_t *prot = dhd->prot;
10359 	unsigned long flags;
10360 	uint16 index;
10361 	uint16 alloced = 0;
10362 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
10363 
10364 	DHD_RING_LOCK(ring->ring_lock, flags);
10365 
10366 	/* Request for ring buffer space */
10367 	flow_suspend_rqst = (tx_idle_flowring_suspend_request_t *)
10368 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
10369 
10370 	if (flow_suspend_rqst == NULL) {
10371 		DHD_RING_UNLOCK(ring->ring_lock, flags);
10372 		DHD_ERROR(("%s: Flow suspend Req - failure ring space\n", __FUNCTION__));
10373 		return BCME_NOMEM;
10374 	}
10375 
10376 	/* Common msg buf hdr */
10377 	flow_suspend_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_SUSPEND;
10378 	/* flow_suspend_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; */
10379 	flow_suspend_rqst->msg.request_id = htol32(0); /* TBD */
10380 
10381 	flow_suspend_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
10382 	ring->seqnum++;
10383 
10384 	/* Update flow id  info */
10385 	for (index = 0; index < count; index++)
10386 	{
10387 		flow_suspend_rqst->ring_id[index] = ringid[index];
10388 	}
10389 	flow_suspend_rqst->num = count;
10390 
10391 	DHD_ERROR(("%s sending batch suspend!! count is %d\n", __FUNCTION__, count));
10392 
10393 	/* update ring's WR index and ring doorbell to dongle */
10394 	dhd_prot_ring_write_complete(dhd, ring, flow_suspend_rqst, 1);
10395 
10396 	DHD_RING_UNLOCK(ring->ring_lock, flags);
10397 
10398 	return BCME_OK;
10399 }
10400 #endif /* IDLE_TX_FLOW_MGMT */
10401 
etd_trap_name(hnd_ext_tag_trap_t tag)10402 static const char* etd_trap_name(hnd_ext_tag_trap_t tag)
10403 {
10404 	switch (tag)
10405 	{
10406 	case TAG_TRAP_SIGNATURE: return "TAG_TRAP_SIGNATURE";
10407 	case TAG_TRAP_STACK: return "TAG_TRAP_STACK";
10408 	case TAG_TRAP_MEMORY: return "TAG_TRAP_MEMORY";
10409 	case TAG_TRAP_DEEPSLEEP: return "TAG_TRAP_DEEPSLEEP";
10410 	case TAG_TRAP_PSM_WD: return "TAG_TRAP_PSM_WD";
10411 	case TAG_TRAP_PHY: return "TAG_TRAP_PHY";
10412 	case TAG_TRAP_BUS: return "TAG_TRAP_BUS";
10413 	case TAG_TRAP_MAC_SUSP: return "TAG_TRAP_MAC_SUSP";
10414 	case TAG_TRAP_BACKPLANE: return "TAG_TRAP_BACKPLANE";
10415 	case TAG_TRAP_PCIE_Q: return "TAG_TRAP_PCIE_Q";
10416 	case TAG_TRAP_WLC_STATE: return "TAG_TRAP_WLC_STATE";
10417 	case TAG_TRAP_MAC_WAKE: return "TAG_TRAP_MAC_WAKE";
10418 	case TAG_TRAP_HMAP: return "TAG_TRAP_HMAP";
10419 	case TAG_TRAP_PHYTXERR_THRESH: return "TAG_TRAP_PHYTXERR_THRESH";
10420 	case TAG_TRAP_HC_DATA: return "TAG_TRAP_HC_DATA";
10421 	case TAG_TRAP_LOG_DATA: return "TAG_TRAP_LOG_DATA";
10422 	case TAG_TRAP_CODE: return "TAG_TRAP_CODE";
10423 	case TAG_TRAP_LAST:
10424 	default:
10425 		return "Unknown";
10426 	}
10427 	return "Unknown";
10428 }
10429 
dhd_prot_dump_extended_trap(dhd_pub_t * dhdp,struct bcmstrbuf * b,bool raw)10430 int dhd_prot_dump_extended_trap(dhd_pub_t *dhdp, struct bcmstrbuf *b, bool raw)
10431 {
10432 	uint32 i;
10433 	uint32 *ext_data;
10434 	hnd_ext_trap_hdr_t *hdr;
10435 	const bcm_tlv_t *tlv;
10436 	const trap_t *tr;
10437 	const uint32 *stack;
10438 	const hnd_ext_trap_bp_err_t *bpe;
10439 	uint32 raw_len;
10440 
10441 	ext_data = dhdp->extended_trap_data;
10442 
10443 	/* return if there is no extended trap data */
10444 	if (!ext_data || !(dhdp->dongle_trap_data & D2H_DEV_EXT_TRAP_DATA))
10445 	{
10446 		bcm_bprintf(b, "%d (0x%x)", dhdp->dongle_trap_data, dhdp->dongle_trap_data);
10447 		return BCME_OK;
10448 	}
10449 
10450 	bcm_bprintf(b, "Extended trap data\n");
10451 
10452 	/* First word is original trap_data */
10453 	bcm_bprintf(b, "trap_data = 0x%08x\n", *ext_data);
10454 	ext_data++;
10455 
10456 	/* Followed by the extended trap data header */
10457 	hdr = (hnd_ext_trap_hdr_t *)ext_data;
10458 	bcm_bprintf(b, "version: %d, len: %d\n", hdr->version, hdr->len);
10459 
10460 	/* Dump a list of all tags found  before parsing data */
10461 	bcm_bprintf(b, "\nTags Found:\n");
10462 	for (i = 0; i < TAG_TRAP_LAST; i++) {
10463 		tlv = bcm_parse_tlvs(hdr->data, hdr->len, i);
10464 		if (tlv)
10465 			bcm_bprintf(b, "Tag: %d (%s), Length: %d\n", i, etd_trap_name(i), tlv->len);
10466 	}
10467 
10468 	if (raw)
10469 	{
10470 		raw_len = sizeof(hnd_ext_trap_hdr_t) + (hdr->len / 4) + (hdr->len % 4 ? 1 : 0);
10471 		for (i = 0; i < raw_len; i++)
10472 		{
10473 			bcm_bprintf(b, "0x%08x ", ext_data[i]);
10474 			if (i % 4 == 3)
10475 				bcm_bprintf(b, "\n");
10476 		}
10477 		return BCME_OK;
10478 	}
10479 
10480 	/* Extract the various supported TLVs from the extended trap data */
10481 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_CODE);
10482 	if (tlv)
10483 	{
10484 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_CODE), tlv->len);
10485 		bcm_bprintf(b, "ETD TYPE: %d\n", tlv->data[0]);
10486 	}
10487 
10488 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
10489 	if (tlv)
10490 	{
10491 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_SIGNATURE), tlv->len);
10492 		tr = (const trap_t *)tlv->data;
10493 
10494 		bcm_bprintf(b, "TRAP %x: pc %x, lr %x, sp %x, cpsr %x, spsr %x\n",
10495 		       tr->type, tr->pc, tr->r14, tr->r13, tr->cpsr, tr->spsr);
10496 		bcm_bprintf(b, "  r0 %x, r1 %x, r2 %x, r3 %x, r4 %x, r5 %x, r6 %x\n",
10497 		       tr->r0, tr->r1, tr->r2, tr->r3, tr->r4, tr->r5, tr->r6);
10498 		bcm_bprintf(b, "  r7 %x, r8 %x, r9 %x, r10 %x, r11 %x, r12 %x\n",
10499 		       tr->r7, tr->r8, tr->r9, tr->r10, tr->r11, tr->r12);
10500 	}
10501 
10502 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
10503 	if (tlv)
10504 	{
10505 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_STACK), tlv->len);
10506 		stack = (const uint32 *)tlv->data;
10507 		for (i = 0; i < (uint32)(tlv->len / 4); i++)
10508 		{
10509 			bcm_bprintf(b, "  0x%08x\n", *stack);
10510 			stack++;
10511 		}
10512 	}
10513 
10514 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BACKPLANE);
10515 	if (tlv)
10516 	{
10517 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_BACKPLANE), tlv->len);
10518 		bpe = (const hnd_ext_trap_bp_err_t *)tlv->data;
10519 		bcm_bprintf(b, " error: %x\n", bpe->error);
10520 		bcm_bprintf(b, " coreid: %x\n", bpe->coreid);
10521 		bcm_bprintf(b, " baseaddr: %x\n", bpe->baseaddr);
10522 		bcm_bprintf(b, " ioctrl: %x\n", bpe->ioctrl);
10523 		bcm_bprintf(b, " iostatus: %x\n", bpe->iostatus);
10524 		bcm_bprintf(b, " resetctrl: %x\n", bpe->resetctrl);
10525 		bcm_bprintf(b, " resetstatus: %x\n", bpe->resetstatus);
10526 		bcm_bprintf(b, " errlogctrl: %x\n", bpe->errlogctrl);
10527 		bcm_bprintf(b, " errlogdone: %x\n", bpe->errlogdone);
10528 		bcm_bprintf(b, " errlogstatus: %x\n", bpe->errlogstatus);
10529 		bcm_bprintf(b, " errlogaddrlo: %x\n", bpe->errlogaddrlo);
10530 		bcm_bprintf(b, " errlogaddrhi: %x\n", bpe->errlogaddrhi);
10531 		bcm_bprintf(b, " errlogid: %x\n", bpe->errlogid);
10532 		bcm_bprintf(b, " errloguser: %x\n", bpe->errloguser);
10533 		bcm_bprintf(b, " errlogflags: %x\n", bpe->errlogflags);
10534 	}
10535 
10536 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MEMORY);
10537 	if (tlv)
10538 	{
10539 		const hnd_ext_trap_heap_err_t* hme;
10540 
10541 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MEMORY), tlv->len);
10542 		hme = (const hnd_ext_trap_heap_err_t *)tlv->data;
10543 		bcm_bprintf(b, " arena total: %d\n", hme->arena_total);
10544 		bcm_bprintf(b, " heap free: %d\n", hme->heap_free);
10545 		bcm_bprintf(b, " heap in use: %d\n", hme->heap_inuse);
10546 		bcm_bprintf(b, " mf count: %d\n", hme->mf_count);
10547 		bcm_bprintf(b, " stack LWM: %x\n", hme->stack_lwm);
10548 
10549 		bcm_bprintf(b, " Histogram:\n");
10550 		for (i = 0; i < (HEAP_HISTOGRAM_DUMP_LEN * 2); i += 2) {
10551 			if (hme->heap_histogm[i] == 0xfffe)
10552 				bcm_bprintf(b, " Others\t%d\t?\n", hme->heap_histogm[i + 1]);
10553 			else if (hme->heap_histogm[i] == 0xffff)
10554 				bcm_bprintf(b, " >= 256K\t%d\t?\n", hme->heap_histogm[i + 1]);
10555 			else
10556 				bcm_bprintf(b, " %d\t%d\t%d\n", hme->heap_histogm[i] << 2,
10557 					hme->heap_histogm[i + 1], (hme->heap_histogm[i] << 2)
10558 					* hme->heap_histogm[i + 1]);
10559 		}
10560 
10561 		bcm_bprintf(b, " Max free block: %d\n", hme->max_sz_free_blk[0] << 2);
10562 		for (i = 1; i < HEAP_MAX_SZ_BLKS_LEN; i++) {
10563 			bcm_bprintf(b, " Next lgst free block: %d\n", hme->max_sz_free_blk[i] << 2);
10564 		}
10565 	}
10566 
10567 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PCIE_Q);
10568 	if (tlv)
10569 	{
10570 		const hnd_ext_trap_pcie_mem_err_t* pqme;
10571 
10572 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PCIE_Q), tlv->len);
10573 		pqme = (const hnd_ext_trap_pcie_mem_err_t *)tlv->data;
10574 		bcm_bprintf(b, " d2h queue len: %x\n", pqme->d2h_queue_len);
10575 		bcm_bprintf(b, " d2h req queue len: %x\n", pqme->d2h_req_queue_len);
10576 	}
10577 
10578 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_WLC_STATE);
10579 	if (tlv)
10580 	{
10581 		const hnd_ext_trap_wlc_mem_err_t* wsme;
10582 
10583 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_WLC_STATE), tlv->len);
10584 		wsme = (const hnd_ext_trap_wlc_mem_err_t *)tlv->data;
10585 		bcm_bprintf(b, " instance: %d\n", wsme->instance);
10586 		bcm_bprintf(b, " associated: %d\n", wsme->associated);
10587 		bcm_bprintf(b, " peer count: %d\n", wsme->peer_cnt);
10588 		bcm_bprintf(b, " client count: %d\n", wsme->soft_ap_client_cnt);
10589 		bcm_bprintf(b, " TX_AC_BK_FIFO: %d\n", wsme->txqueue_len[0]);
10590 		bcm_bprintf(b, " TX_AC_BE_FIFO: %d\n", wsme->txqueue_len[1]);
10591 		bcm_bprintf(b, " TX_AC_VI_FIFO: %d\n", wsme->txqueue_len[2]);
10592 		bcm_bprintf(b, " TX_AC_VO_FIFO: %d\n", wsme->txqueue_len[3]);
10593 
10594 		if (tlv->len >= (sizeof(*wsme) * 2)) {
10595 			wsme++;
10596 			bcm_bprintf(b, "\n instance: %d\n", wsme->instance);
10597 			bcm_bprintf(b, " associated: %d\n", wsme->associated);
10598 			bcm_bprintf(b, " peer count: %d\n", wsme->peer_cnt);
10599 			bcm_bprintf(b, " client count: %d\n", wsme->soft_ap_client_cnt);
10600 			bcm_bprintf(b, " TX_AC_BK_FIFO: %d\n", wsme->txqueue_len[0]);
10601 			bcm_bprintf(b, " TX_AC_BE_FIFO: %d\n", wsme->txqueue_len[1]);
10602 			bcm_bprintf(b, " TX_AC_VI_FIFO: %d\n", wsme->txqueue_len[2]);
10603 			bcm_bprintf(b, " TX_AC_VO_FIFO: %d\n", wsme->txqueue_len[3]);
10604 		}
10605 	}
10606 
10607 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PHY);
10608 	if (tlv)
10609 	{
10610 		const hnd_ext_trap_phydbg_t* phydbg;
10611 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PHY), tlv->len);
10612 		phydbg = (const hnd_ext_trap_phydbg_t *)tlv->data;
10613 		bcm_bprintf(b, " err: 0x%x\n", phydbg->err);
10614 		bcm_bprintf(b, " RxFeStatus: 0x%x\n", phydbg->RxFeStatus);
10615 		bcm_bprintf(b, " TxFIFOStatus0: 0x%x\n", phydbg->TxFIFOStatus0);
10616 		bcm_bprintf(b, " TxFIFOStatus1: 0x%x\n", phydbg->TxFIFOStatus1);
10617 		bcm_bprintf(b, " RfseqMode: 0x%x\n", phydbg->RfseqMode);
10618 		bcm_bprintf(b, " RfseqStatus0: 0x%x\n", phydbg->RfseqStatus0);
10619 		bcm_bprintf(b, " RfseqStatus1: 0x%x\n", phydbg->RfseqStatus1);
10620 		bcm_bprintf(b, " RfseqStatus_Ocl: 0x%x\n", phydbg->RfseqStatus_Ocl);
10621 		bcm_bprintf(b, " RfseqStatus_Ocl1: 0x%x\n", phydbg->RfseqStatus_Ocl1);
10622 		bcm_bprintf(b, " OCLControl1: 0x%x\n", phydbg->OCLControl1);
10623 		bcm_bprintf(b, " TxError: 0x%x\n", phydbg->TxError);
10624 		bcm_bprintf(b, " bphyTxError: 0x%x\n", phydbg->bphyTxError);
10625 		bcm_bprintf(b, " TxCCKError: 0x%x\n", phydbg->TxCCKError);
10626 		bcm_bprintf(b, " TxCtrlWrd0: 0x%x\n", phydbg->TxCtrlWrd0);
10627 		bcm_bprintf(b, " TxCtrlWrd1: 0x%x\n", phydbg->TxCtrlWrd1);
10628 		bcm_bprintf(b, " TxCtrlWrd2: 0x%x\n", phydbg->TxCtrlWrd2);
10629 		bcm_bprintf(b, " TxLsig0: 0x%x\n", phydbg->TxLsig0);
10630 		bcm_bprintf(b, " TxLsig1: 0x%x\n", phydbg->TxLsig1);
10631 		bcm_bprintf(b, " TxVhtSigA10: 0x%x\n", phydbg->TxVhtSigA10);
10632 		bcm_bprintf(b, " TxVhtSigA11: 0x%x\n", phydbg->TxVhtSigA11);
10633 		bcm_bprintf(b, " TxVhtSigA20: 0x%x\n", phydbg->TxVhtSigA20);
10634 		bcm_bprintf(b, " TxVhtSigA21: 0x%x\n", phydbg->TxVhtSigA21);
10635 		bcm_bprintf(b, " txPktLength: 0x%x\n", phydbg->txPktLength);
10636 		bcm_bprintf(b, " txPsdulengthCtr: 0x%x\n", phydbg->txPsdulengthCtr);
10637 		bcm_bprintf(b, " gpioClkControl: 0x%x\n", phydbg->gpioClkControl);
10638 		bcm_bprintf(b, " gpioSel: 0x%x\n", phydbg->gpioSel);
10639 		bcm_bprintf(b, " pktprocdebug: 0x%x\n", phydbg->pktprocdebug);
10640 		for (i = 0; i < 3; i++)
10641 			bcm_bprintf(b, " gpioOut[%d]: 0x%x\n", i, phydbg->gpioOut[i]);
10642 	}
10643 
10644 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PSM_WD);
10645 	if (tlv)
10646 	{
10647 		const hnd_ext_trap_psmwd_t* psmwd;
10648 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PSM_WD), tlv->len);
10649 		psmwd = (const hnd_ext_trap_psmwd_t *)tlv;
10650 		bcm_bprintf(b, " version: 0x%x\n", psmwd->version);
10651 		bcm_bprintf(b, " maccontrol: 0x%x\n", psmwd->i32_maccontrol);
10652 		bcm_bprintf(b, " maccommand: 0x%x\n", psmwd->i32_maccommand);
10653 		bcm_bprintf(b, " macintstatus: 0x%x\n", psmwd->i32_macintstatus);
10654 		bcm_bprintf(b, " phydebug: 0x%x\n", psmwd->i32_phydebug);
10655 		bcm_bprintf(b, " clk_ctl_st: 0x%x\n", psmwd->i32_clk_ctl_st);
10656 		for (i = 0; i < 3; i++)
10657 			bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, psmwd->i32_psmdebug[i]);
10658 		bcm_bprintf(b, " gated clock en: 0x%x\n", psmwd->i16_0x1a8);
10659 		bcm_bprintf(b, " Rcv Fifo Ctrl: 0x%x\n", psmwd->i16_0x406);
10660 		bcm_bprintf(b, " Rx ctrl 1: 0x%x\n", psmwd->i16_0x408);
10661 		bcm_bprintf(b, " Rxe Status 1: 0x%x\n", psmwd->i16_0x41a);
10662 		bcm_bprintf(b, " Rxe Status 2: 0x%x\n", psmwd->i16_0x41c);
10663 		bcm_bprintf(b, " rcv wrd count 0: 0x%x\n", psmwd->i16_0x424);
10664 		bcm_bprintf(b, " rcv wrd count 1: 0x%x\n", psmwd->i16_0x426);
10665 		bcm_bprintf(b, " RCV_LFIFO_STS: 0x%x\n", psmwd->i16_0x456);
10666 		bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", psmwd->i16_0x480);
10667 		bcm_bprintf(b, " PSM BRC: 0x%x\n", psmwd->i16_0x490);
10668 		bcm_bprintf(b, " TXE CTRL: 0x%x\n", psmwd->i16_0x500);
10669 		bcm_bprintf(b, " TXE Status: 0x%x\n", psmwd->i16_0x50e);
10670 		bcm_bprintf(b, " TXE_xmtdmabusy: 0x%x\n", psmwd->i16_0x55e);
10671 		bcm_bprintf(b, " TXE_XMTfifosuspflush: 0x%x\n", psmwd->i16_0x566);
10672 		bcm_bprintf(b, " IFS Stat: 0x%x\n", psmwd->i16_0x690);
10673 		bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", psmwd->i16_0x692);
10674 		bcm_bprintf(b, " IFS_TX_DUR: 0x%x\n", psmwd->i16_0x694);
10675 		bcm_bprintf(b, " SLow_CTL: 0x%x\n", psmwd->i16_0x6a0);
10676 		bcm_bprintf(b, " TXE_AQM fifo Ready: 0x%x\n", psmwd->i16_0x838);
10677 		bcm_bprintf(b, " Dagg ctrl: 0x%x\n", psmwd->i16_0x8c0);
10678 		bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", psmwd->shm_prewds_cnt);
10679 		bcm_bprintf(b, " shm_txtplufl_cnt: 0x%x\n", psmwd->shm_txtplufl_cnt);
10680 		bcm_bprintf(b, " shm_txphyerr_cnt: 0x%x\n", psmwd->shm_txphyerr_cnt);
10681 	}
10682 
10683 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MAC_SUSP);
10684 	if (tlv)
10685 	{
10686 		const hnd_ext_trap_macsusp_t* macsusp;
10687 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MAC_SUSP), tlv->len);
10688 		macsusp = (const hnd_ext_trap_macsusp_t *)tlv;
10689 		bcm_bprintf(b, " version: %d\n", macsusp->version);
10690 		bcm_bprintf(b, " trap_reason: %d\n", macsusp->trap_reason);
10691 		bcm_bprintf(b, " maccontrol: 0x%x\n", macsusp->i32_maccontrol);
10692 		bcm_bprintf(b, " maccommand: 0x%x\n", macsusp->i32_maccommand);
10693 		bcm_bprintf(b, " macintstatus: 0x%x\n", macsusp->i32_macintstatus);
10694 		for (i = 0; i < 4; i++)
10695 			bcm_bprintf(b, " phydebug[%d]: 0x%x\n", i, macsusp->i32_phydebug[i]);
10696 		for (i = 0; i < 8; i++)
10697 			bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, macsusp->i32_psmdebug[i]);
10698 		bcm_bprintf(b, " Rxe Status_1: 0x%x\n", macsusp->i16_0x41a);
10699 		bcm_bprintf(b, " Rxe Status_2: 0x%x\n", macsusp->i16_0x41c);
10700 		bcm_bprintf(b, " PSM BRC: 0x%x\n", macsusp->i16_0x490);
10701 		bcm_bprintf(b, " TXE Status: 0x%x\n", macsusp->i16_0x50e);
10702 		bcm_bprintf(b, " TXE xmtdmabusy: 0x%x\n", macsusp->i16_0x55e);
10703 		bcm_bprintf(b, " TXE XMTfifosuspflush: 0x%x\n", macsusp->i16_0x566);
10704 		bcm_bprintf(b, " IFS Stat: 0x%x\n", macsusp->i16_0x690);
10705 		bcm_bprintf(b, " IFS MEDBUSY CTR: 0x%x\n", macsusp->i16_0x692);
10706 		bcm_bprintf(b, " IFS TX DUR: 0x%x\n", macsusp->i16_0x694);
10707 		bcm_bprintf(b, " WEP CTL: 0x%x\n", macsusp->i16_0x7c0);
10708 		bcm_bprintf(b, " TXE AQM fifo Ready: 0x%x\n", macsusp->i16_0x838);
10709 		bcm_bprintf(b, " MHP status: 0x%x\n", macsusp->i16_0x880);
10710 		bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", macsusp->shm_prewds_cnt);
10711 		bcm_bprintf(b, " shm_ucode_dbgst: 0x%x\n", macsusp->shm_ucode_dbgst);
10712 	}
10713 
10714 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MAC_WAKE);
10715 	if (tlv)
10716 	{
10717 		const hnd_ext_trap_macenab_t* macwake;
10718 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MAC_WAKE), tlv->len);
10719 		macwake = (const hnd_ext_trap_macenab_t *)tlv;
10720 		bcm_bprintf(b, " version: 0x%x\n", macwake->version);
10721 		bcm_bprintf(b, " trap_reason: 0x%x\n", macwake->trap_reason);
10722 		bcm_bprintf(b, " maccontrol: 0x%x\n", macwake->i32_maccontrol);
10723 		bcm_bprintf(b, " maccommand: 0x%x\n", macwake->i32_maccommand);
10724 		bcm_bprintf(b, " macintstatus: 0x%x\n", macwake->i32_macintstatus);
10725 		for (i = 0; i < 8; i++)
10726 			bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, macwake->i32_psmdebug[i]);
10727 		bcm_bprintf(b, " clk_ctl_st: 0x%x\n", macwake->i32_clk_ctl_st);
10728 		bcm_bprintf(b, " powerctl: 0x%x\n", macwake->i32_powerctl);
10729 		bcm_bprintf(b, " gated clock en: 0x%x\n", macwake->i16_0x1a8);
10730 		bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", macwake->i16_0x480);
10731 		bcm_bprintf(b, " PSM BRC: 0x%x\n", macwake->i16_0x490);
10732 		bcm_bprintf(b, " TSF CTL: 0x%x\n", macwake->i16_0x600);
10733 		bcm_bprintf(b, " IFS Stat: 0x%x\n", macwake->i16_0x690);
10734 		bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", macwake->i16_0x692);
10735 		bcm_bprintf(b, " Slow_CTL: 0x%x\n", macwake->i16_0x6a0);
10736 		bcm_bprintf(b, " Slow_FRAC: 0x%x\n", macwake->i16_0x6a6);
10737 		bcm_bprintf(b, " fast power up delay: 0x%x\n", macwake->i16_0x6a8);
10738 		bcm_bprintf(b, " Slow_PER: 0x%x\n", macwake->i16_0x6aa);
10739 		bcm_bprintf(b, " shm_ucode_dbgst: 0x%x\n", macwake->shm_ucode_dbgst);
10740 	}
10741 
10742 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BUS);
10743 	if (tlv)
10744 	{
10745 		const bcm_dngl_pcie_hc_t* hc;
10746 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_BUS), tlv->len);
10747 		hc = (const bcm_dngl_pcie_hc_t *)tlv->data;
10748 		bcm_bprintf(b, " version: 0x%x\n", hc->version);
10749 		bcm_bprintf(b, " reserved: 0x%x\n", hc->reserved);
10750 		bcm_bprintf(b, " pcie_err_ind_type: 0x%x\n", hc->pcie_err_ind_type);
10751 		bcm_bprintf(b, " pcie_flag: 0x%x\n", hc->pcie_flag);
10752 		bcm_bprintf(b, " pcie_control_reg: 0x%x\n", hc->pcie_control_reg);
10753 		for (i = 0; i < HC_PCIEDEV_CONFIG_REGLIST_MAX; i++)
10754 			bcm_bprintf(b, " pcie_config_regs[%d]: 0x%x\n", i, hc->pcie_config_regs[i]);
10755 	}
10756 
10757 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_HMAP);
10758 	if (tlv)
10759 	{
10760 		const pcie_hmapviolation_t* hmap;
10761 		hmap = (const pcie_hmapviolation_t *)tlv->data;
10762 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_HMAP), tlv->len);
10763 		bcm_bprintf(b, " HMAP Vio Addr Low: 0x%x\n", hmap->hmap_violationaddr_lo);
10764 		bcm_bprintf(b, " HMAP Vio Addr Hi: 0x%x\n", hmap->hmap_violationaddr_hi);
10765 		bcm_bprintf(b, " HMAP Vio Info: 0x%x\n", hmap->hmap_violation_info);
10766 	}
10767 
10768 	return BCME_OK;
10769 }
10770 
10771 #ifdef BCMPCIE
10772 int
dhd_prot_send_host_timestamp(dhd_pub_t * dhdp,uchar * tlvs,uint16 tlv_len,uint16 seqnum,uint16 xt_id)10773 dhd_prot_send_host_timestamp(dhd_pub_t *dhdp, uchar *tlvs, uint16 tlv_len,
10774 	uint16 seqnum, uint16 xt_id)
10775 {
10776 	dhd_prot_t *prot = dhdp->prot;
10777 	host_timestamp_msg_t *ts_req;
10778 	unsigned long flags;
10779 	uint16 alloced = 0;
10780 	uchar *ts_tlv_buf;
10781 	msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
10782 
10783 	if ((tlvs == NULL) || (tlv_len == 0)) {
10784 		DHD_ERROR(("%s: argument error tlv: %p, tlv_len %d\n",
10785 			__FUNCTION__, tlvs, tlv_len));
10786 		return -1;
10787 	}
10788 
10789 	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
10790 
10791 	/* if Host TS req already pending go away */
10792 	if (prot->hostts_req_buf_inuse == TRUE) {
10793 		DHD_ERROR(("one host TS request already pending at device\n"));
10794 		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
10795 		return -1;
10796 	}
10797 
10798 	/* Request for cbuf space */
10799 	ts_req = (host_timestamp_msg_t*)dhd_prot_alloc_ring_space(dhdp, ctrl_ring,
10800 		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,	&alloced, FALSE);
10801 	if (ts_req == NULL) {
10802 		DHD_ERROR(("couldn't allocate space on msgring to send host TS request\n"));
10803 		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
10804 		return -1;
10805 	}
10806 
10807 	/* Common msg buf hdr */
10808 	ts_req->msg.msg_type = MSG_TYPE_HOSTTIMSTAMP;
10809 	ts_req->msg.if_id = 0;
10810 	ts_req->msg.flags =  ctrl_ring->current_phase;
10811 	ts_req->msg.request_id = DHD_H2D_HOSTTS_REQ_PKTID;
10812 
10813 	ts_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
10814 	ctrl_ring->seqnum++;
10815 
10816 	ts_req->xt_id = xt_id;
10817 	ts_req->seqnum = seqnum;
10818 	/* populate TS req buffer info */
10819 	ts_req->input_data_len = htol16(tlv_len);
10820 	ts_req->host_buf_addr.high = htol32(PHYSADDRHI(prot->hostts_req_buf.pa));
10821 	ts_req->host_buf_addr.low = htol32(PHYSADDRLO(prot->hostts_req_buf.pa));
10822 	/* copy ioct payload */
10823 	ts_tlv_buf = (void *) prot->hostts_req_buf.va;
10824 	prot->hostts_req_buf_inuse = TRUE;
10825 	memcpy(ts_tlv_buf, tlvs, tlv_len);
10826 
10827 	OSL_CACHE_FLUSH((void *) prot->hostts_req_buf.va, tlv_len);
10828 
10829 	if (ISALIGNED(ts_tlv_buf, DMA_ALIGN_LEN) == FALSE) {
10830 		DHD_ERROR(("host TS req buffer address unaligned !!!!! \n"));
10831 	}
10832 
10833 	DHD_CTL(("submitted Host TS request request_id %d, data_len %d, tx_id %d, seq %d\n",
10834 		ts_req->msg.request_id, ts_req->input_data_len,
10835 		ts_req->xt_id, ts_req->seqnum));
10836 
10837 	/* upd wrt ptr and raise interrupt */
10838 	dhd_prot_ring_write_complete(dhdp, ctrl_ring, ts_req,
10839 		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
10840 
10841 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
10842 
10843 	return 0;
10844 } /* dhd_prot_send_host_timestamp */
10845 
10846 bool
dhd_prot_data_path_tx_timestamp_logging(dhd_pub_t * dhd,bool enable,bool set)10847 dhd_prot_data_path_tx_timestamp_logging(dhd_pub_t *dhd,  bool enable, bool set)
10848 {
10849 	if (set)
10850 		dhd->prot->tx_ts_log_enabled = enable;
10851 
10852 	return dhd->prot->tx_ts_log_enabled;
10853 }
10854 
10855 bool
dhd_prot_data_path_rx_timestamp_logging(dhd_pub_t * dhd,bool enable,bool set)10856 dhd_prot_data_path_rx_timestamp_logging(dhd_pub_t *dhd,  bool enable, bool set)
10857 {
10858 	if (set)
10859 		dhd->prot->rx_ts_log_enabled = enable;
10860 
10861 	return dhd->prot->rx_ts_log_enabled;
10862 }
10863 
10864 bool
dhd_prot_pkt_noretry(dhd_pub_t * dhd,bool enable,bool set)10865 dhd_prot_pkt_noretry(dhd_pub_t *dhd, bool enable, bool set)
10866 {
10867 	if (set)
10868 		dhd->prot->no_retry = enable;
10869 
10870 	return dhd->prot->no_retry;
10871 }
10872 
10873 bool
dhd_prot_pkt_noaggr(dhd_pub_t * dhd,bool enable,bool set)10874 dhd_prot_pkt_noaggr(dhd_pub_t *dhd, bool enable, bool set)
10875 {
10876 	if (set)
10877 		dhd->prot->no_aggr = enable;
10878 
10879 	return dhd->prot->no_aggr;
10880 }
10881 
10882 bool
dhd_prot_pkt_fixed_rate(dhd_pub_t * dhd,bool enable,bool set)10883 dhd_prot_pkt_fixed_rate(dhd_pub_t *dhd, bool enable, bool set)
10884 {
10885 	if (set)
10886 		dhd->prot->fixed_rate = enable;
10887 
10888 	return dhd->prot->fixed_rate;
10889 }
10890 #endif /* BCMPCIE */
10891 
10892 void
dhd_prot_dma_indx_free(dhd_pub_t * dhd)10893 dhd_prot_dma_indx_free(dhd_pub_t *dhd)
10894 {
10895 	dhd_prot_t *prot = dhd->prot;
10896 
10897 	dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
10898 	dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
10899 }
10900 
10901 void
dhd_msgbuf_delay_post_ts_bufs(dhd_pub_t * dhd)10902 dhd_msgbuf_delay_post_ts_bufs(dhd_pub_t *dhd)
10903 {
10904 	if (dhd->prot->max_tsbufpost > 0)
10905 		dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
10906 }
10907 
10908 static void BCMFASTPATH
dhd_prot_process_fw_timestamp(dhd_pub_t * dhd,void * buf)10909 dhd_prot_process_fw_timestamp(dhd_pub_t *dhd, void* buf)
10910 {
10911 	DHD_ERROR(("Timesunc feature not compiled in but GOT FW TS message\n"));
10912 
10913 }
10914 
10915 uint16
dhd_prot_get_ioctl_trans_id(dhd_pub_t * dhdp)10916 dhd_prot_get_ioctl_trans_id(dhd_pub_t *dhdp)
10917 {
10918 	return dhdp->prot->ioctl_trans_id;
10919 }
10920 
dhd_get_hscb_info(dhd_pub_t * dhd,void ** va,uint32 * len)10921 int dhd_get_hscb_info(dhd_pub_t *dhd, void ** va, uint32 *len)
10922 {
10923 	if (!dhd->hscb_enable) {
10924 		if (len) {
10925 			/* prevent "Operation not supported" dhd message */
10926 			*len = 0;
10927 			return BCME_OK;
10928 		}
10929 		return BCME_UNSUPPORTED;
10930 	}
10931 
10932 	if (va) {
10933 		*va = dhd->prot->host_scb_buf.va;
10934 	}
10935 	if (len) {
10936 		*len = dhd->prot->host_scb_buf.len;
10937 	}
10938 
10939 	return BCME_OK;
10940 }
10941 
10942 #ifdef DHD_HP2P
10943 uint32
dhd_prot_pkt_threshold(dhd_pub_t * dhd,bool set,uint32 val)10944 dhd_prot_pkt_threshold(dhd_pub_t *dhd, bool set, uint32 val)
10945 {
10946 	if (set)
10947 		dhd->pkt_thresh = (uint16)val;
10948 
10949 	val = dhd->pkt_thresh;
10950 
10951 	return val;
10952 }
10953 
10954 uint32
dhd_prot_time_threshold(dhd_pub_t * dhd,bool set,uint32 val)10955 dhd_prot_time_threshold(dhd_pub_t *dhd, bool set, uint32 val)
10956 {
10957 	if (set)
10958 		dhd->time_thresh = (uint16)val;
10959 
10960 	val = dhd->time_thresh;
10961 
10962 	return val;
10963 }
10964 
10965 uint32
dhd_prot_pkt_expiry(dhd_pub_t * dhd,bool set,uint32 val)10966 dhd_prot_pkt_expiry(dhd_pub_t *dhd, bool set, uint32 val)
10967 {
10968 	if (set)
10969 		dhd->pkt_expiry = (uint16)val;
10970 
10971 	val = dhd->pkt_expiry;
10972 
10973 	return val;
10974 }
10975 
10976 uint8
dhd_prot_hp2p_enable(dhd_pub_t * dhd,bool set,int enable)10977 dhd_prot_hp2p_enable(dhd_pub_t *dhd, bool set, int enable)
10978 {
10979 	uint8 ret = 0;
10980 	if (set) {
10981 		dhd->hp2p_enable = (enable & 0xf) ? TRUE : FALSE;
10982 		dhd->hp2p_infra_enable = ((enable >> 4) & 0xf) ? TRUE : FALSE;
10983 
10984 		if (enable) {
10985 			dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_TID_MAP);
10986 		} else {
10987 			dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_AC_MAP);
10988 		}
10989 	}
10990 	ret = dhd->hp2p_infra_enable ? 0x1:0x0;
10991 	ret <<= 4;
10992 	ret |= dhd->hp2p_enable ? 0x1:0x0;
10993 
10994 	return ret;
10995 }
10996 
10997 static void
dhd_update_hp2p_rxstats(dhd_pub_t * dhd,host_rxbuf_cmpl_t * rxstatus)10998 dhd_update_hp2p_rxstats(dhd_pub_t *dhd, host_rxbuf_cmpl_t *rxstatus)
10999 {
11000 	ts_timestamp_t *ts = (ts_timestamp_t *)&rxstatus->ts;
11001 	hp2p_info_t *hp2p_info;
11002 	uint32 dur1;
11003 
11004 	hp2p_info = &dhd->hp2p_info[0];
11005 	dur1 = ((ts->high & 0x3FF) * HP2P_TIME_SCALE) / 100;
11006 
11007 	if (dur1 > (MAX_RX_HIST_BIN - 1)) {
11008 		dur1 = MAX_RX_HIST_BIN - 1;
11009 		DHD_ERROR(("%s: 0x%x 0x%x\n",
11010 			__FUNCTION__, ts->low, ts->high));
11011 	}
11012 
11013 	hp2p_info->rx_t0[dur1 % MAX_RX_HIST_BIN]++;
11014 	return;
11015 }
11016 
11017 static void
dhd_update_hp2p_txstats(dhd_pub_t * dhd,host_txbuf_cmpl_t * txstatus)11018 dhd_update_hp2p_txstats(dhd_pub_t *dhd, host_txbuf_cmpl_t *txstatus)
11019 {
11020 	ts_timestamp_t *ts = (ts_timestamp_t *)&txstatus->ts;
11021 	uint16 flowid = txstatus->compl_hdr.flow_ring_id;
11022 	uint32 hp2p_flowid, dur1, dur2;
11023 	hp2p_info_t *hp2p_info;
11024 
11025 	hp2p_flowid = dhd->bus->max_submission_rings -
11026 		dhd->bus->max_cmn_rings - flowid + 1;
11027 	hp2p_info = &dhd->hp2p_info[hp2p_flowid];
11028 	ts = (ts_timestamp_t *)&(txstatus->ts);
11029 
11030 	dur1 = ((ts->high & 0x3FF) * HP2P_TIME_SCALE) / 1000;
11031 	if (dur1 > (MAX_TX_HIST_BIN - 1)) {
11032 		dur1 = MAX_TX_HIST_BIN - 1;
11033 		DHD_ERROR(("%s: 0x%x 0x%x\n", __FUNCTION__, ts->low, ts->high));
11034 	}
11035 	hp2p_info->tx_t0[dur1 % MAX_TX_HIST_BIN]++;
11036 
11037 	dur2 = (((ts->high >> 10) & 0x3FF) * HP2P_TIME_SCALE) / 1000;
11038 	if (dur2 > (MAX_TX_HIST_BIN - 1)) {
11039 		dur2 = MAX_TX_HIST_BIN - 1;
11040 		DHD_ERROR(("%s: 0x%x 0x%x\n", __FUNCTION__, ts->low, ts->high));
11041 	}
11042 
11043 	hp2p_info->tx_t1[dur2 % MAX_TX_HIST_BIN]++;
11044 	return;
11045 }
11046 
dhd_hp2p_write(struct hrtimer * timer)11047 enum hrtimer_restart dhd_hp2p_write(struct hrtimer *timer)
11048 {
11049 	hp2p_info_t *hp2p_info;
11050 	unsigned long flags;
11051 	dhd_pub_t *dhdp;
11052 
11053 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
11054 #pragma GCC diagnostic push
11055 #pragma GCC diagnostic ignored "-Wcast-qual"
11056 #endif // endif
11057 	hp2p_info = container_of(timer, hp2p_info_t, timer.timer);
11058 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
11059 #pragma GCC diagnostic pop
11060 #endif // endif
11061 	dhdp = hp2p_info->dhd_pub;
11062 	if (!dhdp) {
11063 		goto done;
11064 	}
11065 
11066 	DHD_INFO(("%s: pend_item = %d flowid = %d\n",
11067 		__FUNCTION__, ((msgbuf_ring_t *)hp2p_info->ring)->pend_items_count,
11068 		hp2p_info->flowid));
11069 
11070 	flags = dhd_os_hp2plock(dhdp);
11071 
11072 	dhd_prot_txdata_write_flush(dhdp, hp2p_info->flowid);
11073 	hp2p_info->hrtimer_init = FALSE;
11074 	hp2p_info->num_timer_limit++;
11075 
11076 	dhd_os_hp2punlock(dhdp, flags);
11077 done:
11078 	return HRTIMER_NORESTART;
11079 }
11080 
11081 static void
dhd_calc_hp2p_burst(dhd_pub_t * dhd,msgbuf_ring_t * ring,uint16 flowid)11082 dhd_calc_hp2p_burst(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 flowid)
11083 {
11084 	hp2p_info_t *hp2p_info;
11085 	uint16 hp2p_flowid;
11086 
11087 	hp2p_flowid = dhd->bus->max_submission_rings -
11088 		dhd->bus->max_cmn_rings - flowid + 1;
11089 	hp2p_info = &dhd->hp2p_info[hp2p_flowid];
11090 
11091 	if (ring->pend_items_count == dhd->pkt_thresh) {
11092 		dhd_prot_txdata_write_flush(dhd, flowid);
11093 
11094 		hp2p_info->hrtimer_init = FALSE;
11095 		hp2p_info->ring = NULL;
11096 		hp2p_info->num_pkt_limit++;
11097 		hrtimer_cancel(&hp2p_info->timer.timer);
11098 
11099 		DHD_INFO(("%s: cancel hrtimer for flowid = %d \n"
11100 			"hp2p_flowid = %d pkt_thresh = %d\n",
11101 			__FUNCTION__, flowid, hp2p_flowid, dhd->pkt_thresh));
11102 	} else {
11103 		if (hp2p_info->hrtimer_init == FALSE) {
11104 			hp2p_info->hrtimer_init = TRUE;
11105 			hp2p_info->flowid = flowid;
11106 			hp2p_info->dhd_pub = dhd;
11107 			hp2p_info->ring = ring;
11108 			hp2p_info->num_timer_start++;
11109 
11110 			tasklet_hrtimer_start(&hp2p_info->timer,
11111 				ktime_set(0, dhd->time_thresh * 1000), HRTIMER_MODE_REL);
11112 
11113 			DHD_INFO(("%s: start hrtimer for flowid = %d hp2_flowid = %d\n",
11114 					__FUNCTION__, flowid, hp2p_flowid));
11115 		}
11116 	}
11117 	return;
11118 }
11119 
11120 static void
dhd_update_hp2p_txdesc(dhd_pub_t * dhd,host_txbuf_post_t * txdesc)11121 dhd_update_hp2p_txdesc(dhd_pub_t *dhd, host_txbuf_post_t *txdesc)
11122 {
11123 	uint64 ts;
11124 
11125 	ts = local_clock();
11126 	do_div(ts, 1000);
11127 
11128 	txdesc->metadata_buf_len = 0;
11129 	txdesc->metadata_buf_addr.high_addr = htol32((ts >> 32) & 0xFFFFFFFF);
11130 	txdesc->metadata_buf_addr.low_addr = htol32(ts & 0xFFFFFFFF);
11131 	txdesc->exp_time = dhd->pkt_expiry;
11132 
11133 	DHD_INFO(("%s: metadata_high = 0x%x metadata_low = 0x%x exp_time = %x\n",
11134 		__FUNCTION__, txdesc->metadata_buf_addr.high_addr,
11135 		txdesc->metadata_buf_addr.low_addr,
11136 		txdesc->exp_time));
11137 
11138 	return;
11139 }
11140 #endif /* DHD_HP2P */
11141 
11142 #ifdef DHD_MAP_LOGGING
11143 void
dhd_prot_smmu_fault_dump(dhd_pub_t * dhdp)11144 dhd_prot_smmu_fault_dump(dhd_pub_t *dhdp)
11145 {
11146 	dhd_prot_debug_info_print(dhdp);
11147 	OSL_DMA_MAP_DUMP(dhdp->osh);
11148 #ifdef DHD_MAP_PKTID_LOGGING
11149 	dhd_pktid_logging_dump(dhdp);
11150 #endif /* DHD_MAP_PKTID_LOGGING */
11151 #ifdef DHD_FW_COREDUMP
11152 	dhdp->memdump_type = DUMP_TYPE_SMMU_FAULT;
11153 #ifdef DNGL_AXI_ERROR_LOGGING
11154 	dhdp->memdump_enabled = DUMP_MEMFILE;
11155 	dhd_bus_get_mem_dump(dhdp);
11156 #else
11157 	dhdp->memdump_enabled = DUMP_MEMONLY;
11158 	dhd_bus_mem_dump(dhdp);
11159 #endif /* DNGL_AXI_ERROR_LOGGING */
11160 #endif /* DHD_FW_COREDUMP */
11161 }
11162 #endif /* DHD_MAP_LOGGING */
11163