1 /**
2  * @file definition of host message ring functionality
3  * Provides type definitions and function prototypes used to link the
4  * DHD OS, bus, and protocol modules.
5  *
6  * Copyright (C) 2020, Broadcom.
7  *
8  *      Unless you and Broadcom execute a separate written software license
9  * agreement governing use of this software, this software is licensed to you
10  * under the terms of the GNU General Public License version 2 (the "GPL"),
11  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
12  * following added to such license:
13  *
14  *      As a special exception, the copyright holders of this software give you
15  * permission to link this software with independent modules, and to copy and
16  * distribute the resulting executable under terms of your choice, provided that
17  * you also meet, for each linked independent module, the terms and conditions of
18  * the license of that module.  An independent module is a module which is not
19  * derived from this software.  The special exception does not apply to any
20  * modifications of the software.
21  *
22  *
23  * <<Broadcom-WL-IPTag/Open:>>
24  *
25  * $Id$
26  */
27 
28 /** XXX Twiki: [PCIeFullDongleArchitecture] */
29 
30 #include <typedefs.h>
31 #include <osl.h>
32 
33 #include <bcmutils.h>
34 #include <bcmmsgbuf.h>
35 #include <bcmendian.h>
36 #include <bcmstdlib_s.h>
37 
38 #include <dngl_stats.h>
39 #include <dhd.h>
40 #include <dhd_proto.h>
41 
42 #ifdef BCMDBUS
43 #include <dbus.h>
44 #else
45 #include <dhd_bus.h>
46 #endif /* BCMDBUS */
47 
48 #include <dhd_dbg.h>
49 #include <siutils.h>
50 #include <dhd_debug.h>
51 #ifdef EXT_STA
52 #include <wlc_cfg.h>
53 #include <wlc_pub.h>
54 #include <wl_port_if.h>
55 #endif /* EXT_STA */
56 
57 #include <dhd_flowring.h>
58 
59 #include <pcie_core.h>
60 #include <bcmpcie.h>
61 #include <dhd_pcie.h>
62 #ifdef DHD_TIMESYNC
63 #include <dhd_timesync.h>
64 #endif /* DHD_TIMESYNC */
65 #ifdef DHD_PKTTS
66 #include <bcmudp.h>
67 #include <bcmtcp.h>
68 #endif /* DHD_PKTTS */
69 #include <dhd_config.h>
70 
71 #if defined(DHD_LB)
72 #if !defined(LINUX) && !defined(linux) && !defined(OEM_ANDROID)
73 #error "DHD Loadbalancing only supported on LINUX | OEM_ANDROID"
74 #endif /* !LINUX && !OEM_ANDROID */
75 #include <linux/cpu.h>
76 #include <bcm_ring.h>
77 #define DHD_LB_WORKQ_SZ			    (8192)
78 #define DHD_LB_WORKQ_SYNC           (16)
79 #define DHD_LB_WORK_SCHED           (DHD_LB_WORKQ_SYNC * 2)
80 #endif /* DHD_LB */
81 
82 #include <etd.h>
83 #include <hnd_debug.h>
84 #include <bcmtlv.h>
85 #include <hnd_armtrap.h>
86 #include <dnglevent.h>
87 
88 #ifdef DHD_PKT_LOGGING
89 #include <dhd_pktlog.h>
90 #include <dhd_linux_pktdump.h>
91 #endif /* DHD_PKT_LOGGING */
92 #ifdef DHD_EWPR_VER2
93 #include <dhd_bitpack.h>
94 #endif /* DHD_EWPR_VER2 */
95 
96 extern char dhd_version[];
97 extern char fw_version[];
98 
99 /**
100  * Host configures a soft doorbell for d2h rings, by specifying a 32bit host
101  * address where a value must be written. Host may also request interrupt
102  * coalescing on this soft doorbell.
103  * Use Case: Hosts with network processors, may register with the dongle the
104  * network processor's thread wakeup register and a value corresponding to the
105  * core/thread context. Dongle will issue a write transaction <address,value>
106  * to the PCIE RC which will need to be routed to the mapped register space, by
107  * the host.
108  */
109 /* #define DHD_D2H_SOFT_DOORBELL_SUPPORT */
110 
111 /* Dependency Check */
112 #if defined(IOCTLRESP_USE_CONSTMEM) && defined(DHD_USE_STATIC_CTRLBUF)
113 #error "DHD_USE_STATIC_CTRLBUF is NOT working with DHD_USE_OSLPKT_FOR_RESPBUF"
114 #endif /* IOCTLRESP_USE_CONSTMEM && DHD_USE_STATIC_CTRLBUF */
115 
116 #define RETRIES 2		/* # of retries to retrieve matching ioctl response */
117 
118 #if defined(DHD_HTPUT_TUNABLES)
119 #define DEFAULT_RX_BUFFERS_TO_POST		1024
120 #define RX_BUF_BURST				64 /* Rx buffers for MSDU Data */
121 #define RXBUFPOST_THRESHOLD			64 /* Rxbuf post threshold */
122 #else
123 #define DEFAULT_RX_BUFFERS_TO_POST		256
124 #define RX_BUF_BURST				32 /* Rx buffers for MSDU Data */
125 #define RXBUFPOST_THRESHOLD			32 /* Rxbuf post threshold */
126 #endif /* DHD_HTPUT_TUNABLES */
127 
128 /* Read index update Magic sequence */
129 #define DHD_DMA_INDX_SEQ_H2D_DB_MAGIC	0xDDDDDDDAu
130 #define DHD_RDPTR_UPDATE_H2D_DB_MAGIC(ring)	(0xDD000000 | (ring->idx << 16u) | ring->rd)
131 /* Write index update Magic sequence */
132 #define DHD_WRPTR_UPDATE_H2D_DB_MAGIC(ring)	(0xFF000000 | (ring->idx << 16u) | ring->wr)
133 #define DHD_AGGR_H2D_DB_MAGIC	0xFFFFFFFAu
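/*
 * Worked example (illustrative only): for a ring with idx = 5 and rd = 0x0010,
 * DHD_RDPTR_UPDATE_H2D_DB_MAGIC(ring) evaluates to
 * 0xDD000000 | (5 << 16) | 0x0010 = 0xDD050010, i.e. the doorbell value encodes
 * the update type (0xDD read / 0xFF write), the ring id and the new index.
 */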
134 
135 #define DHD_STOP_QUEUE_THRESHOLD	200
136 #define DHD_START_QUEUE_THRESHOLD	100
137 
138 #define RX_DMA_OFFSET		8 /* Mem2mem DMA inserts an extra 8 bytes */
139 #define IOCT_RETBUF_SIZE	(RX_DMA_OFFSET + WLC_IOCTL_MAXLEN)
140 
141 /* flags for ioctl pending status */
142 #define MSGBUF_IOCTL_ACK_PENDING	(1<<0)
143 #define MSGBUF_IOCTL_RESP_PENDING	(1<<1)
144 
145 #define DHD_IOCTL_REQ_PKTBUFSZ		2048
146 #define MSGBUF_IOCTL_MAX_RQSTLEN	(DHD_IOCTL_REQ_PKTBUFSZ - H2DRING_CTRL_SUB_ITEMSIZE)
147 
148 /**
149  * XXX: DMA_ALIGN_LEN use is overloaded:
150  * - as align bits: in DMA_ALLOC_CONSISTENT 1 << 4
151  * - in ensuring that a buffer's va is 4 Byte aligned
152  * - in rounding up a buffer length to 4 Bytes.
153  */
154 #define DMA_ALIGN_LEN		4
155 
156 #define DMA_D2H_SCRATCH_BUF_LEN	8
157 #define DMA_XFER_LEN_LIMIT	0x400000
158 
159 #ifdef BCM_HOST_BUF
160 #ifndef DMA_HOST_BUFFER_LEN
161 #define DMA_HOST_BUFFER_LEN	0x200000
162 #endif
163 #endif /* BCM_HOST_BUF */
164 
165 #define DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ		8192
166 
167 #define DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D		1
168 #define DHD_FLOWRING_MAX_EVENTBUF_POST			32
169 #define DHD_FLOWRING_MAX_IOCTLRESPBUF_POST		8
170 #define DHD_H2D_INFORING_MAX_BUF_POST			32
171 #ifdef BTLOG
172 #define DHD_H2D_BTLOGRING_MAX_BUF_POST			32
173 #endif	/* BTLOG */
174 #define DHD_MAX_TSBUF_POST			8
175 
176 #define DHD_PROT_FUNCS	43
177 
178 /* Length of buffer in host for bus throughput measurement */
179 #define DHD_BUS_TPUT_BUF_LEN 2048
180 
181 #define TXP_FLUSH_NITEMS
182 
183 /* optimization to write "n" tx items at a time to ring */
184 #define TXP_FLUSH_MAX_ITEMS_FLUSH_CNT	48
185 
186 #define RING_NAME_MAX_LENGTH		24
187 #define CTRLSUB_HOSTTS_MEESAGE_SIZE		1024
188 /* Giving room before ioctl_trans_id rolls over. */
189 #define BUFFER_BEFORE_ROLLOVER 300
190 
191 /* 512K memory + 32K registers */
192 #define SNAPSHOT_UPLOAD_BUF_SIZE	((512 + 32) * 1024)
193 
194 struct msgbuf_ring; /* ring context for common and flow rings */
195 
196 #ifdef DHD_HMAPTEST
197 /* 5 * DMA_CONSISTENT_ALIGN as different tests use up to the 4th page */
198 #define HMAP_SANDBOX_BUFFER_LEN	(DMA_CONSISTENT_ALIGN * 5) /* for a 4k page this is 20K */
199 /**
200  * for D11 DMA HMAPTEST these states are as follows:
201  * an iovar sets the ACTIVE state
202  * the next TXPOST / RXPOST sets the POSTED state
203  * on TXCPL / RXCPL, a POSTED + pktid match frees the buffer and changes the state to INACTIVE
204  * This ensures that on an iovar only one buffer is replaced from the sandbox area
205  */
206 #define HMAPTEST_D11_TX_INACTIVE 0
207 #define HMAPTEST_D11_TX_ACTIVE 1
208 #define HMAPTEST_D11_TX_POSTED 2
209 
210 #define HMAPTEST_D11_RX_INACTIVE 0
211 #define HMAPTEST_D11_RX_ACTIVE 1
212 #define HMAPTEST_D11_RX_POSTED 2
213 #endif /* DHD_HMAPTEST */
214 
215 #define PCIE_DMA_LOOPBACK	0
216 #define D11_DMA_LOOPBACK	1
217 #define BMC_DMA_LOOPBACK	2
218 
219 /**
220  * PCIE D2H DMA Complete Sync Modes
221  *
222  * Firmware may interrupt the host before the D2H Mem2Mem DMA has completed
223  * into host system memory. A WAR using one of the following approaches is needed:
224  * 1. Dongle places a modulo-253 seqnum in the last word of each D2H message
225  * 2. XOR Checksum, with epoch# in each work item. Dongle builds an XOR checksum
226  *    and writes it in the last word of each work item. Each work item carries a
227  *    seqnum = sequence number % 253.
228  *
229  * 3. Read Barrier: Dongle does a host memory read access prior to posting an
230  *    interrupt, ensuring that D2H data transfer indeed completed.
231  * 4. Dongle DMA's all indices after producing items in the D2H ring, flushing
232  *    ring contents before the indices.
233  *
234  * Host does not sync for DMA to complete with option #3 or #4, and a noop sync
235  * callback (see dhd_prot_d2h_sync_none) may be bound.
236  *
237  * Dongle advertizes host side sync mechanism requirements.
238  */
239 
240 #define PCIE_D2H_SYNC_WAIT_TRIES    (512U)
241 #define PCIE_D2H_SYNC_NUM_OF_STEPS  (5U)
242 #define PCIE_D2H_SYNC_DELAY         (100UL)	/* in terms of usecs */
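/*
 * Worked example (illustrative only): with the values above, the stepper loops
 * in dhd_prot_d2h_sync_seqnum()/dhd_prot_d2h_sync_xorcsum() wait at most
 * 512 tries * 100 usec * (1 + 2 + 3 + 4 + 5) = 768000 usec, i.e. roughly
 * 768 ms (ignoring cache-invalidate and CPU-relax overhead), before a D2H work
 * item is declared a livelock and skipped.
 */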
243 
244 #ifdef DHD_REPLACE_LOG_INFO_TO_TRACE
245 #define DHD_MSGBUF_INFO DHD_TRACE
246 #else
247 #define DHD_MSGBUF_INFO DHD_INFO
248 #endif /* DHD_REPLACE_LOG_INFO_TO_TRACE */
249 
250 /**
251  * Custom callback attached based upon D2H DMA Sync mode advertized by dongle.
252  *
253  * On success: return cmn_msg_hdr_t::msg_type
254  * On failure: return 0 (invalid msg_type)
255  */
256 typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
257                                 volatile cmn_msg_hdr_t *msg, int msglen);
258 
259 /**
260  * Custom callback attached based upon D2H DMA Sync mode advertized by dongle.
261  * For EDL messages.
262  *
263  * On success: return cmn_msg_hdr_t::msg_type
264  * On failure: return 0 (invalid msg_type)
265  */
266 #ifdef EWP_EDL
267 typedef int (* d2h_edl_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
268                                 volatile cmn_msg_hdr_t *msg);
269 #endif /* EWP_EDL */
270 
271 /*
272  * +----------------------------------------------------------------------------
273  *
274  * RingIds and FlowId are not equivalent as ringids include D2H rings whereas
275  * flowids do not.
276  *
277  * Dongle advertizes the max H2D rings, as max_sub_queues = 'N' which includes
278  * the H2D common rings as well as the (N-BCMPCIE_H2D_COMMON_MSGRINGS) flowrings
279  *
280  * Here is a sample mapping for (based on PCIE Full Dongle Rev5) where,
281  *  BCMPCIE_H2D_COMMON_MSGRINGS = 2, i.e. 2 H2D common rings,
282  *  BCMPCIE_COMMON_MSGRINGS     = 5, i.e. including the 3 D2H common rings.
283  *
284  *  H2D Control  Submit   RingId = 0        FlowId = 0 reserved never allocated
285  *  H2D RxPost   Submit   RingId = 1        FlowId = 1 reserved never allocated
286  *
287  *  D2H Control  Complete RingId = 2
288  *  D2H Transmit Complete RingId = 3
289  *  D2H Receive  Complete RingId = 4
290  *
291  *  H2D TxPost   FLOWRING RingId = 5         FlowId = 2     (1st flowring)
292  *  H2D TxPost   FLOWRING RingId = 6         FlowId = 3     (2nd flowring)
293  *  H2D TxPost   FLOWRING RingId = 5 + (k-1)  FlowId = 2 + (k-1) (kth flowring)
294  *
295  * When TxPost FlowId(s) are allocated, the FlowIds [0..FLOWID_RESERVED) are
296  * unused, where FLOWID_RESERVED is BCMPCIE_H2D_COMMON_MSGRINGS.
297  *
298  * Example: when a system supports 4 bc/mc and 128 uc flowrings, with
299  * BCMPCIE_H2D_COMMON_MSGRINGS = 2 and BCMPCIE_COMMON_MSGRINGS = 5, the
300  * FlowId values would be in the range [2..133] and the corresponding
301  * RingId values would be in the range [5..136].
302  *
303  * The flowId allocator may choose to allocate FlowIds:
304  *   bc/mc (per virtual interface) in one consecutive range [2..(2+VIFS))
305  *   X# of uc flowids in consecutive ranges (per station Id), where X is the
306  *   packet's access category (e.g. 4 uc flowids per station).
307  *
308  * CAUTION:
309  * When DMA indices array feature is used, RingId=5, corresponding to the 0th
310  * FLOWRING, will actually use the FlowId as index into the H2D DMA index,
311  * since the FlowId truly represents the index in the H2D DMA indices array.
312  *
313  * Likewise, in the D2H direction, the RingId - BCMPCIE_H2D_COMMON_MSGRINGS,
314  * will represent the index in the D2H DMA indices array.
315  *
316  * +----------------------------------------------------------------------------
317  */
318 
319 /* First TxPost Flowring Id */
320 #define DHD_FLOWRING_START_FLOWID   BCMPCIE_H2D_COMMON_MSGRINGS
321 
322 /* Determine whether a ringid belongs to a TxPost flowring */
323 #define DHD_IS_FLOWRING(ringid, max_flow_rings) \
324 	((ringid) >= BCMPCIE_COMMON_MSGRINGS && \
325 	(ringid) < ((max_flow_rings) + BCMPCIE_COMMON_MSGRINGS))
326 
327 /* Convert a H2D TxPost FlowId to a MsgBuf RingId */
328 #define DHD_FLOWID_TO_RINGID(flowid) \
329 	(BCMPCIE_COMMON_MSGRINGS + ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS))
330 
331 /* Convert a MsgBuf RingId to a H2D TxPost FlowId */
332 #define DHD_RINGID_TO_FLOWID(ringid) \
333 	(BCMPCIE_H2D_COMMON_MSGRINGS + ((ringid) - BCMPCIE_COMMON_MSGRINGS))
334 
335 /* Convert a H2D MsgBuf RingId to an offset index into the H2D DMA indices array
336  * This may be used for the H2D DMA WR index array or H2D DMA RD index array or
337  * any array of H2D rings.
338  */
339 #define DHD_H2D_RING_OFFSET(ringid) \
340 	(((ringid) >= BCMPCIE_COMMON_MSGRINGS) ? DHD_RINGID_TO_FLOWID(ringid) : (ringid))
341 
342 /* Convert a H2D MsgBuf Flowring Id to an offset index into the H2D DMA indices array
343  * This may be used for IFRM.
344  */
345 #define DHD_H2D_FRM_FLOW_RING_OFFSET(ringid) \
346 	((ringid) - BCMPCIE_COMMON_MSGRINGS)
347 
348 /* Convert a D2H MsgBuf RingId to an offset index into the D2H DMA indices array
349  * This may be used for the D2H DMA WR index array or D2H DMA RD index array or
350  * any array of D2H rings.
351  * d2h debug ring is located at the end, i.e. after all the tx flow rings and h2d debug ring
352  * max_h2d_rings: total number of h2d rings
353  */
354 #define DHD_D2H_RING_OFFSET(ringid, max_h2d_rings) \
355 	((ringid) > (max_h2d_rings) ? \
356 		((ringid) - max_h2d_rings) : \
357 		((ringid) - BCMPCIE_H2D_COMMON_MSGRINGS))
358 
359 /* Convert a D2H DMA Indices Offset to a RingId */
360 #define DHD_D2H_RINGID(offset) \
361 	((offset) + BCMPCIE_H2D_COMMON_MSGRINGS)
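
/*
 * Worked example (illustrative, assuming the sample Rev5 values above:
 * BCMPCIE_H2D_COMMON_MSGRINGS = 2 and BCMPCIE_COMMON_MSGRINGS = 5). Compiled
 * out; the helper name and the h2d ring count of 42 are arbitrary.
 */
#if 0
static void
dhd_prot_example_ring_id_math(void)
{
	/* 1st TxPost flowring: FlowId 2 <-> RingId 5 */
	ASSERT(DHD_FLOWID_TO_RINGID(2) == 5);
	ASSERT(DHD_RINGID_TO_FLOWID(5) == 2);
	/* H2D common rings index the H2D DMA indices array by RingId... */
	ASSERT(DHD_H2D_RING_OFFSET(1) == 1);
	/* ...whereas TxPost flowrings index it by FlowId */
	ASSERT(DHD_H2D_RING_OFFSET(5) == 2);
	/* D2H Tx completion ring (RingId 3) maps to D2H indices array slot 1 */
	ASSERT(DHD_D2H_RING_OFFSET(3, 42) == 1);
	ASSERT(DHD_D2H_RINGID(1) == 3);
}
#endif /* 0 */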
362 
363 /* XXX: The ringid and flowid and dma indices array index idiosyncrasy is error
364  * prone. While a simplification is possible, the backward compatibility
365  * requirement (DHD should operate with any PCIE rev version of firmware),
366  * limits what may be accomplished.
367  *
368  * At the minimum, implementation should use macros for any conversions
369  * facilitating introduction of future PCIE FD revs that need more "common" or
370  * other dynamic rings.
371  */
372 
373 /* XXX: Presently there is no need for maintaining both a dmah and a secdmah */
374 #define DHD_DMAH_NULL      ((void*)NULL)
375 
376 /*
377  * Pad a DMA-able buffer by an additional cacheline. If the end of the DMA-able
378  * buffer does not occupy the entire cacheline, and another object is placed
379  * following the DMA-able buffer, data corruption may occur if the DMA-able
380  * buffer is DMAed into (e.g. D2H direction), when HW cache coherency
381  * is not available.
382  */
383 #if defined(L1_CACHE_BYTES)
384 #define DHD_DMA_PAD        (L1_CACHE_BYTES)
385 #else
386 #define DHD_DMA_PAD        (128)
387 #endif
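
/*
 * Example (illustrative): on a host with 64-byte L1 cachelines, a D2H DMA-able
 * buffer is allocated with DHD_DMA_PAD extra bytes so that no unrelated object
 * can land in the cacheline holding the buffer's tail and be corrupted by an
 * incoming D2H DMA.
 */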
388 
389 /*
390  * +----------------------------------------------------------------------------
391  * Flowring Pool
392  *
393  * Unlike common rings, which are attached very early on (dhd_prot_attach),
394  * flowrings are dynamically instantiated. Moreover, flowrings may require a
395  * larger DMA-able buffer. To avoid issues with fragmented cache coherent
396  * DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once.
397  * The DMA-able buffers are attached to these pre-allocated msgbuf_ring.
398  *
399  * Each DMA-able buffer may be allocated independently, or may be carved out
400  * of a single large contiguous region that is registered with the protocol
401  * layer into flowrings_dma_buf. On a 64bit platform, this contiguous region
402  * may not span 0x00000000FFFFFFFF (avoid dongle side 64bit ptr arithmetic).
403  *
404  * No flowring pool action is performed in dhd_prot_attach(), as the number
405  * of h2d rings is not yet known.
406  *
407  * In dhd_prot_init(), the dongle advertized number of h2d rings is used to
408  * determine the number of flowrings required, and a pool of msgbuf_rings are
409  * allocated and a DMA-able buffer (carved or allocated) is attached.
410  * See: dhd_prot_flowrings_pool_attach()
411  *
412  * A flowring msgbuf_ring object may be fetched from this pool during flowring
413  * creation, using the flowid. Likewise, flowrings may be freed back into the
414  * pool on flowring deletion.
415  * See: dhd_prot_flowrings_pool_fetch(), dhd_prot_flowrings_pool_release()
416  *
417  * In dhd_prot_detach(), the flowring pool is detached. The DMA-able buffers
418  * are detached (returned back to the carved region or freed), and the pool of
419  * msgbuf_ring and any objects allocated against it are freed.
420  * See: dhd_prot_flowrings_pool_detach()
421  *
422  * In dhd_prot_reset(), the flowring pool is simply reset by returning it to a
423  * state as-if upon an attach. All DMA-able buffers are retained.
424  * Following a dhd_prot_reset(), in a subsequent dhd_prot_init(), the flowring
425  * pool attach will notice that the pool persists and continue to use it. This
426  * will avoid the case of a fragmented DMA-able region.
427  *
428  * +----------------------------------------------------------------------------
429  */
430 
431 /* Conversion of a flowid to a flowring pool index */
432 #define DHD_FLOWRINGS_POOL_OFFSET(flowid) \
433 	((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS)
434 
435 /* Fetch the msgbuf_ring_t from the flowring pool given a flowid */
436 #define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid) \
437 	(msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + \
438 	    DHD_FLOWRINGS_POOL_OFFSET(flowid)
439 
440 /* Traverse each flowring in the flowring pool, assigning ring and flowid */
441 #define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, total_flowrings) \
442 	for ((flowid) = DHD_FLOWRING_START_FLOWID, \
443 		(ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \
444 		 (flowid) < ((total_flowrings) + DHD_FLOWRING_START_FLOWID); \
445 		 (ring)++, (flowid)++)
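
/*
 * Illustrative usage sketch (compiled out, not part of the driver): walking
 * every flowring in the pre-allocated pool with the iterator above. The helper
 * name and debug print are examples only.
 */
#if 0
static void
dhd_prot_example_walk_flowring_pool(dhd_pub_t *dhd, uint16 total_flowrings)
{
	dhd_prot_t *prot = dhd->prot;
	msgbuf_ring_t *ring;
	uint16 flowid;

	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, total_flowrings) {
		DHD_ERROR(("flowid %u -> ring %s (inited %d)\n",
			flowid, ring->name, ring->inited));
	}
}
#endif /* 0 */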
446 
447 /* Used in loopback tests */
448 typedef struct dhd_dmaxfer {
449 	dhd_dma_buf_t srcmem;
450 	dhd_dma_buf_t dstmem;
451 	uint32        srcdelay;
452 	uint32        destdelay;
453 	uint32        len;
454 	bool          in_progress;
455 	uint64        start_usec;
456 	uint64        time_taken;
457 	uint32        d11_lpbk;
458 	int           status;
459 } dhd_dmaxfer_t;
460 
461 #ifdef DHD_HMAPTEST
462 /* Used in HMAP test */
463 typedef struct dhd_hmaptest {
464 	dhd_dma_buf_t	mem;
465 	uint32		len;
466 	bool	in_progress;
467 	uint32	is_write;
468 	uint32	accesstype;
469 	uint64  start_usec;
470 	uint32	offset;
471 } dhd_hmaptest_t;
472 #endif /* DHD_HMAPTEST */
473 /**
474  * msgbuf_ring : This object manages the host side ring that includes a DMA-able
475  * buffer, the WR and RD indices, ring parameters such as max number of items
476  * and the length of each item, and other miscellaneous runtime state.
477  * A msgbuf_ring may be used to represent a H2D or D2H common ring or a
478  * H2D TxPost ring as specified in the PCIE FullDongle Spec.
479  * Ring parameters are conveyed to the dongle, which maintains its own peer end
480  * ring state. Depending on whether the DMA Indices feature is supported, the
481  * host will update the WR/RD index in the DMA indices array in host memory or
482  * directly in dongle memory.
483  */
484 typedef struct msgbuf_ring {
485 	bool           inited;
486 	uint16         idx;       /* ring id */
487 	uint16         rd;        /* read index */
488 	uint16         curr_rd;   /* read index for debug */
489 	uint16         wr;        /* write index */
490 	uint16         max_items; /* maximum number of items in ring */
491 	uint16         item_len;  /* length of each item in the ring */
492 	sh_addr_t      base_addr; /* LITTLE ENDIAN formatted: base address */
493 	dhd_dma_buf_t  dma_buf;   /* DMA-able buffer: pa, va, len, dmah, secdma */
494 	uint32         seqnum;    /* next expected item's sequence number */
495 #ifdef TXP_FLUSH_NITEMS
496 	void           *start_addr;
497 	/* # of messages on ring not yet announced to dongle */
498 	uint16         pend_items_count;
499 #ifdef AGG_H2D_DB
500 	osl_atomic_t	inflight;
501 #endif /* AGG_H2D_DB */
502 #endif /* TXP_FLUSH_NITEMS */
503 
504 	uint8   ring_type;
505 	uint8   n_completion_ids;
506 	bool    create_pending;
507 	uint16  create_req_id;
508 	uint8   current_phase;
509 	uint16	compeltion_ring_ids[MAX_COMPLETION_RING_IDS_ASSOCIATED];
510 	uchar		name[RING_NAME_MAX_LENGTH];
511 	uint32		ring_mem_allocated;
512 	void	*ring_lock;
513 } msgbuf_ring_t;
514 
515 #define DHD_RING_BGN_VA(ring)           ((ring)->dma_buf.va)
516 #define DHD_RING_END_VA(ring) \
517 	((uint8 *)(DHD_RING_BGN_VA((ring))) + \
518 	 (((ring)->max_items - 1) * (ring)->item_len))
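
/*
 * Illustrative sketch (compiled out): locating the i-th work item in a ring's
 * DMA-able buffer using the fields and macros above. The helper name is an
 * example only.
 */
#if 0
static INLINE void *
dhd_prot_example_item_va(msgbuf_ring_t *ring, uint16 i)
{
	ASSERT(i < ring->max_items);
	return (void *)((uint8 *)DHD_RING_BGN_VA(ring) + ((uint32)i * ring->item_len));
}
#endif /* 0 */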
519 
520 #if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP)
521 #define MAX_IOCTL_TRACE_SIZE    50
522 #define MAX_IOCTL_BUF_SIZE		64
523 typedef struct _dhd_ioctl_trace_t {
524 	uint32	cmd;
525 	uint16	transid;
526 	char	ioctl_buf[MAX_IOCTL_BUF_SIZE];
527 	uint64	timestamp;
528 } dhd_ioctl_trace_t;
529 #endif /* defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) */
530 
531 #ifdef DHD_PKTTS
532 struct pktts_fwtx_v1 {
533 	uint32 ts[PKTTS_MAX_FWTX];
534 };
535 
536 struct pktts_fwtx_v2 {
537 	uint32 ts[PKTTS_MAX_FWTX];
538 	uint32 ut[PKTTS_MAX_UCTX];
539 	uint32 uc[PKTTS_MAX_UCCNT];
540 };
541 
542 static void dhd_msgbuf_send_msg_tx_ts(dhd_pub_t *dhd, void *pkt,
543 	void *fw_ts, uint16 version);
544 static void dhd_msgbuf_send_msg_rx_ts(dhd_pub_t *dhd, void *pkt,
545 	uint fwr1, uint fwr2);
546 #endif /* DHD_PKTTS */
547 
548 #if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
549 /** D2H WLAN Rx Packet Chaining context */
550 typedef struct rxchain_info {
551 	uint		pkt_count;
552 	uint		ifidx;
553 	void		*pkthead;
554 	void		*pkttail;
555 	uint8		*h_da;	/* pointer to da of chain head */
556 	uint8		*h_sa;	/* pointer to sa of chain head */
557 	uint8		h_prio; /* prio of chain head */
558 } rxchain_info_t;
559 #endif /* BCM_ROUTER_DHD && HNDCTF */
560 
561 /* This can be overwritten by module parameter defined in dhd_linux.c
562  * or by dhd iovar h2d_max_txpost.
563  */
564 int h2d_max_txpost = H2DRING_TXPOST_MAX_ITEM;
565 #if defined(DHD_HTPUT_TUNABLES)
566 int h2d_htput_max_txpost = H2DRING_HTPUT_TXPOST_MAX_ITEM;
567 #endif /* DHD_HTPUT_TUNABLES */
568 
569 #ifdef AGG_H2D_DB
570 bool agg_h2d_db_enab = TRUE;
571 
572 #define AGG_H2D_DB_TIMEOUT_USEC		(1000u)	/* 1 msec */
573 uint32 agg_h2d_db_timeout = AGG_H2D_DB_TIMEOUT_USEC;
574 
575 #ifndef AGG_H2D_DB_INFLIGHT_THRESH
576 /* Keep inflight threshold same as txp_threshold */
577 #define AGG_H2D_DB_INFLIGHT_THRESH TXP_FLUSH_MAX_ITEMS_FLUSH_CNT
578 #endif /* !AGG_H2D_DB_INFLIGHT_THRESH */
579 
580 uint32 agg_h2d_db_inflight_thresh = AGG_H2D_DB_INFLIGHT_THRESH;
581 
582 #define DHD_NUM_INFLIGHT_HISTO_ROWS (14u)
583 #define DHD_INFLIGHT_HISTO_SIZE (sizeof(uint64) * DHD_NUM_INFLIGHT_HISTO_ROWS)
584 
585 typedef struct _agg_h2d_db_info {
586 	void *dhd;
587 	struct hrtimer timer;
588 	bool init;
589 	uint32 direct_db_cnt;
590 	uint32 timer_db_cnt;
591 	uint64  *inflight_histo;
592 } agg_h2d_db_info_t;
593 #endif /* AGG_H2D_DB */
594 
595 /** DHD protocol handle. Is an opaque type to other DHD software layers. */
596 typedef struct dhd_prot {
597 	osl_t *osh;		/* OSL handle */
598 	uint16 rxbufpost_sz;
599 	uint16 rxbufpost;
600 	uint16 max_rxbufpost;
601 	uint32 tot_rxbufpost;
602 	uint32 tot_rxcpl;
603 	uint16 max_eventbufpost;
604 	uint16 max_ioctlrespbufpost;
605 	uint16 max_tsbufpost;
606 	uint16 max_infobufpost;
607 	uint16 infobufpost;
608 	uint16 cur_event_bufs_posted;
609 	uint16 cur_ioctlresp_bufs_posted;
610 	uint16 cur_ts_bufs_posted;
611 
612 	/* Flow control mechanism based on active transmits pending */
613 	osl_atomic_t active_tx_count; /* increments/decrements on every packet tx/tx_status */
614 	uint16 h2d_max_txpost;
615 #if defined(DHD_HTPUT_TUNABLES)
616 	uint16 h2d_htput_max_txpost;
617 #endif /* DHD_HTPUT_TUNABLES */
618 	uint16 txp_threshold;  /* optimization to write "n" tx items at a time to ring */
619 
620 	/* MsgBuf Ring info: has a dhd_dma_buf that is dynamically allocated */
621 	msgbuf_ring_t h2dring_ctrl_subn; /* H2D ctrl message submission ring */
622 	msgbuf_ring_t h2dring_rxp_subn; /* H2D RxBuf post ring */
623 	msgbuf_ring_t d2hring_ctrl_cpln; /* D2H ctrl completion ring */
624 	msgbuf_ring_t d2hring_tx_cpln; /* D2H Tx complete message ring */
625 	msgbuf_ring_t d2hring_rx_cpln; /* D2H Rx complete message ring */
626 	msgbuf_ring_t *h2dring_info_subn; /* H2D info submission ring */
627 	msgbuf_ring_t *d2hring_info_cpln; /* D2H info completion ring */
628 	msgbuf_ring_t *d2hring_edl; /* D2H Enhanced Debug Lane (EDL) ring */
629 
630 	msgbuf_ring_t *h2d_flowrings_pool; /* Pool of preallocated flowrings */
631 	dhd_dma_buf_t flowrings_dma_buf; /* Contiguous DMA buffer for flowrings */
632 	uint16        h2d_rings_total; /* total H2D (common rings + flowrings) */
633 
634 	uint32		rx_dataoffset;
635 
636 	dhd_mb_ring_t	mb_ring_fn;	/* called when dongle needs to be notified of new msg */
637 	dhd_mb_ring_2_t	mb_2_ring_fn;	/* called when dongle needs to be notified of new msg */
638 
639 	/* ioctl related resources */
640 	uint8 ioctl_state;
641 	int16 ioctl_status;		/* status returned from dongle */
642 	uint16 ioctl_resplen;
643 	dhd_ioctl_recieved_status_t ioctl_received;
644 	uint curr_ioctl_cmd;
645 	dhd_dma_buf_t	retbuf;		/* For holding ioctl response */
646 	dhd_dma_buf_t	ioctbuf;	/* For holding ioctl request */
647 
648 	dhd_dma_buf_t	d2h_dma_scratch_buf;	/* For holding d2h scratch */
649 
650 	/* DMA-able arrays for holding WR and RD indices */
651 	uint32          rw_index_sz; /* Size of a RD or WR index in dongle */
652 	dhd_dma_buf_t   h2d_dma_indx_wr_buf;	/* Array of H2D WR indices */
653 	dhd_dma_buf_t	h2d_dma_indx_rd_buf;	/* Array of H2D RD indices */
654 	dhd_dma_buf_t	d2h_dma_indx_wr_buf;	/* Array of D2H WR indices */
655 	dhd_dma_buf_t	d2h_dma_indx_rd_buf;	/* Array of D2H RD indices */
656 	dhd_dma_buf_t h2d_ifrm_indx_wr_buf;	/* Array of H2D WR indices for ifrm */
657 
658 	dhd_dma_buf_t	host_bus_throughput_buf; /* bus throughput measure buffer */
659 
660 	dhd_dma_buf_t   *flowring_buf;    /* pool of flow ring buf */
661 #ifdef DHD_DMA_INDICES_SEQNUM
662 	char *h2d_dma_indx_rd_copy_buf; /* Local copy of H2D WR indices array */
663 	char *d2h_dma_indx_wr_copy_buf; /* Local copy of D2H WR indices array */
664 	uint32 h2d_dma_indx_rd_copy_bufsz; /* H2D WR indices array size */
665 	uint32 d2h_dma_indx_wr_copy_bufsz; /* D2H WR indices array size */
666 	uint32 host_seqnum;	/* Sequence number for D2H DMA Indices sync */
667 #endif /* DHD_DMA_INDICES_SEQNUM */
668 	uint32			flowring_num;
669 
670 	d2h_sync_cb_t d2h_sync_cb; /* Sync on D2H DMA done: SEQNUM or XORCSUM */
671 #ifdef EWP_EDL
672 	d2h_edl_sync_cb_t d2h_edl_sync_cb; /* Sync on EDL D2H DMA done: SEQNUM or XORCSUM */
673 #endif /* EWP_EDL */
674 	ulong d2h_sync_wait_max; /* max number of wait loops to receive one msg */
675 	ulong d2h_sync_wait_tot; /* total wait loops */
676 
677 	dhd_dmaxfer_t	dmaxfer; /* for test/DMA loopback */
678 
679 	uint16		ioctl_seq_no;
680 	uint16		data_seq_no;  /* XXX this field is obsolete */
681 	uint16		ioctl_trans_id;
682 	void		*pktid_ctrl_map; /* a pktid maps to a packet and its metadata */
683 	void		*pktid_rx_map;	/* pktid map for rx path */
684 	void		*pktid_tx_map;	/* pktid map for tx path */
685 	bool		metadata_dbg;
686 	void		*pktid_map_handle_ioctl;
687 #ifdef DHD_MAP_PKTID_LOGGING
688 	void		*pktid_dma_map;	/* pktid map for DMA MAP */
689 	void		*pktid_dma_unmap; /* pktid map for DMA UNMAP */
690 #endif /* DHD_MAP_PKTID_LOGGING */
691 	uint32		pktid_depleted_cnt;	/* pktid depleted count */
692 	/* netif tx queue stop count */
693 	uint8		pktid_txq_stop_cnt;
694 	/* netif tx queue start count */
695 	uint8		pktid_txq_start_cnt;
696 	uint64		ioctl_fillup_time;	/* timestamp for ioctl fillup */
697 	uint64		ioctl_ack_time;		/* timestamp for ioctl ack */
698 	uint64		ioctl_cmplt_time;	/* timestamp for ioctl completion */
699 
700 	/* Applications/utilities can read tx and rx metadata using IOVARs */
701 	uint16		rx_metadata_offset;
702 	uint16		tx_metadata_offset;
703 
704 #if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
705 	rxchain_info_t	rxchain;	/* chain of rx packets */
706 #endif
707 
708 #if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
709 	/* Host's soft doorbell configuration */
710 	bcmpcie_soft_doorbell_t soft_doorbell[BCMPCIE_D2H_COMMON_MSGRINGS];
711 #endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
712 
713 	/* Work Queues to be used by the producer and the consumer, and threshold
714 	 * when the WRITE index must be synced to consumer's workq
715 	 */
716 	dhd_dma_buf_t	fw_trap_buf; /* firmware trap buffer */
717 
718 	uint32  host_ipc_version; /* Host supported IPC rev */
719 	uint32  device_ipc_version; /* FW supported IPC rev */
720 	uint32  active_ipc_version; /* Host advertised IPC rev */
721 #if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP)
722 	dhd_ioctl_trace_t	ioctl_trace[MAX_IOCTL_TRACE_SIZE];
723 	uint32				ioctl_trace_count;
724 #endif /* defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) */
725 	dhd_dma_buf_t   hostts_req_buf; /* For holding host timestamp request buf */
726 	bool    hostts_req_buf_inuse;
727 	bool    rx_ts_log_enabled;
728 	bool    tx_ts_log_enabled;
729 #ifdef BTLOG
730 	msgbuf_ring_t *h2dring_btlog_subn; /* H2D btlog submission ring */
731 	msgbuf_ring_t *d2hring_btlog_cpln; /* D2H btlog completion ring */
732 	uint16 btlogbufpost;
733 	uint16 max_btlogbufpost;
734 #endif	/* BTLOG */
735 #ifdef DHD_HMAPTEST
736 	uint32 hmaptest_rx_active;
737 	uint32 hmaptest_rx_pktid;
738 	char *hmap_rx_buf_va;
739 	dmaaddr_t hmap_rx_buf_pa;
740 	uint32 hmap_rx_buf_len;
741 
742 	uint32 hmaptest_tx_active;
743 	uint32 hmaptest_tx_pktid;
744 	char *hmap_tx_buf_va;
745 	dmaaddr_t hmap_tx_buf_pa;
746 	uint32	  hmap_tx_buf_len;
747 	dhd_hmaptest_t	hmaptest; /* for hmaptest */
748 	bool hmap_enabled; /* TRUE = hmap is enabled */
749 #endif /* DHD_HMAPTEST */
750 #ifdef SNAPSHOT_UPLOAD
751 	dhd_dma_buf_t snapshot_upload_buf;	/* snapshot upload buffer */
752 	uint32 snapshot_upload_len;		/* snapshot uploaded len */
753 	uint8 snapshot_type;			/* snapshot upload type */
754 	bool snapshot_cmpl_pending;		/* snapshot completion pending */
755 #endif	/* SNAPSHOT_UPLOAD */
756 	bool no_retry;
757 	bool no_aggr;
758 	bool fixed_rate;
759 	dhd_dma_buf_t	host_scb_buf; /* scb host offload buffer */
760 #ifdef DHD_HP2P
761 	msgbuf_ring_t *d2hring_hp2p_txcpl; /* D2H HPP Tx completion ring */
762 	msgbuf_ring_t *d2hring_hp2p_rxcpl; /* D2H HPP Rx completion ring */
763 #endif /* DHD_HP2P */
764 	bool no_tx_resource;
765 	uint32 txcpl_db_cnt;
766 #ifdef AGG_H2D_DB
767 	agg_h2d_db_info_t agg_h2d_db_info;
768 #endif /* AGG_H2D_DB */
769 	uint64 tx_h2d_db_cnt;
770 } dhd_prot_t;
771 
772 #ifdef DHD_EWPR_VER2
773 #define HANG_INFO_BASE64_BUFFER_SIZE 640
774 #endif
775 
776 #ifdef DHD_DUMP_PCIE_RINGS
777 static
778 int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file,
779 	const void *user_buf, unsigned long *file_posn);
780 #ifdef EWP_EDL
781 static
782 int dhd_edl_ring_hdr_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, const void *user_buf,
783 	unsigned long *file_posn);
784 #endif /* EWP_EDL */
785 #endif /* DHD_DUMP_PCIE_RINGS */
786 extern bool dhd_timesync_delay_post_bufs(dhd_pub_t *dhdp);
787 extern void dhd_schedule_dmaxfer_free(dhd_pub_t* dhdp, dmaxref_mem_map_t *dmmap);
788 /* Convert a dmaaddr_t to a base_addr with htol operations */
789 static INLINE void dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa);
790 
791 /* APIs for managing a DMA-able buffer */
792 static int  dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
793 static void dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
794 
795 /* msgbuf ring management */
796 static int dhd_prot_allocate_bufs(dhd_pub_t *dhd, dhd_prot_t *prot);
797 static int dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring,
798 	const char *name, uint16 max_items, uint16 len_item, uint16 ringid);
799 static void dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring);
800 static void dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring);
801 static void dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring);
802 static void dhd_prot_process_fw_timestamp(dhd_pub_t *dhd, void* buf);
803 
804 /* Pool of pre-allocated msgbuf_ring_t with DMA-able buffers for Flowrings */
805 static int  dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd);
806 static void dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd);
807 static void dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd);
808 
809 /* Fetch and Release a flowring msgbuf_ring from flowring  pool */
810 static msgbuf_ring_t *dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd,
811 	uint16 flowid);
812 /* see also dhd_prot_flowrings_pool_release() in dhd_prot.h */
813 
814 /* Producer: Allocate space in a msgbuf ring */
815 static void* dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring,
816 	uint16 nitems, uint16 *alloced, bool exactly_nitems);
817 static void* dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems,
818 	uint16 *alloced, bool exactly_nitems);
819 
820 /* Consumer: Determine the location where the next message may be consumed */
821 static uint8* dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring,
822 	uint32 *available_len);
823 
824 /* Producer (WR index update) or Consumer (RD index update) indication */
825 static void dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t *ring,
826 	void *p, uint16 len);
827 
828 #ifdef AGG_H2D_DB
829 static void dhd_prot_agg_db_ring_write(dhd_pub_t *dhd, msgbuf_ring_t * ring,
830 		void* p, uint16 len);
831 static void dhd_prot_aggregate_db_ring_door_bell(dhd_pub_t *dhd, uint16 flowid, bool ring_db);
832 static void dhd_prot_txdata_aggr_db_write_flush(dhd_pub_t *dhd, uint16 flowid);
833 #endif /* AGG_H2D_DB */
834 static void dhd_prot_ring_doorbell(dhd_pub_t *dhd, uint32 value);
835 static void dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t *ring);
836 
837 static INLINE int dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
838 	dhd_dma_buf_t *dma_buf, uint32 bufsz);
839 
840 /* Set/Get a RD or WR index in the array of indices */
841 /* See also: dhd_prot_dma_indx_init() */
842 void dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type,
843 	uint16 ringid);
844 static uint16 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid);
845 
846 /* Locate a packet given a pktid */
847 static INLINE void *dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype,
848 	bool free_pktid);
849 /* Locate a packet given a PktId and free it. */
850 static INLINE void dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send);
851 
852 static int dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
853 	void *buf, uint len, uint8 action);
854 static int dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
855 	void *buf, uint len, uint8 action);
856 static int dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf);
857 static int dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd,
858 	void *buf, int ifidx);
859 
860 /* Post buffers for Rx, control ioctl response and events */
861 static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msgid, uint32 max_to_post);
862 static void dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *pub);
863 static void dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *pub);
864 static void dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid);
865 static int dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid);
866 static int dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *pub);
867 
868 static void dhd_prot_return_rxbuf(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 pktid, uint32 rxcnt);
869 
870 #if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP)
871 static void dhd_prot_ioctl_trace(dhd_pub_t *dhd, ioctl_req_msg_t *ioct_rqst, uchar *buf, int len);
872 static void dhd_prot_ioctl_dump(dhd_prot_t *prot, struct bcmstrbuf *strbuf);
873 #endif /* defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) */
874 
875 /* D2H Message handling */
876 static int dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len);
877 
878 /* D2H Message handlers */
879 static void dhd_prot_noop(dhd_pub_t *dhd, void *msg);
880 static void dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg);
881 static void dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg);
882 static void dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg);
883 static void dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg);
884 static void dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg);
885 static void dhd_prot_event_process(dhd_pub_t *dhd, void *msg);
886 
887 /* Loopback test with dongle */
888 static void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dma);
889 static int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, uint srcdelay,
890 	uint destdelay, dhd_dmaxfer_t *dma);
891 static void dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg);
892 
893 /* Flowring management communication with dongle */
894 static void dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg);
895 static void dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg);
896 static void dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg);
897 static void dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg);
898 static void dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg);
899 
900 /* Monitor Mode */
901 #ifdef WL_MONITOR
902 extern bool dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx);
903 extern void dhd_rx_mon_pkt(dhd_pub_t *dhdp, host_rxbuf_cmpl_t* msg, void *pkt, int ifidx);
904 #endif /* WL_MONITOR */
905 
906 /* Configure a soft doorbell per D2H ring */
907 static void dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd);
908 static void dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg);
909 static void dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf);
910 #if !defined(BCM_ROUTER_DHD)
911 static void dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf);
912 static void dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf);
913 #endif /* !BCM_ROUTER_DHD */
914 static void dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf);
915 static void dhd_prot_detach_info_rings(dhd_pub_t *dhd);
916 #ifdef BTLOG
917 static void dhd_prot_process_btlog_complete(dhd_pub_t *dhd, void* buf);
918 static void dhd_prot_detach_btlog_rings(dhd_pub_t *dhd);
919 #endif	/* BTLOG */
920 #ifdef DHD_HP2P
921 static void dhd_prot_detach_hp2p_rings(dhd_pub_t *dhd);
922 #endif /* DHD_HP2P */
923 #ifdef EWP_EDL
924 static void dhd_prot_detach_edl_rings(dhd_pub_t *dhd);
925 #endif
926 static void dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf);
927 static void dhd_prot_process_snapshot_complete(dhd_pub_t *dhd, void *buf);
928 
929 #ifdef DHD_TIMESYNC
930 extern void dhd_parse_proto(uint8 *pktdata, dhd_pkt_parse_t *parse);
931 #endif
932 
933 #ifdef DHD_FLOW_RING_STATUS_TRACE
934 void dhd_dump_bus_flow_ring_status_isr_trace(dhd_bus_t *bus, struct bcmstrbuf *strbuf);
935 void dhd_dump_bus_flow_ring_status_dpc_trace(dhd_bus_t *bus, struct bcmstrbuf *strbuf);
936 #endif /* DHD_FLOW_RING_STATUS_TRACE */
937 
938 #ifdef DHD_TX_PROFILE
939 extern bool dhd_protocol_matches_profile(uint8 *p, int plen, const
940 		dhd_tx_profile_protocol_t *proto, bool is_host_sfhllc);
941 #endif /* defined(DHD_TX_PROFILE) */
942 
943 #ifdef DHD_HP2P
944 static void dhd_update_hp2p_rxstats(dhd_pub_t *dhd, host_rxbuf_cmpl_t *rxstatus);
945 static void dhd_update_hp2p_txstats(dhd_pub_t *dhd, host_txbuf_cmpl_t *txstatus);
946 static void dhd_calc_hp2p_burst(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 flowid);
947 static void dhd_update_hp2p_txdesc(dhd_pub_t *dhd, host_txbuf_post_t *txdesc);
948 #endif
949 typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void *msg);
950 
951 /** callback functions for messages generated by the dongle */
952 #define MSG_TYPE_INVALID 0
953 
954 static dhd_msgbuf_func_t table_lookup[DHD_PROT_FUNCS] = {
955 	dhd_prot_noop, /* 0 is MSG_TYPE_INVALID */
956 	dhd_prot_genstatus_process, /* MSG_TYPE_GEN_STATUS */
957 	dhd_prot_ringstatus_process, /* MSG_TYPE_RING_STATUS */
958 	NULL,
959 	dhd_prot_flow_ring_create_response_process, /* MSG_TYPE_FLOW_RING_CREATE_CMPLT */
960 	NULL,
961 	dhd_prot_flow_ring_delete_response_process, /* MSG_TYPE_FLOW_RING_DELETE_CMPLT */
962 	NULL,
963 	dhd_prot_flow_ring_flush_response_process, /* MSG_TYPE_FLOW_RING_FLUSH_CMPLT */
964 	NULL,
965 	dhd_prot_ioctack_process, /* MSG_TYPE_IOCTLPTR_REQ_ACK */
966 	NULL,
967 	dhd_prot_ioctcmplt_process, /* MSG_TYPE_IOCTL_CMPLT */
968 	NULL,
969 	dhd_prot_event_process, /* MSG_TYPE_WL_EVENT */
970 	NULL,
971 	dhd_prot_txstatus_process, /* MSG_TYPE_TX_STATUS */
972 	NULL,
973 	NULL,	/* MSG_TYPE_RX_CMPLT use dedicated handler */
974 	NULL,
975 	dhd_msgbuf_dmaxfer_process, /* MSG_TYPE_LPBK_DMAXFER_CMPLT */
976 	NULL, /* MSG_TYPE_FLOW_RING_RESUME */
977 	dhd_prot_process_flow_ring_resume_response, /* MSG_TYPE_FLOW_RING_RESUME_CMPLT */
978 	NULL, /* MSG_TYPE_FLOW_RING_SUSPEND */
979 	dhd_prot_process_flow_ring_suspend_response, /* MSG_TYPE_FLOW_RING_SUSPEND_CMPLT */
980 	NULL, /* MSG_TYPE_INFO_BUF_POST */
981 #if defined(BCM_ROUTER_DHD)
982 	NULL, /* MSG_TYPE_INFO_BUF_CMPLT */
983 #else
984 	dhd_prot_process_infobuf_complete, /* MSG_TYPE_INFO_BUF_CMPLT */
985 #endif /* BCM_ROUTER_DHD */
986 	NULL, /* MSG_TYPE_H2D_RING_CREATE */
987 	NULL, /* MSG_TYPE_D2H_RING_CREATE */
988 #if defined(BCM_ROUTER_DHD)
989 	NULL, /* MSG_TYPE_H2D_RING_CREATE_CMPLT */
990 #else
991 	dhd_prot_process_h2d_ring_create_complete, /* MSG_TYPE_H2D_RING_CREATE_CMPLT */
992 #endif /* BCM_ROUTER_DHD */
993 	dhd_prot_process_d2h_ring_create_complete, /* MSG_TYPE_D2H_RING_CREATE_CMPLT */
994 	NULL, /* MSG_TYPE_H2D_RING_CONFIG */
995 	NULL, /* MSG_TYPE_D2H_RING_CONFIG */
996 	NULL, /* MSG_TYPE_H2D_RING_CONFIG_CMPLT */
997 	dhd_prot_process_d2h_ring_config_complete, /* MSG_TYPE_D2H_RING_CONFIG_CMPLT */
998 	NULL, /* MSG_TYPE_H2D_MAILBOX_DATA */
999 	dhd_prot_process_d2h_mb_data, /* MSG_TYPE_D2H_MAILBOX_DATA */
1000 	NULL,	/* MSG_TYPE_TIMSTAMP_BUFPOST */
1001 	NULL,	/* MSG_TYPE_HOSTTIMSTAMP */
1002 	dhd_prot_process_d2h_host_ts_complete,	/* MSG_TYPE_HOSTTIMSTAMP_CMPLT */
1003 	dhd_prot_process_fw_timestamp,	/* MSG_TYPE_FIRMWARE_TIMESTAMP */
1004 	NULL,	/* MSG_TYPE_SNAPSHOT_UPLOAD */
1005 	dhd_prot_process_snapshot_complete,	/* MSG_TYPE_SNAPSHOT_CMPLT */
1006 };
1007 
1008 #if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
1009 /* Related to router CPU mapping per radio core */
1010 #define DHD_RX_CHAINING
1011 #endif /* BCM_ROUTER_DHD && HNDCTF */
1012 
1013 #ifdef DHD_RX_CHAINING
1014 
1015 #define PKT_CTF_CHAINABLE(dhd, ifidx, evh, prio, h_sa, h_da, h_prio) \
1016 	(dhd_wet_chainable(dhd) && \
1017 	dhd_rx_pkt_chainable((dhd), (ifidx)) && \
1018 	!ETHER_ISNULLDEST(((struct ether_header *)(evh))->ether_dhost) && \
1019 	!ETHER_ISMULTI(((struct ether_header *)(evh))->ether_dhost) && \
1020 	!eacmp((h_da), ((struct ether_header *)(evh))->ether_dhost) && \
1021 	!eacmp((h_sa), ((struct ether_header *)(evh))->ether_shost) && \
1022 	((h_prio) == (prio)) && (dhd_ctf_hotbrc_check((dhd), (evh), (ifidx))) && \
1023 	((((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IP)) || \
1024 	(((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IPV6))))
1025 
1026 static INLINE void dhd_rxchain_reset(rxchain_info_t *rxchain);
1027 static void dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx);
1028 static void dhd_rxchain_commit(dhd_pub_t *dhd);
1029 
1030 #define DHD_PKT_CTF_MAX_CHAIN_LEN	64
1031 
1032 #endif /* DHD_RX_CHAINING */
1033 
1034 #ifdef DHD_EFI
1035 #define DHD_LPBKDTDUMP_ON()	(1)
1036 #else
1037 #define DHD_LPBKDTDUMP_ON()	(dhd_msg_level & DHD_LPBKDTDUMP_VAL)
1038 #endif
1039 
1040 static void dhd_prot_h2d_sync_init(dhd_pub_t *dhd);
1041 
1042 #ifdef D2H_MINIDUMP
1043 dhd_dma_buf_t *
1044 dhd_prot_get_minidump_buf(dhd_pub_t *dhd)
1045 {
1046 	return &dhd->prot->fw_trap_buf;
1047 }
1048 #endif /* D2H_MINIDUMP */
1049 
1050 uint16
1051 dhd_prot_get_rxbufpost_sz(dhd_pub_t *dhd)
1052 {
1053 	return dhd->prot->rxbufpost_sz;
1054 }
1055 
1056 uint16
1057 dhd_prot_get_h2d_rx_post_active(dhd_pub_t *dhd)
1058 {
1059 	dhd_prot_t *prot = dhd->prot;
1060 	msgbuf_ring_t *flow_ring = &prot->h2dring_rxp_subn;
1061 	uint16 rd, wr;
1062 
1063 	/* Since wr is owned by host in h2d direction, directly read wr */
1064 	wr = flow_ring->wr;
1065 
1066 	if (dhd->dma_d2h_ring_upd_support) {
1067 		rd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, flow_ring->idx);
1068 	} else {
1069 		dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
1070 	}
1071 	return NTXPACTIVE(rd, wr, flow_ring->max_items);
1072 }
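
/*
 * Example (illustrative numbers): with max_items = 256, rd = 250 and wr = 10,
 * NTXPACTIVE() accounts for the index wrap and reports 16 rx buffers still
 * posted to the dongle but not yet completed.
 */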
1073 
1074 uint16
1075 dhd_prot_get_d2h_rx_cpln_active(dhd_pub_t *dhd)
1076 {
1077 	dhd_prot_t *prot = dhd->prot;
1078 	msgbuf_ring_t *flow_ring = &prot->d2hring_rx_cpln;
1079 	uint16 rd, wr;
1080 
1081 	if (dhd->dma_d2h_ring_upd_support) {
1082 		wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, flow_ring->idx);
1083 	} else {
1084 		dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
1085 	}
1086 
1087 	/* Since rd is owned by host in d2h direction, directly read rd */
1088 	rd = flow_ring->rd;
1089 
1090 	return NTXPACTIVE(rd, wr, flow_ring->max_items);
1091 }
1092 
1093 bool
1094 dhd_prot_is_cmpl_ring_empty(dhd_pub_t *dhd, void *prot_info)
1095 {
1096 	msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)prot_info;
1097 	uint16 rd, wr;
1098 	bool ret;
1099 
1100 	if (dhd->dma_d2h_ring_upd_support) {
1101 		wr = flow_ring->wr;
1102 	} else {
1103 		dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
1104 	}
1105 	if (dhd->dma_h2d_ring_upd_support) {
1106 		rd = flow_ring->rd;
1107 	} else {
1108 		dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
1109 	}
1110 	ret = (wr == rd) ? TRUE : FALSE;
1111 	return ret;
1112 }
1113 
1114 void
1115 dhd_prot_dump_ring_ptrs(void *prot_info)
1116 {
1117 	msgbuf_ring_t *ring = (msgbuf_ring_t *)prot_info;
1118 	DHD_ERROR(("%s curr_rd: %d rd: %d wr: %d \n", __FUNCTION__,
1119 		ring->curr_rd, ring->rd, ring->wr));
1120 }
1121 
1122 uint16
1123 dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd)
1124 {
1125 	return (uint16)h2d_max_txpost;
1126 }
1127 void
1128 dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost)
1129 {
1130 	h2d_max_txpost = max_txpost;
1131 }
1132 #if defined(DHD_HTPUT_TUNABLES)
1133 uint16
1134 dhd_prot_get_h2d_htput_max_txpost(dhd_pub_t *dhd)
1135 {
1136 	return (uint16)h2d_htput_max_txpost;
1137 }
1138 void
1139 dhd_prot_set_h2d_htput_max_txpost(dhd_pub_t *dhd, uint16 htput_max_txpost)
1140 {
1141 	h2d_htput_max_txpost = htput_max_txpost;
1142 }
1143 
1144 #endif /* DHD_HTPUT_TUNABLES */
1145 /**
1146  * D2H DMA to completion callback handlers. Based on the mode advertised by the
1147  * dongle through the PCIE shared region, the appropriate callback will be
1148  * registered in the proto layer to be invoked prior to processing any message
1149  * from a D2H DMA ring. If the dongle uses a read barrier or another mode that
1150  * does not require host participation, then a noop callback handler will be
1151  * bound that simply returns the msg_type.
1152  */
1153 static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring,
1154                                        uint32 tries, volatile uchar *msg, int msglen);
1155 static uint8 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
1156                                       volatile cmn_msg_hdr_t *msg, int msglen);
1157 static uint8 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
1158                                        volatile cmn_msg_hdr_t *msg, int msglen);
1159 static uint8 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
1160                                     volatile cmn_msg_hdr_t *msg, int msglen);
1161 static void dhd_prot_d2h_sync_init(dhd_pub_t *dhd);
1162 static int dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
1163 	uint16 ring_type, uint32 id);
1164 static int dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
1165 	uint8 type, uint32 id);
1166 
1167 /**
1168  * dhd_prot_d2h_sync_livelock - when the host determines that a DMA transfer has
1169  * not completed, a livelock condition occurs. Host will avert this livelock by
1170  * dropping this message and moving to the next. This dropped message can lead
1171  * to a packet leak, or even something disastrous in the case the dropped
1172  * message happens to be a control response.
1173  * Here we will log this condition. One may choose to reboot the dongle.
1174  *
1175  */
1176 static void
1177 dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring, uint32 tries,
1178                            volatile uchar *msg, int msglen)
1179 {
1180 	uint32 ring_seqnum = ring->seqnum;
1181 
1182 	if (dhd_query_bus_erros(dhd)) {
1183 		return;
1184 	}
1185 
1186 	DHD_ERROR((
1187 		"LIVELOCK DHD<%p> ring<%s> msg_seqnum<%u> ring_seqnum<%u:%u> tries<%u> max<%lu>"
1188 		" tot<%lu> dma_buf va<%p> msg<%p> curr_rd<%d> rd<%d> wr<%d>\n",
1189 		dhd, ring->name, msg_seqnum, ring_seqnum, ring_seqnum% D2H_EPOCH_MODULO, tries,
1190 		dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot,
1191 		ring->dma_buf.va, msg, ring->curr_rd, ring->rd, ring->wr));
1192 
1193 	dhd_prhex("D2H MsgBuf Failure", msg, msglen, DHD_ERROR_VAL);
1194 
1195 	/* Try to resume if already suspended or suspend in progress */
1196 #ifdef DHD_PCIE_RUNTIMEPM
1197 	dhdpcie_runtime_bus_wake(dhd, CAN_SLEEP(), __builtin_return_address(0));
1198 #endif /* DHD_PCIE_RUNTIMEPM */
1199 
1200 	/* Skip if still in suspended or suspend in progress */
1201 	if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd)) {
1202 		DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
1203 			__FUNCTION__, dhd->busstate, dhd->dhd_bus_busy_state));
1204 		goto exit;
1205 	}
1206 
1207 	dhd_bus_dump_console_buffer(dhd->bus);
1208 	dhd_prot_debug_info_print(dhd);
1209 
1210 #ifdef DHD_FW_COREDUMP
1211 	if (dhd->memdump_enabled) {
1212 		/* collect core dump */
1213 		dhd->memdump_type = DUMP_TYPE_BY_LIVELOCK;
1214 		dhd_bus_mem_dump(dhd);
1215 	}
1216 #endif /* DHD_FW_COREDUMP */
1217 
1218 exit:
1219 	dhd_schedule_reset(dhd);
1220 
1221 #ifdef OEM_ANDROID
1222 #ifdef SUPPORT_LINKDOWN_RECOVERY
1223 #ifdef CONFIG_ARCH_MSM
1224 	dhd->bus->no_cfg_restore = 1;
1225 #endif /* CONFIG_ARCH_MSM */
1226 	/* XXX Trigger HANG event for recovery */
1227 	dhd->hang_reason = HANG_REASON_MSGBUF_LIVELOCK;
1228 	dhd_os_send_hang_message(dhd);
1229 #endif /* SUPPORT_LINKDOWN_RECOVERY */
1230 #endif /* OEM_ANDROID */
1231 	dhd->livelock_occured = TRUE;
1232 }
1233 
1234 /**
1235  * dhd_prot_d2h_sync_seqnum - Sync on a D2H DMA completion using the SEQNUM
1236  * mode. Sequence number is always in the last word of a message.
1237  */
1238 static uint8
1239 BCMFASTPATH(dhd_prot_d2h_sync_seqnum)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
1240                          volatile cmn_msg_hdr_t *msg, int msglen)
1241 {
1242 	uint32 tries;
1243 	uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
1244 	int num_words = msglen / sizeof(uint32); /* num of 32bit words */
1245 	volatile uint32 *marker = (volatile uint32 *)msg + (num_words - 1); /* last word */
1246 	dhd_prot_t *prot = dhd->prot;
1247 	uint32 msg_seqnum;
1248 	uint32 step = 0;
1249 	uint32 delay = PCIE_D2H_SYNC_DELAY;
1250 	uint32 total_tries = 0;
1251 
1252 	ASSERT(msglen == ring->item_len);
1253 
1254 	BCM_REFERENCE(delay);
1255 	/*
1256 	 * For retries we have to make some sort of stepper algorithm.
1257 	 * We see that every time when the Dongle comes out of the D3
1258 	 * Cold state, the first D2H mem2mem DMA takes more time to
1259 	 * complete, leading to livelock issues.
1260 	 *
1261 	 * Case 1 - Apart from Host CPU some other bus master is
1262 	 * accessing the DDR port, probably a page close to the ring,
1263 	 * so PCIE does not get a chance to update the memory.
1264 	 * Solution - Increase the number of tries.
1265 	 *
1266 	 * Case 2 - The 50usec delay given by the Host CPU is not
1267 	 * sufficient for the PCIe RC to start its work.
1268 	 * In this case the breathing time of 50usec given by
1269 	 * the Host CPU is not sufficient.
1270 	 * Solution: Increase the delay in a stepper fashion.
1271 	 * This is done to ensure that there are no
1272 	 * unwanted extra delays introduced in normal conditions.
1273 	 */
1274 	for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
1275 		for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
1276 			msg_seqnum = *marker;
1277 			if (ltoh32(msg_seqnum) == ring_seqnum) { /* dma upto last word done */
1278 				ring->seqnum++; /* next expected sequence number */
1279 				/* Check for LIVELOCK induce flag, which is set by firing
1280 				 * dhd iovar to induce LIVELOCK error. If flag is set,
1281 				 * MSG_TYPE_INVALID is returned, which results in a LIVELOCK error.
1282 				 */
1283 				if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
1284 					goto dma_completed;
1285 				}
1286 			}
1287 
1288 			total_tries = (uint32)(((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries);
1289 
1290 			if (total_tries > prot->d2h_sync_wait_max)
1291 				prot->d2h_sync_wait_max = total_tries;
1292 
1293 			OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
1294 			OSL_CPU_RELAX(); /* CPU relax for msg_seqnum  value to update */
1295 			OSL_DELAY(delay * step); /* Add stepper delay */
1296 
1297 		} /* for PCIE_D2H_SYNC_WAIT_TRIES */
1298 	} /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
1299 
1300 	dhd_prot_d2h_sync_livelock(dhd, msg_seqnum, ring, total_tries,
1301 		(volatile uchar *) msg, msglen);
1302 
1303 	ring->seqnum++; /* skip this message ... leak of a pktid */
1304 	return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
1305 
1306 dma_completed:
1307 
1308 	prot->d2h_sync_wait_tot += tries;
1309 	return msg->msg_type;
1310 }
1311 
1312 /**
1313  * dhd_prot_d2h_sync_xorcsum - Sync on a D2H DMA completion using the XORCSUM
1314  * mode. The xorcsum is placed in the last word of a message. Dongle will also
1315  * place a seqnum in the epoch field of the cmn_msg_hdr.
1316  */
1317 static uint8
1318 BCMFASTPATH(dhd_prot_d2h_sync_xorcsum)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
1319                           volatile cmn_msg_hdr_t *msg, int msglen)
1320 {
1321 	uint32 tries;
1322 	uint32 prot_checksum = 0; /* computed checksum */
1323 	int num_words = msglen / sizeof(uint32); /* num of 32bit words */
1324 	uint8 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
1325 	dhd_prot_t *prot = dhd->prot;
1326 	uint32 step = 0;
1327 	uint32 delay = PCIE_D2H_SYNC_DELAY;
1328 	uint32 total_tries = 0;
1329 
1330 	ASSERT(msglen == ring->item_len);
1331 
1332 	BCM_REFERENCE(delay);
1333 	/*
1334 	 * For retries we have to make some sort of stepper algorithm.
1335 	 * We see that every time when the Dongle comes out of the D3
1336 	 * Cold state, the first D2H mem2mem DMA takes more time to
1337 	 * complete, leading to livelock issues.
1338 	 *
1339 	 * Case 1 - Apart from Host CPU some other bus master is
1340 	 * accessing the DDR port, probably a page close to the ring,
1341 	 * so PCIE does not get a chance to update the memory.
1342 	 * Solution - Increase the number of tries.
1343 	 *
1344 	 * Case 2 - The 50usec delay given by the Host CPU is not
1345 	 * sufficient for the PCIe RC to start its work.
1346 	 * In this case the breathing time of 50usec given by
1347 	 * the Host CPU is not sufficient.
1348 	 * Solution: Increase the delay in a stepper fashion.
1349 	 * This is done to ensure that there are no
1350 	 * unwanted extra delays introduced in normal conditions.
1351 	 */
1352 	for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
1353 		for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
1354 			/* First verify whether the sequence number has been updated;
1355 			 * only then check the xorcsum.
1356 			 * Once both the seqnum and the xorcsum are correct, the
1357 			 * complete message has arrived.
1358 			 */
1359 			if (msg->epoch == ring_seqnum) {
1360 				prot_checksum = bcm_compute_xor32((volatile uint32 *)msg,
1361 					num_words);
1362 				if (prot_checksum == 0U) { /* checksum is OK */
1363 					ring->seqnum++; /* next expected sequence number */
1364 					/* Check for LIVELOCK induce flag, which is set by firing
1365 					 * dhd iovar to induce LIVELOCK error. If flag is set,
1366 					 * MSG_TYPE_INVALID is returned, which results in a
1367 					 * LIVELOCK error.
1368 					 */
1369 					if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
1370 						goto dma_completed;
1371 					}
1372 				}
1373 			}
1374 
1375 			total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
1376 
1377 			if (total_tries > prot->d2h_sync_wait_max)
1378 				prot->d2h_sync_wait_max = total_tries;
1379 
1380 			OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
1381 			OSL_CPU_RELAX(); /* CPU relax for msg_seqnum  value to update */
1382 			OSL_DELAY(delay * step); /* Add stepper delay */
1383 
1384 		} /* for PCIE_D2H_SYNC_WAIT_TRIES */
1385 	} /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
1386 
1387 	DHD_ERROR(("%s: prot_checksum = 0x%x\n", __FUNCTION__, prot_checksum));
1388 	dhd_prot_d2h_sync_livelock(dhd, msg->epoch, ring, total_tries,
1389 		(volatile uchar *) msg, msglen);
1390 
1391 	ring->seqnum++; /* skip this message ... leak of a pktid */
1392 	return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
1393 
1394 dma_completed:
1395 
1396 	prot->d2h_sync_wait_tot += tries;
1397 	return msg->msg_type;
1398 }
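
/*
 * Illustrative sketch (not a helper used by this file): the XORCSUM check in
 * dhd_prot_d2h_sync_xorcsum() works because the dongle writes the running
 * 32-bit XOR of all preceding words of the work item into the item's last
 * word, so XOR-ing every word of the item, last word included, yields 0 once
 * the complete item has been DMA'd. A minimal host-side equivalent:
 *
 *	static uint32 xor32_check(const volatile uint32 *item, int num_words)
 *	{
 *		uint32 x = 0;
 *		while (num_words-- > 0)
 *			x ^= *item++;
 *		return x;	(0 means the checksum is OK and the item fully arrived)
 *	}
 */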
1399 
1400 /**
1401  * dhd_prot_d2h_sync_none - Dongle ensures that the DMA will complete and the
1402  * host need not try to sync. This noop sync handler will be bound when the dongle
1403  * advertises that neither the SEQNUM nor XORCSUM mode of DMA sync is required.
1404  */
1405 static uint8
1406 BCMFASTPATH(dhd_prot_d2h_sync_none)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
1407                        volatile cmn_msg_hdr_t *msg, int msglen)
1408 {
1409 	/* Check for LIVELOCK induce flag, which is set by firing
1410 	* dhd iovar to induce LIVELOCK error. If flag is set,
1411 	* MSG_TYPE_INVALID is returned, which results in a LIVELOCK error.
1412 	*/
1413 	if (dhd->dhd_induce_error == DHD_INDUCE_LIVELOCK) {
1414 		DHD_ERROR(("%s: Inducing livelock\n", __FUNCTION__));
1415 		return MSG_TYPE_INVALID;
1416 	} else {
1417 		return msg->msg_type;
1418 	}
1419 }
1420 
1421 #ifdef EWP_EDL
1422 /**
1423  * dhd_prot_d2h_sync_edl - Sync on a D2H DMA completion by validating the cmn_msg_hdr_t
1424  * header values at both the beginning and end of the payload.
1425  * The cmn_msg_hdr_t is placed at the start and end of the payload
1426  * in each work item in the EDL ring.
1427  * Dongle will place a seqnum inside the cmn_msg_hdr_t 'epoch' field
1428  * and the length of the payload in the 'request_id' field.
1429  * Structure of each work item in the EDL ring:
1430  * | cmn_msg_hdr_t | payload (var len) | cmn_msg_hdr_t |
1431  * NOTE: - it was felt that calculating xorcsum for the entire payload (max length of 1648 bytes) is
1432  * too costly on the dongle side and might take up too many ARM cycles,
1433  * hence the xorcsum sync method is not being used for EDL ring.
1434  */
1435 static int
1436 BCMFASTPATH(dhd_prot_d2h_sync_edl)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
1437                           volatile cmn_msg_hdr_t *msg)
1438 {
1439 	uint32 tries;
1440 	int msglen = 0, len = 0;
1441 	uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
1442 	dhd_prot_t *prot = dhd->prot;
1443 	uint32 step = 0;
1444 	uint32 delay = PCIE_D2H_SYNC_DELAY;
1445 	uint32 total_tries = 0;
1446 	volatile cmn_msg_hdr_t *trailer = NULL;
1447 	volatile uint8 *buf = NULL;
1448 	bool valid_msg = FALSE;
1449 
1450 	BCM_REFERENCE(delay);
1451 	/*
1452 	 * For retries we have to make some sort of stepper algorithm.
1453 	 * We see that every time when the Dongle comes out of the D3
1454 	 * Cold state, the first D2H mem2mem DMA takes more time to
1455 	 * complete, leading to livelock issues.
1456 	 *
1457 	 * Case 1 - Apart from Host CPU some other bus master is
1458 	 * accessing the DDR port, probably a page close near the ring,
1459 	 * so PCIe does not get a chance to update the memory.
1460 	 * Solution - Increase the number of tries.
1461 	 *
1462 	 * Case 2 - The 50usec delay given by the Host CPU is not
1463 	 * sufficient for the PCIe RC to start its work.
1464 	 * In this case the breathing time of 50usec given by
1465 	 * the Host CPU is not sufficient.
1466 	 * Solution: Increase the delay in a stepper fashion.
1467 	 * This is done to ensure that there are no
1468 	 * unwanted extra delays introduced in normal conditions.
1469 	 */
1470 	for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
1471 		for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
1472 			/* First verify if the seqnumber has been updated,
1473 			 * if yes, only then validate the header and trailer.
1474 			 * Once seqnum, header and trailer have been validated, it means
1475 			 * that the complete message has arrived.
1476 			 */
1477 			valid_msg = FALSE;
1478 			if (msg->epoch == ring_seqnum &&
1479 				msg->msg_type == MSG_TYPE_INFO_PYLD &&
1480 				msg->request_id > 0 &&
1481 				msg->request_id <= ring->item_len) {
1482 				/* proceed to check trailer only if header is valid */
1483 				buf = (volatile uint8 *)msg;
1484 				msglen = sizeof(cmn_msg_hdr_t) + msg->request_id;
1485 				buf += msglen;
1486 				if (msglen + sizeof(cmn_msg_hdr_t) <= ring->item_len) {
1487 					trailer = (volatile cmn_msg_hdr_t *)buf;
1488 					valid_msg = (trailer->epoch == ring_seqnum) &&
1489 						(trailer->msg_type == msg->msg_type) &&
1490 						(trailer->request_id == msg->request_id);
1491 					if (!valid_msg) {
1492 						DHD_TRACE(("%s:invalid trailer! seqnum=%u;reqid=%u"
1493 						" expected, seqnum=%u; reqid=%u. Retrying... \n",
1494 						__FUNCTION__, trailer->epoch, trailer->request_id,
1495 						msg->epoch, msg->request_id));
1496 					}
1497 				} else {
1498 					DHD_TRACE(("%s: invalid payload length (%u)! Retrying.. \n",
1499 						__FUNCTION__, msg->request_id));
1500 				}
1501 
1502 				if (valid_msg) {
1503 					/* data is OK */
1504 					ring->seqnum++; /* next expected sequence number */
1505 					if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
1506 						goto dma_completed;
1507 					}
1508 				}
1509 			} else {
1510 				DHD_TRACE(("%s: wrong hdr, seqnum expected %u, got %u."
1511 					" msg_type=0x%x, request_id=%u."
1512 					" Retrying...\n",
1513 					__FUNCTION__, ring_seqnum, msg->epoch,
1514 					msg->msg_type, msg->request_id));
1515 			}
1516 
1517 			total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
1518 
1519 			if (total_tries > prot->d2h_sync_wait_max)
1520 				prot->d2h_sync_wait_max = total_tries;
1521 
1522 			OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
1523 #if !(defined(BCM_ROUTER_DHD) && defined(BCM_GMAC3))
1524 			OSL_CPU_RELAX(); /* CPU relax for msg_seqnum  value to update */
1525 			OSL_DELAY(delay * step); /* Add stepper delay */
1526 #endif /* !(defined(BCM_ROUTER_DHD) && defined(BCM_GMAC3)) */
1527 
1528 		} /* for PCIE_D2H_SYNC_WAIT_TRIES */
1529 	} /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
1530 
1531 	DHD_ERROR(("%s: EDL header check fails !\n", __FUNCTION__));
1532 	DHD_ERROR(("%s: header: seqnum=%u; expected-seqnum=%u"
1533 		" msgtype=0x%x; expected-msgtype=0x%x"
1534 		" length=%u; expected-max-length=%u", __FUNCTION__,
1535 		msg->epoch, ring_seqnum, msg->msg_type, MSG_TYPE_INFO_PYLD,
1536 		msg->request_id, ring->item_len));
1537 	dhd_prhex("msg header bytes: ", (volatile uchar *)msg, sizeof(*msg), DHD_ERROR_VAL);
1538 	if (trailer && msglen > 0 &&
1539 			(msglen + sizeof(cmn_msg_hdr_t)) <= ring->item_len) {
1540 		DHD_ERROR(("%s: trailer: seqnum=%u; expected-seqnum=%u"
1541 			" msgtype=0x%x; expected-msgtype=0x%x"
1542 			" length=%u; expected-length=%u", __FUNCTION__,
1543 			trailer->epoch, ring_seqnum, trailer->msg_type, MSG_TYPE_INFO_PYLD,
1544 			trailer->request_id, msg->request_id));
1545 		dhd_prhex("msg trailer bytes: ", (volatile uchar *)trailer,
1546 			sizeof(*trailer), DHD_ERROR_VAL);
1547 	}
1548 
1549 	if ((msglen + sizeof(cmn_msg_hdr_t)) <= ring->item_len)
1550 		len = msglen + sizeof(cmn_msg_hdr_t);
1551 	else
1552 		len = ring->item_len;
1553 
1554 	dhd_prot_d2h_sync_livelock(dhd, msg->epoch, ring, total_tries,
1555 		(volatile uchar *) msg, len);
1556 
1557 	ring->seqnum++; /* skip this message */
1558 	return BCME_ERROR; /* invalid msg_type 0 -> noop callback */
1559 
1560 dma_completed:
1561 	DHD_TRACE(("%s: EDL header check pass, seqnum=%u; reqid=%u\n", __FUNCTION__,
1562 		msg->epoch, msg->request_id));
1563 
1564 	prot->d2h_sync_wait_tot += tries;
1565 	return BCME_OK;
1566 }
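
/*
 * Illustrative layout of one EDL work item as validated by
 * dhd_prot_d2h_sync_edl() above (request_id carries the payload length):
 *
 *	offset 0                                       : cmn_msg_hdr_t (header, epoch == seqnum)
 *	offset sizeof(cmn_msg_hdr_t)                   : payload of msg->request_id bytes
 *	offset sizeof(cmn_msg_hdr_t) + msg->request_id : cmn_msg_hdr_t (trailer)
 *
 * so the trailer used in the check is located at:
 *
 *	trailer = (volatile cmn_msg_hdr_t *)
 *		((volatile uint8 *)msg + sizeof(cmn_msg_hdr_t) + msg->request_id);
 */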
1567 
1568 /**
1569  * dhd_prot_d2h_sync_edl_none - Dongle ensures that the DMA will complete and the
1570  * host need not try to sync. This noop sync handler will be bound when the dongle
1571  * advertises that neither the SEQNUM nor XORCSUM mode of DMA sync is required.
1572  */
1573 static int BCMFASTPATH
1574 (dhd_prot_d2h_sync_edl_none)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
1575                        volatile cmn_msg_hdr_t *msg)
1576 {
1577 	/* Check for LIVELOCK induce flag, which is set by firing
1578 	* dhd iovar to induce LIVELOCK error. If flag is set,
1579 	* MSG_TYPE_INVALID is returned, which results in a LIVELOCK error.
1580 	*/
1581 	if (dhd->dhd_induce_error == DHD_INDUCE_LIVELOCK) {
1582 		DHD_ERROR(("%s: Inducing livelock\n", __FUNCTION__));
1583 		return BCME_ERROR;
1584 	} else {
1585 		if (msg->msg_type == MSG_TYPE_INFO_PYLD)
1586 			return BCME_OK;
1587 		else
1588 			return msg->msg_type;
1589 	}
1590 }
1591 #endif /* EWP_EDL */
1592 
1593 INLINE void
1594 dhd_wakeup_ioctl_event(dhd_pub_t *dhd, dhd_ioctl_recieved_status_t reason)
1595 {
1596 	/* To synchronize with the previous memory operations call wmb() */
1597 	OSL_SMP_WMB();
1598 	dhd->prot->ioctl_received = reason;
1599 	/* Call another wmb() to make sure the event value is updated before waking up the waiter */
1600 	OSL_SMP_WMB();
1601 	dhd_os_ioctl_resp_wake(dhd);
1602 }
1603 
1604 /**
1605  * dhd_prot_d2h_sync_init - Set up the host side DMA sync mode based on what
1606  * the dongle advertises.
1607  */
1608 static void
1609 dhd_prot_d2h_sync_init(dhd_pub_t *dhd)
1610 {
1611 	dhd_prot_t *prot = dhd->prot;
1612 	prot->d2h_sync_wait_max = 0UL;
1613 	prot->d2h_sync_wait_tot = 0UL;
1614 
1615 	prot->d2hring_ctrl_cpln.seqnum = D2H_EPOCH_INIT_VAL;
1616 	prot->d2hring_ctrl_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
1617 
1618 	prot->d2hring_tx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
1619 	prot->d2hring_tx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
1620 
1621 	prot->d2hring_rx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
1622 	prot->d2hring_rx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
1623 
1624 	if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) {
1625 		prot->d2h_sync_cb = dhd_prot_d2h_sync_seqnum;
1626 #ifdef EWP_EDL
1627 		prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl;
1628 #endif /* EWP_EDL */
1629 		DHD_ERROR(("%s(): D2H sync mechanism is SEQNUM \r\n", __FUNCTION__));
1630 	} else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) {
1631 		prot->d2h_sync_cb = dhd_prot_d2h_sync_xorcsum;
1632 #ifdef EWP_EDL
1633 		prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl;
1634 #endif /* EWP_EDL */
1635 		DHD_ERROR(("%s(): D2H sync mechanism is XORCSUM \r\n", __FUNCTION__));
1636 	} else {
1637 		prot->d2h_sync_cb = dhd_prot_d2h_sync_none;
1638 #ifdef EWP_EDL
1639 		prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl_none;
1640 #endif /* EWP_EDL */
1641 		DHD_ERROR(("%s(): D2H sync mechanism is NONE \r\n", __FUNCTION__));
1642 	}
1643 }
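
/*
 * Hedged usage sketch: once bound here, the D2H message processing path
 * (elsewhere in this file) is expected to invoke the selected callback before
 * trusting a freshly DMA'd work item, roughly along the lines of:
 *
 *	msg_type = dhd->prot->d2h_sync_cb(dhd, ring, msg, ring->item_len);
 *	if (msg_type == MSG_TYPE_INVALID)
 *		... skip the work item (livelock / induced-error path) ...
 */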
1644 
1645 /**
1646  * dhd_prot_h2d_sync_init - Per H2D common ring, setup the msgbuf ring seqnum
1647  */
1648 static void
1649 dhd_prot_h2d_sync_init(dhd_pub_t *dhd)
1650 {
1651 	dhd_prot_t *prot = dhd->prot;
1652 	prot->h2dring_rxp_subn.seqnum = H2D_EPOCH_INIT_VAL;
1653 
1654 	prot->h2dring_rxp_subn.current_phase = 0;
1655 
1656 	prot->h2dring_ctrl_subn.seqnum = H2D_EPOCH_INIT_VAL;
1657 	prot->h2dring_ctrl_subn.current_phase = 0;
1658 }
1659 
1660 /* +-----------------  End of PCIE DHD H2D DMA SYNC ------------------------+ */
1661 
1662 /*
1663  * +---------------------------------------------------------------------------+
1664  * PCIE DMA-able buffer. Sets up a dhd_dma_buf_t object, which includes the
1665  * virtual and physical address, the buffer length and the DMA handler.
1666  * A secdma handler is also included in the dhd_dma_buf object.
1667  * +---------------------------------------------------------------------------+
1668  */
1669 
1670 static INLINE void
1671 dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa)
1672 {
1673 	base_addr->low_addr = htol32(PHYSADDRLO(pa));
1674 	base_addr->high_addr = htol32(PHYSADDRHI(pa));
1675 }
1676 
1677 /**
1678  * dhd_dma_buf_audit - Any audits on a DHD DMA Buffer.
1679  */
1680 static int
1681 dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
1682 {
1683 	uint32 pa_lowaddr, end; /* dongle uses 32bit ptr arithmetic */
1684 	ASSERT(dma_buf);
1685 	pa_lowaddr = PHYSADDRLO(dma_buf->pa);
1686 	ASSERT(PHYSADDRLO(dma_buf->pa) || PHYSADDRHI(dma_buf->pa));
1687 	ASSERT(ISALIGNED(pa_lowaddr, DMA_ALIGN_LEN));
1688 	ASSERT(dma_buf->len != 0);
1689 
1690 	/* test 32bit offset arithmetic over dma buffer for loss of carry-over */
1691 	end = (pa_lowaddr + dma_buf->len); /* end address */
1692 
1693 	if ((end & 0xFFFFFFFF) < (pa_lowaddr & 0xFFFFFFFF)) { /* exclude carryover */
1694 		DHD_ERROR(("%s: dma_buf %x len %d spans dongle 32bit ptr arithmetic\n",
1695 			__FUNCTION__, pa_lowaddr, dma_buf->len));
1696 		return BCME_ERROR;
1697 	}
1698 
1699 	return BCME_OK;
1700 }
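
/*
 * Worked example of the carry check above (illustrative values): with
 * pa_lowaddr = 0xFFFFF000 and len = 0x2000, end wraps to 0x00001000, which is
 * smaller than pa_lowaddr; dongle-side 32bit pointer arithmetic over such a
 * buffer would lose the carry, so the buffer is rejected with BCME_ERROR.
 */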
1701 
1702 /**
1703  * dhd_dma_buf_alloc - Allocate a cache coherent DMA-able buffer.
1704  * returns BCME_OK=0 on success
1705  * returns non-zero negative error value on failure.
1706  */
1707 int
1708 dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len)
1709 {
1710 	uint32 dma_pad = 0;
1711 	osl_t *osh = dhd->osh;
1712 	uint16 dma_align = DMA_ALIGN_LEN;
1713 	uint32 rem = 0;
1714 
1715 	ASSERT(dma_buf != NULL);
1716 	ASSERT(dma_buf->va == NULL);
1717 	ASSERT(dma_buf->len == 0);
1718 
1719 	/* Pad the buffer length to align to cacheline size. */
1720 	rem = (buf_len % DHD_DMA_PAD);
1721 	dma_pad = rem ? (DHD_DMA_PAD - rem) : 0;
1722 
1723 	dma_buf->va = DMA_ALLOC_CONSISTENT(osh, buf_len + dma_pad,
1724 		dma_align, &dma_buf->_alloced, &dma_buf->pa, &dma_buf->dmah);
1725 
1726 	if (dma_buf->va == NULL) {
1727 		DHD_ERROR(("%s: buf_len %d, no memory available\n",
1728 			__FUNCTION__, buf_len));
1729 		return BCME_NOMEM;
1730 	}
1731 
1732 	dma_buf->len = buf_len; /* not including padded len */
1733 
1734 	if (dhd_dma_buf_audit(dhd, dma_buf) != BCME_OK) { /* audit dma buf */
1735 		dhd_dma_buf_free(dhd, dma_buf);
1736 		return BCME_ERROR;
1737 	}
1738 
1739 	dhd_dma_buf_reset(dhd, dma_buf); /* zero out and cache flush */
1740 
1741 	return BCME_OK;
1742 }
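
/*
 * Typical lifecycle sketch for a dhd_dma_buf_t using the helpers in this
 * section (error handling abbreviated; 4096 is just an example length):
 *
 *	dhd_dma_buf_t buf;
 *	memset(&buf, 0, sizeof(buf));
 *	if (dhd_dma_buf_alloc(dhd, &buf, 4096) != BCME_OK)
 *		return BCME_NOMEM;
 *	... program buf.pa into the dongle, access buf.va on the host ...
 *	dhd_dma_buf_free(dhd, &buf);
 */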
1743 
1744 /**
1745  * dhd_dma_buf_reset - Reset a cache coherent DMA-able buffer.
1746  */
1747 static void
1748 dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
1749 {
1750 	if ((dma_buf == NULL) || (dma_buf->va == NULL))
1751 		return;
1752 
1753 	(void)dhd_dma_buf_audit(dhd, dma_buf);
1754 
1755 	/* Zero out the entire buffer and cache flush */
1756 	memset((void*)dma_buf->va, 0, dma_buf->len);
1757 	OSL_CACHE_FLUSH((void *)dma_buf->va, dma_buf->len);
1758 }
1759 
1760 void
1761 dhd_local_buf_reset(char *buf, uint32 len)
1762 {
1763 	/* Zero out the entire buffer and cache flush */
1764 	memset((void*)buf, 0, len);
1765 	OSL_CACHE_FLUSH((void *)buf, len);
1766 }
1767 
1768 /**
1769  * dhd_dma_buf_free - Free a DMA-able buffer that was previously allocated using
1770  * dhd_dma_buf_alloc().
1771  */
1772 void
1773 dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
1774 {
1775 	osl_t *osh = dhd->osh;
1776 
1777 	ASSERT(dma_buf);
1778 
1779 	if (dma_buf->va == NULL)
1780 		return; /* Allow for free invocation, when alloc failed */
1781 
1782 	/* DEBUG: dhd_dma_buf_reset(dhd, dma_buf) */
1783 	(void)dhd_dma_buf_audit(dhd, dma_buf);
1784 
1785 	/* dma buffer may have been padded at allocation */
1786 	DMA_FREE_CONSISTENT(osh, dma_buf->va, dma_buf->_alloced,
1787 		dma_buf->pa, dma_buf->dmah);
1788 
1789 	memset(dma_buf, 0, sizeof(dhd_dma_buf_t));
1790 }
1791 
1792 /**
1793  * dhd_dma_buf_init - Initialize a dhd_dma_buf with specified values.
1794  * Do not use dhd_dma_buf_init to zero out a dhd_dma_buf_t object. Use memset 0.
1795  */
1796 void
1797 dhd_dma_buf_init(dhd_pub_t *dhd, void *dhd_dma_buf,
1798 	void *va, uint32 len, dmaaddr_t pa, void *dmah, void *secdma)
1799 {
1800 	dhd_dma_buf_t *dma_buf;
1801 	ASSERT(dhd_dma_buf);
1802 	dma_buf = (dhd_dma_buf_t *)dhd_dma_buf;
1803 	dma_buf->va = va;
1804 	dma_buf->len = len;
1805 	dma_buf->pa = pa;
1806 	dma_buf->dmah = dmah;
1807 	dma_buf->secdma = secdma;
1808 
1809 	/* Audit user defined configuration */
1810 	(void)dhd_dma_buf_audit(dhd, dma_buf);
1811 }
1812 
1813 /* +------------------  End of PCIE DHD DMA BUF ADT ------------------------+ */
1814 
1815 /*
1816  * +---------------------------------------------------------------------------+
1817  * DHD_MAP_PKTID_LOGGING
1818  * Logs PKTID and DMA map/unmap information for debugging SMMU fault
1819  * issues on customer platforms.
1820  * +---------------------------------------------------------------------------+
1821  */
1822 
1823 #ifdef DHD_MAP_PKTID_LOGGING
1824 typedef struct dhd_pktid_log_item {
1825 	dmaaddr_t pa;		/* DMA bus address */
1826 	uint64 ts_nsec;		/* Timestamp: nsec */
1827 	uint32 size;		/* DMA map/unmap size */
1828 	uint32 pktid;		/* Packet ID */
1829 	uint8 pkttype;		/* Packet Type */
1830 	uint8 rsvd[7];		/* Reserved for future use */
1831 } dhd_pktid_log_item_t;
1832 
1833 typedef struct dhd_pktid_log {
1834 	uint32 items;		/* number of total items */
1835 	uint32 index;		/* index of pktid_log_item */
1836 	dhd_pktid_log_item_t map[0];	/* metadata storage */
1837 } dhd_pktid_log_t;
1838 
1839 typedef void * dhd_pktid_log_handle_t; /* opaque handle to pktid log */
1840 
1841 #define	MAX_PKTID_LOG				(2048)
1842 #define DHD_PKTID_LOG_ITEM_SZ			(sizeof(dhd_pktid_log_item_t))
1843 #define DHD_PKTID_LOG_SZ(items)			(uint32)((sizeof(dhd_pktid_log_t)) + \
1844 					((DHD_PKTID_LOG_ITEM_SZ) * (items)))
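
/*
 * Sizing sketch: DHD_PKTID_LOG_SZ(MAX_PKTID_LOG) is the fixed dhd_pktid_log_t
 * header plus one dhd_pktid_log_item_t per entry, i.e.
 * sizeof(dhd_pktid_log_t) + 2048 * sizeof(dhd_pktid_log_item_t); the exact
 * byte count depends on the platform's dmaaddr_t width and struct padding.
 */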
1845 
1846 #define DHD_PKTID_LOG_INIT(dhd, hdl)		dhd_pktid_logging_init((dhd), (hdl))
1847 #define DHD_PKTID_LOG_FINI(dhd, hdl)		dhd_pktid_logging_fini((dhd), (hdl))
1848 #define DHD_PKTID_LOG(dhd, hdl, pa, pktid, len, pkttype)	\
1849 	dhd_pktid_logging((dhd), (hdl), (pa), (pktid), (len), (pkttype))
1850 #define DHD_PKTID_LOG_DUMP(dhd)			dhd_pktid_logging_dump((dhd))
1851 
1852 static dhd_pktid_log_handle_t *
1853 dhd_pktid_logging_init(dhd_pub_t *dhd, uint32 num_items)
1854 {
1855 	dhd_pktid_log_t *log;
1856 	uint32 log_size;
1857 
1858 	log_size = DHD_PKTID_LOG_SZ(num_items);
1859 	log = (dhd_pktid_log_t *)MALLOCZ(dhd->osh, log_size);
1860 	if (log == NULL) {
1861 		DHD_ERROR(("%s: MALLOC failed for size %d\n",
1862 			__FUNCTION__, log_size));
1863 		return (dhd_pktid_log_handle_t *)NULL;
1864 	}
1865 
1866 	log->items = num_items;
1867 	log->index = 0;
1868 
1869 	return (dhd_pktid_log_handle_t *)log; /* opaque handle */
1870 }
1871 
1872 static void
1873 dhd_pktid_logging_fini(dhd_pub_t *dhd, dhd_pktid_log_handle_t *handle)
1874 {
1875 	dhd_pktid_log_t *log;
1876 	uint32 log_size;
1877 
1878 	if (handle == NULL) {
1879 		DHD_ERROR(("%s: handle is NULL\n", __FUNCTION__));
1880 		return;
1881 	}
1882 
1883 	log = (dhd_pktid_log_t *)handle;
1884 	log_size = DHD_PKTID_LOG_SZ(log->items);
1885 	MFREE(dhd->osh, handle, log_size);
1886 }
1887 
1888 static void
1889 dhd_pktid_logging(dhd_pub_t *dhd, dhd_pktid_log_handle_t *handle, dmaaddr_t pa,
1890 	uint32 pktid, uint32 len, uint8 pkttype)
1891 {
1892 	dhd_pktid_log_t *log;
1893 	uint32 idx;
1894 
1895 	if (handle == NULL) {
1896 		DHD_ERROR(("%s: handle is NULL\n", __FUNCTION__));
1897 		return;
1898 	}
1899 
1900 	log = (dhd_pktid_log_t *)handle;
1901 	idx = log->index;
1902 	log->map[idx].ts_nsec = OSL_LOCALTIME_NS();
1903 	log->map[idx].pa = pa;
1904 	log->map[idx].pktid = pktid;
1905 	log->map[idx].size = len;
1906 	log->map[idx].pkttype = pkttype;
1907 	log->index = (idx + 1) % (log->items);	/* update index */
1908 }
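
/*
 * Note: log->index wraps modulo log->items, so the log behaves as a circular
 * buffer and the newest entry overwrites the oldest once it is full; with
 * MAX_PKTID_LOG = 2048 a dump therefore covers the most recent 2048 map (or
 * unmap) operations.
 */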
1909 
1910 void
1911 dhd_pktid_logging_dump(dhd_pub_t *dhd)
1912 {
1913 	dhd_prot_t *prot = dhd->prot;
1914 	dhd_pktid_log_t *map_log, *unmap_log;
1915 	uint64 ts_sec, ts_usec;
1916 
1917 	if (prot == NULL) {
1918 		DHD_ERROR(("%s: prot is NULL\n", __FUNCTION__));
1919 		return;
1920 	}
1921 
1922 	map_log = (dhd_pktid_log_t *)(prot->pktid_dma_map);
1923 	unmap_log = (dhd_pktid_log_t *)(prot->pktid_dma_unmap);
1924 	OSL_GET_LOCALTIME(&ts_sec, &ts_usec);
1925 	if (map_log && unmap_log) {
1926 		DHD_ERROR(("%s: map_idx=%d unmap_idx=%d "
1927 			"current time=[%5lu.%06lu]\n", __FUNCTION__,
1928 			map_log->index, unmap_log->index,
1929 			(unsigned long)ts_sec, (unsigned long)ts_usec));
1930 		DHD_ERROR(("%s: pktid_map_log(pa)=0x%llx size=%d, "
1931 			"pktid_unmap_log(pa)=0x%llx size=%d\n", __FUNCTION__,
1932 			(uint64)__virt_to_phys((ulong)(map_log->map)),
1933 			(uint32)(DHD_PKTID_LOG_ITEM_SZ * map_log->items),
1934 			(uint64)__virt_to_phys((ulong)(unmap_log->map)),
1935 			(uint32)(DHD_PKTID_LOG_ITEM_SZ * unmap_log->items)));
1936 	}
1937 }
1938 #endif /* DHD_MAP_PKTID_LOGGING */
1939 
1940 /* +-----------------  End of DHD_MAP_PKTID_LOGGING -----------------------+ */
1941 
1942 /*
1943  * +---------------------------------------------------------------------------+
1944  * PktId Map: Provides a native packet pointer to unique 32bit PktId mapping.
1945  * Its main purpose is to save memory on the dongle; it has other purposes as well.
1946  * The packet id map also includes storage for some packet parameters that
1947  * may be saved. A native packet pointer along with the parameters may be saved
1948  * and a unique 32bit pkt id will be returned. Later, the saved packet pointer
1949  * and the metadata may be retrieved using the previously allocated packet id.
1950  * +---------------------------------------------------------------------------+
1951  */
1952 #define DHD_PCIE_PKTID
1953 
1954 /* On Router, the pktptr serves as a pktid. */
1955 #if defined(BCM_ROUTER_DHD) && !defined(BCA_HNDROUTER)
1956 #undef DHD_PCIE_PKTID		/* Comment this undef, to reenable PKTIDMAP */
1957 #endif /* BCM_ROUTER_DHD && !BCA_HNDROUTER */
1958 
1959 #if defined(BCM_ROUTER_DHD) && defined(DHD_PCIE_PKTID)
1960 #undef MAX_TX_PKTID
1961 #define MAX_TX_PKTID     ((36 * 1024) - 1) /* Extend for 64 clients support. */
1962 #endif /* BCM_ROUTER_DHD && DHD_PCIE_PKTID */
1963 
1964 /* XXX: PROP_TXSTATUS: WLFS defines a private pkttag layout.
1965  * Hence cannot store the dma parameters in the pkttag and the pktidmap locker
1966  * is required.
1967  */
1968 #if defined(PROP_TXSTATUS) && !defined(DHD_PCIE_PKTID)
1969 #error "PKTIDMAP must be supported with PROP_TXSTATUS/WLFC"
1970 #endif
1971 
1972 /* Enum for marking the buffer color based on usage */
1973 typedef enum dhd_pkttype {
1974 	PKTTYPE_DATA_TX = 0,
1975 	PKTTYPE_DATA_RX,
1976 	PKTTYPE_IOCTL_RX,
1977 	PKTTYPE_EVENT_RX,
1978 	PKTTYPE_INFO_RX,
1979 	/* dhd_prot_pkt_free no check, if pktid reserved and no space avail case */
1980 	PKTTYPE_NO_CHECK,
1981 	PKTTYPE_TSBUF_RX
1982 } dhd_pkttype_t;
1983 
1984 #define DHD_PKTID_MIN_AVAIL_COUNT		512U
1985 #define DHD_PKTID_DEPLETED_MAX_COUNT		(DHD_PKTID_MIN_AVAIL_COUNT * 2U)
1986 #define DHD_PKTID_INVALID			(0U)
1987 #define DHD_IOCTL_REQ_PKTID			(0xFFFE)
1988 #define DHD_FAKE_PKTID				(0xFACE)
1989 #define DHD_H2D_DBGRING_REQ_PKTID		0xFFFD
1990 #define DHD_D2H_DBGRING_REQ_PKTID		0xFFFC
1991 #define DHD_H2D_HOSTTS_REQ_PKTID		0xFFFB
1992 #define DHD_H2D_BTLOGRING_REQ_PKTID		0xFFFA
1993 #define DHD_D2H_BTLOGRING_REQ_PKTID		0xFFF9
1994 #define DHD_H2D_SNAPSHOT_UPLOAD_REQ_PKTID	0xFFF8
1995 #ifdef DHD_HP2P
1996 #define DHD_D2H_HPPRING_TXREQ_PKTID		0xFFF7
1997 #define DHD_D2H_HPPRING_RXREQ_PKTID		0xFFF6
1998 #endif /* DHD_HP2P */
1999 
2000 #define IS_FLOWRING(ring) \
2001 	((strncmp(ring->name, "h2dflr", sizeof("h2dflr"))) == (0))
2002 
2003 typedef void * dhd_pktid_map_handle_t; /* opaque handle to a pktid map */
2004 
2005 /* Construct a packet id mapping table, returning an opaque map handle */
2006 static dhd_pktid_map_handle_t *dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items);
2007 
2008 /* Destroy a packet id mapping table, freeing all packets active in the table */
2009 static void dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map);
2010 
2011 #define DHD_NATIVE_TO_PKTID_INIT(dhd, items) dhd_pktid_map_init((dhd), (items))
2012 #define DHD_NATIVE_TO_PKTID_RESET(dhd, map)  dhd_pktid_map_reset((dhd), (map))
2013 #define DHD_NATIVE_TO_PKTID_FINI(dhd, map)   dhd_pktid_map_fini((dhd), (map))
2014 #define DHD_NATIVE_TO_PKTID_FINI_IOCTL(osh, map)  dhd_pktid_map_fini_ioctl((osh), (map))
2015 
2016 #if defined(DHD_PCIE_PKTID)
2017 #if defined(NDIS) || defined(DHD_EFI)
2018 /* XXX: for NDIS, using consistent memory instead of buffer from PKTGET for
2019  * up to 8K ioctl response
2020  */
2021 #define IOCTLRESP_USE_CONSTMEM
2022 static void free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf);
2023 static int  alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf);
2024 #endif /* NDIS || DHD_EFI */
2025 
2026 /* Determine number of pktids that are available */
2027 static INLINE uint32 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle);
2028 
2029 /* Allocate a unique pktid against which a pkt and some metadata is saved */
2030 static INLINE uint32 dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
2031 	void *pkt, dhd_pkttype_t pkttype);
2032 static INLINE void dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
2033 	void *pkt, uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dma,
2034 	void *dmah, void *secdma, dhd_pkttype_t pkttype);
2035 static uint32 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
2036 	void *pkt, dmaaddr_t pa, uint32 len, uint8 dma,
2037 	void *dmah, void *secdma, dhd_pkttype_t pkttype);
2038 
2039 /* Return an allocated pktid, retrieving previously saved pkt and metadata */
2040 static void *dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
2041 	uint32 id, dmaaddr_t *pa, uint32 *len, void **dmah,
2042 	void **secdma, dhd_pkttype_t pkttype, bool rsv_locker);
2043 
2044 #ifdef DHD_PKTTS
2045 /* Store the Metadata buffer to the locker */
2046 static INLINE void
2047 dhd_pktid_map_save_metadata(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *mpkt,
2048 	dmaaddr_t mpkt_pa,
2049 	uint16	mpkt_len,
2050 	void *dmah,
2051 	uint32 nkey);
2052 
2053 /* Return the Metadata buffer from the locker */
2054 static void * dhd_pktid_map_retreive_metadata(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
2055 	dmaaddr_t *pmpkt_pa, uint32 *pmpkt_len, void **pdmah, uint32 nkey);
2056 #endif /* DHD_PKTTS */
2057 
2058 /*
2059  * DHD_PKTID_AUDIT_ENABLED: Audit of PktIds in DHD for duplicate alloc and frees
2060  *
2061  * DHD_PKTID_AUDIT_MAP: Audit the LIFO or FIFO PktIdMap allocator
2062  * DHD_PKTID_AUDIT_RING: Audit the pktid during producer/consumer ring operation
2063  *
2064  * CAUTION: When DHD_PKTID_AUDIT_ENABLED is defined,
2065  *    either DHD_PKTID_AUDIT_MAP or DHD_PKTID_AUDIT_RING may be selected.
2066  */
2067 #if defined(DHD_PKTID_AUDIT_ENABLED)
2068 #define USE_DHD_PKTID_AUDIT_LOCK 1
2069 /* Audit the pktidmap allocator */
2070 /* #define DHD_PKTID_AUDIT_MAP */
2071 
2072 /* Audit the pktid during production/consumption of workitems */
2073 #define DHD_PKTID_AUDIT_RING
2074 
2075 #if defined(DHD_PKTID_AUDIT_MAP) && defined(DHD_PKTID_AUDIT_RING)
2076 #error "Only one of MAP or RING audit may be enabled at a time."
2077 #endif /* DHD_PKTID_AUDIT_MAP && DHD_PKTID_AUDIT_RING */
2078 
2079 #define DHD_DUPLICATE_ALLOC     1
2080 #define DHD_DUPLICATE_FREE      2
2081 #define DHD_TEST_IS_ALLOC       3
2082 #define DHD_TEST_IS_FREE        4
2083 
2084 typedef enum dhd_pktid_map_type {
2085 	DHD_PKTID_MAP_TYPE_CTRL = 1,
2086 	DHD_PKTID_MAP_TYPE_TX,
2087 	DHD_PKTID_MAP_TYPE_RX,
2088 	DHD_PKTID_MAP_TYPE_UNKNOWN
2089 } dhd_pktid_map_type_t;
2090 
2091 #ifdef USE_DHD_PKTID_AUDIT_LOCK
2092 #define DHD_PKTID_AUDIT_LOCK_INIT(osh)          osl_spin_lock_init(osh)
2093 #define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock)  osl_spin_lock_deinit(osh, lock)
2094 #define DHD_PKTID_AUDIT_LOCK(lock)              osl_spin_lock(lock)
2095 #define DHD_PKTID_AUDIT_UNLOCK(lock, flags)     osl_spin_unlock(lock, flags)
2096 #else
2097 #define DHD_PKTID_AUDIT_LOCK_INIT(osh)          (void *)(1)
2098 #define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock)  do { /* noop */ } while (0)
2099 #define DHD_PKTID_AUDIT_LOCK(lock)              0
2100 #define DHD_PKTID_AUDIT_UNLOCK(lock, flags)     do { /* noop */ } while (0)
2101 #endif /* !USE_DHD_PKTID_AUDIT_LOCK */
2102 
2103 #endif /* DHD_PKTID_AUDIT_ENABLED */
2104 
2105 #define USE_DHD_PKTID_LOCK   1
2106 
2107 #ifdef USE_DHD_PKTID_LOCK
2108 #define DHD_PKTID_LOCK_INIT(osh)                osl_spin_lock_init(osh)
2109 #define DHD_PKTID_LOCK_DEINIT(osh, lock)        osl_spin_lock_deinit(osh, lock)
2110 #define DHD_PKTID_LOCK(lock, flags)             (flags) = osl_spin_lock(lock)
2111 #define DHD_PKTID_UNLOCK(lock, flags)           osl_spin_unlock(lock, flags)
2112 #else
2113 #define DHD_PKTID_LOCK_INIT(osh)                (void *)(1)
2114 #define DHD_PKTID_LOCK_DEINIT(osh, lock)	\
2115 	do { \
2116 		BCM_REFERENCE(osh); \
2117 		BCM_REFERENCE(lock); \
2118 	} while (0)
2119 #define DHD_PKTID_LOCK(lock)                    0
2120 #define DHD_PKTID_UNLOCK(lock, flags)           \
2121 	do { \
2122 		BCM_REFERENCE(lock); \
2123 		BCM_REFERENCE(flags); \
2124 	} while (0)
2125 #endif /* !USE_DHD_PKTID_LOCK */
2126 
2127 typedef enum dhd_locker_state {
2128 	LOCKER_IS_FREE,
2129 	LOCKER_IS_BUSY,
2130 	LOCKER_IS_RSVD
2131 } dhd_locker_state_t;
2132 
2133 /* Packet metadata saved in packet id mapper */
2134 
2135 typedef struct dhd_pktid_item {
2136 	dhd_locker_state_t state;  /* tag a locker to be free, busy or reserved */
2137 	uint8       dir;      /* dma map direction (Tx=flush or Rx=invalidate) */
2138 	dhd_pkttype_t pkttype; /* pktlists are maintained based on pkttype */
2139 	uint16      len;      /* length of mapped packet's buffer */
2140 	void        *pkt;     /* opaque native pointer to a packet */
2141 	dmaaddr_t   pa;       /* physical address of mapped packet's buffer */
2142 	void        *dmah;    /* handle to OS specific DMA map */
2143 	void		*secdma;
2144 #ifdef DHD_PKTTS
2145 	void		*mpkt;    /* VA of Metadata */
2146 	dmaaddr_t	mpkt_pa;  /* PA of Metadata */
2147 	uint16		mpkt_len; /* Length of Metadata */
2148 #endif /* DHD_PKTTS */
2149 } dhd_pktid_item_t;
2150 
2151 typedef uint32 dhd_pktid_key_t;
2152 
2153 typedef struct dhd_pktid_map {
2154 	uint32      items;    /* total items in map */
2155 	uint32      avail;    /* total available items */
2156 	int         failures; /* lockers unavailable count */
2157 	/* Spinlock to protect dhd_pktid_map in process/tasklet context */
2158 	void        *pktid_lock; /* Used when USE_DHD_PKTID_LOCK is defined */
2159 
2160 #if defined(DHD_PKTID_AUDIT_ENABLED)
2161 	void		*pktid_audit_lock;
2162 	struct bcm_mwbmap *pktid_audit; /* multi word bitmap based audit */
2163 #endif /* DHD_PKTID_AUDIT_ENABLED */
2164 	dhd_pktid_key_t	*keys; /* map_items +1 unique pkt ids */
2165 	dhd_pktid_item_t lockers[0];           /* metadata storage */
2166 } dhd_pktid_map_t;
2167 
2168 /*
2169  * PktId (Locker) #0 is never allocated and is considered invalid.
2170  *
2171  * On request for a pktid, a value DHD_PKTID_INVALID must be treated as a
2172  * depleted pktid pool and must not be used by the caller.
2173  *
2174  * Likewise, a caller must never free a pktid of value DHD_PKTID_INVALID.
2175  */
2176 
2177 #define DHD_PKTID_FREE_LOCKER           (FALSE)
2178 #define DHD_PKTID_RSV_LOCKER            (TRUE)
2179 
2180 #define DHD_PKTID_ITEM_SZ               (sizeof(dhd_pktid_item_t))
2181 #define DHD_PKIDMAP_ITEMS(items)        (items)
2182 #define DHD_PKTID_MAP_SZ(items)         (sizeof(dhd_pktid_map_t) + \
2183 	                                     (DHD_PKTID_ITEM_SZ * ((items) + 1)))
2184 #define DHD_PKTIDMAP_KEYS_SZ(items)     (sizeof(dhd_pktid_key_t) * ((items) + 1))
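
/*
 * Sizing sketch: for a map of N items, DHD_PKTID_MAP_SZ(N) reserves N+1
 * lockers (locker #0 is kept busy as DHD_PKTID_INVALID) and
 * DHD_PKTIDMAP_KEYS_SZ(N) reserves N+1 keys, so e.g. N = 2048 yields 2049
 * dhd_pktid_item_t lockers plus 2049 dhd_pktid_key_t entries.
 */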
2185 
2186 #define DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, map)  dhd_pktid_map_reset_ioctl((dhd), (map))
2187 
2188 /* Convert a packet to a pktid, and save pkt pointer in busy locker */
2189 #define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype)    \
2190 	dhd_pktid_map_reserve((dhd), (map), (pkt), (pkttype))
2191 /* Reuse a previously reserved locker to save packet params */
2192 #define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dir, dmah, secdma, pkttype) \
2193 	dhd_pktid_map_save((dhd), (map), (void *)(pkt), (nkey), (pa), (uint32)(len), \
2194 		(uint8)(dir), (void *)(dmah), (void *)(secdma), \
2195 		(dhd_pkttype_t)(pkttype))
2196 /* Convert a packet to a pktid, and save packet params in locker */
2197 #define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dir, dmah, secdma, pkttype) \
2198 	dhd_pktid_map_alloc((dhd), (map), (void *)(pkt), (pa), (uint32)(len), \
2199 		(uint8)(dir), (void *)(dmah), (void *)(secdma), \
2200 		(dhd_pkttype_t)(pkttype))
2201 
2202 /* Convert pktid to a packet, and free the locker */
2203 #define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
2204 	dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
2205 		(dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
2206 		(void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_FREE_LOCKER)
2207 
2208 /* Convert the pktid to a packet, empty locker, but keep it reserved */
2209 #define DHD_PKTID_TO_NATIVE_RSV(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
2210 	dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
2211 	                   (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
2212 	                   (void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_RSV_LOCKER)
2213 
2214 #ifdef DHD_PKTTS
2215 #define DHD_PKTID_SAVE_METADATA(dhd, map, mpkt, mpkt_pa, mpkt_len, dmah, nkey) \
2216 	dhd_pktid_map_save_metadata(dhd, map, mpkt, mpkt_pa, mpkt_len, dmah, nkey)
2217 
2218 #define DHD_PKTID_RETREIVE_METADATA(dhd, map, mpkt_pa, mpkt_len, dmah, nkey) \
2219 	dhd_pktid_map_retreive_metadata(dhd, map, (dmaaddr_t *)&mpkt_pa, (uint32 *)&mpkt_len, \
2220 		(void **) &dmah, nkey)
2221 #endif /* DHD_PKTTS */
2222 
2223 #define DHD_PKTID_AVAIL(map)                 dhd_pktid_map_avail_cnt(map)
2224 
2225 #if defined(DHD_PKTID_AUDIT_ENABLED)
2226 
2227 static int
2228 dhd_get_pktid_map_type(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map)
2229 {
2230 	dhd_prot_t *prot = dhd->prot;
2231 	int pktid_map_type;
2232 
2233 	if (pktid_map == prot->pktid_ctrl_map) {
2234 		pktid_map_type = DHD_PKTID_MAP_TYPE_CTRL;
2235 	} else if (pktid_map == prot->pktid_tx_map) {
2236 		pktid_map_type = DHD_PKTID_MAP_TYPE_TX;
2237 	} else if (pktid_map == prot->pktid_rx_map) {
2238 		pktid_map_type = DHD_PKTID_MAP_TYPE_RX;
2239 	} else {
2240 		pktid_map_type = DHD_PKTID_MAP_TYPE_UNKNOWN;
2241 	}
2242 
2243 	return pktid_map_type;
2244 }
2245 
2246 /**
2247 * __dhd_pktid_audit - Use the mwbmap to audit validity of a pktid.
2248 */
2249 static int
2250 __dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
2251 	const int test_for, const char *errmsg)
2252 {
2253 #define DHD_PKT_AUDIT_STR "ERROR: %16s Host PktId Audit: "
2254 	struct bcm_mwbmap *handle;
2255 	uint32	flags;
2256 	bool ignore_audit;
2257 	int error = BCME_OK;
2258 
2259 	if (pktid_map == (dhd_pktid_map_t *)NULL) {
2260 		DHD_ERROR((DHD_PKT_AUDIT_STR "Pkt id map NULL\n", errmsg));
2261 		return BCME_OK;
2262 	}
2263 
2264 	flags = DHD_PKTID_AUDIT_LOCK(pktid_map->pktid_audit_lock);
2265 
2266 	handle = pktid_map->pktid_audit;
2267 	if (handle == (struct bcm_mwbmap *)NULL) {
2268 		DHD_ERROR((DHD_PKT_AUDIT_STR "Handle NULL\n", errmsg));
2269 		goto out;
2270 	}
2271 
2272 	/* Exclude special pktids from audit */
2273 	ignore_audit = (pktid == DHD_IOCTL_REQ_PKTID) | (pktid == DHD_FAKE_PKTID);
2274 	if (ignore_audit) {
2275 		goto out;
2276 	}
2277 
2278 	if ((pktid == DHD_PKTID_INVALID) || (pktid > pktid_map->items)) {
2279 		DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> invalid\n", errmsg, pktid));
2280 		error = BCME_ERROR;
2281 		goto out;
2282 	}
2283 
2284 	/* Perform audit */
2285 	switch (test_for) {
2286 		case DHD_DUPLICATE_ALLOC:
2287 			if (!bcm_mwbmap_isfree(handle, pktid)) {
2288 				DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> alloc duplicate\n",
2289 				           errmsg, pktid));
2290 				error = BCME_ERROR;
2291 			} else {
2292 				bcm_mwbmap_force(handle, pktid);
2293 			}
2294 			break;
2295 
2296 		case DHD_DUPLICATE_FREE:
2297 			if (bcm_mwbmap_isfree(handle, pktid)) {
2298 				DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> free duplicate\n",
2299 				           errmsg, pktid));
2300 				error = BCME_ERROR;
2301 			} else {
2302 				bcm_mwbmap_free(handle, pktid);
2303 			}
2304 			break;
2305 
2306 		case DHD_TEST_IS_ALLOC:
2307 			if (bcm_mwbmap_isfree(handle, pktid)) {
2308 				DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not allocated\n",
2309 				           errmsg, pktid));
2310 				error = BCME_ERROR;
2311 			}
2312 			break;
2313 
2314 		case DHD_TEST_IS_FREE:
2315 			if (!bcm_mwbmap_isfree(handle, pktid)) {
2316 				DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not free",
2317 				           errmsg, pktid));
2318 				error = BCME_ERROR;
2319 			}
2320 			break;
2321 
2322 		default:
2323 			DHD_ERROR(("%s: Invalid test case: %d\n", __FUNCTION__, test_for));
2324 			error = BCME_ERROR;
2325 			break;
2326 	}
2327 
2328 out:
2329 	DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
2330 
2331 	if (error != BCME_OK) {
2332 		dhd->pktid_audit_failed = TRUE;
2333 	}
2334 
2335 	return error;
2336 }
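
/*
 * Audit mode summary, as implemented above:
 *	DHD_DUPLICATE_ALLOC - error if the pktid is already allocated, else mark it
 *	DHD_DUPLICATE_FREE  - error if the pktid is already free, else clear it
 *	DHD_TEST_IS_ALLOC   - error if the pktid is not currently allocated
 *	DHD_TEST_IS_FREE    - error if the pktid is not currently free
 * Any failure sets dhd->pktid_audit_failed so callers can trigger recovery.
 */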
2337 
2338 static int
2339 dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
2340 	const int test_for, const char *errmsg)
2341 {
2342 	int ret = BCME_OK;
2343 	ret = __dhd_pktid_audit(dhd, pktid_map, pktid, test_for, errmsg);
2344 	if (ret == BCME_ERROR) {
2345 		DHD_ERROR(("%s: Got Pkt Id Audit failure: PKTID<%d> PKTID MAP TYPE<%d>\n",
2346 			__FUNCTION__, pktid, dhd_get_pktid_map_type(dhd, pktid_map)));
2347 		dhd_pktid_error_handler(dhd);
2348 #ifdef DHD_MAP_PKTID_LOGGING
2349 		DHD_PKTID_LOG_DUMP(dhd);
2350 #endif /* DHD_MAP_PKTID_LOGGING */
2351 	}
2352 
2353 	return ret;
2354 }
2355 
2356 #define DHD_PKTID_AUDIT(dhdp, map, pktid, test_for) \
2357 	dhd_pktid_audit((dhdp), (dhd_pktid_map_t *)(map), (pktid), (test_for), __FUNCTION__)
2358 
2359 static int
2360 dhd_pktid_audit_ring_debug(dhd_pub_t *dhdp, dhd_pktid_map_t *map, uint32 pktid,
2361 	const int test_for, void *msg, uint32 msg_len, const char *func)
2362 {
2363 	int ret = BCME_OK;
2364 
2365 	if (dhd_query_bus_erros(dhdp)) {
2366 		return BCME_ERROR;
2367 	}
2368 
2369 	ret = __dhd_pktid_audit(dhdp, map, pktid, test_for, func);
2370 	if (ret == BCME_ERROR) {
2371 		DHD_ERROR(("%s: Got Pkt Id Audit failure: PKTID<%d> PKTID MAP TYPE<%d>\n",
2372 			__FUNCTION__, pktid, dhd_get_pktid_map_type(dhdp, map)));
2373 		prhex(func, (uchar *)msg, msg_len);
2374 		dhd_pktid_error_handler(dhdp);
2375 #ifdef DHD_MAP_PKTID_LOGGING
2376 		DHD_PKTID_LOG_DUMP(dhdp);
2377 #endif /* DHD_MAP_PKTID_LOGGING */
2378 	}
2379 	return ret;
2380 }
2381 #define DHD_PKTID_AUDIT_RING_DEBUG(dhdp, map, pktid, test_for, msg, msg_len) \
2382 	dhd_pktid_audit_ring_debug((dhdp), (dhd_pktid_map_t *)(map), \
2383 		(pktid), (test_for), msg, msg_len, __FUNCTION__)
2384 
2385 #endif /* DHD_PKTID_AUDIT_ENABLED */
2386 
2387 /**
2388  * +---------------------------------------------------------------------------+
2389  * Packet to Packet Id mapper using a <numbered_key, locker> paradigm.
2390  *
2391  * dhd_pktid_map manages a set of unique Packet Ids range[1..MAX_xxx_PKTID].
2392  *
2393  * dhd_pktid_map_alloc() may be used to save some packet metadata, and a unique
2394  * packet id is returned. This unique packet id may be used to retrieve the
2395  * previously saved packet metadata, using dhd_pktid_map_free(). On invocation
2396  * of dhd_pktid_map_free(), the unique packet id is essentially freed. A
2397  * subsequent call to dhd_pktid_map_alloc() may reuse this packet id.
2398  *
2399  * Implementation Note:
2400  * Convert this into a <key,locker> abstraction and place into bcmutils !
2401  * Locker abstraction should treat contents as opaque storage, and a
2402  * callback should be registered to handle busy lockers on destructor.
2403  *
2404  * +---------------------------------------------------------------------------+
2405  */
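
/*
 * Hedged usage sketch of the mapper through the macros defined earlier in this
 * file (the real call sites are in the tx/rx post and completion paths; map,
 * pkt, pa, len, dmah and secdma below are placeholders, and DMA_TX is assumed
 * to be the OSL tx map direction):
 *
 *	uint32 pktid = DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len,
 *		DMA_TX, dmah, secdma, PKTTYPE_DATA_TX);
 *	if (pktid == DHD_PKTID_INVALID)
 *		return BCME_NORESOURCE;	(pool depleted, do not post the work item)
 *	...
 *	on completion from the dongle, recover the packet and free the id:
 *	pkt = DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len,
 *		dmah, secdma, PKTTYPE_DATA_TX);
 */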
2406 
2407 /** Allocate and initialize a mapper of num_items <numbered_key, locker> */
2408 
2409 static dhd_pktid_map_handle_t *
2410 dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items)
2411 {
2412 	void* osh;
2413 	uint32 nkey;
2414 	dhd_pktid_map_t *map;
2415 	uint32 dhd_pktid_map_sz;
2416 	uint32 map_items;
2417 	uint32 map_keys_sz;
2418 	osh = dhd->osh;
2419 
2420 	dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(num_items);
2421 
2422 	map = (dhd_pktid_map_t *)VMALLOC(osh, dhd_pktid_map_sz);
2423 	if (map == NULL) {
2424 		DHD_ERROR(("%s:%d: MALLOC failed for size %d\n",
2425 			__FUNCTION__, __LINE__, dhd_pktid_map_sz));
2426 		return (dhd_pktid_map_handle_t *)NULL;
2427 	}
2428 
2429 	map->items = num_items;
2430 	map->avail = num_items;
2431 
2432 	map_items = DHD_PKIDMAP_ITEMS(map->items);
2433 
2434 	map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
2435 
2436 	/* Initialize the lock that protects this structure */
2437 	map->pktid_lock = DHD_PKTID_LOCK_INIT(osh);
2438 	if (map->pktid_lock == NULL) {
2439 		DHD_ERROR(("%s:%d: Lock init failed \r\n", __FUNCTION__, __LINE__));
2440 		goto error;
2441 	}
2442 
2443 	map->keys = (dhd_pktid_key_t *)MALLOC(osh, map_keys_sz);
2444 	if (map->keys == NULL) {
2445 		DHD_ERROR(("%s:%d: MALLOC failed for map->keys size %d\n",
2446 			__FUNCTION__, __LINE__, map_keys_sz));
2447 		goto error;
2448 	}
2449 
2450 #if defined(DHD_PKTID_AUDIT_ENABLED)
2451 		/* Incarnate a hierarchical multiword bitmap for auditing pktid allocator */
2452 		map->pktid_audit = bcm_mwbmap_init(osh, map_items + 1);
2453 		if (map->pktid_audit == (struct bcm_mwbmap *)NULL) {
2454 			DHD_ERROR(("%s:%d: pktid_audit init failed\r\n", __FUNCTION__, __LINE__));
2455 			goto error;
2456 		} else {
2457 			DHD_ERROR(("%s:%d: pktid_audit init succeeded %d\n",
2458 				__FUNCTION__, __LINE__, map_items + 1));
2459 		}
2460 		map->pktid_audit_lock = DHD_PKTID_AUDIT_LOCK_INIT(osh);
2461 #endif /* DHD_PKTID_AUDIT_ENABLED */
2462 
2463 	for (nkey = 1; nkey <= map_items; nkey++) { /* locker #0 is reserved */
2464 		map->keys[nkey] = nkey; /* populate with unique keys */
2465 		map->lockers[nkey].state = LOCKER_IS_FREE;
2466 		map->lockers[nkey].pkt   = NULL; /* bzero: redundant */
2467 		map->lockers[nkey].len   = 0;
2468 	}
2469 
2470 	/* Reserve pktid #0, i.e. DHD_PKTID_INVALID to be inuse */
2471 	map->lockers[DHD_PKTID_INVALID].state = LOCKER_IS_BUSY; /* tag locker #0 as inuse */
2472 	map->lockers[DHD_PKTID_INVALID].pkt   = NULL; /* bzero: redundant */
2473 	map->lockers[DHD_PKTID_INVALID].len   = 0;
2474 
2475 #if defined(DHD_PKTID_AUDIT_ENABLED)
2476 	/* do not use dhd_pktid_audit() here, use bcm_mwbmap_force directly */
2477 	bcm_mwbmap_force(map->pktid_audit, DHD_PKTID_INVALID);
2478 #endif /* DHD_PKTID_AUDIT_ENABLED */
2479 
2480 	return (dhd_pktid_map_handle_t *)map; /* opaque handle */
2481 
2482 error:
2483 	if (map) {
2484 #if defined(DHD_PKTID_AUDIT_ENABLED)
2485 		if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
2486 			bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */
2487 			map->pktid_audit = (struct bcm_mwbmap *)NULL;
2488 			if (map->pktid_audit_lock)
2489 				DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock);
2490 		}
2491 #endif /* DHD_PKTID_AUDIT_ENABLED */
2492 
2493 		if (map->keys) {
2494 			MFREE(osh, map->keys, map_keys_sz);
2495 		}
2496 
2497 		if (map->pktid_lock) {
2498 			DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock);
2499 		}
2500 
2501 		VMFREE(osh, map, dhd_pktid_map_sz);
2502 	}
2503 	return (dhd_pktid_map_handle_t *)NULL;
2504 }
2505 
2506 /**
2507  * Retrieve all allocated keys and free all <numbered_key, locker>.
2508  * Freeing implies: unmapping the buffers and freeing the native packet
2509  * This could have been a callback registered with the pktid mapper.
2510  */
2511 static void
2512 dhd_pktid_map_reset(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
2513 {
2514 	void *osh;
2515 	uint32 nkey;
2516 	dhd_pktid_map_t *map;
2517 	dhd_pktid_item_t *locker;
2518 	uint32 map_items;
2519 	unsigned long flags;
2520 	bool data_tx = FALSE;
2521 
2522 	map = (dhd_pktid_map_t *)handle;
2523 	DHD_PKTID_LOCK(map->pktid_lock, flags);
2524 	osh = dhd->osh;
2525 
2526 	map_items = DHD_PKIDMAP_ITEMS(map->items);
2527 	/* skip reserved KEY #0, and start from 1 */
2528 
2529 	for (nkey = 1; nkey <= map_items; nkey++) {
2530 		if (map->lockers[nkey].state == LOCKER_IS_BUSY) {
2531 			locker = &map->lockers[nkey];
2532 			locker->state = LOCKER_IS_FREE;
2533 			data_tx = (locker->pkttype == PKTTYPE_DATA_TX);
2534 			if (data_tx) {
2535 				OSL_ATOMIC_DEC(dhd->osh, &dhd->prot->active_tx_count);
2536 			}
2537 
2538 #ifdef DHD_PKTID_AUDIT_RING
2539 			DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
2540 #endif /* DHD_PKTID_AUDIT_RING */
2541 #ifdef DHD_MAP_PKTID_LOGGING
2542 			DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap,
2543 				locker->pa, nkey, locker->len,
2544 				locker->pkttype);
2545 #endif /* DHD_MAP_PKTID_LOGGING */
2546 
2547 			DMA_UNMAP(osh, locker->pa, locker->len, locker->dir, 0, locker->dmah);
2548 			dhd_prot_packet_free(dhd, (ulong*)locker->pkt,
2549 				locker->pkttype, data_tx);
2550 		}
2551 		else {
2552 #ifdef DHD_PKTID_AUDIT_RING
2553 			DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
2554 #endif /* DHD_PKTID_AUDIT_RING */
2555 		}
2556 		map->keys[nkey] = nkey; /* populate with unique keys */
2557 	}
2558 
2559 	map->avail = map_items;
2560 	memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items);
2561 	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2562 }
2563 
2564 #ifdef IOCTLRESP_USE_CONSTMEM
2565 /** Called in detach scenario. Releasing IOCTL buffers. */
2566 static void
2567 dhd_pktid_map_reset_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
2568 {
2569 	uint32 nkey;
2570 	dhd_pktid_map_t *map;
2571 	dhd_pktid_item_t *locker;
2572 	uint32 map_items;
2573 	unsigned long flags;
2574 
2575 	map = (dhd_pktid_map_t *)handle;
2576 	DHD_PKTID_LOCK(map->pktid_lock, flags);
2577 
2578 	map_items = DHD_PKIDMAP_ITEMS(map->items);
2579 	/* skip reserved KEY #0, and start from 1 */
2580 	for (nkey = 1; nkey <= map_items; nkey++) {
2581 		if (map->lockers[nkey].state == LOCKER_IS_BUSY) {
2582 			dhd_dma_buf_t retbuf;
2583 
2584 #ifdef DHD_PKTID_AUDIT_RING
2585 			DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
2586 #endif /* DHD_PKTID_AUDIT_RING */
2587 
2588 			locker = &map->lockers[nkey];
2589 			retbuf.va = locker->pkt;
2590 			retbuf.len = locker->len;
2591 			retbuf.pa = locker->pa;
2592 			retbuf.dmah = locker->dmah;
2593 			retbuf.secdma = locker->secdma;
2594 
2595 			free_ioctl_return_buffer(dhd, &retbuf);
2596 		}
2597 		else {
2598 #ifdef DHD_PKTID_AUDIT_RING
2599 			DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
2600 #endif /* DHD_PKTID_AUDIT_RING */
2601 		}
2602 		map->keys[nkey] = nkey; /* populate with unique keys */
2603 	}
2604 
2605 	map->avail = map_items;
2606 	memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items);
2607 	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2608 }
2609 #endif /* IOCTLRESP_USE_CONSTMEM */
2610 
2611 /**
2612  * Free the pktid map.
2613  */
2614 static void
2615 dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
2616 {
2617 	dhd_pktid_map_t *map;
2618 	uint32 dhd_pktid_map_sz;
2619 	uint32 map_keys_sz;
2620 
2621 	if (handle == NULL)
2622 		return;
2623 
2624 	/* Free any pending packets */
2625 	dhd_pktid_map_reset(dhd, handle);
2626 
2627 	map = (dhd_pktid_map_t *)handle;
2628 	dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
2629 	map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
2630 
2631 	DHD_PKTID_LOCK_DEINIT(dhd->osh, map->pktid_lock);
2632 
2633 #if defined(DHD_PKTID_AUDIT_ENABLED)
2634 	if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
2635 		bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */
2636 		map->pktid_audit = (struct bcm_mwbmap *)NULL;
2637 		if (map->pktid_audit_lock) {
2638 			DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock);
2639 		}
2640 	}
2641 #endif /* DHD_PKTID_AUDIT_ENABLED */
2642 	MFREE(dhd->osh, map->keys, map_keys_sz);
2643 	VMFREE(dhd->osh, handle, dhd_pktid_map_sz);
2644 }
2645 
2646 #ifdef IOCTLRESP_USE_CONSTMEM
2647 static void
2648 dhd_pktid_map_fini_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
2649 {
2650 	dhd_pktid_map_t *map;
2651 	uint32 dhd_pktid_map_sz;
2652 	uint32 map_keys_sz;
2653 
2654 	if (handle == NULL)
2655 		return;
2656 
2657 	/* Free any pending packets */
2658 	dhd_pktid_map_reset_ioctl(dhd, handle);
2659 
2660 	map = (dhd_pktid_map_t *)handle;
2661 	dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
2662 	map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
2663 
2664 	DHD_PKTID_LOCK_DEINIT(dhd->osh, map->pktid_lock);
2665 
2666 #if defined(DHD_PKTID_AUDIT_ENABLED)
2667 	if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
2668 		bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */
2669 		map->pktid_audit = (struct bcm_mwbmap *)NULL;
2670 		if (map->pktid_audit_lock) {
2671 			DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock);
2672 		}
2673 	}
2674 #endif /* DHD_PKTID_AUDIT_ENABLED */
2675 
2676 	MFREE(dhd->osh, map->keys, map_keys_sz);
2677 	VMFREE(dhd->osh, handle, dhd_pktid_map_sz);
2678 }
2679 #endif /* IOCTLRESP_USE_CONSTMEM */
2680 
2681 /** Get the pktid free count */
2682 static INLINE uint32
2683 BCMFASTPATH(dhd_pktid_map_avail_cnt)(dhd_pktid_map_handle_t *handle)
2684 {
2685 	dhd_pktid_map_t *map;
2686 	uint32	avail;
2687 	unsigned long flags;
2688 
2689 	ASSERT(handle != NULL);
2690 	map = (dhd_pktid_map_t *)handle;
2691 
2692 	DHD_PKTID_LOCK(map->pktid_lock, flags);
2693 	avail = map->avail;
2694 	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2695 
2696 	return avail;
2697 }
2698 
2699 /**
2700  * dhd_pktid_map_reserve - reserve a unique numbered key. Reserved locker is not
2701  * yet populated. Invoke the pktid save api to populate the packet parameters
2702  * into the locker. This function is not reentrant; serializing calls is the
2703  * caller's responsibility. Callers must treat a returned value of DHD_PKTID_INVALID as
2704  * a failure case, implying a depleted pool of pktids.
2705  */
2706 static INLINE uint32
2707 dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
2708 	void *pkt, dhd_pkttype_t pkttype)
2709 {
2710 	uint32 nkey;
2711 	dhd_pktid_map_t *map;
2712 	dhd_pktid_item_t *locker;
2713 	unsigned long flags;
2714 
2715 	ASSERT(handle != NULL);
2716 	map = (dhd_pktid_map_t *)handle;
2717 
2718 	DHD_PKTID_LOCK(map->pktid_lock, flags);
2719 
2720 	if ((int)(map->avail) <= 0) { /* no more pktids to allocate */
2721 		map->failures++;
2722 		DHD_INFO(("%s:%d: failed, no free keys\n", __FUNCTION__, __LINE__));
2723 		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2724 		return DHD_PKTID_INVALID; /* failed alloc request */
2725 	}
2726 
2727 	ASSERT(map->avail <= map->items);
2728 	nkey = map->keys[map->avail]; /* fetch a free locker, pop stack */
2729 
2730 	if ((map->avail > map->items) || (nkey > map->items)) {
2731 		map->failures++;
2732 		DHD_ERROR(("%s:%d: failed to allocate a new pktid,"
2733 			" map->avail<%u>, nkey<%u>, pkttype<%u>\n",
2734 			__FUNCTION__, __LINE__, map->avail, nkey,
2735 			pkttype));
2736 		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2737 		return DHD_PKTID_INVALID; /* failed alloc request */
2738 	}
2739 
2740 	locker = &map->lockers[nkey]; /* save packet metadata in locker */
2741 	map->avail--;
2742 	locker->pkt = pkt; /* pkt is saved, other params not yet saved. */
2743 	locker->len = 0;
2744 	locker->state = LOCKER_IS_BUSY; /* reserve this locker */
2745 
2746 	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2747 
2748 	ASSERT(nkey != DHD_PKTID_INVALID);
2749 
2750 	return nkey; /* return locker's numbered key */
2751 }
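
/*
 * Allocation sketch: map->keys[] is used as a stack of free pktids indexed by
 * map->avail. dhd_pktid_map_reserve() pops keys[map->avail] and decrements
 * avail, and the corresponding free path (later in this file) is expected to
 * push the key back and increment avail, so ids are handed out until avail
 * reaches 0, after which DHD_PKTID_INVALID is returned.
 */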
2752 
2753 #ifdef DHD_PKTTS
2754 /*
2755  * dhd_pktid_map_save_metadata - Save metadata information in a locker
2756  * that has a reserved unique numbered key.
2757  */
2758 static INLINE void
2759 dhd_pktid_map_save_metadata(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *mpkt,
2760 	dmaaddr_t mpkt_pa,
2761 	uint16	mpkt_len,
2762 	void *dmah,
2763 	uint32 nkey)
2764 {
2765 	dhd_pktid_map_t *map;
2766 	dhd_pktid_item_t *locker;
2767 	unsigned long flags;
2768 
2769 	ASSERT(handle != NULL);
2770 	map = (dhd_pktid_map_t *)handle;
2771 
2772 	DHD_PKTID_LOCK(map->pktid_lock, flags);
2773 
2774 	if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
2775 		DHD_ERROR(("%s:%d: Error! saving invalid pktid<%u>",
2776 			__FUNCTION__, __LINE__, nkey));
2777 		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2778 #ifdef DHD_FW_COREDUMP
2779 		if (dhd->memdump_enabled) {
2780 			/* collect core dump */
2781 			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2782 			dhd_bus_mem_dump(dhd);
2783 		}
2784 #else
2785 		ASSERT(0);
2786 #endif /* DHD_FW_COREDUMP */
2787 		return;
2788 	}
2789 
2790 	locker = &map->lockers[nkey];
2791 
2792 	/*
2793 	 * TODO: checking the locker state for BUSY will prevent
2794 	 * us from storing meta data on an already allocated
2795 	 * Locker. But not checking may lead to overwriting
2796 	 * existing data.
2797 	 */
2798 	locker->mpkt = mpkt;
2799 	locker->mpkt_pa = mpkt_pa;
2800 	locker->mpkt_len = mpkt_len;
2801 	locker->dmah = dmah;
2802 
2803 	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2804 }
2805 #endif /* DHD_PKTTS */
2806 
2807 /*
2808  * dhd_pktid_map_save - Save a packet's parameters into a locker
2809  * corresponding to a previously reserved unique numbered key.
2810  */
2811 static INLINE void
2812 dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
2813 	uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
2814 	dhd_pkttype_t pkttype)
2815 {
2816 	dhd_pktid_map_t *map;
2817 	dhd_pktid_item_t *locker;
2818 	unsigned long flags;
2819 
2820 	ASSERT(handle != NULL);
2821 	map = (dhd_pktid_map_t *)handle;
2822 
2823 	DHD_PKTID_LOCK(map->pktid_lock, flags);
2824 
2825 	if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
2826 		DHD_ERROR(("%s:%d: Error! saving invalid pktid<%u> pkttype<%u>\n",
2827 			__FUNCTION__, __LINE__, nkey, pkttype));
2828 		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2829 #ifdef DHD_FW_COREDUMP
2830 		if (dhd->memdump_enabled) {
2831 			/* collect core dump */
2832 			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2833 			dhd_bus_mem_dump(dhd);
2834 		}
2835 #else
2836 		ASSERT(0);
2837 #endif /* DHD_FW_COREDUMP */
2838 		return;
2839 	}
2840 
2841 	locker = &map->lockers[nkey];
2842 
2843 	ASSERT(((locker->state == LOCKER_IS_BUSY) && (locker->pkt == pkt)) ||
2844 		((locker->state == LOCKER_IS_RSVD) && (locker->pkt == NULL)));
2845 
2846 	/* store contents in locker */
2847 	locker->dir = dir;
2848 	locker->pa = pa;
2849 	locker->len = (uint16)len; /* 16bit len */
2850 	locker->dmah = dmah; /* dma mapping handle */
2851 	locker->secdma = secdma;
2852 	locker->pkttype = pkttype;
2853 	locker->pkt = pkt;
2854 	locker->state = LOCKER_IS_BUSY; /* make this locker busy */
2855 #ifdef DHD_MAP_PKTID_LOGGING
2856 	DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_map, pa, nkey, len, pkttype);
2857 #endif /* DHD_MAP_PKTID_LOGGING */
2858 	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2859 }
2860 
2861 /**
2862  * dhd_pktid_map_alloc - Allocate a unique numbered key and save the packet
2863  * contents into the corresponding locker. Return the numbered key.
2864  */
2865 static uint32
2866 BCMFASTPATH(dhd_pktid_map_alloc)(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
2867 	dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
2868 	dhd_pkttype_t pkttype)
2869 {
2870 	uint32 nkey;
2871 
2872 	nkey = dhd_pktid_map_reserve(dhd, handle, pkt, pkttype);
2873 	if (nkey != DHD_PKTID_INVALID) {
2874 		dhd_pktid_map_save(dhd, handle, pkt, nkey, pa,
2875 			len, dir, dmah, secdma, pkttype);
2876 	}
2877 
2878 	return nkey;
2879 }
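/*
 * Illustrative usage sketch (hypothetical caller, not code from this file):
 * a typical H2D post path DMA-maps the packet and then obtains a pktid in a
 * single step via dhd_pktid_map_alloc(), treating DHD_PKTID_INVALID as a
 * depleted pool:
 *
 *	nkey = dhd_pktid_map_alloc(dhd, map, pkt, pa, len, DMA_TX,
 *		dmah, secdma, PKTTYPE_DATA_TX);
 *	if (nkey == DHD_PKTID_INVALID) {
 *		... unmap the packet and defer/requeue the request ...
 *	}
 */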
2880 
2881 #ifdef DHD_PKTTS
2882 static void *
2883 BCMFASTPATH(dhd_pktid_map_retreive_metadata)(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
2884 	dmaaddr_t *pmpkt_pa,
2885 	uint32	*pmpkt_len,
2886 	void **pdmah,
2887 	uint32 nkey)
2888 {
2889 	dhd_pktid_map_t *map;
2890 	dhd_pktid_item_t *locker;
2891 	void *mpkt;
2892 	unsigned long flags;
2893 
2894 	ASSERT(handle != NULL);
2895 
2896 	map = (dhd_pktid_map_t *)handle;
2897 
2898 	DHD_PKTID_LOCK(map->pktid_lock, flags);
2899 
2900 	/* XXX PLEASE DO NOT remove this ASSERT, fix the bug in caller. */
2901 	if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
2902 		DHD_ERROR(("%s:%d: Error! Try to free invalid pktid<%u>\n",
2903 		           __FUNCTION__, __LINE__, nkey));
2904 		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2905 #ifdef DHD_FW_COREDUMP
2906 		if (dhd->memdump_enabled) {
2907 			/* collect core dump */
2908 			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2909 			dhd_bus_mem_dump(dhd);
2910 		}
2911 #else
2912 		ASSERT(0);
2913 #endif /* DHD_FW_COREDUMP */
2914 		return NULL;
2915 	}
2916 
2917 	locker = &map->lockers[nkey];
2918 	mpkt = locker->mpkt;
2919 	*pmpkt_pa = locker->mpkt_pa;
2920 	*pmpkt_len = locker->mpkt_len;
2921 	if (pdmah)
2922 		*pdmah = locker->dmah;
2923 	locker->mpkt = NULL;
2924 	locker->mpkt_len = 0;
2925 	locker->dmah = NULL;
2926 
2927 	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2928 	return mpkt;
2929 }
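/*
 * Sketch of the DHD_PKTTS metadata flow (an assumption based on the pairing
 * of the two helpers above, not a statement about the exact callers): the TX
 * post path stores a DMA-mapped metadata buffer with
 * dhd_pktid_map_save_metadata() against the same nkey as the data packet, and
 * the TX completion path recovers it with dhd_pktid_map_retreive_metadata()
 * so it can be unmapped and freed alongside the packet.
 */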
2930 #endif /* DHD_PKTTS */
2931 
2932 /**
2933  * dhd_pktid_map_free - Given a numbered key, return the locker contents.
2934  * dhd_pktid_map_free() is not reentrant; serializing calls is the caller's responsibility.
2935  * Caller may not free a pktid value DHD_PKTID_INVALID or an arbitrary pktid
2936  * value. Only a previously allocated pktid may be freed.
2937  */
2938 static void *
2939 BCMFASTPATH(dhd_pktid_map_free)(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, uint32 nkey,
2940 	dmaaddr_t *pa, uint32 *len, void **dmah, void **secdma, dhd_pkttype_t pkttype,
2941 	bool rsv_locker)
2942 {
2943 	dhd_pktid_map_t *map;
2944 	dhd_pktid_item_t *locker;
2945 	void * pkt;
2946 	unsigned long long locker_addr;
2947 	unsigned long flags;
2948 
2949 	ASSERT(handle != NULL);
2950 
2951 	map = (dhd_pktid_map_t *)handle;
2952 
2953 	DHD_PKTID_LOCK(map->pktid_lock, flags);
2954 
2955 	/* XXX PLEASE DO NOT remove this ASSERT, fix the bug in caller. */
2956 	if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
2957 		DHD_ERROR(("%s:%d: Error! Try to free invalid pktid<%u>, pkttype<%d>\n",
2958 		           __FUNCTION__, __LINE__, nkey, pkttype));
2959 		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2960 #ifdef DHD_FW_COREDUMP
2961 		if (dhd->memdump_enabled) {
2962 			/* collect core dump */
2963 			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2964 			dhd_bus_mem_dump(dhd);
2965 		}
2966 #else
2967 		ASSERT(0);
2968 #endif /* DHD_FW_COREDUMP */
2969 		return NULL;
2970 	}
2971 
2972 	locker = &map->lockers[nkey];
2973 
2974 #if defined(DHD_PKTID_AUDIT_MAP)
2975 	DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* Audit duplicate FREE */
2976 #endif /* DHD_PKTID_AUDIT_MAP */
2977 
2978 	/* Debug check for cloned numbered key */
2979 	if (locker->state == LOCKER_IS_FREE) {
2980 		DHD_ERROR(("%s:%d: Error! freeing already freed invalid pktid<%u>\n",
2981 		           __FUNCTION__, __LINE__, nkey));
2982 		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2983 		/* XXX PLEASE DO NOT remove this ASSERT, fix the bug in caller. */
2984 #ifdef DHD_FW_COREDUMP
2985 		if (dhd->memdump_enabled) {
2986 			/* collect core dump */
2987 			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2988 			dhd_bus_mem_dump(dhd);
2989 		}
2990 #else
2991 		ASSERT(0);
2992 #endif /* DHD_FW_COREDUMP */
2993 		return NULL;
2994 	}
2995 
2996 	/* Check the colour of the buffer, i.e. a buffer posted for TX
2997 	 * should be freed on TX completion. Similarly, a buffer posted for
2998 	 * IOCTL should be freed on IOCTL completion, etc.
2999 	 */
3000 	if ((pkttype != PKTTYPE_NO_CHECK) && (locker->pkttype != pkttype)) {
3001 
3002 		DHD_ERROR(("%s:%d: Error! Invalid Buffer Free for pktid<%u> \n",
3003 			__FUNCTION__, __LINE__, nkey));
3004 #ifdef BCMDMA64OSL
3005 		PHYSADDRTOULONG(locker->pa, locker_addr);
3006 #else
3007 		locker_addr = PHYSADDRLO(locker->pa);
3008 #endif /* BCMDMA64OSL */
3009 		DHD_ERROR(("%s:%d: locker->state <%d>, locker->pkttype <%d>,"
3010 			"pkttype <%d> locker->pa <0x%llx> \n",
3011 			__FUNCTION__, __LINE__, locker->state, locker->pkttype,
3012 			pkttype, locker_addr));
3013 		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
3014 #ifdef DHD_FW_COREDUMP
3015 		if (dhd->memdump_enabled) {
3016 			/* collect core dump */
3017 			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
3018 			dhd_bus_mem_dump(dhd);
3019 		}
3020 #else
3021 		ASSERT(0);
3022 #endif /* DHD_FW_COREDUMP */
3023 		return NULL;
3024 	}
3025 
3026 	if (rsv_locker == DHD_PKTID_FREE_LOCKER) {
3027 		map->avail++;
3028 		map->keys[map->avail] = nkey; /* make this numbered key available */
3029 		locker->state = LOCKER_IS_FREE; /* open and free Locker */
3030 	} else {
3031 		/* pktid will be reused, but the locker does not have a valid pkt */
3032 		locker->state = LOCKER_IS_RSVD;
3033 	}
3034 
3035 #if defined(DHD_PKTID_AUDIT_MAP)
3036 	DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
3037 #endif /* DHD_PKTID_AUDIT_MAP */
3038 #ifdef DHD_MAP_PKTID_LOGGING
3039 	DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap, locker->pa, nkey,
3040 		(uint32)locker->len, pkttype);
3041 #endif /* DHD_MAP_PKTID_LOGGING */
3042 
3043 	*pa = locker->pa; /* return contents of locker */
3044 	*len = (uint32)locker->len;
3045 	*dmah = locker->dmah;
3046 	*secdma = locker->secdma;
3047 
3048 	pkt = locker->pkt;
3049 	locker->pkt = NULL; /* Clear pkt */
3050 	locker->len = 0;
3051 
3052 	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
3053 
3054 	return pkt;
3055 }
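/*
 * Illustrative completion-path sketch (hypothetical caller): the locker
 * contents are recovered and the key is either returned to the free pool or
 * kept reserved for reuse, depending on the rsv_locker argument:
 *
 *	pkt = dhd_pktid_map_free(dhd, map, nkey, &pa, &len, &dmah, &secdma,
 *		PKTTYPE_DATA_TX, DHD_PKTID_FREE_LOCKER);
 *	if (pkt != NULL) {
 *		... DMA-unmap using pa/len/dmah and hand the packet upstream ...
 *	}
 */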
3056 
3057 #else /* ! DHD_PCIE_PKTID */
3058 
3059 #ifndef linux
3060 #error "DHD_PCIE_PKTID has to be defined for non-linux/android platforms"
3061 #endif
3062 
3063 typedef struct pktlist {
3064 	PKT_LIST *tx_pkt_list;		/* list for tx packets */
3065 	PKT_LIST *rx_pkt_list;		/* list for rx packets */
3066 	PKT_LIST *ctrl_pkt_list;	/* list for ioctl/event buf post */
3067 } pktlists_t;
3068 
3069 /*
3070  * Given that each workitem only carries a 32bit pktid, only 32bit hosts can
3071  * use a one-to-one mapping between a 32bit pktptr and a 32bit pktid.
3072  *
3073  * - When PKTIDMAP is not used, DHD_NATIVE_TO_PKTID variants will never fail.
3074  * - Neither DHD_NATIVE_TO_PKTID nor DHD_PKTID_TO_NATIVE need to be protected by
3075  *   a lock.
3076  * - Hence DHD_PKTID_INVALID is not defined when DHD_PCIE_PKTID is undefined.
3077  */
3078 #define DHD_PKTID32(pktptr32)	((uint32)(pktptr32))
3079 #define DHD_PKTPTR32(pktid32)	((void *)(pktid32))
3080 
3081 static INLINE uint32 dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
3082 	dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
3083 	dhd_pkttype_t pkttype);
3084 static INLINE void * dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
3085 	dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
3086 	dhd_pkttype_t pkttype);
3087 
3088 static dhd_pktid_map_handle_t *
3089 dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items)
3090 {
3091 	osl_t *osh = dhd->osh;
3092 	pktlists_t *handle = NULL;
3093 
3094 	if ((handle = (pktlists_t *) MALLOCZ(osh, sizeof(pktlists_t))) == NULL) {
3095 		DHD_ERROR(("%s:%d: MALLOC failed for lists allocation, size=%d\n",
3096 		           __FUNCTION__, __LINE__, sizeof(pktlists_t)));
3097 		goto error_done;
3098 	}
3099 
3100 	if ((handle->tx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
3101 		DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
3102 		           __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
3103 		goto error;
3104 	}
3105 
3106 	if ((handle->rx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
3107 		DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
3108 		           __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
3109 		goto error;
3110 	}
3111 
3112 	if ((handle->ctrl_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
3113 		DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
3114 		           __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
3115 		goto error;
3116 	}
3117 
3118 	PKTLIST_INIT(handle->tx_pkt_list);
3119 	PKTLIST_INIT(handle->rx_pkt_list);
3120 	PKTLIST_INIT(handle->ctrl_pkt_list);
3121 
3122 	return (dhd_pktid_map_handle_t *) handle;
3123 
3124 error:
3125 	if (handle->ctrl_pkt_list) {
3126 		MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
3127 	}
3128 
3129 	if (handle->rx_pkt_list) {
3130 		MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
3131 	}
3132 
3133 	if (handle->tx_pkt_list) {
3134 		MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
3135 	}
3136 
3137 	if (handle) {
3138 		MFREE(osh, handle, sizeof(pktlists_t));
3139 	}
3140 
3141 error_done:
3142 	return (dhd_pktid_map_handle_t *)NULL;
3143 }
3144 
3145 static void
3146 dhd_pktid_map_reset(dhd_pub_t *dhd, pktlists_t *handle)
3147 {
3148 	osl_t *osh = dhd->osh;
3149 
3150 	if (handle->ctrl_pkt_list) {
3151 		PKTLIST_FINI(handle->ctrl_pkt_list);
3152 		MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
3153 	}
3154 
3155 	if (handle->rx_pkt_list) {
3156 		PKTLIST_FINI(handle->rx_pkt_list);
3157 		MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
3158 	}
3159 
3160 	if (handle->tx_pkt_list) {
3161 		PKTLIST_FINI(handle->tx_pkt_list);
3162 		MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
3163 	}
3164 }
3165 
3166 static void
3167 dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map)
3168 {
3169 	osl_t *osh = dhd->osh;
3170 	pktlists_t *handle = (pktlists_t *) map;
3171 
3172 	ASSERT(handle != NULL);
3173 	if (handle == (pktlists_t *)NULL) {
3174 		return;
3175 	}
3176 
3177 	dhd_pktid_map_reset(dhd, handle);
3178 
3179 	if (handle) {
3180 		MFREE(osh, handle, sizeof(pktlists_t));
3181 	}
3182 }
3183 
3184 /** Save dma parameters into the packet's pkttag and convert a pktptr to pktid */
3185 static INLINE uint32
3186 dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
3187 	dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
3188 	dhd_pkttype_t pkttype)
3189 {
3190 	pktlists_t *handle = (pktlists_t *) map;
3191 	ASSERT(pktptr32 != NULL);
3192 	DHD_PKT_SET_DMA_LEN(pktptr32, dma_len);
3193 	DHD_PKT_SET_DMAH(pktptr32, dmah);
3194 	DHD_PKT_SET_PA(pktptr32, pa);
3195 	DHD_PKT_SET_SECDMA(pktptr32, secdma);
3196 
3197 	/* XXX optimize these branch conditionals */
3198 	if (pkttype == PKTTYPE_DATA_TX) {
3199 		PKTLIST_ENQ(handle->tx_pkt_list,  pktptr32);
3200 	} else if (pkttype == PKTTYPE_DATA_RX) {
3201 		PKTLIST_ENQ(handle->rx_pkt_list,  pktptr32);
3202 	} else {
3203 		PKTLIST_ENQ(handle->ctrl_pkt_list,  pktptr32);
3204 	}
3205 
3206 	return DHD_PKTID32(pktptr32);
3207 }
3208 
3209 /** Convert a pktid to pktptr and retrieve saved dma parameters from packet */
3210 static INLINE void *
3211 dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
3212 	dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
3213 	dhd_pkttype_t pkttype)
3214 {
3215 	pktlists_t *handle = (pktlists_t *) map;
3216 	void *pktptr32;
3217 
3218 	ASSERT(pktid32 != 0U);
3219 	pktptr32 = DHD_PKTPTR32(pktid32);
3220 	*dma_len = DHD_PKT_GET_DMA_LEN(pktptr32);
3221 	*dmah = DHD_PKT_GET_DMAH(pktptr32);
3222 	*pa = DHD_PKT_GET_PA(pktptr32);
3223 	*secdma = DHD_PKT_GET_SECDMA(pktptr32);
3224 
3225 	/* XXX optimize these branch conditionals */
3226 	if (pkttype == PKTTYPE_DATA_TX) {
3227 		PKTLIST_UNLINK(handle->tx_pkt_list,  pktptr32);
3228 	} else if (pkttype == PKTTYPE_DATA_RX) {
3229 		PKTLIST_UNLINK(handle->rx_pkt_list,  pktptr32);
3230 	} else {
3231 		PKTLIST_UNLINK(handle->ctrl_pkt_list,  pktptr32);
3232 	}
3233 
3234 	return pktptr32;
3235 }
3236 
3237 #define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype)  DHD_PKTID32(pkt)
3238 
3239 #define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dma_dir, dmah, secdma, pkttype) \
3240 	({ BCM_REFERENCE(dhd); BCM_REFERENCE(nkey); BCM_REFERENCE(dma_dir); \
3241 	   dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
3242 			   (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
3243 	})
3244 
3245 #define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dma_dir, dmah, secdma, pkttype) \
3246 	({ BCM_REFERENCE(dhd); BCM_REFERENCE(dma_dir); \
3247 	   dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
3248 			   (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
3249 	})
3250 
3251 #define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
3252 	({ BCM_REFERENCE(dhd); BCM_REFERENCE(pkttype);	\
3253 		dhd_pktid_to_native((dhd_pktid_map_handle_t *) map, (uint32)(pktid), \
3254 				(dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
3255 				(void **)&secdma, (dhd_pkttype_t)(pkttype)); \
3256 	})
3257 
3258 #define DHD_PKTID_AVAIL(map)  (~0)
3259 
3260 #endif /* ! DHD_PCIE_PKTID */
3261 
3262 /* +------------------ End of PCIE DHD PKTID MAPPER  -----------------------+ */
3263 
3264 /*
3265  * Allocate buffers for the common rings.
3266  * Also allocate buffers for hmaptest, the scratch buffer for the dma rx offset,
3267  * bus throughput measurement and snapshot upload.
3268  */
3269 static int
3270 dhd_prot_allocate_bufs(dhd_pub_t *dhd, dhd_prot_t *prot)
3271 {
3272 
3273 	/* Common Ring Allocations */
3274 
3275 	/* Ring  0: H2D Control Submission */
3276 	if (dhd_prot_ring_attach(dhd, &prot->h2dring_ctrl_subn, "h2dctrl",
3277 	        H2DRING_CTRL_SUB_MAX_ITEM, H2DRING_CTRL_SUB_ITEMSIZE,
3278 	        BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT) != BCME_OK) {
3279 		DHD_ERROR(("%s: dhd_prot_ring_attach H2D Ctrl Submission failed\n",
3280 			__FUNCTION__));
3281 		goto fail;
3282 	}
3283 
3284 	/* Ring  1: H2D Receive Buffer Post */
3285 	if (dhd_prot_ring_attach(dhd, &prot->h2dring_rxp_subn, "h2drxp",
3286 	        H2DRING_RXPOST_MAX_ITEM, H2DRING_RXPOST_ITEMSIZE,
3287 	        BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT) != BCME_OK) {
3288 		DHD_ERROR(("%s: dhd_prot_ring_attach H2D RxPost failed\n",
3289 			__FUNCTION__));
3290 		goto fail;
3291 	}
3292 
3293 	/* Ring  2: D2H Control Completion */
3294 	if (dhd_prot_ring_attach(dhd, &prot->d2hring_ctrl_cpln, "d2hctrl",
3295 	        D2HRING_CTRL_CMPLT_MAX_ITEM, D2HRING_CTRL_CMPLT_ITEMSIZE,
3296 	        BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE) != BCME_OK) {
3297 		DHD_ERROR(("%s: dhd_prot_ring_attach D2H Ctrl Completion failed\n",
3298 			__FUNCTION__));
3299 		goto fail;
3300 	}
3301 
3302 	/* Ring  3: D2H Transmit Complete */
3303 	if (dhd_prot_ring_attach(dhd, &prot->d2hring_tx_cpln, "d2htxcpl",
3304 	        D2HRING_TXCMPLT_MAX_ITEM, D2HRING_TXCMPLT_ITEMSIZE,
3305 	        BCMPCIE_D2H_MSGRING_TX_COMPLETE) != BCME_OK) {
3306 		DHD_ERROR(("%s: dhd_prot_ring_attach D2H Tx Completion failed\n",
3307 			__FUNCTION__));
3308 		goto fail;
3309 
3310 	}
3311 
3312 	/* Ring  4: D2H Receive Complete */
3313 	if (dhd_prot_ring_attach(dhd, &prot->d2hring_rx_cpln, "d2hrxcpl",
3314 	        D2HRING_RXCMPLT_MAX_ITEM, D2HRING_RXCMPLT_ITEMSIZE,
3315 	        BCMPCIE_D2H_MSGRING_RX_COMPLETE) != BCME_OK) {
3316 		DHD_ERROR(("%s: dhd_prot_ring_attach D2H Rx Completion failed\n",
3317 			__FUNCTION__));
3318 		goto fail;
3319 
3320 	}
3321 
3322 	/*
3323 	 * Max number of flowrings is not yet known. msgbuf_ring_t with DMA-able
3324 	 * buffers for flowrings will be instantiated in dhd_prot_init().
3325 	 * See dhd_prot_flowrings_pool_attach().
3326 	 */
3327 	/* ioctl response buffer */
3328 	if (dhd_dma_buf_alloc(dhd, &prot->retbuf, IOCT_RETBUF_SIZE)) {
3329 		goto fail;
3330 	}
3331 
3332 	/* IOCTL request buffer */
3333 	if (dhd_dma_buf_alloc(dhd, &prot->ioctbuf, IOCT_RETBUF_SIZE)) {
3334 		goto fail;
3335 	}
3336 
3337 	/* Host TS request buffer; one buffer for now */
3338 	if (dhd_dma_buf_alloc(dhd, &prot->hostts_req_buf, CTRLSUB_HOSTTS_MEESAGE_SIZE)) {
3339 		goto fail;
3340 	}
3341 	prot->hostts_req_buf_inuse = FALSE;
3342 
3343 	/* Scratch buffer for dma rx offset */
3344 #ifdef BCM_HOST_BUF
3345 	if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf,
3346 		ROUNDUP(DMA_D2H_SCRATCH_BUF_LEN, 16) + DMA_HOST_BUFFER_LEN))
3347 #else
3348 	if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf, DMA_D2H_SCRATCH_BUF_LEN))
3349 
3350 #endif /* BCM_HOST_BUF */
3351 	{
3352 		goto fail;
3353 	}
3354 
3355 #ifdef DHD_HMAPTEST
3356 	/* Allocate buffer for hmaptest  */
3357 	DHD_ERROR(("allocating memory for hmaptest \n"));
3358 	if (dhd_dma_buf_alloc(dhd, &prot->hmaptest.mem, HMAP_SANDBOX_BUFFER_LEN)) {
3359 
3360 		goto fail;
3361 	} else {
3362 		uint32 scratch_len;
3363 		uint64 scratch_lin, w1_start;
3364 		dmaaddr_t scratch_pa;
3365 
3366 		scratch_pa = prot->hmaptest.mem.pa;
3367 		scratch_len = prot->hmaptest.mem.len;
3368 		scratch_lin  = (uint64)(PHYSADDRLO(scratch_pa) & 0xffffffff)
3369 			| (((uint64)PHYSADDRHI(scratch_pa)& 0xffffffff) << 32);
3370 		w1_start  = scratch_lin +  scratch_len;
3371 		DHD_ERROR(("hmap: NOTE Buffer alloc for HMAPTEST Start=0x%0llx len=0x%08x"
3372 			"End=0x%0llx\n", (uint64) scratch_lin, scratch_len, (uint64) w1_start));
3373 	}
3374 #endif /* DHD_HMAPTEST */
3375 
3376 	/* scratch buffer bus throughput measurement */
3377 	if (dhd_dma_buf_alloc(dhd, &prot->host_bus_throughput_buf, DHD_BUS_TPUT_BUF_LEN)) {
3378 		goto fail;
3379 	}
3380 
3381 #ifdef SNAPSHOT_UPLOAD
3382 	/* snapshot upload buffer */
3383 	if (dhd_dma_buf_alloc(dhd, &prot->snapshot_upload_buf, SNAPSHOT_UPLOAD_BUF_SIZE)) {
3384 		goto fail;
3385 	}
3386 #endif	/* SNAPSHOT_UPLOAD */
3387 
3388 	return BCME_OK;
3389 
3390 fail:
3391 	return BCME_NOMEM;
3392 }
3393 
3394 /**
3395  * The PCIE FD protocol layer is constructed in two phases:
3396  *    Phase 1. dhd_prot_attach()
3397  *    Phase 2. dhd_prot_init()
3398  *
3399  * dhd_prot_attach() - Allocates a dhd_prot_t object and resets all its fields.
3400  * All Common rings are also attached (msgbuf_ring_t objects are allocated
3401  * with DMA-able buffers).
3402  * All dhd_dma_buf_t objects are also allocated here.
3403  *
3404  * As dhd_prot_attach() is invoked before the pcie_shared object is read, any
3405  * initialization of objects that requires information advertized by the dongle
3406  * may not be performed here.
3407  * E.g. the number of TxPost flowrings is not known at this point, nor do we
3408  * know which form of D2H DMA sync mechanism is advertized by the dongle, or
3409  * whether the dongle supports DMA-ing of WR/RD indices for the H2D and/or D2H
3410  * rings (common + flow).
3411  *
3412  * dhd_prot_init() is invoked after the bus layer has fetched the information
3413  * advertized by the dongle in the pcie_shared_t.
3414  */
3415 int
3416 dhd_prot_attach(dhd_pub_t *dhd)
3417 {
3418 	osl_t *osh = dhd->osh;
3419 	dhd_prot_t *prot;
3420 	uint32 trap_buf_len;
3421 
3422 	/* Allocate prot structure */
3423 	if (!(prot = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT,
3424 		sizeof(dhd_prot_t)))) {
3425 		DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
3426 		goto fail;
3427 	}
3428 	memset(prot, 0, sizeof(*prot));
3429 
3430 	prot->osh = osh;
3431 	dhd->prot = prot;
3432 
3433 	/* DMAing ring completes supported? FALSE by default  */
3434 	dhd->dma_d2h_ring_upd_support = FALSE;
3435 	dhd->dma_h2d_ring_upd_support = FALSE;
3436 	dhd->dma_ring_upd_overwrite = FALSE;
3437 
3438 	dhd->idma_inited = 0;
3439 	dhd->ifrm_inited = 0;
3440 	dhd->dar_inited = 0;
3441 
3442 	if (dhd_prot_allocate_bufs(dhd, prot) != BCME_OK) {
3443 		goto fail;
3444 	}
3445 
3446 #ifdef DHD_RX_CHAINING
3447 	dhd_rxchain_reset(&prot->rxchain);
3448 #endif
3449 
3450 	prot->pktid_ctrl_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_PKTID_CTRL);
3451 	if (prot->pktid_ctrl_map == NULL) {
3452 		goto fail;
3453 	}
3454 
3455 	prot->pktid_rx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_PKTID_RX);
3456 	if (prot->pktid_rx_map == NULL)
3457 		goto fail;
3458 
3459 	prot->pktid_tx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_PKTID_TX);
3460 	if (prot->pktid_tx_map == NULL)
3461 		goto fail;
3462 
3463 #ifdef IOCTLRESP_USE_CONSTMEM
3464 	prot->pktid_map_handle_ioctl = DHD_NATIVE_TO_PKTID_INIT(dhd,
3465 		DHD_FLOWRING_MAX_IOCTLRESPBUF_POST);
3466 	if (prot->pktid_map_handle_ioctl == NULL) {
3467 		goto fail;
3468 	}
3469 #endif /* IOCTLRESP_USE_CONSTMEM */
3470 
3471 #ifdef DHD_MAP_PKTID_LOGGING
3472 	prot->pktid_dma_map = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
3473 	if (prot->pktid_dma_map == NULL) {
3474 		DHD_ERROR(("%s: failed to allocate pktid_dma_map\n",
3475 			__FUNCTION__));
3476 	}
3477 
3478 	prot->pktid_dma_unmap = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
3479 	if (prot->pktid_dma_unmap == NULL) {
3480 		DHD_ERROR(("%s: failed to allocate pktid_dma_unmap\n",
3481 			__FUNCTION__));
3482 	}
3483 #endif /* DHD_MAP_PKTID_LOGGING */
3484 
3485 #ifdef D2H_MINIDUMP
3486 	if (dhd->bus->sih->buscorerev < 71) {
3487 		trap_buf_len = BCMPCIE_HOST_EXT_TRAP_DBGBUF_LEN_MIN;
3488 	} else {
3489 		/* buscorerev >= 71, supports minidump of len 96KB */
3490 		trap_buf_len = BCMPCIE_HOST_EXT_TRAP_DBGBUF_LEN;
3491 	}
3492 #else
3493 	/* FW is going to DMA extended trap data;
3494 	 * allocate buffer for the maximum extended trap data.
3495 	 */
3496 	trap_buf_len = BCMPCIE_EXT_TRAP_DATA_MAXLEN;
3497 #endif /* D2H_MINIDUMP */
3498 
3499 	/* Initialize trap buffer */
3500 	if (dhd_dma_buf_alloc(dhd, &dhd->prot->fw_trap_buf, trap_buf_len)) {
3501 		DHD_ERROR(("%s: dhd_init_trap_buffer failed\n", __FUNCTION__));
3502 		goto fail;
3503 	}
3504 
3505 	return BCME_OK;
3506 
3507 fail:
3508 
3509 	if (prot) {
3510 		/* Free up all allocated memories */
3511 		dhd_prot_detach(dhd);
3512 	}
3513 
3514 	return BCME_NOMEM;
3515 } /* dhd_prot_attach */
3516 
3517 static int
3518 dhd_alloc_host_scbs(dhd_pub_t *dhd)
3519 {
3520 	int ret = BCME_OK;
3521 	sh_addr_t base_addr;
3522 	dhd_prot_t *prot = dhd->prot;
3523 	uint32 host_scb_size = 0;
3524 
3525 	if (dhd->hscb_enable) {
3526 		/* read number of bytes to allocate from F/W */
3527 		dhd_bus_cmn_readshared(dhd->bus, &host_scb_size, HOST_SCB_ADDR, 0);
3528 		if (host_scb_size) {
3529 			/* In a fw reload scenario the buffer could have been allocated for a
3530 			 * previous run. Check whether the existing buffer can accommodate the
3531 			 * new firmware requirement and reuse the buffer if possible.
3532 			 */
3533 			if (prot->host_scb_buf.va) {
3534 				if (prot->host_scb_buf.len >= host_scb_size) {
3535 					prot->host_scb_buf.len = host_scb_size;
3536 				} else {
3537 					dhd_dma_buf_free(dhd, &prot->host_scb_buf);
3538 				}
3539 			}
3540 			/* alloc array of host scbs */
3541 			if (prot->host_scb_buf.va == NULL) {
3542 				ret = dhd_dma_buf_alloc(dhd, &prot->host_scb_buf, host_scb_size);
3543 			}
3544 			/* write host scb address to F/W */
3545 			if (ret == BCME_OK) {
3546 				dhd_base_addr_htolpa(&base_addr, prot->host_scb_buf.pa);
3547 				dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
3548 					HOST_SCB_ADDR, 0);
3549 			}
3550 		}
3551 	} else {
3552 		DHD_TRACE(("%s: Host scb not supported in F/W. \n", __FUNCTION__));
3553 	}
3554 
3555 	if (ret != BCME_OK) {
3556 		DHD_ERROR(("%s dhd_alloc_host_scbs, alloc failed: Err Code %d\n",
3557 			__FUNCTION__, ret));
3558 	}
3559 	return ret;
3560 }
3561 
3562 void
3563 dhd_set_host_cap(dhd_pub_t *dhd)
3564 {
3565 	uint32 data = 0;
3566 	dhd_prot_t *prot = dhd->prot;
3567 #ifdef D2H_MINIDUMP
3568 	uint16 host_trap_addr_len;
3569 #endif /* D2H_MINIDUMP */
3570 
3571 	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
3572 		if (dhd->h2d_phase_supported) {
3573 			data |= HOSTCAP_H2D_VALID_PHASE;
3574 			if (dhd->force_dongletrap_on_bad_h2d_phase)
3575 				data |= HOSTCAP_H2D_ENABLE_TRAP_ON_BADPHASE;
3576 		}
3577 		if (prot->host_ipc_version > prot->device_ipc_version)
3578 			prot->active_ipc_version = prot->device_ipc_version;
3579 		else
3580 			prot->active_ipc_version = prot->host_ipc_version;
3581 
3582 		data |= prot->active_ipc_version;
3583 
3584 		if (dhdpcie_bus_get_pcie_hostready_supported(dhd->bus)) {
3585 			DHD_INFO(("Advertise Hostready Capability\n"));
3586 			data |= HOSTCAP_H2D_ENABLE_HOSTRDY;
3587 		}
3588 #ifdef PCIE_INB_DW
3589 		if (dhdpcie_bus_get_pcie_inband_dw_supported(dhd->bus)) {
3590 			DHD_INFO(("Advertise Inband-DW Capability\n"));
3591 			data |= HOSTCAP_DS_INBAND_DW;
3592 			data |= HOSTCAP_DS_NO_OOB_DW;
3593 			dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_INB);
3594 			if (!dhd->dma_h2d_ring_upd_support || !dhd->dma_d2h_ring_upd_support) {
3595 				dhd_init_dongle_ds_lock(dhd->bus);
3596 				dhdpcie_set_dongle_deepsleep(dhd->bus, FALSE);
3597 			}
3598 		} else
3599 #endif /* PCIE_INB_DW */
3600 #ifdef PCIE_OOB
3601 		if (dhdpcie_bus_get_pcie_oob_dw_supported(dhd->bus)) {
3602 			dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_OOB);
3603 		} else
3604 #endif /* PCIE_OOB */
3605 		{
3606 			/* Disable DS altogether */
3607 			data |= HOSTCAP_DS_NO_OOB_DW;
3608 			dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_NONE);
3609 		}
3610 
3611 		/* Indicate support for extended trap data */
3612 		data |= HOSTCAP_EXTENDED_TRAP_DATA;
3613 
3614 		/* Indicate support for TX status metadata */
3615 		if (dhd->pcie_txs_metadata_enable != 0)
3616 			data |= HOSTCAP_TXSTATUS_METADATA;
3617 
3618 #ifdef BTLOG
3619 		/* Indicate support for BT logging */
3620 		if (dhd->bt_logging) {
3621 			if (dhd->bt_logging_enabled) {
3622 				data |= HOSTCAP_BT_LOGGING;
3623 				DHD_ERROR(("BT LOGGING  enabled\n"));
3624 			}
3625 			else {
3626 				DHD_ERROR(("BT logging supported in FW, BT LOGGING disabled\n"));
3627 			}
3628 		}
3629 		else {
3630 			DHD_ERROR(("BT LOGGING not enabled in FW !!\n"));
3631 		}
3632 #endif	/* BTLOG */
3633 
3634 		/* Enable fast delete ring in firmware if supported */
3635 		if (dhd->fast_delete_ring_support) {
3636 			data |= HOSTCAP_FAST_DELETE_RING;
3637 		}
3638 
3639 		if (dhdpcie_bus_get_pcie_idma_supported(dhd->bus)) {
3640 			DHD_ERROR(("IDMA inited\n"));
3641 			data |= HOSTCAP_H2D_IDMA;
3642 			dhd->idma_inited = TRUE;
3643 		} else {
3644 			DHD_ERROR(("IDMA not enabled in FW !!\n"));
3645 			dhd->idma_inited = FALSE;
3646 		}
3647 
3648 		if (dhdpcie_bus_get_pcie_ifrm_supported(dhd->bus)) {
3649 			DHD_ERROR(("IFRM Inited\n"));
3650 			data |= HOSTCAP_H2D_IFRM;
3651 			dhd->ifrm_inited = TRUE;
3652 			dhd->dma_h2d_ring_upd_support = FALSE;
3653 			dhd_prot_dma_indx_free(dhd);
3654 		} else {
3655 			DHD_ERROR(("IFRM not enabled in FW !!\n"));
3656 			dhd->ifrm_inited = FALSE;
3657 		}
3658 
3659 		if (dhdpcie_bus_get_pcie_dar_supported(dhd->bus)) {
3660 			DHD_ERROR(("DAR doorbell Use\n"));
3661 			data |= HOSTCAP_H2D_DAR;
3662 			dhd->dar_inited = TRUE;
3663 		} else {
3664 			DHD_ERROR(("DAR not enabled in FW !!\n"));
3665 			dhd->dar_inited = FALSE;
3666 		}
3667 
3668 		/* FW Checks for HOSTCAP_UR_FW_NO_TRAP and Does not TRAP if set
3669 		 * Radar 36403220 JIRA SWWLAN-182145
3670 		 */
3671 		data |= HOSTCAP_UR_FW_NO_TRAP;
3672 
3673 #ifdef SNAPSHOT_UPLOAD
3674 		/* Indicate support for snapshot upload */
3675 		if (dhd->snapshot_upload) {
3676 			data |= HOSTCAP_SNAPSHOT_UPLOAD;
3677 			DHD_ERROR(("ALLOW SNAPSHOT UPLOAD!!\n"));
3678 		}
3679 #endif	/* SNAPSHOT_UPLOAD */
3680 
3681 		if (dhd->hscb_enable) {
3682 			data |= HOSTCAP_HSCB;
3683 		}
3684 
3685 #ifdef EWP_EDL
3686 		if (dhd->dongle_edl_support) {
3687 			data |= HOSTCAP_EDL_RING;
3688 			DHD_ERROR(("Enable EDL host cap\n"));
3689 		} else {
3690 			DHD_ERROR(("DO NOT SET EDL host cap\n"));
3691 		}
3692 #endif /* EWP_EDL */
3693 
3694 #ifdef D2H_MINIDUMP
3695 		if (dhd_bus_is_minidump_enabled(dhd)) {
3696 			data |= HOSTCAP_EXT_TRAP_DBGBUF;
3697 			DHD_ERROR(("ALLOW D2H MINIDUMP!!\n"));
3698 		}
3699 #endif /* D2H_MINIDUMP */
3700 #ifdef DHD_HP2P
3701 		if (dhdpcie_bus_get_hp2p_supported(dhd->bus)) {
3702 			data |= HOSTCAP_PKT_TIMESTAMP;
3703 			data |= HOSTCAP_PKT_HP2P;
3704 			DHD_ERROR(("Enable HP2P in host cap\n"));
3705 		} else {
3706 			DHD_ERROR(("HP2P not enabled in host cap\n"));
3707 		}
3708 #endif /* DHD_HP2P */
3709 
3710 #ifdef DHD_DB0TS
3711 		if (dhd->db0ts_capable) {
3712 			data |= HOSTCAP_DB0_TIMESTAMP;
3713 			DHD_ERROR(("Enable DB0 TS in host cap\n"));
3714 		} else {
3715 			DHD_ERROR(("DB0 TS not enabled in host cap\n"));
3716 		}
3717 #endif /* DHD_DB0TS */
3718 		if (dhd->extdtxs_in_txcpl) {
3719 			DHD_ERROR(("Enable hostcap: EXTD TXS in txcpl\n"));
3720 			data |= HOSTCAP_PKT_TXSTATUS;
3721 		}
3722 		else {
3723 			DHD_ERROR(("Disable hostcap: EXTD TXS in txcpl\n"));
3724 		}
3725 
3726 		DHD_INFO(("%s:Active Ver:%d, Host Ver:%d, FW Ver:%d\n",
3727 			__FUNCTION__,
3728 			prot->active_ipc_version, prot->host_ipc_version,
3729 			prot->device_ipc_version));
3730 
3731 		dhd_bus_cmn_writeshared(dhd->bus, &data, sizeof(uint32), HOST_API_VERSION, 0);
3732 		dhd_bus_cmn_writeshared(dhd->bus, &prot->fw_trap_buf.pa,
3733 			sizeof(prot->fw_trap_buf.pa), DNGL_TO_HOST_TRAP_ADDR, 0);
3734 #ifdef D2H_MINIDUMP
3735 		if (dhd_bus_is_minidump_enabled(dhd)) {
3736 			/* Dongle expects the host_trap_addr_len in terms of words */
3737 			host_trap_addr_len = prot->fw_trap_buf.len / 4;
3738 			dhd_bus_cmn_writeshared(dhd->bus, &host_trap_addr_len,
3739 				sizeof(host_trap_addr_len), DNGL_TO_HOST_TRAP_ADDR_LEN, 0);
3740 		}
3741 #endif /* D2H_MINIDUMP */
3742 	}
3743 
3744 #ifdef DHD_TIMESYNC
3745 	dhd_timesync_notify_ipc_rev(dhd->ts, prot->active_ipc_version);
3746 #endif /* DHD_TIMESYNC */
3747 }
3748 
3749 #ifdef AGG_H2D_DB
3750 void dhd_agg_inflight_stats_dump(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
3751 {
3752 	uint64 *inflight_histo = dhd->prot->agg_h2d_db_info.inflight_histo;
3753 	uint32 i;
3754 	uint64 total_inflight_histo = 0;
3755 
3756 	bcm_bprintf(strbuf, "inflight: \t count\n");
3757 	for (i = 0; i < DHD_NUM_INFLIGHT_HISTO_ROWS; i++) {
3758 		bcm_bprintf(strbuf, "%16u: \t %llu\n", 1U<<i, inflight_histo[i]);
3759 		total_inflight_histo += inflight_histo[i];
3760 	}
3761 	bcm_bprintf(strbuf, "total_inflight_histo: %llu\n", total_inflight_histo);
3762 }
3763 
3764 void dhd_agg_inflights_stats_update(dhd_pub_t *dhd, uint32 inflight)
3765 {
3766 	uint64 *bin = dhd->prot->agg_h2d_db_info.inflight_histo;
3767 	uint64 *p;
3768 	uint32 bin_power;
3769 	bin_power = next_larger_power2(inflight);
3770 
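	/* Map the rounded-up power of two onto a histogram bin: e.g. an inflight
	 * count of 100 rounds up to 128 and lands in bin 7 (1U<<7); anything above
	 * 8192 is clamped into the last bin.
	 */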
3771 	switch (bin_power) {
3772 		case   1: p = bin + 0; break;
3773 		case   2: p = bin + 1; break;
3774 		case   4: p = bin + 2; break;
3775 		case   8: p = bin + 3; break;
3776 		case  16: p = bin + 4; break;
3777 		case  32: p = bin + 5; break;
3778 		case  64: p = bin + 6; break;
3779 		case 128: p = bin + 7; break;
3780 		case 256: p = bin + 8; break;
3781 		case 512: p = bin + 9; break;
3782 		case 1024: p = bin + 10; break;
3783 		case 2048: p = bin + 11; break;
3784 		case 4096: p = bin + 12; break;
3785 		case 8192: p = bin + 13; break;
3786 		default : p = bin + 13; break;
3787 	}
3788 	ASSERT((p - bin) < DHD_NUM_INFLIGHT_HISTO_ROWS);
3789 	*p = *p + 1;
3790 	return;
3791 }
3792 
3793 /*
3794  * dhd_msgbuf_agg_h2d_db_timer_fn:
3795  * Timer callback function for ringing h2d DB.
3796  * This is run in isr context (HRTIMER_MODE_REL),
3797  * do not hold any spin_lock_bh().
3798  * Using HRTIMER_MODE_REL_SOFT causes TPUT regressions.
3799  */
3800 enum hrtimer_restart
3801 dhd_msgbuf_agg_h2d_db_timer_fn(struct hrtimer *timer)
3802 {
3803 	agg_h2d_db_info_t *agg_db_info;
3804 	dhd_pub_t *dhd;
3805 	dhd_prot_t *prot;
3806 	uint32 db_index;
3807 	uint corerev;
3808 
3809 	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
3810 	agg_db_info = container_of(timer, agg_h2d_db_info_t, timer);
3811 	GCC_DIAGNOSTIC_POP();
3812 
3813 	dhd = agg_db_info->dhd;
3814 	prot = dhd->prot;
3815 
3816 	prot->agg_h2d_db_info.timer_db_cnt++;
3817 	if (IDMA_ACTIVE(dhd)) {
3818 		db_index = IDMA_IDX0;
3819 		if (dhd->bus->sih) {
3820 			corerev = dhd->bus->sih->buscorerev;
3821 			if (corerev >= 24) {
3822 				db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
3823 			}
3824 		}
3825 		prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
3826 	} else {
3827 		prot->mb_ring_fn(dhd->bus, DHD_AGGR_H2D_DB_MAGIC);
3828 	}
3829 
3830 	return HRTIMER_NORESTART;
3831 }
3832 
3833 void
3834 dhd_msgbuf_agg_h2d_db_timer_start(dhd_prot_t *prot)
3835 {
3836 	agg_h2d_db_info_t *agg_db_info = &prot->agg_h2d_db_info;
3837 
3838 	/* Queue the timer only when it is not in the queue */
3839 	if (!hrtimer_active(&agg_db_info->timer)) {
3840 		hrtimer_start(&agg_db_info->timer, ns_to_ktime(agg_h2d_db_timeout * NSEC_PER_USEC),
3841 				HRTIMER_MODE_REL);
3842 	}
3843 }
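/*
 * Usage note (a sketch, not an exhaustive description of the callers): with
 * doorbell aggregation the TX post path is expected to call
 * dhd_msgbuf_agg_h2d_db_timer_start() instead of ringing the H2D doorbell for
 * every posted work item; the hrtimer callback above then rings the doorbell
 * once after agg_h2d_db_timeout microseconds, coalescing several submissions
 * into a single doorbell write.
 */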
3844 
3845 static void
3846 dhd_msgbuf_agg_h2d_db_timer_init(dhd_pub_t *dhd)
3847 {
3848 	dhd_prot_t *prot = dhd->prot;
3849 	agg_h2d_db_info_t *agg_db_info = &prot->agg_h2d_db_info;
3850 
3851 	agg_db_info->dhd = dhd;
3852 	hrtimer_init(&agg_db_info->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3853 	/* The timer function will run from ISR context, ensure no spin_lock_bh are used */
3854 	agg_db_info->timer.function = &dhd_msgbuf_agg_h2d_db_timer_fn;
3855 	agg_db_info->init = TRUE;
3856 	agg_db_info->timer_db_cnt = 0;
3857 	agg_db_info->direct_db_cnt = 0;
3858 	agg_db_info->inflight_histo = (uint64 *)MALLOCZ(dhd->osh, DHD_INFLIGHT_HISTO_SIZE);
3859 }
3860 
3861 static void
3862 dhd_msgbuf_agg_h2d_db_timer_reset(dhd_pub_t *dhd)
3863 {
3864 	dhd_prot_t *prot = dhd->prot;
3865 	agg_h2d_db_info_t *agg_db_info = &prot->agg_h2d_db_info;
3866 	if (agg_db_info->init) {
3867 		if (agg_db_info->inflight_histo) {
3868 			MFREE(dhd->osh, agg_db_info->inflight_histo, DHD_INFLIGHT_HISTO_SIZE);
3869 		}
3870 		hrtimer_try_to_cancel(&agg_db_info->timer);
3871 		agg_db_info->init = FALSE;
3872 	}
3873 }
3874 
3875 static void
3876 dhd_msgbuf_agg_h2d_db_timer_cancel(dhd_pub_t *dhd)
3877 {
3878 	dhd_prot_t *prot = dhd->prot;
3879 	agg_h2d_db_info_t *agg_db_info = &prot->agg_h2d_db_info;
3880 	hrtimer_try_to_cancel(&agg_db_info->timer);
3881 }
3882 #endif /* AGG_H2D_DB */
3883 
3884 void
3885 dhd_prot_clearcounts(dhd_pub_t *dhd)
3886 {
3887 	dhd_prot_t *prot = dhd->prot;
3888 #ifdef AGG_H2D_DB
3889 	agg_h2d_db_info_t *agg_db_info = &prot->agg_h2d_db_info;
3890 	if (agg_db_info->inflight_histo) {
3891 		memset(agg_db_info->inflight_histo, 0, DHD_INFLIGHT_HISTO_SIZE);
3892 	}
3893 	agg_db_info->direct_db_cnt = 0;
3894 	agg_db_info->timer_db_cnt = 0;
3895 #endif /* AGG_H2D_DB */
3896 	prot->txcpl_db_cnt = 0;
3897 	prot->tx_h2d_db_cnt = 0;
3898 }
3899 
3900 /**
3901  * dhd_prot_init - second stage of dhd_prot_attach. Now that the dongle has
3902  * completed its initialization of the pcie_shared structure, we may now fetch
3903  * the dongle advertized features and adjust the protocol layer accordingly.
3904  *
3905  * dhd_prot_init() may be invoked again after a dhd_prot_reset().
3906  */
3907 int
3908 dhd_prot_init(dhd_pub_t *dhd)
3909 {
3910 	sh_addr_t base_addr;
3911 	dhd_prot_t *prot = dhd->prot;
3912 	int ret = 0;
3913 	uint32 idmacontrol;
3914 	uint32 waitcount = 0;
3915 	uint16 max_eventbufpost = 0;
3916 
3917 	/**
3918 	 * A user defined value can be assigned to global variable h2d_max_txpost via
3919 	 * 1. DHD IOVAR h2d_max_txpost, before firmware download
3920 	 * 2. module parameter h2d_max_txpost
3921 	 * prot->h2d_max_txpost is assigned with DHD_H2DRING_TXPOST_MAX_ITEM,
3922 	 * if user has not defined any buffers by one of the above methods.
3923 	 */
3924 	prot->h2d_max_txpost = (uint16)h2d_max_txpost;
3925 	DHD_ERROR(("%s:%d: h2d_max_txpost = %d\n", __FUNCTION__, __LINE__, prot->h2d_max_txpost));
3926 
3927 #if defined(DHD_HTPUT_TUNABLES)
3928 	prot->h2d_htput_max_txpost = (uint16)h2d_htput_max_txpost;
3929 	DHD_ERROR(("%s:%d: h2d_htput_max_txpost = %d\n",
3930 		__FUNCTION__, __LINE__, prot->h2d_htput_max_txpost));
3931 #endif /* DHD_HTPUT_TUNABLES */
3932 
3933 	/* Read max rx packets supported by dongle */
3934 	dhd_bus_cmn_readshared(dhd->bus, &prot->max_rxbufpost, MAX_HOST_RXBUFS, 0);
3935 	if (prot->max_rxbufpost == 0) {
3936 		/* This would happen if the dongle firmware is not */
3937 		/* using the latest shared structure template */
3938 		prot->max_rxbufpost = DEFAULT_RX_BUFFERS_TO_POST;
3939 	}
3940 	DHD_ERROR(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost));
3941 
3942 	/* Initialize.  bzero() would blow away the dma pointers. */
3943 	max_eventbufpost = (uint16)dhdpcie_get_max_eventbufpost(dhd->bus);
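	/* Use the supplied event buffer post count only if it leaves room for the
	 * ioctl response posts within the control submit ring; otherwise fall back
	 * to the default DHD_FLOWRING_MAX_EVENTBUF_POST.
	 */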
3944 	prot->max_eventbufpost = (((max_eventbufpost + DHD_FLOWRING_MAX_IOCTLRESPBUF_POST)) >=
3945 		H2DRING_CTRL_SUB_MAX_ITEM) ? DHD_FLOWRING_MAX_EVENTBUF_POST : max_eventbufpost;
3946 	prot->max_ioctlrespbufpost = DHD_FLOWRING_MAX_IOCTLRESPBUF_POST;
3947 	prot->max_infobufpost = DHD_H2D_INFORING_MAX_BUF_POST;
3948 #ifdef BTLOG
3949 	prot->max_btlogbufpost = DHD_H2D_BTLOGRING_MAX_BUF_POST;
3950 #endif	/* BTLOG */
3951 	prot->max_tsbufpost = DHD_MAX_TSBUF_POST;
3952 
3953 	prot->cur_ioctlresp_bufs_posted = 0;
3954 	OSL_ATOMIC_INIT(dhd->osh, &prot->active_tx_count);
3955 	prot->data_seq_no = 0;
3956 	prot->ioctl_seq_no = 0;
3957 	prot->rxbufpost = 0;
3958 	prot->tot_rxbufpost = 0;
3959 	prot->tot_rxcpl = 0;
3960 	prot->cur_event_bufs_posted = 0;
3961 	prot->ioctl_state = 0;
3962 	prot->curr_ioctl_cmd = 0;
3963 	prot->cur_ts_bufs_posted = 0;
3964 	prot->infobufpost = 0;
3965 #ifdef BTLOG
3966 	prot->btlogbufpost = 0;
3967 #endif	/* BTLOG */
3968 
3969 	prot->dmaxfer.srcmem.va = NULL;
3970 	prot->dmaxfer.dstmem.va = NULL;
3971 	prot->dmaxfer.in_progress = FALSE;
3972 
3973 #ifdef DHD_HMAPTEST
3974 	prot->hmaptest.in_progress = FALSE;
3975 #endif /* DHD_HMAPTEST */
3976 	prot->metadata_dbg = FALSE;
3977 	prot->rx_metadata_offset = 0;
3978 	prot->tx_metadata_offset = 0;
3979 	prot->txp_threshold = TXP_FLUSH_MAX_ITEMS_FLUSH_CNT;
3980 
3981 	/* To catch any rollover issues fast, starting with higher ioctl_trans_id */
3982 	prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER;
3983 	prot->ioctl_state = 0;
3984 	prot->ioctl_status = 0;
3985 	prot->ioctl_resplen = 0;
3986 	prot->ioctl_received = IOCTL_WAIT;
3987 
3988 	/* Initialize Common MsgBuf Rings */
3989 
3990 	prot->device_ipc_version = dhd->bus->api.fw_rev;
3991 	prot->host_ipc_version = PCIE_SHARED_VERSION;
3992 	prot->no_tx_resource = FALSE;
3993 
3994 	/* Init the host API version */
3995 	dhd_set_host_cap(dhd);
3996 
3997 	/* alloc and configure scb host address for dongle */
3998 	if ((ret = dhd_alloc_host_scbs(dhd))) {
3999 		return ret;
4000 	}
4001 
4002 	/* Register the interrupt function upfront */
4003 	/* remove corerev checks in data path */
4004 	/* do this after host/fw negotiation for DAR */
4005 	prot->mb_ring_fn = dhd_bus_get_mbintr_fn(dhd->bus);
4006 	prot->mb_2_ring_fn = dhd_bus_get_mbintr_2_fn(dhd->bus);
4007 
4008 	prot->tx_h2d_db_cnt = 0;
4009 #ifdef AGG_H2D_DB
4010 	dhd_msgbuf_agg_h2d_db_timer_init(dhd);
4011 #endif /* AGG_H2D_DB */
4012 
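	/* The DAR work-around (_dar_war) is applied only on older PCIe bus core
	 * revisions (buscorerev < 64), per the check below.
	 */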
4013 	dhd->bus->_dar_war = (dhd->bus->sih->buscorerev < 64) ? TRUE : FALSE;
4014 
4015 	/* If supported by the host, indicate the memory block
4016 	 * for completion writes / submission reads to shared space
4017 	 */
4018 	if (dhd->dma_d2h_ring_upd_support) {
4019 		dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_wr_buf.pa);
4020 		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
4021 			D2H_DMA_INDX_WR_BUF, 0);
4022 		dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_rd_buf.pa);
4023 		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
4024 			H2D_DMA_INDX_RD_BUF, 0);
4025 	}
4026 
4027 	if (dhd->dma_h2d_ring_upd_support || IDMA_ENAB(dhd)) {
4028 		dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_wr_buf.pa);
4029 		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
4030 			H2D_DMA_INDX_WR_BUF, 0);
4031 		dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_rd_buf.pa);
4032 		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
4033 			D2H_DMA_INDX_RD_BUF, 0);
4034 	}
4035 
4036 	dhd_prot_ring_init(dhd, &prot->h2dring_ctrl_subn);
4037 	dhd_prot_ring_init(dhd, &prot->h2dring_rxp_subn);
4038 	dhd_prot_ring_init(dhd, &prot->d2hring_ctrl_cpln);
4039 
4040 	/* Make it compatible with pre-rev7 Firmware */
4041 	if (prot->active_ipc_version < PCIE_SHARED_VERSION_7) {
4042 		prot->d2hring_tx_cpln.item_len =
4043 			D2HRING_TXCMPLT_ITEMSIZE_PREREV7;
4044 		prot->d2hring_rx_cpln.item_len =
4045 			D2HRING_RXCMPLT_ITEMSIZE_PREREV7;
4046 	}
4047 	dhd_prot_ring_init(dhd, &prot->d2hring_tx_cpln);
4048 	dhd_prot_ring_init(dhd, &prot->d2hring_rx_cpln);
4049 
4050 	dhd_prot_d2h_sync_init(dhd);
4051 
4052 	dhd_prot_h2d_sync_init(dhd);
4053 
4054 #ifdef PCIE_INB_DW
4055 	/* Set the initial DS state */
4056 	if (INBAND_DW_ENAB(dhd->bus)) {
4057 		dhdpcie_bus_set_pcie_inband_dw_state(dhd->bus,
4058 			DW_DEVICE_DS_ACTIVE);
4059 	}
4060 #endif /* PCIE_INB_DW */
4061 
4062 	/* init the scratch buffer */
4063 	dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_scratch_buf.pa);
4064 	dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
4065 		D2H_DMA_SCRATCH_BUF, 0);
4066 	dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf.len,
4067 		sizeof(prot->d2h_dma_scratch_buf.len), D2H_DMA_SCRATCH_BUF_LEN, 0);
4068 #ifdef DHD_DMA_INDICES_SEQNUM
4069 	prot->host_seqnum = D2H_EPOCH_INIT_VAL % D2H_EPOCH_MODULO;
4070 #endif /* DHD_DMA_INDICES_SEQNUM */
4071 	/* Signal to the dongle that common ring init is complete */
4072 	if (dhd->hostrdy_after_init)
4073 		dhd_bus_hostready(dhd->bus);
4074 
4075 	/*
4076 	 * If the DMA-able buffers for flowring needs to come from a specific
4077 	 * contiguous memory region, then setup prot->flowrings_dma_buf here.
4078 	 * dhd_prot_flowrings_pool_attach() will carve out DMA-able buffers from
4079 	 * this contiguous memory region, for each of the flowrings.
4080 	 */
4081 
4082 	/* Pre-allocate pool of msgbuf_ring for flowrings */
4083 	if (dhd_prot_flowrings_pool_attach(dhd) != BCME_OK) {
4084 		return BCME_ERROR;
4085 	}
4086 
4087 	dhd->ring_attached = TRUE;
4088 
4089 	/* If IFRM is enabled, wait for FW to setup the DMA channel */
4090 	if (IFRM_ENAB(dhd)) {
4091 		dhd_base_addr_htolpa(&base_addr, prot->h2d_ifrm_indx_wr_buf.pa);
4092 		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
4093 			H2D_IFRM_INDX_WR_BUF, 0);
4094 	}
4095 
4096 	/* If IDMA is enabled and inited, wait for FW to set up the IDMA descriptors.
4097 	 * Wait just before configuring the doorbell.
4098 	 */
4099 #ifdef BCMQT
4100 #define	IDMA_ENABLE_WAIT  100
4101 #else
4102 #define	IDMA_ENABLE_WAIT  10
4103 #endif
4104 	if (IDMA_ACTIVE(dhd)) {
4105 		/* wait for idma_en bit in IDMAcontrol register to be set */
4106 		/* Loop until idma_en is set */
4107 		uint buscorerev = dhd->bus->sih->buscorerev;
4108 		idmacontrol = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
4109 			IDMAControl(buscorerev), 0, 0);
4110 		while (!(idmacontrol & PCIE_IDMA_MODE_EN(buscorerev)) &&
4111 			(waitcount++ < IDMA_ENABLE_WAIT)) {
4112 
4113 			DHD_ERROR(("iDMA not enabled yet,waiting 1 ms c=%d IDMAControl = %08x\n",
4114 				waitcount, idmacontrol));
4115 			OSL_DELAY(1000); /* 1ms as its onetime only */
4116 			idmacontrol = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
4117 				IDMAControl(buscorerev), 0, 0);
4118 		}
4119 
4120 		if (waitcount < IDMA_ENABLE_WAIT) {
4121 			DHD_ERROR(("iDMA enabled PCIEControl = %08x\n", idmacontrol));
4122 		} else {
4123 			DHD_ERROR(("Error: wait for iDMA timed out wait=%d IDMAControl = %08x\n",
4124 				waitcount, idmacontrol));
4125 			return BCME_ERROR;
4126 		}
4127 		// add delay to fix bring up issue
4128 		OSL_SLEEP(1);
4129 	}
4130 
4131 	/* Host should configure soft doorbells if needed ... here */
4132 
4133 	/* Post to dongle host configured soft doorbells */
4134 	dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd);
4135 
4136 	dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
4137 	dhd_msgbuf_rxbuf_post_event_bufs(dhd);
4138 
4139 	prot->no_retry = FALSE;
4140 	prot->no_aggr = FALSE;
4141 	prot->fixed_rate = FALSE;
4142 
4143 	/*
4144 	 * Note that any communication with the Dongle should be added
4145 	 * below this point. Any other host data structure initialization that
4146 	 * needs to be done before the DPC starts executing should be done
4147 	 * before this point.
4148 	 * Because once we start sending H2D requests to the Dongle, the Dongle
4149 	 * may respond immediately. So the DPC context handling the
4150 	 * D2H response could preempt the context in which dhd_prot_init is running.
4151 	 * We want to ensure that all the host side of dhd_prot_init is
4152 	 * done before that.
4153 	 */
4154 
4155 	/* See if info rings can be created; info rings should be created
4156 	 * only if the dongle does not support EDL
4157 	 */
4158 #ifdef EWP_EDL
4159 	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !dhd->dongle_edl_support) {
4160 #else
4161 	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
4162 #endif /* EWP_EDL */
4163 		if ((ret = dhd_prot_init_info_rings(dhd)) != BCME_OK) {
4164 			/* For now log and proceed, further clean up action may be necessary
4165 			 * when we have more clarity.
4166 			 */
4167 			DHD_ERROR(("%s Info rings couldn't be created: Err Code%d",
4168 				__FUNCTION__, ret));
4169 		}
4170 	}
4171 
4172 #ifdef EWP_EDL
4173 		/* Create Enhanced Debug Lane rings (EDL) if dongle supports it */
4174 		if (dhd->dongle_edl_support) {
4175 			if ((ret = dhd_prot_init_edl_rings(dhd)) != BCME_OK) {
4176 				DHD_ERROR(("%s EDL rings couldn't be created: Err Code%d",
4177 					__FUNCTION__, ret));
4178 			}
4179 		}
4180 #endif /* EWP_EDL */
4181 
4182 #ifdef BTLOG
4183 	/* create BT log rings */
4184 	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_7 && dhd->bt_logging) {
4185 		if ((ret = dhd_prot_init_btlog_rings(dhd)) != BCME_OK) {
4186 			/* For now log and proceed, further clean up action may be necessary
4187 			 * when we have more clarity.
4188 			 */
4189 			DHD_ERROR(("%s BTLOG rings couldn't be created: Err Code%d",
4190 				__FUNCTION__, ret));
4191 		}
4192 	}
4193 #endif	/* BTLOG */
4194 
4195 #ifdef DHD_HP2P
4196 	/* create HPP txcmpl/rxcmpl rings */
4197 	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_7 && dhd->hp2p_capable) {
4198 		if ((ret = dhd_prot_init_hp2p_rings(dhd)) != BCME_OK) {
4199 			/* For now log and proceed, further clean up action may be necessary
4200 			 * when we have more clarity.
4201 			 */
4202 			DHD_ERROR(("%s HP2P rings couldn't be created: Err Code%d",
4203 				__FUNCTION__, ret));
4204 		}
4205 	}
4206 #endif /* DHD_HP2P */
4207 
4208 #ifdef DHD_LB_RXP
4209 	/* default rx flow ctrl thresholds. Can be changed at run time through sysfs */
4210 	dhd->lb_rxp_stop_thr = (D2HRING_RXCMPLT_MAX_ITEM * LB_RXP_STOP_THR);
4211 	dhd->lb_rxp_strt_thr = (D2HRING_RXCMPLT_MAX_ITEM * LB_RXP_STRT_THR);
4212 	atomic_set(&dhd->lb_rxp_flow_ctrl, FALSE);
4213 #endif /* DHD_LB_RXP */
4214 	return BCME_OK;
4215 } /* dhd_prot_init */
4216 
4217 /**
4218  * dhd_prot_detach - PCIE FD protocol layer destructor.
4219  * Unlink, frees allocated protocol memory (including dhd_prot)
4220  */
4221 void dhd_prot_detach(dhd_pub_t *dhd)
4222 {
4223 	dhd_prot_t *prot = dhd->prot;
4224 
4225 	/* Stop the protocol module */
4226 	if (prot) {
4227 		/* For non-android platforms, devreset will not be called,
4228 		 * so call prot_reset here. It is harmless if called twice.
4229 		 */
4230 		dhd_prot_reset(dhd);
4231 
4232 		/* free up all DMA-able buffers allocated during prot attach/init */
4233 
4234 		dhd_dma_buf_free(dhd, &prot->d2h_dma_scratch_buf);
4235 #ifdef DHD_HMAPTEST
4236 		dhd_dma_buf_free(dhd, &prot->hmaptest.mem);
4237 #endif /* DHD_HMAPTEST */
4238 		dhd_dma_buf_free(dhd, &prot->retbuf);
4239 		dhd_dma_buf_free(dhd, &prot->ioctbuf);
4240 		dhd_dma_buf_free(dhd, &prot->host_bus_throughput_buf);
4241 		dhd_dma_buf_free(dhd, &prot->hostts_req_buf);
4242 		dhd_dma_buf_free(dhd, &prot->fw_trap_buf);
4243 		dhd_dma_buf_free(dhd, &prot->host_scb_buf);
4244 #ifdef SNAPSHOT_UPLOAD
4245 		dhd_dma_buf_free(dhd, &prot->snapshot_upload_buf);
4246 #endif	/* SNAPSHOT_UPLOAD */
4247 
4248 		/* DMA-able buffers for DMAing H2D/D2H WR/RD indices */
4249 		dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
4250 		dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_rd_buf);
4251 		dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_wr_buf);
4252 		dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
4253 
4254 		dhd_dma_buf_free(dhd, &prot->h2d_ifrm_indx_wr_buf);
4255 
4256 		/* Common MsgBuf Rings */
4257 		dhd_prot_ring_detach(dhd, &prot->h2dring_ctrl_subn);
4258 		dhd_prot_ring_detach(dhd, &prot->h2dring_rxp_subn);
4259 		dhd_prot_ring_detach(dhd, &prot->d2hring_ctrl_cpln);
4260 		dhd_prot_ring_detach(dhd, &prot->d2hring_tx_cpln);
4261 		dhd_prot_ring_detach(dhd, &prot->d2hring_rx_cpln);
4262 
4263 		/* Detach each DMA-able buffer and free the pool of msgbuf_ring_t */
4264 		dhd_prot_flowrings_pool_detach(dhd);
4265 
4266 		/* detach info rings */
4267 		dhd_prot_detach_info_rings(dhd);
4268 
4269 #ifdef BTLOG
4270 		/* detach BT log rings */
4271 		dhd_prot_detach_btlog_rings(dhd);
4272 #endif	/* BTLOG */
4273 
4274 #ifdef EWP_EDL
4275 		dhd_prot_detach_edl_rings(dhd);
4276 #endif
4277 #ifdef DHD_HP2P
4278 		/* detach HPP rings */
4279 		dhd_prot_detach_hp2p_rings(dhd);
4280 #endif /* DHD_HP2P */
4281 
4282 		/* if IOCTLRESP_USE_CONSTMEM is defined IOCTL PKTs use pktid_map_handle_ioctl
4283 		 * handler and PKT memory is allocated using alloc_ioctl_return_buffer(), Otherwise
4284 		 * they will be part of pktid_ctrl_map handler and PKT memory is allocated using
4285 		 * PKTGET_STATIC (if DHD_USE_STATIC_CTRLBUF is defined) OR PKTGET.
4286 		 * Similarly for freeing PKT buffers DHD_NATIVE_TO_PKTID_FINI will be used
4287 		 * which calls PKTFREE_STATIC (if DHD_USE_STATIC_CTRLBUF is defined) OR PKTFREE.
4288 		 * Else if IOCTLRESP_USE_CONSTMEM is defined IOCTL PKTs will be freed using
4289 		 * DHD_NATIVE_TO_PKTID_FINI_IOCTL which calls free_ioctl_return_buffer.
4290 		 */
4291 		DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_ctrl_map);
4292 		DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_rx_map);
4293 		DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_tx_map);
4294 #ifdef IOCTLRESP_USE_CONSTMEM
4295 		DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, prot->pktid_map_handle_ioctl);
4296 #endif
4297 #ifdef DHD_MAP_PKTID_LOGGING
4298 		DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_map);
4299 		DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_unmap);
4300 #endif /* DHD_MAP_PKTID_LOGGING */
4301 #ifdef DHD_DMA_INDICES_SEQNUM
4302 		if (prot->h2d_dma_indx_rd_copy_buf) {
4303 			MFREE(dhd->osh, prot->h2d_dma_indx_rd_copy_buf,
4304 				prot->h2d_dma_indx_rd_copy_bufsz);
4305 		}
4306 		if (prot->d2h_dma_indx_wr_copy_buf) {
4307 			MFREE(dhd->osh, prot->d2h_dma_indx_wr_copy_buf,
4308 				prot->d2h_dma_indx_wr_copy_bufsz);
4309 		}
4310 #endif /* DHD_DMA_INDICES_SEQNUM */
4311 		DHD_OS_PREFREE(dhd, dhd->prot, sizeof(dhd_prot_t));
4312 
4313 		dhd->prot = NULL;
4314 	}
4315 } /* dhd_prot_detach */
4316 
4317 /**
4318  * dhd_prot_reset - Reset the protocol layer without freeing any objects.
4319  * This may be invoked to soft reboot the dongle, without having to
4320  * detach and attach the entire protocol layer.
4321  *
4322  * After dhd_prot_reset(), dhd_prot_init() may be invoked
4323  * without going through a dhd_prot_attach() phase.
4324  */
4325 void
4326 dhd_prot_reset(dhd_pub_t *dhd)
4327 {
4328 	struct dhd_prot *prot = dhd->prot;
4329 
4330 	DHD_TRACE(("%s\n", __FUNCTION__));
4331 
4332 	if (prot == NULL) {
4333 		return;
4334 	}
4335 
4336 	dhd->ring_attached = FALSE;
4337 
4338 	dhd_prot_flowrings_pool_reset(dhd);
4339 
4340 	/* Reset Common MsgBuf Rings */
4341 	dhd_prot_ring_reset(dhd, &prot->h2dring_ctrl_subn);
4342 	dhd_prot_ring_reset(dhd, &prot->h2dring_rxp_subn);
4343 	dhd_prot_ring_reset(dhd, &prot->d2hring_ctrl_cpln);
4344 	dhd_prot_ring_reset(dhd, &prot->d2hring_tx_cpln);
4345 	dhd_prot_ring_reset(dhd, &prot->d2hring_rx_cpln);
4346 
4347 	/* Reset info rings */
4348 	if (prot->h2dring_info_subn) {
4349 		dhd_prot_ring_reset(dhd, prot->h2dring_info_subn);
4350 	}
4351 
4352 	if (prot->d2hring_info_cpln) {
4353 		dhd_prot_ring_reset(dhd, prot->d2hring_info_cpln);
4354 	}
4355 
4356 #ifdef EWP_EDL
4357 	if (prot->d2hring_edl) {
4358 		dhd_prot_ring_reset(dhd, prot->d2hring_edl);
4359 	}
4360 #endif /* EWP_EDL */
4361 
4362 	/* Reset all DMA-able buffers allocated during prot attach */
4363 	dhd_dma_buf_reset(dhd, &prot->d2h_dma_scratch_buf);
4364 #ifdef DHD_HMAPTEST
4365 	dhd_dma_buf_reset(dhd, &prot->hmaptest.mem);
4366 #endif /* DHD_HMAPTEST */
4367 	dhd_dma_buf_reset(dhd, &prot->retbuf);
4368 	dhd_dma_buf_reset(dhd, &prot->ioctbuf);
4369 	dhd_dma_buf_reset(dhd, &prot->host_bus_throughput_buf);
4370 	dhd_dma_buf_reset(dhd, &prot->hostts_req_buf);
4371 	dhd_dma_buf_reset(dhd, &prot->fw_trap_buf);
4372 	dhd_dma_buf_reset(dhd, &prot->host_scb_buf);
4373 #ifdef SNAPSHOT_UPLOAD
4374 	dhd_dma_buf_reset(dhd, &prot->snapshot_upload_buf);
4375 #endif /* SNAPSHOT_UPLOAD */
4376 
4377 	dhd_dma_buf_reset(dhd, &prot->h2d_ifrm_indx_wr_buf);
4378 
4379 	/* Reset all DMA-able buffers used for DMAing H2D/D2H WR/RD indices */
4380 	dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_rd_buf);
4381 	dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_wr_buf);
4382 	dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_rd_buf);
4383 	dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_wr_buf);
4384 
4385 #ifdef DHD_DMA_INDICES_SEQNUM
4386 	if (prot->d2h_dma_indx_wr_copy_buf) {
4387 		dhd_local_buf_reset(prot->h2d_dma_indx_rd_copy_buf,
4388 			prot->h2d_dma_indx_rd_copy_bufsz);
4389 		dhd_local_buf_reset(prot->d2h_dma_indx_wr_copy_buf,
4390 			prot->d2h_dma_indx_wr_copy_bufsz);
4391 	}
4392 #endif /* DHD_DMA_INDICES_SEQNUM */
4393 
4394 	/* XXX: dmaxfer src and dst? */
4395 
4396 	prot->rx_metadata_offset = 0;
4397 	prot->tx_metadata_offset = 0;
4398 
4399 	prot->rxbufpost = 0;
4400 	prot->cur_event_bufs_posted = 0;
4401 	prot->cur_ioctlresp_bufs_posted = 0;
4402 
4403 	OSL_ATOMIC_INIT(dhd->osh, &prot->active_tx_count);
4404 	prot->data_seq_no = 0;
4405 	prot->ioctl_seq_no = 0;
4406 	prot->ioctl_state = 0;
4407 	prot->curr_ioctl_cmd = 0;
4408 	prot->ioctl_received = IOCTL_WAIT;
4409 	/* To catch any rollover issues fast, starting with higher ioctl_trans_id */
4410 	prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER;
4411 	prot->txcpl_db_cnt = 0;
4412 
4413 	/* dhd_flow_rings_init is called from dhd_bus_start, so when the bus is
4414 	 * stopped the flowrings must be deleted here
4415 	 */
4416 	if (dhd->flow_rings_inited) {
4417 		dhd_flow_rings_deinit(dhd);
4418 	}
4419 
4420 #ifdef BTLOG
4421 	/* Reset BTlog rings */
4422 	if (prot->h2dring_btlog_subn) {
4423 		dhd_prot_ring_reset(dhd, prot->h2dring_btlog_subn);
4424 	}
4425 
4426 	if (prot->d2hring_btlog_cpln) {
4427 		dhd_prot_ring_reset(dhd, prot->d2hring_btlog_cpln);
4428 	}
4429 #endif	/* BTLOG */
4430 #ifdef DHD_HP2P
4431 	if (prot->d2hring_hp2p_txcpl) {
4432 		dhd_prot_ring_reset(dhd, prot->d2hring_hp2p_txcpl);
4433 	}
4434 	if (prot->d2hring_hp2p_rxcpl) {
4435 		dhd_prot_ring_reset(dhd, prot->d2hring_hp2p_rxcpl);
4436 	}
4437 #endif /* DHD_HP2P */
4438 
4439 	/* Reset PKTID map */
4440 	DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_ctrl_map);
4441 	DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_rx_map);
4442 	DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_tx_map);
4443 #ifdef IOCTLRESP_USE_CONSTMEM
4444 	DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, prot->pktid_map_handle_ioctl);
4445 #endif /* IOCTLRESP_USE_CONSTMEM */
4446 #ifdef DMAMAP_STATS
4447 	dhd->dma_stats.txdata = dhd->dma_stats.txdata_sz = 0;
4448 	dhd->dma_stats.rxdata = dhd->dma_stats.rxdata_sz = 0;
4449 #ifndef IOCTLRESP_USE_CONSTMEM
4450 	dhd->dma_stats.ioctl_rx = dhd->dma_stats.ioctl_rx_sz = 0;
4451 #endif /* IOCTLRESP_USE_CONSTMEM */
4452 	dhd->dma_stats.event_rx = dhd->dma_stats.event_rx_sz = 0;
4453 	dhd->dma_stats.info_rx = dhd->dma_stats.info_rx_sz = 0;
4454 	dhd->dma_stats.tsbuf_rx = dhd->dma_stats.tsbuf_rx_sz = 0;
4455 #endif /* DMAMAP_STATS */
4456 
4457 #ifdef AGG_H2D_DB
4458 	dhd_msgbuf_agg_h2d_db_timer_reset(dhd);
4459 #endif /* AGG_H2D_DB */
4460 
4461 } /* dhd_prot_reset */
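/*
 * Illustrative usage sketch (an editorial annotation, not part of the original
 * source): the soft-reboot flow this function enables, assuming the usual bus
 * re-initialisation path drives it, is roughly:
 *
 *   dhd_prot_attach(dhd);   // once: allocate rings, pktid maps, DMA buffers
 *   dhd_prot_init(dhd);     // link the rings with the dongle, post buffers
 *   ...
 *   dhd_prot_reset(dhd);    // soft reboot: clear indices/state, keep objects
 *   dhd_prot_init(dhd);     // re-init against the rebooted dongle
 *   ...
 *   dhd_prot_detach(dhd);   // final teardown: free all protocol objects
 */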
4462 
4463 #if defined(DHD_LB_RXP)
4464 #define DHD_LB_DISPATCH_RX_PROCESS(dhdp)	dhd_lb_dispatch_rx_process(dhdp)
4465 #else /* !DHD_LB_RXP */
4466 #define DHD_LB_DISPATCH_RX_PROCESS(dhdp)	do { /* noop */ } while (0)
4467 #endif /* !DHD_LB_RXP */
4468 
4469 #if defined(DHD_LB)
4470 /* DHD load balancing: deferral of work to another online CPU */
4471 /* DHD_LB_RXP dispatchers, in dhd_linux.c */
4472 extern void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp);
4473 extern void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx);
4474 extern unsigned long dhd_read_lb_rxp(dhd_pub_t *dhdp);
4475 
4476 #if defined(DHD_LB_RXP)
4477 /**
4478  * dhd_lb_dispatch_rx_process - load balance by dispatch Rx processing work
4479  * to other CPU cores
4480  */
4481 static INLINE void
4482 dhd_lb_dispatch_rx_process(dhd_pub_t *dhdp)
4483 {
4484 	dhd_lb_rx_napi_dispatch(dhdp); /* dispatch rx_process_napi */
4485 }
4486 #endif /* DHD_LB_RXP */
4487 #endif /* DHD_LB */
4488 
4489 void
4490 dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 rx_offset)
4491 {
4492 	dhd_prot_t *prot = dhd->prot;
4493 	prot->rx_dataoffset = rx_offset;
4494 }
4495 
4496 static int
4497 dhd_check_create_info_rings(dhd_pub_t *dhd)
4498 {
4499 	dhd_prot_t *prot = dhd->prot;
4500 	int ret = BCME_ERROR;
4501 	uint16 ringid;
4502 
4503 #ifdef BTLOG
4504 	if (dhd->submit_count_WAR) {
4505 		ringid = dhd->bus->max_tx_flowrings + BCMPCIE_COMMON_MSGRINGS;
4506 	} else
4507 #endif	/* BTLOG */
4508 	{
4509 		/* dongle may increase max_submission_rings so keep
4510 		 * ringid at end of dynamic rings
4511 		 */
4512 		ringid = dhd->bus->max_tx_flowrings +
4513 			(dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) +
4514 			BCMPCIE_H2D_COMMON_MSGRINGS;
4515 	}
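	/*
	 * Worked example (hypothetical counts, for illustration only): with
	 * max_tx_flowrings = 40, max_submission_rings = 42 and
	 * BCMPCIE_H2D_COMMON_MSGRINGS = 2, the info submit ring gets
	 * ringid = 40 + (42 - 40) + 2 = 44, i.e. just past all dynamic
	 * submission rings; its completion ring then takes ringid 45 below.
	 */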
4516 
4517 	if (prot->d2hring_info_cpln) {
4518 		/* for d2hring re-entry case, clear inited flag */
4519 		prot->d2hring_info_cpln->inited = FALSE;
4520 	}
4521 
4522 	if (prot->h2dring_info_subn && prot->d2hring_info_cpln) {
4523 		return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
4524 	}
4525 
4526 	if (prot->h2dring_info_subn == NULL) {
4527 		prot->h2dring_info_subn = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
4528 
4529 		if (prot->h2dring_info_subn == NULL) {
4530 			DHD_ERROR(("%s: couldn't alloc memory for h2dring_info_subn\n",
4531 				__FUNCTION__));
4532 			return BCME_NOMEM;
4533 		}
4534 
4535 		DHD_INFO(("%s: about to create debug submit ring\n", __FUNCTION__));
4536 		ret = dhd_prot_ring_attach(dhd, prot->h2dring_info_subn, "h2dinfo",
4537 			H2DRING_DYNAMIC_INFO_MAX_ITEM, H2DRING_INFO_BUFPOST_ITEMSIZE,
4538 			ringid);
4539 		if (ret != BCME_OK) {
4540 			DHD_ERROR(("%s: couldn't alloc resources for dbg submit ring\n",
4541 				__FUNCTION__));
4542 			goto err;
4543 		}
4544 	}
4545 
4546 	if (prot->d2hring_info_cpln == NULL) {
4547 		prot->d2hring_info_cpln = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
4548 
4549 		if (prot->d2hring_info_cpln == NULL) {
4550 			DHD_ERROR(("%s: couldn't alloc memory for d2hring_info_cpln\n",
4551 				__FUNCTION__));
4552 			return BCME_NOMEM;
4553 		}
4554 
4555 		/* create the debug info completion ring next to debug info submit ring
4556 		 * ringid = id next to debug info submit ring
4557 		 */
4558 		ringid = ringid + 1;
4559 
4560 		DHD_INFO(("%s: about to create debug cpl ring\n", __FUNCTION__));
4561 		ret = dhd_prot_ring_attach(dhd, prot->d2hring_info_cpln, "d2hinfo",
4562 			D2HRING_DYNAMIC_INFO_MAX_ITEM, D2HRING_INFO_BUFCMPLT_ITEMSIZE,
4563 			ringid);
4564 		if (ret != BCME_OK) {
4565 			DHD_ERROR(("%s: couldn't alloc resources for dbg cpl ring\n",
4566 				__FUNCTION__));
4567 			dhd_prot_ring_detach(dhd, prot->h2dring_info_subn);
4568 			goto err;
4569 		}
4570 	}
4571 
4572 	return ret;
4573 err:
4574 	MFREE(prot->osh, prot->h2dring_info_subn, sizeof(msgbuf_ring_t));
4575 
4576 	if (prot->d2hring_info_cpln) {
4577 		MFREE(prot->osh, prot->d2hring_info_cpln, sizeof(msgbuf_ring_t));
4578 	}
4579 	return ret;
4580 } /* dhd_check_create_info_rings */
4581 
4582 int
4583 dhd_prot_init_info_rings(dhd_pub_t *dhd)
4584 {
4585 	dhd_prot_t *prot = dhd->prot;
4586 	int ret = BCME_OK;
4587 
4588 	if ((ret = dhd_check_create_info_rings(dhd)) != BCME_OK) {
4589 		DHD_ERROR(("%s: info rings aren't created! \n",
4590 			__FUNCTION__));
4591 		return ret;
4592 	}
4593 
4594 	if ((prot->d2hring_info_cpln->inited) || (prot->d2hring_info_cpln->create_pending)) {
4595 		DHD_INFO(("Info completion ring was created!\n"));
4596 		return ret;
4597 	}
4598 
4599 	DHD_TRACE(("trying to send create d2h info ring: id %d\n", prot->d2hring_info_cpln->idx));
4600 	ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_info_cpln,
4601 		BCMPCIE_D2H_RING_TYPE_DBGBUF_CPL, DHD_D2H_DBGRING_REQ_PKTID);
4602 	if (ret != BCME_OK)
4603 		return ret;
4604 
4605 	prot->h2dring_info_subn->seqnum = H2D_EPOCH_INIT_VAL;
4606 	prot->h2dring_info_subn->current_phase = 0;
4607 	prot->d2hring_info_cpln->seqnum = D2H_EPOCH_INIT_VAL;
4608 	prot->d2hring_info_cpln->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4609 
4610 	DHD_TRACE(("trying to send create h2d info ring id %d\n", prot->h2dring_info_subn->idx));
4611 	prot->h2dring_info_subn->n_completion_ids = 1;
4612 	prot->h2dring_info_subn->compeltion_ring_ids[0] = prot->d2hring_info_cpln->idx;
4613 
4614 	ret = dhd_send_h2d_ringcreate(dhd, prot->h2dring_info_subn,
4615 		BCMPCIE_H2D_RING_TYPE_DBGBUF_SUBMIT, DHD_H2D_DBGRING_REQ_PKTID);
4616 
4617 	/* Note that there is no way to delete a d2h or h2d ring once it has been created,
4618 	 * so we cannot clean up if one ring was created while the other failed
4619 	 */
4620 	return ret;
4621 } /* dhd_prot_init_info_rings */
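/*
 * Note on the ordering above (editorial sketch, no new behaviour): the d2h
 * (completion) ring create request is sent first so that the h2d (submit)
 * ring create request can carry the completion ring id in
 * compeltion_ring_ids[0]; in between, both rings have their epoch seqnum and
 * phase bit reset to their initial values.
 */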
4622 
4623 static void
4624 dhd_prot_detach_info_rings(dhd_pub_t *dhd)
4625 {
4626 	if (dhd->prot->h2dring_info_subn) {
4627 		dhd_prot_ring_detach(dhd, dhd->prot->h2dring_info_subn);
4628 		MFREE(dhd->prot->osh, dhd->prot->h2dring_info_subn, sizeof(msgbuf_ring_t));
4629 	}
4630 	if (dhd->prot->d2hring_info_cpln) {
4631 		dhd_prot_ring_detach(dhd, dhd->prot->d2hring_info_cpln);
4632 		MFREE(dhd->prot->osh, dhd->prot->d2hring_info_cpln, sizeof(msgbuf_ring_t));
4633 	}
4634 }
4635 
4636 #ifdef DHD_HP2P
4637 static int
4638 dhd_check_create_hp2p_rings(dhd_pub_t *dhd)
4639 {
4640 	dhd_prot_t *prot = dhd->prot;
4641 	int ret = BCME_ERROR;
4642 	uint16 ringid;
4643 
4644 	/* Last 2 dynamic ring indices are used by hp2p rings */
4645 	ringid = dhd->bus->max_submission_rings + dhd->bus->max_completion_rings - 2;
4646 
4647 	if (prot->d2hring_hp2p_txcpl == NULL) {
4648 		prot->d2hring_hp2p_txcpl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
4649 
4650 		if (prot->d2hring_hp2p_txcpl == NULL) {
4651 			DHD_ERROR(("%s: couldn't alloc memory for d2hring_hp2p_txcpl\n",
4652 				__FUNCTION__));
4653 			return BCME_NOMEM;
4654 		}
4655 
4656 		DHD_INFO(("%s: about to create hp2p txcpl ring\n", __FUNCTION__));
4657 		ret = dhd_prot_ring_attach(dhd, prot->d2hring_hp2p_txcpl, "d2hhp2p_txcpl",
4658 			dhd_bus_get_hp2p_ring_max_size(dhd->bus, TRUE), D2HRING_TXCMPLT_ITEMSIZE,
4659 			ringid);
4660 		if (ret != BCME_OK) {
4661 			DHD_ERROR(("%s: couldn't alloc resources for hp2p txcpl ring\n",
4662 				__FUNCTION__));
4663 			goto err2;
4664 		}
4665 	} else {
4666 		/* for re-entry case, clear inited flag */
4667 		prot->d2hring_hp2p_txcpl->inited = FALSE;
4668 	}
4669 	if (prot->d2hring_hp2p_rxcpl == NULL) {
4670 		prot->d2hring_hp2p_rxcpl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
4671 
4672 		if (prot->d2hring_hp2p_rxcpl == NULL) {
4673 			DHD_ERROR(("%s: couldn't alloc memory for d2hring_hp2p_rxcpl\n",
4674 				__FUNCTION__));
4675 			return BCME_NOMEM;
4676 		}
4677 
4678 		/* create the hp2p rx completion ring next to hp2p tx compl ring
4679 		 * ringid = id next to hp2p tx compl ring
4680 		 */
4681 		ringid = ringid + 1;
4682 
4683 		DHD_INFO(("%s: about to create hp2p rxcpl ring\n", __FUNCTION__));
4684 		ret = dhd_prot_ring_attach(dhd, prot->d2hring_hp2p_rxcpl, "d2hhp2p_rxcpl",
4685 			dhd_bus_get_hp2p_ring_max_size(dhd->bus, FALSE), D2HRING_RXCMPLT_ITEMSIZE,
4686 			ringid);
4687 		if (ret != BCME_OK) {
4688 			DHD_ERROR(("%s: couldn't alloc resources for hp2p rxcpl ring\n",
4689 				__FUNCTION__));
4690 			goto err1;
4691 		}
4692 	} else {
4693 		/* for re-entry case, clear inited flag */
4694 		prot->d2hring_hp2p_rxcpl->inited = FALSE;
4695 	}
4696 
4697 	if (prot->d2hring_hp2p_rxcpl != NULL &&
4698 		prot->d2hring_hp2p_txcpl != NULL) {
4699 		/* dhd_prot_init re-entry after a dhd_prot_reset */
4700 		ret = BCME_OK;
4701 	}
4702 
4703 	return ret;
4704 err1:
4705 	MFREE(prot->osh, prot->d2hring_hp2p_rxcpl, sizeof(msgbuf_ring_t));
4706 	prot->d2hring_hp2p_rxcpl = NULL;
4707 
4708 err2:
4709 	MFREE(prot->osh, prot->d2hring_hp2p_txcpl, sizeof(msgbuf_ring_t));
4710 	prot->d2hring_hp2p_txcpl = NULL;
4711 	return ret;
4712 } /* dhd_check_create_hp2p_rings */
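/*
 * Worked example (hypothetical counts, for illustration only): with
 * max_submission_rings = 42 and max_completion_rings = 6, the hp2p tx
 * completion ring is placed at ringid 42 + 6 - 2 = 46 and the rx completion
 * ring at 47, i.e. the last two dynamic ring indices as noted above.
 */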
4713 
4714 int
4715 dhd_prot_init_hp2p_rings(dhd_pub_t *dhd)
4716 {
4717 	dhd_prot_t *prot = dhd->prot;
4718 	int ret = BCME_OK;
4719 
4720 	dhd->hp2p_ring_more = TRUE;
4721 	/* default multiflow not allowed */
4722 	dhd->hp2p_mf_enable = FALSE;
4723 
4724 	if ((ret = dhd_check_create_hp2p_rings(dhd)) != BCME_OK) {
4725 		DHD_ERROR(("%s: hp2p rings aren't created! \n",
4726 			__FUNCTION__));
4727 		return ret;
4728 	}
4729 
4730 	if ((prot->d2hring_hp2p_txcpl->inited) || (prot->d2hring_hp2p_txcpl->create_pending)) {
4731 		DHD_INFO(("hp2p tx completion ring was created!\n"));
4732 		return ret;
4733 	}
4734 
4735 	DHD_TRACE(("trying to send create d2h hp2p txcpl ring: id %d\n",
4736 		prot->d2hring_hp2p_txcpl->idx));
4737 	ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_hp2p_txcpl,
4738 		BCMPCIE_D2H_RING_TYPE_HPP_TX_CPL, DHD_D2H_HPPRING_TXREQ_PKTID);
4739 	if (ret != BCME_OK)
4740 		return ret;
4741 
4742 	prot->d2hring_hp2p_txcpl->seqnum = D2H_EPOCH_INIT_VAL;
4743 	prot->d2hring_hp2p_txcpl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4744 
4745 	if ((prot->d2hring_hp2p_rxcpl->inited) || (prot->d2hring_hp2p_rxcpl->create_pending)) {
4746 		DHD_INFO(("hp2p rx completion ring was created!\n"));
4747 		return ret;
4748 	}
4749 
4750 	DHD_TRACE(("trying to send create d2h hp2p rxcpl ring: id %d\n",
4751 		prot->d2hring_hp2p_rxcpl->idx));
4752 	ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_hp2p_rxcpl,
4753 		BCMPCIE_D2H_RING_TYPE_HPP_RX_CPL, DHD_D2H_HPPRING_RXREQ_PKTID);
4754 	if (ret != BCME_OK)
4755 		return ret;
4756 
4757 	prot->d2hring_hp2p_rxcpl->seqnum = D2H_EPOCH_INIT_VAL;
4758 	prot->d2hring_hp2p_rxcpl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4759 
4760 	/* Note that there is no way to delete a d2h or h2d ring once it has been created,
4761 	 * so we cannot clean up if one ring was created while the other failed
4762 	 */
4763 	return BCME_OK;
4764 } /* dhd_prot_init_hp2p_rings */
4765 
4766 static void
4767 dhd_prot_detach_hp2p_rings(dhd_pub_t *dhd)
4768 {
4769 	if (dhd->prot->d2hring_hp2p_txcpl) {
4770 		dhd_prot_ring_detach(dhd, dhd->prot->d2hring_hp2p_txcpl);
4771 		MFREE(dhd->prot->osh, dhd->prot->d2hring_hp2p_txcpl, sizeof(msgbuf_ring_t));
4772 		dhd->prot->d2hring_hp2p_txcpl = NULL;
4773 	}
4774 	if (dhd->prot->d2hring_hp2p_rxcpl) {
4775 		dhd_prot_ring_detach(dhd, dhd->prot->d2hring_hp2p_rxcpl);
4776 		MFREE(dhd->prot->osh, dhd->prot->d2hring_hp2p_rxcpl, sizeof(msgbuf_ring_t));
4777 		dhd->prot->d2hring_hp2p_rxcpl = NULL;
4778 	}
4779 }
4780 #endif /* DHD_HP2P */
4781 
4782 #ifdef BTLOG
4783 static int
4784 dhd_check_create_btlog_rings(dhd_pub_t *dhd)
4785 {
4786 	dhd_prot_t *prot = dhd->prot;
4787 	int ret = BCME_ERROR;
4788 	uint16 ringid;
4789 
4790 	if (dhd->submit_count_WAR) {
4791 		ringid = dhd->bus->max_tx_flowrings + BCMPCIE_COMMON_MSGRINGS + 2;
4792 	} else {
4793 		/* ringid is one less than the ringid assigned by dhd_check_create_info_rings */
4794 		ringid = dhd->bus->max_tx_flowrings +
4795 			(dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) +
4796 			BCMPCIE_H2D_COMMON_MSGRINGS - 1;
4797 	}
4798 
4799 	if (prot->d2hring_btlog_cpln) {
4800 		/* for re-entry case, clear inited flag */
4801 		prot->d2hring_btlog_cpln->inited = FALSE;
4802 	}
4803 
4804 	if (prot->h2dring_btlog_subn && prot->d2hring_btlog_cpln) {
4805 		return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
4806 	}
4807 
4808 	if (prot->h2dring_btlog_subn == NULL) {
4809 		prot->h2dring_btlog_subn = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
4810 
4811 		if (prot->h2dring_btlog_subn == NULL) {
4812 			DHD_ERROR(("%s: couldn't alloc memory for h2dring_btlog_subn\n",
4813 				__FUNCTION__));
4814 			return BCME_NOMEM;
4815 		}
4816 
4817 		DHD_INFO(("%s: about to create debug submit ring\n", __FUNCTION__));
4818 		ret = dhd_prot_ring_attach(dhd, prot->h2dring_btlog_subn, "h2dbtlog",
4819 			H2DRING_DYNAMIC_INFO_MAX_ITEM, H2DRING_INFO_BUFPOST_ITEMSIZE,
4820 			ringid);
4821 		if (ret != BCME_OK) {
4822 			DHD_ERROR(("%s: couldn't alloc resources for dbg submit ring\n",
4823 				__FUNCTION__));
4824 			goto err;
4825 		}
4826 	}
4827 
4828 	if (prot->d2hring_btlog_cpln == NULL) {
4829 		prot->d2hring_btlog_cpln = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
4830 
4831 		if (prot->d2hring_btlog_cpln == NULL) {
4832 			DHD_ERROR(("%s: couldn't alloc memory for d2hring_btlog_cpln\n",
4833 				__FUNCTION__));
4834 			return BCME_NOMEM;
4835 		}
4836 
4837 		if (dhd->submit_count_WAR) {
4838 			ringid = ringid + 1;
4839 		} else {
4840 			/* advance ringid past BTLOG submit ring and INFO submit and cmplt rings */
4841 			ringid = ringid + 3;
4842 		}
4843 
4844 		DHD_INFO(("%s: about to create debug cpl ring\n", __FUNCTION__));
4845 		ret = dhd_prot_ring_attach(dhd, prot->d2hring_btlog_cpln, "d2hbtlog",
4846 			D2HRING_DYNAMIC_INFO_MAX_ITEM, D2HRING_INFO_BUFCMPLT_ITEMSIZE,
4847 			ringid);
4848 		if (ret != BCME_OK) {
4849 			DHD_ERROR(("%s: couldn't alloc resources for dbg cpl ring\n",
4850 				__FUNCTION__));
4851 			dhd_prot_ring_detach(dhd, prot->h2dring_btlog_subn);
4852 			goto err;
4853 		}
4854 	}
4855 
4856 	return ret;
4857 err:
4858 	MFREE(prot->osh, prot->h2dring_btlog_subn, sizeof(msgbuf_ring_t));
4859 
4860 	if (prot->d2hring_btlog_cpln) {
4861 		MFREE(prot->osh, prot->d2hring_btlog_cpln, sizeof(msgbuf_ring_t));
4862 	}
4863 	return ret;
4864 } /* dhd_check_create_btlog_rings */
4865 
4866 int
4867 dhd_prot_init_btlog_rings(dhd_pub_t *dhd)
4868 {
4869 	dhd_prot_t *prot = dhd->prot;
4870 	int ret = BCME_OK;
4871 
4872 	if ((ret = dhd_check_create_btlog_rings(dhd)) != BCME_OK) {
4873 		DHD_ERROR(("%s: btlog rings aren't created! \n",
4874 			__FUNCTION__));
4875 		return ret;
4876 	}
4877 
4878 	if ((prot->d2hring_btlog_cpln->inited) || (prot->d2hring_btlog_cpln->create_pending)) {
4879 		DHD_INFO(("BTLOG completion ring was created!\n"));
4880 		return ret;
4881 	}
4882 
4883 	DHD_ERROR(("trying to send create d2h btlog ring: id %d\n", prot->d2hring_btlog_cpln->idx));
4884 	ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_btlog_cpln,
4885 		BCMPCIE_D2H_RING_TYPE_BTLOG_CPL, DHD_D2H_BTLOGRING_REQ_PKTID);
4886 	if (ret != BCME_OK)
4887 		return ret;
4888 
4889 	prot->h2dring_btlog_subn->seqnum = H2D_EPOCH_INIT_VAL;
4890 	prot->h2dring_btlog_subn->current_phase = 0;
4891 	prot->d2hring_btlog_cpln->seqnum = D2H_EPOCH_INIT_VAL;
4892 	prot->d2hring_btlog_cpln->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4893 
4894 	DHD_ERROR(("trying to send create h2d btlog ring id %d\n", prot->h2dring_btlog_subn->idx));
4895 	prot->h2dring_btlog_subn->n_completion_ids = 1;
4896 	prot->h2dring_btlog_subn->compeltion_ring_ids[0] = prot->d2hring_btlog_cpln->idx;
4897 
4898 	ret = dhd_send_h2d_ringcreate(dhd, prot->h2dring_btlog_subn,
4899 		BCMPCIE_H2D_RING_TYPE_BTLOG_SUBMIT, DHD_H2D_BTLOGRING_REQ_PKTID);
4900 
4901 	/* Note that there is no way to delete a d2h or h2d ring once it has been created,
4902 	 * so we cannot clean up if one ring was created while the other failed
4903 	 */
4904 	return ret;
4905 } /* dhd_prot_init_btlog_rings */
4906 
4907 static void
4908 dhd_prot_detach_btlog_rings(dhd_pub_t *dhd)
4909 {
4910 	if (dhd->prot->h2dring_btlog_subn) {
4911 		dhd_prot_ring_detach(dhd, dhd->prot->h2dring_btlog_subn);
4912 		MFREE(dhd->prot->osh, dhd->prot->h2dring_btlog_subn, sizeof(msgbuf_ring_t));
4913 	}
4914 	if (dhd->prot->d2hring_btlog_cpln) {
4915 		dhd_prot_ring_detach(dhd, dhd->prot->d2hring_btlog_cpln);
4916 		MFREE(dhd->prot->osh, dhd->prot->d2hring_btlog_cpln, sizeof(msgbuf_ring_t));
4917 	}
4918 }
4919 #endif	/* BTLOG */
4920 
4921 #ifdef EWP_EDL
4922 static int
4923 dhd_check_create_edl_rings(dhd_pub_t *dhd)
4924 {
4925 	dhd_prot_t *prot = dhd->prot;
4926 	int ret = BCME_ERROR;
4927 	uint16 ringid;
4928 
4929 #ifdef BTLOG
4930 	if (dhd->submit_count_WAR) {
4931 		ringid = dhd->bus->max_tx_flowrings + BCMPCIE_COMMON_MSGRINGS;
4932 	} else
4933 #endif	/* BTLOG */
4934 	{
4935 		/* dongle may increase max_submission_rings so keep
4936 		 * ringid at end of dynamic rings (re-use info ring cpl ring id)
4937 		 */
4938 		ringid = dhd->bus->max_tx_flowrings +
4939 			(dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) +
4940 			BCMPCIE_H2D_COMMON_MSGRINGS + 1;
4941 	}
4942 
4943 	if (prot->d2hring_edl) {
4944 		prot->d2hring_edl->inited = FALSE;
4945 		return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
4946 	}
4947 
4948 	if (prot->d2hring_edl == NULL) {
4949 		prot->d2hring_edl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
4950 
4951 		if (prot->d2hring_edl == NULL) {
4952 			DHD_ERROR(("%s: couldn't alloc memory for d2hring_edl\n",
4953 				__FUNCTION__));
4954 			return BCME_NOMEM;
4955 		}
4956 
4957 		DHD_ERROR(("%s: about to create EDL ring, ringid: %u \n", __FUNCTION__,
4958 			ringid));
4959 		ret = dhd_prot_ring_attach(dhd, prot->d2hring_edl, "d2hring_edl",
4960 			D2HRING_EDL_MAX_ITEM, D2HRING_EDL_ITEMSIZE,
4961 			ringid);
4962 		if (ret != BCME_OK) {
4963 			DHD_ERROR(("%s: couldn't alloc resources for EDL ring\n",
4964 				__FUNCTION__));
4965 			goto err;
4966 		}
4967 	}
4968 
4969 	return ret;
4970 err:
4971 	MFREE(prot->osh, prot->d2hring_edl, sizeof(msgbuf_ring_t));
4972 	prot->d2hring_edl = NULL;
4973 
4974 	return ret;
4975 } /* dhd_check_create_edl_rings */
4976 
4977 int
4978 dhd_prot_init_edl_rings(dhd_pub_t *dhd)
4979 {
4980 	dhd_prot_t *prot = dhd->prot;
4981 	int ret = BCME_ERROR;
4982 
4983 	if ((ret = dhd_check_create_edl_rings(dhd)) != BCME_OK) {
4984 		DHD_ERROR(("%s: EDL rings aren't created! \n",
4985 			__FUNCTION__));
4986 		return ret;
4987 	}
4988 
4989 	if ((prot->d2hring_edl->inited) || (prot->d2hring_edl->create_pending)) {
4990 		DHD_INFO(("EDL completion ring was created!\n"));
4991 		return ret;
4992 	}
4993 
4994 	DHD_ERROR(("trying to send create d2h edl ring: idx %d\n", prot->d2hring_edl->idx));
4995 	ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_edl,
4996 		BCMPCIE_D2H_RING_TYPE_EDL, DHD_D2H_DBGRING_REQ_PKTID);
4997 	if (ret != BCME_OK)
4998 		return ret;
4999 
5000 	prot->d2hring_edl->seqnum = D2H_EPOCH_INIT_VAL;
5001 	prot->d2hring_edl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
5002 
5003 	return BCME_OK;
5004 } /* dhd_prot_init_edl_rings */
5005 
5006 static void
5007 dhd_prot_detach_edl_rings(dhd_pub_t *dhd)
5008 {
5009 	if (dhd->prot->d2hring_edl) {
5010 		dhd_prot_ring_detach(dhd, dhd->prot->d2hring_edl);
5011 		MFREE(dhd->prot->osh, dhd->prot->d2hring_edl, sizeof(msgbuf_ring_t));
5012 		dhd->prot->d2hring_edl = NULL;
5013 	}
5014 }
5015 #endif	/* EWP_EDL */
5016 
5017 /**
5018  * Initialize protocol: sync w/dongle state.
5019  * Sets dongle media info (iswl, drv_version, mac address).
5020  */
5021 int dhd_sync_with_dongle(dhd_pub_t *dhd)
5022 {
5023 	int ret = 0;
5024 	uint len = 0;
5025 	wlc_rev_info_t revinfo;
5026 	char buf[128];
5027 	dhd_prot_t *prot = dhd->prot;
5028 
5029 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5030 
5031 	dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
5032 
5033 	/* Post ts buffer after shim layer is attached */
5034 	ret = dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
5035 
5036 	/* query for 'wlc_ver' to get version info from firmware */
5037 	/* memsetting to zero */
5038 	bzero(buf, sizeof(buf));
5039 	len = bcm_mkiovar("wlc_ver", NULL, 0, buf, sizeof(buf));
5040 	if (len == 0) {
5041 		DHD_ERROR(("%s failed in calling bcm_mkiovar %u\n", __FUNCTION__, len));
5042 		ret = BCME_ERROR;
5043 		goto done;
5044 	}
5045 	ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0);
5046 	if (ret < 0) {
5047 		DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
5048 	} else {
5049 		dhd->wlc_ver_major = ((wl_wlc_version_t*)buf)->wlc_ver_major;
5050 		dhd->wlc_ver_minor = ((wl_wlc_version_t*)buf)->wlc_ver_minor;
5051 	}
5052 
5053 	DHD_ERROR(("wlc_ver_major %d, wlc_ver_minor %d\n", dhd->wlc_ver_major, dhd->wlc_ver_minor));
5054 #ifndef OEM_ANDROID
5055 	/* Get the device MAC address */
5056 	bzero(buf, sizeof(buf));
5057 	strlcpy(buf, "cur_etheraddr", sizeof(buf));
5058 	ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0);
5059 	if (ret < 0) {
5060 		DHD_ERROR(("%s: GET iovar cur_etheraddr FAILED\n", __FUNCTION__));
5061 		goto done;
5062 	}
5063 	memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
5064 	if (dhd_msg_level & DHD_INFO_VAL) {
5065 		bcm_print_bytes("CUR_ETHERADDR ", (uchar *)buf, ETHER_ADDR_LEN);
5066 	}
5067 #endif /* OEM_ANDROID */
5068 
5069 #ifdef DHD_FW_COREDUMP
5070 	/* Check the memdump capability */
5071 	dhd_get_memdump_info(dhd);
5072 #endif /* DHD_FW_COREDUMP */
5073 #ifdef BCMASSERT_LOG
5074 	dhd_get_assert_info(dhd);
5075 #endif /* BCMASSERT_LOG */
5076 
5077 	/* Get the device rev info */
5078 	memset(&revinfo, 0, sizeof(revinfo));
5079 	ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0);
5080 	if (ret < 0) {
5081 		DHD_ERROR(("%s: GET revinfo FAILED\n", __FUNCTION__));
5082 		goto done;
5083 	}
5084 	DHD_ERROR(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__,
5085 		revinfo.deviceid, revinfo.vendorid, revinfo.chipnum));
5086 
5087 	/* Get the RxBuf post size */
5088 	/* Use default value in case of failure */
5089 	prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
5090 	memset(buf, 0, sizeof(buf));
5091 	len = bcm_mkiovar("rxbufpost_sz", NULL, 0, buf, sizeof(buf));
5092 	if (len == 0) {
5093 		DHD_ERROR(("%s failed in calling bcm_mkiovar %u\n", __FUNCTION__, len));
5094 	} else {
5095 		ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0);
5096 		if (ret < 0) {
5097 			DHD_ERROR(("%s: GET RxBuf post FAILED, use default %d\n",
5098 				__FUNCTION__, DHD_FLOWRING_RX_BUFPOST_PKTSZ));
5099 		} else {
5100 			if (memcpy_s(&(prot->rxbufpost_sz), sizeof(prot->rxbufpost_sz),
5101 					buf, sizeof(uint16)) != BCME_OK) {
5102 				DHD_ERROR(("%s: rxbufpost_sz memcpy failed\n", __FUNCTION__));
5103 			}
5104 
5105 			if (prot->rxbufpost_sz > DHD_FLOWRING_RX_BUFPOST_PKTSZ_MAX) {
5106 				DHD_ERROR(("%s: Invalid RxBuf post size : %d, default to %d\n",
5107 					__FUNCTION__, prot->rxbufpost_sz,
5108 					DHD_FLOWRING_RX_BUFPOST_PKTSZ));
5109 				prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
5110 			} else {
5111 				DHD_ERROR(("%s: RxBuf Post : %d\n",
5112 					__FUNCTION__, prot->rxbufpost_sz));
5113 			}
5114 		}
5115 	}
5116 
5117 	/* Post buffers for packet reception */
5118 	dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
5119 
5120 	DHD_SSSR_DUMP_INIT(dhd);
5121 
5122 	dhd_process_cid_mac(dhd, TRUE);
5123 	ret = dhd_preinit_ioctls(dhd);
5124 	dhd_process_cid_mac(dhd, FALSE);
5125 #if defined(DHD_SDTC_ETB_DUMP)
5126 	dhd_sdtc_etb_init(dhd);
5127 #endif /* DHD_SDTC_ETB_DUMP */
5128 #if defined(DHD_H2D_LOG_TIME_SYNC)
5129 #ifdef DHD_HP2P
5130 	if (FW_SUPPORTED(dhd, h2dlogts) || dhd->hp2p_capable)
5131 #else
5132 	if (FW_SUPPORTED(dhd, h2dlogts))
5133 #endif /* DHD_HP2P */
5134 	{
5135 #ifdef DHD_HP2P
5136 		if (dhd->hp2p_enable) {
5137 			dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH / 40;
5138 		} else {
5139 			dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH;
5140 		}
5141 #else
5142 		dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH;
5143 #endif /* DHD_HP2P */
5144 		dhd->bus->dhd_rte_time_sync_count = OSL_SYSUPTIME_US();
5145 		/* This is during initialization. */
5146 		dhd_h2d_log_time_sync(dhd);
5147 	} else {
5148 		dhd->dhd_rte_time_sync_ms = 0;
5149 	}
5150 #endif /* DHD_H2D_LOG_TIME_SYNC */
5151 
5152 #ifdef HOST_SFH_LLC
5153 	if (FW_SUPPORTED(dhd, host_sfhllc)) {
5154 		dhd->host_sfhllc_supported = TRUE;
5155 	} else {
5156 		dhd->host_sfhllc_supported = FALSE;
5157 	}
5158 #endif /* HOST_SFH_LLC */
5159 
5160 	/* Always assumes wl for now */
5161 	dhd->iswl = TRUE;
5162 done:
5163 	return ret;
5164 } /* dhd_sync_with_dongle */
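/*
 * Illustrative sketch (editorial annotation, hypothetical buffer contents):
 * the GET-iovar pattern used repeatedly above packs the iovar name with a
 * zero-length parameter and reads the result back in place, e.g.:
 *
 *   char buf[128];
 *   bzero(buf, sizeof(buf));
 *   len = bcm_mkiovar("wlc_ver", NULL, 0, buf, sizeof(buf));
 *   if (len == 0)
 *           return BCME_ERROR;    // buf too small to even hold the name
 *   ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0);
 *   // on success the response structure starts at buf[0]
 */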
5165 
5166 #define DHD_DBG_SHOW_METADATA	0
5167 
5168 #if DHD_DBG_SHOW_METADATA
5169 static void
5170 BCMFASTPATH(dhd_prot_print_metadata)(dhd_pub_t *dhd, void *ptr, int len)
5171 {
5172 	uint8 tlv_t;
5173 	uint8 tlv_l;
5174 	uint8 *tlv_v = (uint8 *)ptr;
5175 
5176 	if (len <= BCMPCIE_D2H_METADATA_HDRLEN)
5177 		return;
5178 
5179 	len -= BCMPCIE_D2H_METADATA_HDRLEN;
5180 	tlv_v += BCMPCIE_D2H_METADATA_HDRLEN;
5181 
5182 	while (len > TLV_HDR_LEN) {
5183 		tlv_t = tlv_v[TLV_TAG_OFF];
5184 		tlv_l = tlv_v[TLV_LEN_OFF];
5185 
5186 		len -= TLV_HDR_LEN;
5187 		tlv_v += TLV_HDR_LEN;
5188 		if (len < tlv_l)
5189 			break;
5190 		if ((tlv_t == 0) || (tlv_t == WLFC_CTL_TYPE_FILLER))
5191 			break;
5192 
5193 		switch (tlv_t) {
5194 		case WLFC_CTL_TYPE_TXSTATUS: {
5195 			uint32 txs;
5196 			memcpy(&txs, tlv_v, sizeof(uint32));
5197 			if (tlv_l < (sizeof(wl_txstatus_additional_info_t) + sizeof(uint32))) {
5198 				printf("METADATA TX_STATUS: %08x\n", txs);
5199 			} else {
5200 				wl_txstatus_additional_info_t tx_add_info;
5201 				memcpy(&tx_add_info, tlv_v + sizeof(uint32),
5202 					sizeof(wl_txstatus_additional_info_t));
5203 				printf("METADATA TX_STATUS: %08x WLFCTS[%04x | %08x - %08x - %08x]"
5204 					" rate = %08x tries = %d - %d\n", txs,
5205 					tx_add_info.seq, tx_add_info.entry_ts,
5206 					tx_add_info.enq_ts, tx_add_info.last_ts,
5207 					tx_add_info.rspec, tx_add_info.rts_cnt,
5208 					tx_add_info.tx_cnt);
5209 			}
5210 			} break;
5211 
5212 		case WLFC_CTL_TYPE_RSSI: {
5213 			if (tlv_l == 1)
5214 				printf("METADATA RX_RSSI: rssi = %d\n", *tlv_v);
5215 			else
5216 				printf("METADATA RX_RSSI[%04x]: rssi = %d snr = %d\n",
5217 					(*(tlv_v + 3) << 8) | *(tlv_v + 2),
5218 					(int8)(*tlv_v), *(tlv_v + 1));
5219 			} break;
5220 
5221 		case WLFC_CTL_TYPE_FIFO_CREDITBACK:
5222 			bcm_print_bytes("METADATA FIFO_CREDITBACK", tlv_v, tlv_l);
5223 			break;
5224 
5225 		case WLFC_CTL_TYPE_TX_ENTRY_STAMP:
5226 			bcm_print_bytes("METADATA TX_ENTRY", tlv_v, tlv_l);
5227 			break;
5228 
5229 		case WLFC_CTL_TYPE_RX_STAMP: {
5230 			struct {
5231 				uint32 rspec;
5232 				uint32 bus_time;
5233 				uint32 wlan_time;
5234 			} rx_tmstamp;
5235 			memcpy(&rx_tmstamp, tlv_v, sizeof(rx_tmstamp));
5236 			printf("METADATA RX TIMESTAMP: WLFCTS[%08x - %08x] rate = %08x\n",
5237 				rx_tmstamp.wlan_time, rx_tmstamp.bus_time, rx_tmstamp.rspec);
5238 			} break;
5239 
5240 		case WLFC_CTL_TYPE_TRANS_ID:
5241 			bcm_print_bytes("METADATA TRANS_ID", tlv_v, tlv_l);
5242 			break;
5243 
5244 		case WLFC_CTL_TYPE_COMP_TXSTATUS:
5245 			bcm_print_bytes("METADATA COMP_TXSTATUS", tlv_v, tlv_l);
5246 			break;
5247 
5248 		default:
5249 			bcm_print_bytes("METADATA UNKNOWN", tlv_v, tlv_l);
5250 			break;
5251 		}
5252 
5253 		len -= tlv_l;
5254 		tlv_v += tlv_l;
5255 	}
5256 }
5257 #endif /* DHD_DBG_SHOW_METADATA */
5258 
5259 static INLINE void
5260 BCMFASTPATH(dhd_prot_packet_free)(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send)
5261 {
5262 	if (pkt) {
5263 		if (pkttype == PKTTYPE_IOCTL_RX ||
5264 			pkttype == PKTTYPE_EVENT_RX ||
5265 			pkttype == PKTTYPE_INFO_RX ||
5266 			pkttype == PKTTYPE_TSBUF_RX) {
5267 #ifdef DHD_USE_STATIC_CTRLBUF
5268 			PKTFREE_STATIC(dhd->osh, pkt, send);
5269 #else
5270 			PKTFREE(dhd->osh, pkt, send);
5271 #endif /* DHD_USE_STATIC_CTRLBUF */
5272 		} else {
5273 			PKTFREE(dhd->osh, pkt, send);
5274 		}
5275 	}
5276 }
5277 
5278 /**
5279  * dhd_prot_packet_get should be called only for items having pktid_ctrl_map handle
5280  * and all the bottom most functions like dhd_pktid_map_free hold separate DHD_PKTID_LOCK
5281  * to ensure thread safety, so no need to hold any locks for this function
5282  */
5283 static INLINE void *
5284 BCMFASTPATH(dhd_prot_packet_get)(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype, bool free_pktid)
5285 {
5286 	void *PKTBUF;
5287 	dmaaddr_t pa;
5288 	uint32 len;
5289 	void *dmah;
5290 	void *secdma;
5291 
5292 #ifdef DHD_PCIE_PKTID
5293 	if (free_pktid) {
5294 		PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map,
5295 			pktid, pa, len, dmah, secdma, pkttype);
5296 	} else {
5297 		PKTBUF = DHD_PKTID_TO_NATIVE_RSV(dhd, dhd->prot->pktid_ctrl_map,
5298 			pktid, pa, len, dmah, secdma, pkttype);
5299 	}
5300 #else
5301 	PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map, pktid, pa,
5302 		len, dmah, secdma, pkttype);
5303 #endif /* DHD_PCIE_PKTID */
5304 	if (PKTBUF) {
5305 		DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
5306 #ifdef DMAMAP_STATS
5307 		switch (pkttype) {
5308 #ifndef IOCTLRESP_USE_CONSTMEM
5309 			case PKTTYPE_IOCTL_RX:
5310 				dhd->dma_stats.ioctl_rx--;
5311 				dhd->dma_stats.ioctl_rx_sz -= len;
5312 				break;
5313 #endif /* IOCTLRESP_USE_CONSTMEM */
5314 			case PKTTYPE_EVENT_RX:
5315 				dhd->dma_stats.event_rx--;
5316 				dhd->dma_stats.event_rx_sz -= len;
5317 				break;
5318 			case PKTTYPE_INFO_RX:
5319 				dhd->dma_stats.info_rx--;
5320 				dhd->dma_stats.info_rx_sz -= len;
5321 				break;
5322 			case PKTTYPE_TSBUF_RX:
5323 				dhd->dma_stats.tsbuf_rx--;
5324 				dhd->dma_stats.tsbuf_rx_sz -= len;
5325 				break;
5326 		}
5327 #endif /* DMAMAP_STATS */
5328 	}
5329 
5330 	return PKTBUF;
5331 }
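/*
 * Editorial note (sketch, no new behaviour): the pktid map hands the dongle a
 * 32-bit handle per host packet. At post time DHD_NATIVE_TO_PKTID() saves
 * <pkt, pa, len, dmah, secdma, pkttype> and returns the id placed in the ring
 * message; at completion time this function maps the id back with
 * DHD_PKTID_TO_NATIVE() (or the _RSV variant when the id must stay reserved)
 * and DMA-unmaps the buffer before it is handed further up the stack.
 */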
5332 
5333 #ifdef IOCTLRESP_USE_CONSTMEM
5334 static INLINE void
5335 BCMFASTPATH(dhd_prot_ioctl_ret_buffer_get)(dhd_pub_t *dhd, uint32 pktid, dhd_dma_buf_t *retbuf)
5336 {
5337 	memset(retbuf, 0, sizeof(dhd_dma_buf_t));
5338 	retbuf->va = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle_ioctl, pktid,
5339 		retbuf->pa, retbuf->len, retbuf->dmah, retbuf->secdma, PKTTYPE_IOCTL_RX);
5340 
5341 	return;
5342 }
5343 #endif
5344 
5345 #ifdef PCIE_INB_DW
5346 static int
5347 dhd_prot_inc_hostactive_devwake_assert(dhd_bus_t *bus)
5348 {
5349 	unsigned long flags = 0;
5350 
5351 	if (INBAND_DW_ENAB(bus)) {
5352 		DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
5353 		bus->host_active_cnt++;
5354 		DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
5355 		if (dhd_bus_set_device_wake(bus, TRUE) != BCME_OK) {
5356 			DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
5357 			bus->host_active_cnt--;
5358 			dhd_bus_inb_ack_pending_ds_req(bus);
5359 			DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
5360 			return BCME_ERROR;
5361 		}
5362 	}
5363 
5364 	return BCME_OK;
5365 }
5366 
5367 static void
5368 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd_bus_t *bus)
5369 {
5370 	unsigned long flags = 0;
5371 	if (INBAND_DW_ENAB(bus)) {
5372 		DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
5373 		bus->host_active_cnt--;
5374 		dhd_bus_inb_ack_pending_ds_req(bus);
5375 		DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
5376 	}
5377 }
5378 #endif /* PCIE_INB_DW */
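/*
 * Editorial note (sketch, no new behaviour): when inband device wake is
 * enabled, every buffer post below is bracketed by this pair of helpers --
 * dhd_prot_inc_hostactive_devwake_assert() bumps host_active_cnt and asserts
 * device wake before the ring is touched, and
 * dhd_prot_dec_hostactive_ack_pending_dsreq() drops the count and acks any
 * pending deep-sleep request once the post is done.
 */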
5379 
5380 static void
5381 BCMFASTPATH(dhd_msgbuf_rxbuf_post)(dhd_pub_t *dhd, bool use_rsv_pktid)
5382 {
5383 	dhd_prot_t *prot = dhd->prot;
5384 	int16 fillbufs;
5385 	int retcount = 0;
5386 
5387 	fillbufs = prot->max_rxbufpost - prot->rxbufpost;
5388 	while (fillbufs >= RX_BUF_BURST) {
5389 		/* Post in bursts of at most RX_BUF_BURST buffers at a time */
5390 		fillbufs = MIN(fillbufs, RX_BUF_BURST);
5391 
5392 		/* Post buffers */
5393 		retcount = dhd_prot_rxbuf_post(dhd, fillbufs, use_rsv_pktid);
5394 
5395 		if (retcount > 0) {
5396 			prot->rxbufpost += (uint16)retcount;
5397 			/* how many more to post */
5398 			fillbufs = prot->max_rxbufpost - prot->rxbufpost;
5399 		} else {
5400 			/* Make sure we don't run loop any further */
5401 			fillbufs = 0;
5402 		}
5403 	}
5404 }
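/*
 * Worked example of the refill loop above (hypothetical counts, for
 * illustration only): with max_rxbufpost = 256, rxbufpost = 100 and
 * RX_BUF_BURST = 32, fillbufs starts at 156 and the loop posts 32 buffers per
 * iteration (156 -> 124 -> ... -> 28) until fewer than RX_BUF_BURST remain,
 * leaving the remainder for a later refill.
 */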
5405 
5406 /** Post 'count' rx buffers to the dongle */
5407 static int
5408 BCMFASTPATH(dhd_prot_rxbuf_post)(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid)
5409 {
5410 	void *p, **pktbuf;
5411 	uint8 *rxbuf_post_tmp;
5412 	host_rxbuf_post_t *rxbuf_post;
5413 	void *msg_start;
5414 	dmaaddr_t pa, *pktbuf_pa;
5415 	uint32 *pktlen;
5416 	uint16 i = 0, alloced = 0;
5417 	unsigned long flags;
5418 	uint32 pktid;
5419 	dhd_prot_t *prot = dhd->prot;
5420 	msgbuf_ring_t *ring = &prot->h2dring_rxp_subn;
5421 	void *lcl_buf;
5422 	uint16 lcl_buf_size;
5423 #ifdef BCM_ROUTER_DHD
5424 	uint16 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ + BCMEXTRAHDROOM;
5425 #else
5426 	uint16 pktsz = prot->rxbufpost_sz;
5427 #endif /* BCM_ROUTER_DHD */
5428 
5429 #ifdef PCIE_INB_DW
5430 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
5431 		return BCME_ERROR;
5432 #endif /* PCIE_INB_DW */
5433 	/* allocate a local buffer to store pkt buffer va, pa and length */
5434 	lcl_buf_size = (sizeof(void *) + sizeof(dmaaddr_t) + sizeof(uint32)) *
5435 		RX_BUF_BURST;
5436 	lcl_buf = MALLOC(dhd->osh, lcl_buf_size);
5437 	if (!lcl_buf) {
5438 		DHD_ERROR(("%s: local scratch buffer allocation failed\n", __FUNCTION__));
5439 #ifdef PCIE_INB_DW
5440 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
5441 #endif
5442 		return 0;
5443 	}
5444 	pktbuf = lcl_buf;
5445 	pktbuf_pa = (dmaaddr_t *)((uint8 *)pktbuf + sizeof(void *) * RX_BUF_BURST);
5446 	pktlen = (uint32 *)((uint8 *)pktbuf_pa + sizeof(dmaaddr_t) * RX_BUF_BURST);
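	/*
	 * Editorial layout note (sketch): lcl_buf is a single MALLOC carved into
	 * three back-to-back arrays of RX_BUF_BURST entries each,
	 *
	 *   [ void *pktbuf[] ][ dmaaddr_t pktbuf_pa[] ][ uint32 pktlen[] ]
	 *
	 * so pktbuf, pktbuf_pa and pktlen above are just offsets into that one
	 * allocation, and everything is released with a single MFREE at the end.
	 */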
5447 
5448 	for (i = 0; i < count; i++) {
5449 		if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) {
5450 			DHD_ERROR(("%s:%d: PKTGET for rxbuf failed\n", __FUNCTION__, __LINE__));
5451 			dhd->rx_pktgetfail++;
5452 			break;
5453 		}
5454 
5455 #ifdef BCM_ROUTER_DHD
5456 		/* Reserve extra headroom for router builds */
5457 		PKTPULL(dhd->osh, p, BCMEXTRAHDROOM);
5458 #endif /* BCM_ROUTER_DHD */
5459 		pktlen[i] = PKTLEN(dhd->osh, p);
5460 		pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen[i], DMA_RX, p, 0);
5461 
5462 		if (PHYSADDRISZERO(pa)) {
5463 			PKTFREE(dhd->osh, p, FALSE);
5464 			DHD_ERROR(("Invalid phyaddr 0\n"));
5465 			ASSERT(0);
5466 			break;
5467 		}
5468 #ifdef DMAMAP_STATS
5469 		dhd->dma_stats.rxdata++;
5470 		dhd->dma_stats.rxdata_sz += pktlen[i];
5471 #endif /* DMAMAP_STATS */
5472 
5473 		PKTPULL(dhd->osh, p, prot->rx_metadata_offset);
5474 		pktlen[i] = PKTLEN(dhd->osh, p);
5475 		pktbuf[i] = p;
5476 		pktbuf_pa[i] = pa;
5477 	}
5478 
5479 	/* only post what we have */
5480 	count = i;
5481 
5482 	/* grab the ring lock to allocate pktid and post on ring */
5483 	DHD_RING_LOCK(ring->ring_lock, flags);
5484 
5485 	/* Claim space for exactly 'count' messages, for mitigation purposes */
5486 	msg_start = (void *)
5487 		dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, TRUE);
5488 	if (msg_start == NULL) {
5489 		DHD_INFO(("%s:%d: Rxbufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
5490 		DHD_RING_UNLOCK(ring->ring_lock, flags);
5491 		goto cleanup;
5492 	}
5493 	/* if msg_start != NULL, we should have allocated space for at least 1 item */
5494 	ASSERT(alloced > 0);
5495 
5496 	rxbuf_post_tmp = (uint8*)msg_start;
5497 
5498 	for (i = 0; i < alloced; i++) {
5499 		rxbuf_post = (host_rxbuf_post_t *)rxbuf_post_tmp;
5500 		p = pktbuf[i];
5501 		pa = pktbuf_pa[i];
5502 
5503 		pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_rx_map, p, pa,
5504 			pktlen[i], DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_RX);
5505 #if defined(DHD_PCIE_PKTID)
5506 		if (pktid == DHD_PKTID_INVALID) {
5507 			break;
5508 		}
5509 #endif /* DHD_PCIE_PKTID */
5510 
5511 #ifdef DHD_HMAPTEST
5512 	if (dhd->prot->hmaptest_rx_active == HMAPTEST_D11_RX_ACTIVE) {
5513 		/* scratchbuf area */
5514 		dhd->prot->hmap_rx_buf_va = (char *)dhd->prot->hmaptest.mem.va
5515 			+ dhd->prot->hmaptest.offset;
5516 
5517 		dhd->prot->hmap_rx_buf_len = pktlen[i] + prot->rx_metadata_offset;
5518 		if ((dhd->prot->hmap_rx_buf_va +  dhd->prot->hmap_rx_buf_len) >
5519 			((char *)dhd->prot->hmaptest.mem.va + dhd->prot->hmaptest.mem.len)) {
5520 			DHD_ERROR(("hmaptest: ERROR Rxpost outside HMAPTEST buffer\n"));
5521 			DHD_ERROR(("hmaptest: NOT Replacing Rx Buffer\n"));
5522 			dhd->prot->hmaptest_rx_active = HMAPTEST_D11_RX_INACTIVE;
5523 			dhd->prot->hmaptest.in_progress = FALSE;
5524 		} else {
5525 			pa = DMA_MAP(dhd->osh, dhd->prot->hmap_rx_buf_va,
5526 				dhd->prot->hmap_rx_buf_len, DMA_RX, p, 0);
5527 
5528 			dhd->prot->hmap_rx_buf_pa = pa;
5529 			dhd->prot->hmaptest_rx_pktid = pktid;
5530 			dhd->prot->hmaptest_rx_active = HMAPTEST_D11_RX_POSTED;
5531 			DHD_ERROR(("hmaptest: d11write rxpost scratch rxbuf pktid=0x%08x\n",
5532 				pktid));
5533 			DHD_ERROR(("hmaptest: d11write rxpost scratch rxbuf va=0x%p pa.lo=0x%08x\n",
5534 				dhd->prot->hmap_rx_buf_va, (uint32)PHYSADDRLO(pa)));
5535 			DHD_ERROR(("hmaptest: d11write rxpost orig pktdata va=0x%p pa.lo=0x%08x\n",
5536 				PKTDATA(dhd->osh, p), (uint32)PHYSADDRLO(pktbuf_pa[i])));
5537 		}
5538 	}
5539 #endif /* DHD_HMAPTEST */
5540 		dhd->prot->tot_rxbufpost++;
5541 		/* Common msg header */
5542 		rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_RXBUF_POST;
5543 		rxbuf_post->cmn_hdr.if_id = 0;
5544 		rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
5545 		rxbuf_post->cmn_hdr.flags = ring->current_phase;
5546 		ring->seqnum++;
5547 		rxbuf_post->data_buf_len = htol16((uint16)pktlen[i]);
5548 		rxbuf_post->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
5549 		rxbuf_post->data_buf_addr.low_addr =
5550 			htol32(PHYSADDRLO(pa) + prot->rx_metadata_offset);
5551 
5552 		if (prot->rx_metadata_offset) {
5553 			rxbuf_post->metadata_buf_len = prot->rx_metadata_offset;
5554 			rxbuf_post->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
5555 			rxbuf_post->metadata_buf_addr.low_addr  = htol32(PHYSADDRLO(pa));
5556 		} else {
5557 			rxbuf_post->metadata_buf_len = 0;
5558 			rxbuf_post->metadata_buf_addr.high_addr = 0;
5559 			rxbuf_post->metadata_buf_addr.low_addr  = 0;
5560 		}
5561 
5562 #ifdef DHD_PKTID_AUDIT_RING
5563 		DHD_PKTID_AUDIT(dhd, prot->pktid_rx_map, pktid, DHD_DUPLICATE_ALLOC);
5564 #endif /* DHD_PKTID_AUDIT_RING */
5565 
5566 		rxbuf_post->cmn_hdr.request_id = htol32(pktid);
5567 
5568 		/* Move rxbuf_post_tmp to next item */
5569 		rxbuf_post_tmp = rxbuf_post_tmp + ring->item_len;
5570 #ifdef DHD_LBUF_AUDIT
5571 		PKTAUDIT(dhd->osh, p);
5572 #endif
5573 	}
5574 
5575 	if (i < alloced) {
5576 		if (ring->wr < (alloced - i))
5577 			ring->wr = ring->max_items - (alloced - i);
5578 		else
5579 			ring->wr -= (alloced - i);
5580 
5581 		if (ring->wr == 0) {
5582 			DHD_INFO(("%s: flipping the phase now\n", ring->name));
5583 			ring->current_phase = ring->current_phase ?
5584 				0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
5585 		}
5586 
5587 		alloced = i;
5588 	}
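	/*
	 * Worked example of the rollback above (hypothetical numbers): if space
	 * for alloced = 32 items was claimed but pktid allocation stopped at
	 * i = 30, the write index is wound back by 2 (wrapping modulo
	 * ring->max_items if needed) and alloced is trimmed to 30, so only fully
	 * initialised items are made visible to the dongle.
	 */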
5589 
5590 	/* update ring's WR index and ring doorbell to dongle */
5591 	if (alloced > 0) {
5592 		dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
5593 	}
5594 
5595 	DHD_RING_UNLOCK(ring->ring_lock, flags);
5596 
5597 cleanup:
5598 	for (i = alloced; i < count; i++) {
5599 		p = pktbuf[i];
5600 		pa = pktbuf_pa[i];
5601 
5602 		DMA_UNMAP(dhd->osh, pa, pktlen[i], DMA_RX, 0, DHD_DMAH_NULL);
5603 		PKTFREE(dhd->osh, p, FALSE);
5604 	}
5605 
5606 	MFREE(dhd->osh, lcl_buf, lcl_buf_size);
5607 #ifdef PCIE_INB_DW
5608 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
5609 #endif
5610 
5611 	return alloced;
5612 } /* dhd_prot_rxbuf_post */
5613 
5614 #if !defined(BCM_ROUTER_DHD)
5615 static int
5616 dhd_prot_infobufpost(dhd_pub_t *dhd, msgbuf_ring_t *ring)
5617 {
5618 	unsigned long flags;
5619 	uint32 pktid;
5620 	dhd_prot_t *prot = dhd->prot;
5621 	uint16 alloced = 0;
5622 	uint16 pktsz = DHD_INFOBUF_RX_BUFPOST_PKTSZ;
5623 	uint32 pktlen;
5624 	info_buf_post_msg_t *infobuf_post;
5625 	uint8 *infobuf_post_tmp;
5626 	void *p;
5627 	void* msg_start;
5628 	uint8 i = 0;
5629 	dmaaddr_t pa;
5630 	int16 count = 0;
5631 
5632 	if (ring == NULL)
5633 		return 0;
5634 
5635 	if (ring->inited != TRUE)
5636 		return 0;
5637 	if (ring == dhd->prot->h2dring_info_subn) {
5638 		if (prot->max_infobufpost == 0)
5639 			return 0;
5640 
5641 		count = prot->max_infobufpost - prot->infobufpost;
5642 	}
5643 #ifdef BTLOG
5644 	else if (ring == dhd->prot->h2dring_btlog_subn) {
5645 		if (prot->max_btlogbufpost == 0)
5646 			return 0;
5647 
5648 		pktsz = DHD_BTLOG_RX_BUFPOST_PKTSZ;
5649 		count = prot->max_btlogbufpost - prot->btlogbufpost;
5650 	}
5651 #endif	/* BTLOG */
5652 	else {
5653 		DHD_ERROR(("Unknown ring\n"));
5654 		return 0;
5655 	}
5656 
5657 	if (count <= 0) {
5658 		DHD_INFO(("%s: Cannot post more than max info resp buffers\n",
5659 			__FUNCTION__));
5660 		return 0;
5661 	}
5662 
5663 #ifdef PCIE_INB_DW
5664 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
5665 		return BCME_ERROR;
5666 #endif /* PCIE_INB_DW */
5667 
5668 	/* grab the ring lock to allocate pktid and post on ring */
5669 	DHD_RING_LOCK(ring->ring_lock, flags);
5670 
5671 	/* Claim space for exactly 'count' messages, for mitigation purposes */
5672 	msg_start = (void *) dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, FALSE);
5673 
5674 	if (msg_start == NULL) {
5675 		DHD_INFO(("%s:%d: infobufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
5676 		DHD_RING_UNLOCK(ring->ring_lock, flags);
5677 #ifdef PCIE_INB_DW
5678 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
5679 #endif
5680 		return -1;
5681 	}
5682 
5683 	/* if msg_start != NULL, we should have allocated space for at least 1 item */
5684 	ASSERT(alloced > 0);
5685 
5686 	infobuf_post_tmp = (uint8*) msg_start;
5687 
5688 	/* loop through each allocated message in the host ring */
5689 	for (i = 0; i < alloced; i++) {
5690 		infobuf_post = (info_buf_post_msg_t *) infobuf_post_tmp;
5691 		/* Create a rx buffer */
5692 #ifdef DHD_USE_STATIC_CTRLBUF
5693 		p = PKTGET_STATIC(dhd->osh, pktsz, FALSE);
5694 #else
5695 		p = PKTGET(dhd->osh, pktsz, FALSE);
5696 #endif /* DHD_USE_STATIC_CTRLBUF */
5697 		if (p == NULL) {
5698 			DHD_ERROR(("%s:%d: PKTGET for infobuf failed\n", __FUNCTION__, __LINE__));
5699 			dhd->rx_pktgetfail++;
5700 			break;
5701 		}
5702 		pktlen = PKTLEN(dhd->osh, p);
5703 		pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
5704 		if (PHYSADDRISZERO(pa)) {
5705 			DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
5706 #ifdef DHD_USE_STATIC_CTRLBUF
5707 			PKTFREE_STATIC(dhd->osh, p, FALSE);
5708 #else
5709 			PKTFREE(dhd->osh, p, FALSE);
5710 #endif /* DHD_USE_STATIC_CTRLBUF */
5711 			DHD_ERROR(("Invalid phyaddr 0\n"));
5712 			ASSERT(0);
5713 			break;
5714 		}
5715 #ifdef DMAMAP_STATS
5716 		dhd->dma_stats.info_rx++;
5717 		dhd->dma_stats.info_rx_sz += pktlen;
5718 #endif /* DMAMAP_STATS */
5719 		pktlen = PKTLEN(dhd->osh, p);
5720 
5721 		/* Common msg header */
5722 		infobuf_post->cmn_hdr.msg_type = MSG_TYPE_INFO_BUF_POST;
5723 		infobuf_post->cmn_hdr.if_id = 0;
5724 		infobuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
5725 		infobuf_post->cmn_hdr.flags = ring->current_phase;
5726 		ring->seqnum++;
5727 
5728 		pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_ctrl_map, p, pa,
5729 			pktlen, DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_INFO_RX);
5730 
5731 #if defined(DHD_PCIE_PKTID)
5732 		if (pktid == DHD_PKTID_INVALID) {
5733 			DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0);
5734 
5735 #ifdef DHD_USE_STATIC_CTRLBUF
5736 			PKTFREE_STATIC(dhd->osh, p, FALSE);
5737 #else
5738 			PKTFREE(dhd->osh, p, FALSE);
5739 #endif /* DHD_USE_STATIC_CTRLBUF */
5740 			DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
5741 			break;
5742 		}
5743 #endif /* DHD_PCIE_PKTID */
5744 
5745 		infobuf_post->host_buf_len = htol16((uint16)pktlen);
5746 		infobuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
5747 		infobuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
5748 
5749 #ifdef DHD_PKTID_AUDIT_RING
5750 		DHD_PKTID_AUDIT(dhd, prot->pktid_ctrl_map, pktid, DHD_DUPLICATE_ALLOC);
5751 #endif /* DHD_PKTID_AUDIT_RING */
5752 
5753 		DHD_MSGBUF_INFO(("ID %d, low_addr 0x%08x, high_addr 0x%08x\n",
5754 			infobuf_post->cmn_hdr.request_id,  infobuf_post->host_buf_addr.low_addr,
5755 			infobuf_post->host_buf_addr.high_addr));
5756 
5757 		infobuf_post->cmn_hdr.request_id = htol32(pktid);
5758 		/* Move infobuf_post_tmp to next item */
5759 		infobuf_post_tmp = infobuf_post_tmp + ring->item_len;
5760 #ifdef DHD_LBUF_AUDIT
5761 		PKTAUDIT(dhd->osh, p);
5762 #endif
5763 	}
5764 
5765 	if (i < alloced) {
5766 		if (ring->wr < (alloced - i))
5767 			ring->wr = ring->max_items - (alloced - i);
5768 		else
5769 			ring->wr -= (alloced - i);
5770 
5771 		alloced = i;
5772 		if (alloced && ring->wr == 0) {
5773 			DHD_INFO(("%s: flipping the phase now\n", ring->name));
5774 			ring->current_phase = ring->current_phase ?
5775 				0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
5776 		}
5777 	}
5778 
5779 	/* Update the write pointer in TCM & ring bell */
5780 	if (alloced > 0) {
5781 		if (ring == dhd->prot->h2dring_info_subn) {
5782 			prot->infobufpost += alloced;
5783 		}
5784 #ifdef BTLOG
5785 		if (ring == dhd->prot->h2dring_btlog_subn) {
5786 			prot->btlogbufpost += alloced;
5787 		}
5788 #endif	/* BTLOG */
5789 		dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
5790 	}
5791 
5792 	DHD_RING_UNLOCK(ring->ring_lock, flags);
5793 
5794 #ifdef PCIE_INB_DW
5795 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
5796 #endif
5797 	return alloced;
5798 } /* dhd_prot_infobufpost */
5799 #endif /* !BCM_ROUTER_DHD */
5800 
5801 #ifdef IOCTLRESP_USE_CONSTMEM
5802 static int
5803 alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf)
5804 {
5805 	int err;
5806 	memset(retbuf, 0, sizeof(dhd_dma_buf_t));
5807 
5808 	if ((err = dhd_dma_buf_alloc(dhd, retbuf, IOCT_RETBUF_SIZE)) != BCME_OK) {
5809 		DHD_ERROR(("%s: dhd_dma_buf_alloc err %d\n", __FUNCTION__, err));
5810 		ASSERT(0);
5811 		return BCME_NOMEM;
5812 	}
5813 
5814 	return BCME_OK;
5815 }
5816 
5817 static void
5818 free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf)
5819 {
5820 	/* retbuf (declared on stack) not fully populated ...  */
5821 	if (retbuf->va) {
5822 		uint32 dma_pad;
5823 		dma_pad = (IOCT_RETBUF_SIZE % DHD_DMA_PAD) ? DHD_DMA_PAD : 0;
5824 		retbuf->len = IOCT_RETBUF_SIZE;
5825 		retbuf->_alloced = retbuf->len + dma_pad;
5826 	}
5827 
5828 	dhd_dma_buf_free(dhd, retbuf);
5829 	return;
5830 }
5831 #endif /* IOCTLRESP_USE_CONSTMEM */
5832 
5833 static int
5834 dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, uint8 msg_type)
5835 {
5836 	void *p;
5837 	uint16 pktsz;
5838 	ioctl_resp_evt_buf_post_msg_t *rxbuf_post;
5839 	dmaaddr_t pa;
5840 	uint32 pktlen;
5841 	dhd_prot_t *prot = dhd->prot;
5842 	uint16 alloced = 0;
5843 	unsigned long flags;
5844 	dhd_dma_buf_t retbuf;
5845 	void *dmah = NULL;
5846 	uint32 pktid;
5847 	void *map_handle;
5848 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
5849 	bool non_ioctl_resp_buf = 0;
5850 	dhd_pkttype_t buf_type;
5851 
5852 	if (dhd->busstate == DHD_BUS_DOWN) {
5853 		DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
5854 		return -1;
5855 	}
5856 	memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
5857 
5858 	if (msg_type == MSG_TYPE_IOCTLRESP_BUF_POST)
5859 		buf_type = PKTTYPE_IOCTL_RX;
5860 	else if (msg_type == MSG_TYPE_EVENT_BUF_POST)
5861 		buf_type = PKTTYPE_EVENT_RX;
5862 	else if (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST)
5863 		buf_type = PKTTYPE_TSBUF_RX;
5864 	else {
5865 		DHD_ERROR(("invalid message type to be posted to Ctrl ring %d\n", msg_type));
5866 		/* XXX: maybe add an assert */
5867 		return -1;
5868 	}
5869 #ifdef PCIE_INB_DW
5870 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) {
5871 		return BCME_ERROR;
5872 	}
5873 #endif /* PCIE_INB_DW */
5874 
5875 	if ((msg_type == MSG_TYPE_EVENT_BUF_POST) || (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST))
5876 		non_ioctl_resp_buf = TRUE;
5877 	else
5878 		non_ioctl_resp_buf = FALSE;
5879 
5880 	if (non_ioctl_resp_buf) {
5881 		/* Allocate packet for not ioctl resp buffer post */
5882 		pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
5883 	} else {
5884 		/* Allocate packet for ctrl/ioctl buffer post */
5885 		pktsz = DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ;
5886 	}
5887 
5888 #ifdef IOCTLRESP_USE_CONSTMEM
5889 	if (!non_ioctl_resp_buf) {
5890 		if (alloc_ioctl_return_buffer(dhd, &retbuf) != BCME_OK) {
5891 			DHD_ERROR(("Could not allocate IOCTL response buffer\n"));
5892 			goto fail;
5893 		}
5894 		ASSERT(retbuf.len == IOCT_RETBUF_SIZE);
5895 		p = retbuf.va;
5896 		pktlen = retbuf.len;
5897 		pa = retbuf.pa;
5898 		dmah = retbuf.dmah;
5899 	} else
5900 #endif /* IOCTLRESP_USE_CONSTMEM */
5901 	{
5902 #ifdef DHD_USE_STATIC_CTRLBUF
5903 		p = PKTGET_STATIC(dhd->osh, pktsz, FALSE);
5904 #else
5905 		p = PKTGET(dhd->osh, pktsz, FALSE);
5906 #endif /* DHD_USE_STATIC_CTRLBUF */
5907 		if (p == NULL) {
5908 			DHD_ERROR(("%s:%d: PKTGET for %s buf failed\n",
5909 				__FUNCTION__, __LINE__, non_ioctl_resp_buf ?
5910 				"EVENT" : "IOCTL RESP"));
5911 			dhd->rx_pktgetfail++;
5912 			goto fail;
5913 		}
5914 
5915 		pktlen = PKTLEN(dhd->osh, p);
5916 		pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
5917 
5918 		if (PHYSADDRISZERO(pa)) {
5919 			DHD_ERROR(("Invalid physaddr 0\n"));
5920 			ASSERT(0);
5921 			goto free_pkt_return;
5922 		}
5923 
5924 #ifdef DMAMAP_STATS
5925 		switch (buf_type) {
5926 #ifndef IOCTLRESP_USE_CONSTMEM
5927 			case PKTTYPE_IOCTL_RX:
5928 				dhd->dma_stats.ioctl_rx++;
5929 				dhd->dma_stats.ioctl_rx_sz += pktlen;
5930 				break;
5931 #endif /* !IOCTLRESP_USE_CONSTMEM */
5932 			case PKTTYPE_EVENT_RX:
5933 				dhd->dma_stats.event_rx++;
5934 				dhd->dma_stats.event_rx_sz += pktlen;
5935 				break;
5936 			case PKTTYPE_TSBUF_RX:
5937 				dhd->dma_stats.tsbuf_rx++;
5938 				dhd->dma_stats.tsbuf_rx_sz += pktlen;
5939 				break;
5940 			default:
5941 				break;
5942 		}
5943 #endif /* DMAMAP_STATS */
5944 
5945 	}
5946 
5947 	/* grab the ring lock to allocate pktid and post on ring */
5948 	DHD_RING_LOCK(ring->ring_lock, flags);
5949 
5950 	rxbuf_post = (ioctl_resp_evt_buf_post_msg_t *)
5951 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
5952 
5953 	if (rxbuf_post == NULL) {
5954 		DHD_RING_UNLOCK(ring->ring_lock, flags);
5955 		DHD_ERROR(("%s:%d: Ctrl submit Msgbuf Not available to post buffer \n",
5956 			__FUNCTION__, __LINE__));
5957 
5958 #ifdef IOCTLRESP_USE_CONSTMEM
5959 		if (non_ioctl_resp_buf)
5960 #endif /* IOCTLRESP_USE_CONSTMEM */
5961 		{
5962 			DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
5963 		}
5964 		goto free_pkt_return;
5965 	}
5966 
5967 	/* CMN msg header */
5968 	rxbuf_post->cmn_hdr.msg_type = msg_type;
5969 
5970 #ifdef IOCTLRESP_USE_CONSTMEM
5971 	if (!non_ioctl_resp_buf) {
5972 		map_handle = dhd->prot->pktid_map_handle_ioctl;
5973 		pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, p, pa, pktlen, DMA_RX, dmah,
5974 			ring->dma_buf.secdma, buf_type);
5975 	} else
5976 #endif /* IOCTLRESP_USE_CONSTMEM */
5977 	{
5978 		map_handle = dhd->prot->pktid_ctrl_map;
5979 		pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle,
5980 			p, pa, pktlen, DMA_RX, dmah, ring->dma_buf.secdma,
5981 			buf_type);
5982 	}
5983 
5984 	if (pktid == DHD_PKTID_INVALID) {
5985 		if (ring->wr == 0) {
5986 			ring->wr = ring->max_items - 1;
5987 		} else {
5988 			ring->wr--;
5989 			if (ring->wr == 0) {
5990 				ring->current_phase = ring->current_phase ? 0 :
5991 					BCMPCIE_CMNHDR_PHASE_BIT_INIT;
5992 			}
5993 		}
5994 		DHD_RING_UNLOCK(ring->ring_lock, flags);
5995 		DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
5996 		DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
5997 		goto free_pkt_return;
5998 	}
5999 
6000 #ifdef DHD_PKTID_AUDIT_RING
6001 	DHD_PKTID_AUDIT(dhd, map_handle, pktid, DHD_DUPLICATE_ALLOC);
6002 #endif /* DHD_PKTID_AUDIT_RING */
6003 
6004 	rxbuf_post->cmn_hdr.request_id = htol32(pktid);
6005 	rxbuf_post->cmn_hdr.if_id = 0;
6006 	rxbuf_post->cmn_hdr.epoch =  ring->seqnum % H2D_EPOCH_MODULO;
6007 	ring->seqnum++;
6008 	rxbuf_post->cmn_hdr.flags = ring->current_phase;
6009 
6010 #if defined(DHD_PCIE_PKTID)
6011 	if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) {
6012 		if (ring->wr == 0) {
6013 			ring->wr = ring->max_items - 1;
6014 		} else {
6015 			if (ring->wr == 0) {
6016 				ring->current_phase = ring->current_phase ? 0 :
6017 					BCMPCIE_CMNHDR_PHASE_BIT_INIT;
6018 			}
6019 		}
6020 		DHD_RING_UNLOCK(ring->ring_lock, flags);
6021 #ifdef IOCTLRESP_USE_CONSTMEM
6022 		if (non_ioctl_resp_buf)
6023 #endif /* IOCTLRESP_USE_CONSTMEM */
6024 		{
6025 			DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
6026 		}
6027 		goto free_pkt_return;
6028 	}
6029 #endif /* DHD_PCIE_PKTID */
6030 
6031 #ifndef IOCTLRESP_USE_CONSTMEM
6032 	rxbuf_post->host_buf_len = htol16((uint16)PKTLEN(dhd->osh, p));
6033 #else
6034 	rxbuf_post->host_buf_len = htol16((uint16)pktlen);
6035 #endif /* IOCTLRESP_USE_CONSTMEM */
6036 	rxbuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
6037 	rxbuf_post->host_buf_addr.low_addr  = htol32(PHYSADDRLO(pa));
6038 #ifdef DHD_LBUF_AUDIT
6039 	if (non_ioctl_resp_buf)
6040 		PKTAUDIT(dhd->osh, p);
6041 #endif
6042 	/* update ring's WR index and ring doorbell to dongle */
6043 	dhd_prot_ring_write_complete(dhd, ring, rxbuf_post, 1);
6044 
6045 	DHD_RING_UNLOCK(ring->ring_lock, flags);
6046 
6047 #ifdef PCIE_INB_DW
6048 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
6049 #endif
6050 	return 1;
6051 
6052 free_pkt_return:
6053 	if (!non_ioctl_resp_buf) {
6054 #ifdef IOCTLRESP_USE_CONSTMEM
6055 		free_ioctl_return_buffer(dhd, &retbuf);
6056 #else
6057 		dhd_prot_packet_free(dhd, p, buf_type, FALSE);
6058 #endif /* IOCTLRESP_USE_CONSTMEM */
6059 	} else {
6060 		dhd_prot_packet_free(dhd, p, buf_type, FALSE);
6061 	}
6062 
6063 fail:
6064 #ifdef PCIE_INB_DW
6065 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
6066 #endif
6067 	return -1;
6068 } /* dhd_prot_rxbufpost_ctrl */
6069 
6070 static uint16
6071 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msg_type, uint32 max_to_post)
6072 {
6073 	uint32 i = 0;
6074 	int32 ret_val;
6075 
6076 	DHD_MSGBUF_INFO(("max to post %d, event %d \n", max_to_post, msg_type));
6077 
6078 	if (dhd->busstate == DHD_BUS_DOWN) {
6079 		DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
6080 		return 0;
6081 	}
6082 
6083 	while (i < max_to_post) {
6084 		ret_val  = dhd_prot_rxbufpost_ctrl(dhd, msg_type);
6085 		if (ret_val < 0)
6086 			break;
6087 		i++;
6088 	}
6089 	DHD_MSGBUF_INFO(("posted %d buffers of type %d\n", i, msg_type));
6090 	return (uint16)i;
6091 }
6092 
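/*
 * The helpers below wrap dhd_msgbuf_rxbuf_post_ctrlpath() for the individual
 * control-path buffer types (MSG_TYPE_IOCTLRESP_BUF_POST, MSG_TYPE_EVENT_BUF_POST
 * and MSG_TYPE_TIMSTAMP_BUFPOST). Each maintains a running count of buffers
 * currently posted so that no more than the configured maximum is outstanding.
 */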
6093 static void
6094 dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *dhd)
6095 {
6096 	dhd_prot_t *prot = dhd->prot;
6097 	int max_to_post;
6098 
6099 	DHD_MSGBUF_INFO(("ioctl resp buf post\n"));
6100 	max_to_post = prot->max_ioctlrespbufpost - prot->cur_ioctlresp_bufs_posted;
6101 	if (max_to_post <= 0) {
6102 		DHD_INFO(("%s: Cannot post more than max IOCTL resp buffers\n",
6103 			__FUNCTION__));
6104 		return;
6105 	}
6106 	prot->cur_ioctlresp_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
6107 		MSG_TYPE_IOCTLRESP_BUF_POST, max_to_post);
6108 }
6109 
6110 static void
6111 dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *dhd)
6112 {
6113 	dhd_prot_t *prot = dhd->prot;
6114 	int max_to_post;
6115 
6116 	max_to_post = prot->max_eventbufpost - prot->cur_event_bufs_posted;
6117 	if (max_to_post <= 0) {
6118 		DHD_ERROR(("%s: Cannot post more than max event buffers\n",
6119 			__FUNCTION__));
6120 		return;
6121 	}
6122 	prot->cur_event_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
6123 		MSG_TYPE_EVENT_BUF_POST, max_to_post);
6124 }
6125 
6126 static int
6127 dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *dhd)
6128 {
6129 #ifdef DHD_TIMESYNC
6130 	dhd_prot_t *prot = dhd->prot;
6131 	int max_to_post;
6132 
6133 	if (prot->active_ipc_version < 7) {
6134 		DHD_ERROR(("no ts buffers posted; device ipc rev is %d, needs to be at least 7\n",
6135 			prot->active_ipc_version));
6136 		return 0;
6137 	}
6138 
6139 	max_to_post = prot->max_tsbufpost - prot->cur_ts_bufs_posted;
6140 	if (max_to_post <= 0) {
6141 		DHD_INFO(("%s: Cannot post more than max ts buffers\n",
6142 			__FUNCTION__));
6143 		return 0;
6144 	}
6145 
6146 	prot->cur_ts_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
6147 		MSG_TYPE_TIMSTAMP_BUFPOST, max_to_post);
6148 #endif /* DHD_TIMESYNC */
6149 	return 0;
6150 }
6151 
6152 bool
6153 BCMFASTPATH(dhd_prot_process_msgbuf_infocpl)(dhd_pub_t *dhd, uint bound)
6154 {
6155 	dhd_prot_t *prot = dhd->prot;
6156 	bool more = TRUE;
6157 	uint n = 0;
6158 	msgbuf_ring_t *ring = prot->d2hring_info_cpln;
6159 	unsigned long flags;
6160 
6161 	if (ring == NULL)
6162 		return FALSE;
6163 	if (ring->inited != TRUE)
6164 		return FALSE;
6165 
6166 	/* Process all the messages - DTOH direction */
6167 	while (!dhd_is_device_removed(dhd)) {
6168 		uint8 *msg_addr;
6169 		uint32 msg_len;
6170 
6171 		if (dhd->hang_was_sent) {
6172 			more = FALSE;
6173 			break;
6174 		}
6175 
6176 		if (dhd->smmu_fault_occurred) {
6177 			more = FALSE;
6178 			break;
6179 		}
6180 
6181 		DHD_RING_LOCK(ring->ring_lock, flags);
6182 		/* Get the message from ring */
6183 		msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
6184 		DHD_RING_UNLOCK(ring->ring_lock, flags);
6185 		if (msg_addr == NULL) {
6186 			more = FALSE;
6187 			break;
6188 		}
6189 
6190 		/* Prefetch data to populate the cache */
6191 		OSL_PREFETCH(msg_addr);
6192 
6193 		if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
6194 			DHD_ERROR(("%s: Error processing info cpl msgbuf of len %d\n",
6195 				__FUNCTION__, msg_len));
6196 		}
6197 
6198 		/* Update read pointer */
6199 		dhd_prot_upd_read_idx(dhd, ring);
6200 
6201 		/* After batch processing, check RX bound */
6202 		n += msg_len / ring->item_len;
6203 		if (n >= bound) {
6204 			break;
6205 		}
6206 	}
6207 
6208 	return more;
6209 }
6210 
6211 #ifdef BTLOG
6212 bool
6213 BCMFASTPATH(dhd_prot_process_msgbuf_btlogcpl)(dhd_pub_t *dhd, uint bound)
6214 {
6215 	dhd_prot_t *prot = dhd->prot;
6216 	bool more = TRUE;
6217 	uint n = 0;
6218 	msgbuf_ring_t *ring = prot->d2hring_btlog_cpln;
6219 
6220 	if (ring == NULL)
6221 		return FALSE;
6222 	if (ring->inited != TRUE)
6223 		return FALSE;
6224 
6225 	/* Process all the messages - DTOH direction */
6226 	while (!dhd_is_device_removed(dhd)) {
6227 		uint8 *msg_addr;
6228 		uint32 msg_len;
6229 
6230 		if (dhd_query_bus_erros(dhd)) {
6231 			more = FALSE;
6232 			break;
6233 		}
6234 
6235 		if (dhd->hang_was_sent) {
6236 			more = FALSE;
6237 			break;
6238 		}
6239 
6240 		if (dhd->smmu_fault_occurred) {
6241 			more = FALSE;
6242 			break;
6243 		}
6244 
6245 		/* Get the message from ring */
6246 		msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
6247 		if (msg_addr == NULL) {
6248 			more = FALSE;
6249 			break;
6250 		}
6251 
6252 		/* Prefetch data to populate the cache */
6253 		OSL_PREFETCH(msg_addr);
6254 
6255 		if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
6256 			DHD_ERROR(("%s: Error processing btlog cpl msgbuf of len %d\n",
6257 				__FUNCTION__, msg_len));
6258 		}
6259 
6260 		/* Update read pointer */
6261 		dhd_prot_upd_read_idx(dhd, ring);
6262 
6263 		/* After batch processing, check RX bound */
6264 		n += msg_len / ring->item_len;
6265 		if (n >= bound) {
6266 			break;
6267 		}
6268 	}
6269 
6270 	return more;
6271 }
6272 #endif	/* BTLOG */
6273 
6274 #ifdef EWP_EDL
6275 bool
6276 dhd_prot_process_msgbuf_edl(dhd_pub_t *dhd)
6277 {
6278 	dhd_prot_t *prot = dhd->prot;
6279 	msgbuf_ring_t *ring = prot->d2hring_edl;
6280 	unsigned long flags = 0;
6281 	uint32 items = 0;
6282 	uint16 rd = 0;
6283 	uint16 depth = 0;
6284 
6285 	if (ring == NULL)
6286 		return FALSE;
6287 	if (ring->inited != TRUE)
6288 		return FALSE;
6289 	if (ring->item_len == 0) {
6290 		DHD_ERROR(("%s: Bad ring ! ringidx %d, item_len %d \n",
6291 			__FUNCTION__, ring->idx, ring->item_len));
6292 		return FALSE;
6293 	}
6294 
6295 	if (dhd_query_bus_erros(dhd)) {
6296 		return FALSE;
6297 	}
6298 
6299 	if (dhd->hang_was_sent) {
6300 		return FALSE;
6301 	}
6302 
6303 	/* In this DPC context, just check whether the wr index has moved
6304 	 * and schedule the deferred context to actually process the
6305 	 * work items.
6306 	 */
6307 
6308 	/* update the write index */
6309 	DHD_RING_LOCK(ring->ring_lock, flags);
6310 	if (dhd->dma_d2h_ring_upd_support) {
6311 		/* DMAing write/read indices supported */
6312 		ring->wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
6313 	} else {
6314 		dhd_bus_cmn_readshared(dhd->bus, &ring->wr, RING_WR_UPD, ring->idx);
6315 	}
6316 	rd = ring->rd;
6317 	DHD_RING_UNLOCK(ring->ring_lock, flags);
6318 
6319 	depth = ring->max_items;
6320 	/* check for avail space, in number of ring items */
6321 	items = READ_AVAIL_SPACE(ring->wr, rd, depth);
6322 	if (items == 0) {
6323 		/* no work items in edl ring */
6324 		return FALSE;
6325 	}
6326 	if (items > ring->max_items) {
6327 		DHD_ERROR(("\r\n======================= \r\n"));
6328 		DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n",
6329 			__FUNCTION__, ring, ring->name, ring->max_items, items));
6330 		DHD_ERROR(("wr: %d,  rd: %d,  depth: %d  \r\n",
6331 			ring->wr, ring->rd, depth));
6332 		DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n",
6333 			dhd->busstate, dhd->bus->wait_for_d3_ack));
6334 		DHD_ERROR(("\r\n======================= \r\n"));
6335 #ifdef SUPPORT_LINKDOWN_RECOVERY
6336 		if (ring->wr >= ring->max_items) {
6337 			dhd->bus->read_shm_fail = TRUE;
6338 		}
6339 #else
6340 #ifdef DHD_FW_COREDUMP
6341 		if (dhd->memdump_enabled) {
6342 			/* collect core dump */
6343 			dhd->memdump_type = DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR;
6344 			dhd_bus_mem_dump(dhd);
6345 
6346 		}
6347 #endif /* DHD_FW_COREDUMP */
6348 #endif /* SUPPORT_LINKDOWN_RECOVERY */
6349 		dhd_schedule_reset(dhd);
6350 
6351 		return FALSE;
6352 	}
6353 
6354 	if (items > D2HRING_EDL_WATERMARK) {
6355 		DHD_ERROR_RLMT(("%s: WARNING! EDL watermark hit, num items=%u;"
6356 			" rd=%u; wr=%u; depth=%u;\n", __FUNCTION__, items,
6357 			ring->rd, ring->wr, depth));
6358 	}
6359 
6360 	dhd_schedule_logtrace(dhd->info);
6361 
6362 	return FALSE;
6363 }
6364 
6365 /*
6366  * This is called either from work queue context of 'event_log_dispatcher_work' or
6367  * from the kthread context of dhd_logtrace_thread
6368  */
6369 int
6370 dhd_prot_process_edl_complete(dhd_pub_t *dhd, void *evt_decode_data)
6371 {
6372 	dhd_prot_t *prot = NULL;
6373 	msgbuf_ring_t *ring = NULL;
6374 	int err = 0;
6375 	unsigned long flags = 0;
6376 	cmn_msg_hdr_t *msg = NULL;
6377 	uint8 *msg_addr = NULL;
6378 	uint32 max_items_to_process = 0, n = 0;
6379 	uint32 num_items = 0, new_items = 0;
6380 	uint16 depth = 0;
6381 	volatile uint16 wr = 0;
6382 
6383 	if (!dhd || !dhd->prot)
6384 		return 0;
6385 
6386 	prot = dhd->prot;
6387 	ring = prot->d2hring_edl;
6388 
6389 	if (!ring || !evt_decode_data) {
6390 		return 0;
6391 	}
6392 
6393 	if (dhd->hang_was_sent) {
6394 		return FALSE;
6395 	}
6396 
6397 	DHD_RING_LOCK(ring->ring_lock, flags);
6398 	ring->curr_rd = ring->rd;
6399 	wr = ring->wr;
6400 	depth = ring->max_items;
6401 	/* check for avail space, in number of ring items
6402 	 * Note, that this will only give the # of items
6403 	 * from rd to wr if wr>=rd, or from rd to ring end
6404 	 * if wr < rd. So in the latter case strictly speaking
6405 	 * not all the items are read. But this is OK, because
6406 	 * these will be processed in the next doorbell as rd
6407 	 * would have wrapped around. Processing in the next
6408 	 * doorbell is acceptable since EDL only contains debug data
6409 	 */
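	/*
	 * Illustrative example (numbers are hypothetical): with depth = 1024,
	 * rd = 1000 and wr = 10, this pass reports only the 24 items from rd
	 * up to the ring end; the remaining 10 items are picked up on a later
	 * doorbell, after rd has wrapped back to 0.
	 */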
6410 	num_items = READ_AVAIL_SPACE(wr, ring->rd, depth);
6411 
6412 	if (num_items == 0) {
6413 		/* no work items in edl ring */
6414 		DHD_RING_UNLOCK(ring->ring_lock, flags);
6415 		return 0;
6416 	}
6417 
6418 	DHD_INFO(("%s: EDL work items [%u] available \n",
6419 			__FUNCTION__, num_items));
6420 
6421 	/* if space is available, calculate address to be read */
6422 	msg_addr = (uint8 *)ring->dma_buf.va + (ring->rd * ring->item_len);
6423 
6424 	max_items_to_process = MIN(num_items, DHD_EVENT_LOGTRACE_BOUND);
6425 
6426 	DHD_RING_UNLOCK(ring->ring_lock, flags);
6427 
6428 	/* Prefetch data to populate the cache */
6429 	OSL_PREFETCH(msg_addr);
6430 
6431 	n = max_items_to_process;
6432 	while (n > 0) {
6433 		msg = (cmn_msg_hdr_t *)msg_addr;
6434 		/* wait for DMA of work item to complete */
6435 		if ((err = dhd->prot->d2h_edl_sync_cb(dhd, ring, msg)) != BCME_OK) {
6436 			DHD_ERROR(("%s: Error waiting for DMA to cmpl in EDL ring; err = %d\n",
6437 				__FUNCTION__, err));
6438 		}
6439 		/*
6440 		 * Update the curr_rd to the current index in the ring, from where
6441 		 * the work item is fetched. This way if the fetched work item
6442 		 * fails in LIVELOCK, we can print the exact read index in the ring
6443 		 * that shows up the corrupted work item.
6444 		 */
6445 		if ((ring->curr_rd + 1) >= ring->max_items) {
6446 			ring->curr_rd = 0;
6447 		} else {
6448 			ring->curr_rd += 1;
6449 		}
6450 
6451 		if (err != BCME_OK) {
6452 			return 0;
6453 		}
6454 
6455 		/* process the edl work item, i.e, the event log */
6456 		err = dhd_event_logtrace_process_edl(dhd, msg_addr, evt_decode_data);
6457 
6458 		/* Dummy sleep so that scheduler kicks in after processing any logprints */
6459 		OSL_SLEEP(0);
6460 
6461 		/* Prefetch data to populate the cache */
6462 		OSL_PREFETCH(msg_addr + ring->item_len);
6463 
6464 		msg_addr += ring->item_len;
6465 		--n;
6466 	}
6467 
6468 	DHD_RING_LOCK(ring->ring_lock, flags);
6469 	/* update host ring read pointer */
6470 	if ((ring->rd + max_items_to_process) >= ring->max_items)
6471 		ring->rd = 0;
6472 	else
6473 		ring->rd += max_items_to_process;
6474 	DHD_RING_UNLOCK(ring->ring_lock, flags);
6475 
6476 	/* Now after processing max_items_to_process update dongle rd index.
6477 	 * The TCM rd index is updated only if bus is not
6478 	 * in D3. Else, the rd index is updated from resume
6479 	 * context in - 'dhdpcie_bus_suspend'
6480 	 */
6481 	DHD_GENERAL_LOCK(dhd, flags);
6482 	if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd)) {
6483 		DHD_INFO(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
6484 			__FUNCTION__, dhd->busstate, dhd->dhd_bus_busy_state));
6485 		DHD_GENERAL_UNLOCK(dhd, flags);
6486 	} else {
6487 		DHD_GENERAL_UNLOCK(dhd, flags);
6488 		DHD_EDL_RING_TCM_RD_UPDATE(dhd);
6489 	}
6490 
6491 	/* If num_items > bound, we will reschedule anyway and this
6492 	 * function runs again, so if in the meantime the DPC has
6493 	 * updated the wr index, the updated wr is read then. But if
6494 	 * num_items <= bound, and the DPC executes and updates the wr index
6495 	 * while the above while loop is running, then the updated 'wr' index
6496 	 * needs to be re-read here. If we don't do so, the new
6497 	 * event logs will not be processed until the next time
6498 	 * this function is scheduled.
6499 	 */
6500 	if (num_items <= DHD_EVENT_LOGTRACE_BOUND) {
6501 		/* read the updated wr index if reqd. and update num_items */
6502 		DHD_RING_LOCK(ring->ring_lock, flags);
6503 		if (wr != (volatile uint16)ring->wr) {
6504 			wr = (volatile uint16)ring->wr;
6505 			new_items = READ_AVAIL_SPACE(wr, ring->rd, depth);
6506 			DHD_INFO(("%s: new items [%u] avail in edl\n",
6507 				__FUNCTION__, new_items));
6508 			num_items += new_items;
6509 		}
6510 		DHD_RING_UNLOCK(ring->ring_lock, flags);
6511 	}
6512 
6513 	/* if # of items processed is less than num_items, need to re-schedule
6514 	* the deferred ctx
6515 	*/
6516 	if (max_items_to_process < num_items) {
6517 		DHD_INFO(("%s: EDL bound hit / new items found, "
6518 				"items processed=%u; remaining=%u, "
6519 				"resched deferred ctx...\n",
6520 				__FUNCTION__, max_items_to_process,
6521 				num_items - max_items_to_process));
6522 		return (num_items - max_items_to_process);
6523 	}
6524 
6525 	return 0;
6526 
6527 }
6528 
6529 void
6530 dhd_prot_edl_ring_tcm_rd_update(dhd_pub_t *dhd)
6531 {
6532 	dhd_prot_t *prot = NULL;
6533 	unsigned long flags = 0;
6534 	msgbuf_ring_t *ring = NULL;
6535 
6536 	if (!dhd)
6537 		return;
6538 
6539 	prot = dhd->prot;
6540 	if (!prot || !prot->d2hring_edl)
6541 		return;
6542 
6543 	ring = prot->d2hring_edl;
6544 	DHD_RING_LOCK(ring->ring_lock, flags);
6545 	dhd_prot_upd_read_idx(dhd, ring);
6546 	DHD_RING_UNLOCK(ring->ring_lock, flags);
6547 	if (dhd->dma_h2d_ring_upd_support &&
6548 		!IDMA_ACTIVE(dhd)) {
6549 		dhd_prot_ring_doorbell(dhd, DHD_RDPTR_UPDATE_H2D_DB_MAGIC(ring));
6550 	}
6551 }
6552 #endif /* EWP_EDL */
6553 
6554 static void
6555 dhd_prot_rx_frame(dhd_pub_t *dhd, void *pkt, int ifidx, uint pkt_count)
6556 {
6557 
6558 #ifdef DHD_LB_RXP
6559 	if (dhd_read_lb_rxp(dhd) == 1) {
6560 		dhd_lb_rx_pkt_enqueue(dhd, pkt, ifidx);
6561 		return;
6562 	}
6563 #endif /* DHD_LB_RXP */
6564 	dhd_bus_rx_frame(dhd->bus, pkt, ifidx, pkt_count);
6565 }
6566 
6567 #ifdef DHD_LB_RXP
6568 static int dhd_prot_lb_rxp_flow_ctrl(dhd_pub_t *dhd)
6569 {
6570 	if ((dhd->lb_rxp_stop_thr == 0) || (dhd->lb_rxp_strt_thr == 0)) {
6571 		/* when either the stop or the start threshold is zero, flow ctrl is not enabled */
6572 		return FALSE;
6573 	}
6574 
6575 	if ((dhd_lb_rxp_process_qlen(dhd) >= dhd->lb_rxp_stop_thr) &&
6576 			(!atomic_read(&dhd->lb_rxp_flow_ctrl))) {
6577 		atomic_set(&dhd->lb_rxp_flow_ctrl, TRUE);
6578 #ifdef DHD_LB_STATS
6579 		dhd->lb_rxp_stop_thr_hitcnt++;
6580 #endif /* DHD_LB_STATS */
6581 		DHD_INFO(("lb_rxp_process_qlen %d lb_rxp_stop_thr %d\n",
6582 			dhd_lb_rxp_process_qlen(dhd), dhd->lb_rxp_stop_thr));
6583 	} else if ((dhd_lb_rxp_process_qlen(dhd) <= dhd->lb_rxp_strt_thr) &&
6584 			(atomic_read(&dhd->lb_rxp_flow_ctrl))) {
6585 		atomic_set(&dhd->lb_rxp_flow_ctrl, FALSE);
6586 #ifdef DHD_LB_STATS
6587 		dhd->lb_rxp_strt_thr_hitcnt++;
6588 #endif /* DHD_LB_STATS */
6589 		DHD_INFO(("lb_rxp_process_qlen %d lb_rxp_strt_thr %d\n",
6590 			dhd_lb_rxp_process_qlen(dhd), dhd->lb_rxp_strt_thr));
6591 	}
6592 
6593 	return atomic_read(&dhd->lb_rxp_flow_ctrl);
6594 }
6595 #endif /* DHD_LB_RXP */
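/*
 * Example of the hysteresis implemented by dhd_prot_lb_rxp_flow_ctrl() above
 * (threshold values are illustrative only): with lb_rxp_stop_thr = 300 and
 * lb_rxp_strt_thr = 200, rx completion processing is paused once the
 * load-balancing queue holds 300 or more packets and resumes only after the
 * network stack has drained it back down to 200, avoiding rapid on/off
 * toggling around a single threshold.
 */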
6596 
6597 /** called when DHD needs to check for 'receive complete' messages from the dongle */
6598 bool
6599 BCMFASTPATH(dhd_prot_process_msgbuf_rxcpl)(dhd_pub_t *dhd, uint bound, int ringtype)
6600 {
6601 	bool more = FALSE;
6602 	uint n = 0;
6603 	dhd_prot_t *prot = dhd->prot;
6604 	msgbuf_ring_t *ring;
6605 	uint16 item_len;
6606 	host_rxbuf_cmpl_t *msg = NULL;
6607 	uint8 *msg_addr;
6608 	uint32 msg_len;
6609 	uint16 pkt_cnt, pkt_cnt_newidx;
6610 	unsigned long flags;
6611 	dmaaddr_t pa;
6612 	uint32 len;
6613 	void *dmah;
6614 	void *secdma;
6615 	int ifidx = 0, if_newidx = 0;
6616 	void *pkt, *pktqhead = NULL, *prevpkt = NULL, *pkt_newidx, *nextpkt;
6617 	uint32 pktid;
6618 	int i;
6619 	uint8 sync;
6620 
6621 #ifdef DHD_LB_RXP
6622 	/* must be the first check in this function */
6623 	if (dhd_prot_lb_rxp_flow_ctrl(dhd)) {
6624 		/* DHD is holding a lot of RX packets.
6625 		 * Just give the network stack a chance to consume RX packets.
6626 		 */
6627 		return FALSE;
6628 	}
6629 #endif /* DHD_LB_RXP */
6630 #ifdef DHD_PCIE_RUNTIMEPM
6631 	/* Set rx_pending_due_to_rpm if device is not in resume state */
6632 	if (dhdpcie_runtime_bus_wake(dhd, FALSE, dhd_prot_process_msgbuf_rxcpl)) {
6633 		dhd->rx_pending_due_to_rpm = TRUE;
6634 		return more;
6635 	}
6636 	dhd->rx_pending_due_to_rpm = FALSE;
6637 #endif /* DHD_PCIE_RUNTIMEPM */
6638 
6639 #ifdef DHD_HP2P
6640 	if (ringtype == DHD_HP2P_RING && prot->d2hring_hp2p_rxcpl)
6641 		ring = prot->d2hring_hp2p_rxcpl;
6642 	else
6643 #endif /* DHD_HP2P */
6644 		ring = &prot->d2hring_rx_cpln;
6645 	item_len = ring->item_len;
6646 	while (1) {
6647 		if (dhd_is_device_removed(dhd))
6648 			break;
6649 
6650 		if (dhd_query_bus_erros(dhd))
6651 			break;
6652 
6653 		if (dhd->hang_was_sent)
6654 			break;
6655 
6656 		if (dhd->smmu_fault_occurred) {
6657 			break;
6658 		}
6659 
6660 		pkt_cnt = 0;
6661 		pktqhead = pkt_newidx = NULL;
6662 		pkt_cnt_newidx = 0;
6663 
6664 		DHD_RING_LOCK(ring->ring_lock, flags);
6665 
6666 		/* Get the address of the next message to be read from ring */
6667 		msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
6668 		if (msg_addr == NULL) {
6669 			DHD_RING_UNLOCK(ring->ring_lock, flags);
6670 			break;
6671 		}
6672 
6673 		while (msg_len > 0) {
6674 			msg = (host_rxbuf_cmpl_t *)msg_addr;
6675 
6676 			/* Wait until DMA completes, then fetch msg_type */
6677 			sync = prot->d2h_sync_cb(dhd, ring, &msg->cmn_hdr, item_len);
6678 			/*
6679 			 * Update the curr_rd to the current index in the ring, from where
6680 			 * the work item is fetched. This way if the fetched work item
6681 			 * fails in LIVELOCK, we can print the exact read index in the ring
6682 			 * that shows up the corrupted work item.
6683 			 */
6684 			if ((ring->curr_rd + 1) >= ring->max_items) {
6685 				ring->curr_rd = 0;
6686 			} else {
6687 				ring->curr_rd += 1;
6688 			}
6689 
6690 			if (!sync) {
6691 				msg_len -= item_len;
6692 				msg_addr += item_len;
6693 				continue;
6694 			}
6695 
6696 			pktid = ltoh32(msg->cmn_hdr.request_id);
6697 
6698 #ifdef DHD_PKTID_AUDIT_RING
6699 			DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_rx_map, pktid,
6700 				DHD_DUPLICATE_FREE, msg, D2HRING_RXCMPLT_ITEMSIZE);
6701 #endif /* DHD_PKTID_AUDIT_RING */
6702 
6703 			pkt = DHD_PKTID_TO_NATIVE(dhd, prot->pktid_rx_map, pktid, pa,
6704 			        len, dmah, secdma, PKTTYPE_DATA_RX);
6705 			/* Sanity check of shinfo nrfrags */
6706 			if (!pkt || (dhd_check_shinfo_nrfrags(dhd, pkt, &pa, pktid) != BCME_OK)) {
6707 				msg_len -= item_len;
6708 				msg_addr += item_len;
6709 				continue;
6710 			}
6711 			dhd->prot->tot_rxcpl++;
6712 
6713 			DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
6714 
6715 #ifdef DMAMAP_STATS
6716 			dhd->dma_stats.rxdata--;
6717 			dhd->dma_stats.rxdata_sz -= len;
6718 #endif /* DMAMAP_STATS */
6719 #ifdef DHD_HMAPTEST
6720 			if ((dhd->prot->hmaptest_rx_active == HMAPTEST_D11_RX_POSTED) &&
6721 				(pktid == dhd->prot->hmaptest_rx_pktid)) {
6722 
6723 				uchar *ptr;
6724 				ptr = PKTDATA(dhd->osh, pkt) - (prot->rx_metadata_offset);
6725 				DMA_UNMAP(dhd->osh, dhd->prot->hmap_rx_buf_pa,
6726 					(uint)dhd->prot->hmap_rx_buf_len, DMA_RX, 0, dmah);
6727 				DHD_ERROR(("hmaptest: d11write rxcpl rcvd sc rxbuf pktid=0x%08x\n",
6728 					pktid));
6729 				DHD_ERROR(("hmaptest: d11write rxcpl r0_st=0x%08x r1_stat=0x%08x\n",
6730 					msg->rx_status_0, msg->rx_status_1));
6731 				DHD_ERROR(("hmaptest: d11write rxcpl rxbuf va=0x%p pa=0x%08x\n",
6732 					dhd->prot->hmap_rx_buf_va,
6733 					(uint32)PHYSADDRLO(dhd->prot->hmap_rx_buf_pa)));
6734 				DHD_ERROR(("hmaptest: d11write rxcpl pktdata va=0x%p pa=0x%08x\n",
6735 					PKTDATA(dhd->osh, pkt), (uint32)PHYSADDRLO(pa)));
6736 				memcpy(ptr, dhd->prot->hmap_rx_buf_va, dhd->prot->hmap_rx_buf_len);
6737 				dhd->prot->hmaptest_rx_active = HMAPTEST_D11_RX_INACTIVE;
6738 				dhd->prot->hmap_rx_buf_va = NULL;
6739 				dhd->prot->hmap_rx_buf_len = 0;
6740 				PHYSADDRHISET(dhd->prot->hmap_rx_buf_pa, 0);
6741 				PHYSADDRLOSET(dhd->prot->hmap_rx_buf_pa, 0);
6742 				prot->hmaptest.in_progress = FALSE;
6743 			}
6744 #endif /* DHD_HMAPTEST */
6745 			DHD_MSGBUF_INFO(("id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, "
6746 				"pktdata %p, metalen %d\n",
6747 				ltoh32(msg->cmn_hdr.request_id),
6748 				ltoh16(msg->data_offset),
6749 				ltoh16(msg->data_len), msg->cmn_hdr.if_id,
6750 				msg->cmn_hdr.flags, PKTDATA(dhd->osh, pkt),
6751 				ltoh16(msg->metadata_len)));
6752 
6753 			pkt_cnt++;
6754 			msg_len -= item_len;
6755 			msg_addr += item_len;
6756 
6757 #if !defined(BCM_ROUTER_DHD)
6758 #if DHD_DBG_SHOW_METADATA
6759 			if (prot->metadata_dbg && prot->rx_metadata_offset &&
6760 			        msg->metadata_len) {
6761 				uchar *ptr;
6762 				ptr = PKTDATA(dhd->osh, pkt) - (prot->rx_metadata_offset);
6763 				/* header followed by data */
6764 				bcm_print_bytes("rxmetadata", ptr, msg->metadata_len);
6765 				dhd_prot_print_metadata(dhd, ptr, msg->metadata_len);
6766 			}
6767 #endif /* DHD_DBG_SHOW_METADATA */
6768 #endif /* !BCM_ROUTER_DHD */
6769 
6770 			/* data_offset from buf start */
6771 			if (ltoh16(msg->data_offset)) {
6772 				/* data offset given from dongle after split rx */
6773 				PKTPULL(dhd->osh, pkt, ltoh16(msg->data_offset));
6774 			}
6775 			else if (prot->rx_dataoffset) {
6776 				/* DMA RX offset updated through shared area */
6777 				PKTPULL(dhd->osh, pkt, prot->rx_dataoffset);
6778 			}
6779 			/* Actual length of the packet */
6780 			PKTSETLEN(dhd->osh, pkt, ltoh16(msg->data_len));
6781 #ifdef DHD_PKTTS
6782 			if (dhd_get_pktts_enab(dhd) == TRUE) {
6783 				uint fwr1 = 0, fwr2 = 0;
6784 
6785 				/* firmware mark rx_pktts.tref with 0xFFFFFFFF for errors */
6786 				if (ltoh32(msg->rx_pktts.tref) != 0xFFFFFFFF) {
6787 					fwr1 = (uint)htonl(ltoh32(msg->rx_pktts.tref));
6788 					fwr2 = (uint)htonl(ltoh32(msg->rx_pktts.tref) +
6789 						ltoh16(msg->rx_pktts.d_t2));
6790 
6791 					/* check for overflow */
6792 					if (ntohl(fwr2) > ntohl(fwr1)) {
6793 						/* send rx timestamp to netlnik socket */
6794 						dhd_msgbuf_send_msg_rx_ts(dhd, pkt, fwr1, fwr2);
6795 					}
6796 				}
6797 			}
6798 #endif /* DHD_PKTTS */
6799 
6800 #if defined(WL_MONITOR)
6801 			if (dhd_monitor_enabled(dhd, ifidx)) {
6802 				if (msg->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11) {
6803 					dhd_rx_mon_pkt(dhd, msg, pkt, ifidx);
6804 					continue;
6805 				} else {
6806 					DHD_ERROR(("Received non-802.11 packet "
6807 						"when monitor mode is enabled\n"));
6808 				}
6809 			}
6810 #endif /* WL_MONITOR */
6811 
6812 			if (!pktqhead) {
6813 				pktqhead = prevpkt = pkt;
6814 				ifidx = msg->cmn_hdr.if_id;
6815 			} else {
6816 				if (ifidx != msg->cmn_hdr.if_id) {
6817 					pkt_newidx = pkt;
6818 					if_newidx = msg->cmn_hdr.if_id;
6819 					pkt_cnt--;
6820 					pkt_cnt_newidx = 1;
6821 					break;
6822 				} else {
6823 					PKTSETNEXT(dhd->osh, prevpkt, pkt);
6824 					prevpkt = pkt;
6825 				}
6826 			}
6827 
6828 #ifdef DHD_HP2P
6829 			if (dhd->hp2p_capable && ring == prot->d2hring_hp2p_rxcpl) {
6830 #ifdef DHD_HP2P_DEBUG
6831 				bcm_print_bytes("Rxcpl", (uchar *)msg,  sizeof(host_rxbuf_cmpl_t));
6832 #endif /* DHD_HP2P_DEBUG */
6833 				dhd_update_hp2p_rxstats(dhd, msg);
6834 			}
6835 #endif /* DHD_HP2P */
6836 
6837 #ifdef DHD_TIMESYNC
6838 			if (dhd->prot->rx_ts_log_enabled) {
6839 				dhd_pkt_parse_t parse;
6840 				ts_timestamp_t *ts = (ts_timestamp_t *)&msg->ts;
6841 
6842 				memset(&parse, 0, sizeof(dhd_pkt_parse_t));
6843 				dhd_parse_proto(PKTDATA(dhd->osh, pkt), &parse);
6844 
6845 				if (parse.proto == IP_PROT_ICMP)
6846 					dhd_timesync_log_rx_timestamp(dhd->ts, ifidx,
6847 							ts->low, ts->high, &parse);
6848 			}
6849 #endif /* DHD_TIMESYNC */
6850 
6851 #ifdef DHD_LBUF_AUDIT
6852 			PKTAUDIT(dhd->osh, pkt);
6853 #endif
6854 		}
6855 
6856 		/* roll back read pointer for unprocessed message */
6857 		if (msg_len > 0) {
6858 			if (ring->rd < msg_len / item_len)
6859 				ring->rd = ring->max_items - msg_len / item_len;
6860 			else
6861 				ring->rd -= msg_len / item_len;
6862 		}
6863 
6864 		/* Update read pointer */
6865 		dhd_prot_upd_read_idx(dhd, ring);
6866 
6867 		DHD_RING_UNLOCK(ring->ring_lock, flags);
6868 
6869 		pkt = pktqhead;
6870 		for (i = 0; pkt && i < pkt_cnt; i++, pkt = nextpkt) {
6871 			nextpkt = PKTNEXT(dhd->osh, pkt);
6872 			PKTSETNEXT(dhd->osh, pkt, NULL);
6873 #ifdef DHD_RX_CHAINING
6874 			dhd_rxchain_frame(dhd, pkt, ifidx);
6875 #else
6876 			dhd_prot_rx_frame(dhd, pkt, ifidx, 1);
6877 #endif /* DHD_RX_CHAINING */
6878 		}
6879 
6880 		if (pkt_newidx) {
6881 #ifdef DHD_RX_CHAINING
6882 			dhd_rxchain_frame(dhd, pkt_newidx, if_newidx);
6883 #else
6884 			dhd_prot_rx_frame(dhd, pkt_newidx, if_newidx, 1);
6885 #endif /* DHD_RX_CHAINING */
6886 		}
6887 
6888 		pkt_cnt += pkt_cnt_newidx;
6889 
6890 		/* Post another set of rxbufs to the device */
6891 		dhd_prot_return_rxbuf(dhd, ring, 0, pkt_cnt);
6892 
6893 #ifdef DHD_RX_CHAINING
6894 		dhd_rxchain_commit(dhd);
6895 #endif
6896 
6897 		/* After batch processing, check RX bound */
6898 		n += pkt_cnt;
6899 		if (n >= bound) {
6900 			more = TRUE;
6901 			break;
6902 		}
6903 	}
6904 
6905 	/* Call lb_dispatch only if packets are queued */
6906 	if (n &&
6907 #ifdef WL_MONITOR
6908 	!(dhd_monitor_enabled(dhd, ifidx)) &&
6909 #endif /* WL_MONITOR */
6910 	TRUE) {
6911 		DHD_LB_DISPATCH_RX_PROCESS(dhd);
6912 	}
6913 
6914 	return more;
6915 
6916 }
6917 
6918 /**
6919  * Hands transmit packets (with a caller provided flow_id) over to dongle territory (the flow ring)
6920  */
6921 void
6922 dhd_prot_update_txflowring(dhd_pub_t *dhd, uint16 flowid, void *msgring)
6923 {
6924 	msgbuf_ring_t *ring = (msgbuf_ring_t *)msgring;
6925 
6926 	if (ring == NULL) {
6927 		DHD_ERROR(("%s: NULL txflowring. exiting...\n",  __FUNCTION__));
6928 		return;
6929 	}
6930 	/* Update read pointer */
6931 	if (dhd->dma_d2h_ring_upd_support) {
6932 		ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
6933 	}
6934 
6935 	DHD_TRACE(("ringid %d flowid %d write %d read %d \n\n",
6936 		ring->idx, flowid, ring->wr, ring->rd));
6937 
6938 	/* Need more logic here, but for now use it directly */
6939 	dhd_bus_schedule_queue(dhd->bus, flowid, TRUE); /* from queue to flowring */
6940 }
6941 
6942 /** called when DHD needs to check for 'transmit complete' messages from the dongle */
6943 bool
6944 BCMFASTPATH(dhd_prot_process_msgbuf_txcpl)(dhd_pub_t *dhd, uint bound, int ringtype)
6945 {
6946 	bool more = TRUE;
6947 	uint n = 0;
6948 	msgbuf_ring_t *ring;
6949 	unsigned long flags;
6950 
6951 #ifdef DHD_HP2P
6952 	if (ringtype == DHD_HP2P_RING && dhd->prot->d2hring_hp2p_txcpl)
6953 		ring = dhd->prot->d2hring_hp2p_txcpl;
6954 	else
6955 #endif /* DHD_HP2P */
6956 		ring = &dhd->prot->d2hring_tx_cpln;
6957 
6958 	/* Process all the messages - DTOH direction */
6959 	while (!dhd_is_device_removed(dhd)) {
6960 		uint8 *msg_addr;
6961 		uint32 msg_len;
6962 
6963 		if (dhd_query_bus_erros(dhd)) {
6964 			more = FALSE;
6965 			break;
6966 		}
6967 
6968 		if (dhd->hang_was_sent) {
6969 			more = FALSE;
6970 			break;
6971 		}
6972 
6973 		if (dhd->smmu_fault_occurred) {
6974 			more = FALSE;
6975 			break;
6976 		}
6977 
6978 		DHD_RING_LOCK(ring->ring_lock, flags);
6979 		/* Get the address of the next message to be read from ring */
6980 		msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
6981 		DHD_RING_UNLOCK(ring->ring_lock, flags);
6982 
6983 		if (msg_addr == NULL) {
6984 			more = FALSE;
6985 			break;
6986 		}
6987 
6988 		/* Prefetch data to populate the cache */
6989 		OSL_PREFETCH(msg_addr);
6990 
6991 		if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
6992 			DHD_ERROR(("%s: process %s msg addr %p len %d\n",
6993 				__FUNCTION__, ring->name, msg_addr, msg_len));
6994 		}
6995 
6996 		/* Write to dngl rd ptr */
6997 		dhd_prot_upd_read_idx(dhd, ring);
6998 
6999 		/* After batch processing, check bound */
7000 		n += msg_len / ring->item_len;
7001 		if (n >= bound) {
7002 			break;
7003 		}
7004 	}
7005 
7006 	if (n) {
7007 		/* In the IDMA and HWA cases, the doorbell is sent along with the read index update.
7008 		 * In the DMA-indices case, ring the doorbell once n items have been read to sync with the dongle.
7009 		 */
7010 		if (dhd->dma_h2d_ring_upd_support && !IDMA_ACTIVE(dhd)) {
7011 			dhd_prot_ring_doorbell(dhd, DHD_RDPTR_UPDATE_H2D_DB_MAGIC(ring));
7012 			dhd->prot->txcpl_db_cnt++;
7013 		}
7014 	}
7015 	return more;
7016 }
7017 
7018 int
7019 BCMFASTPATH(dhd_prot_process_trapbuf)(dhd_pub_t *dhd)
7020 {
7021 	uint32 data;
7022 	dhd_dma_buf_t *trap_addr = &dhd->prot->fw_trap_buf;
7023 
7024 	/* Interrupts can come in before this struct
7025 	 *  has been initialized.
7026 	 */
7027 	if (trap_addr->va == NULL) {
7028 		DHD_ERROR(("%s: trap_addr->va is NULL\n", __FUNCTION__));
7029 		return 0;
7030 	}
7031 
7032 	OSL_CACHE_INV((void *)trap_addr->va, sizeof(uint32));
7033 	data = *(uint32 *)(trap_addr->va);
7034 
7035 	if (data & D2H_DEV_FWHALT) {
7036 		if (dhd->db7_trap.fw_db7w_trap_inprogress) {
7037 			DHD_ERROR(("DB7 FW responded 0x%04x\n", data));
7038 		} else {
7039 			DHD_ERROR(("Firmware trapped and trap_data is 0x%04x\n", data));
7040 		}
7041 
7042 		if (data & D2H_DEV_EXT_TRAP_DATA)
7043 		{
7044 			if (dhd->extended_trap_data) {
7045 				OSL_CACHE_INV((void *)trap_addr->va,
7046 				       BCMPCIE_EXT_TRAP_DATA_MAXLEN);
7047 				memcpy(dhd->extended_trap_data, (uint32 *)trap_addr->va,
7048 				       BCMPCIE_EXT_TRAP_DATA_MAXLEN);
7049 			}
7050 			if (dhd->db7_trap.fw_db7w_trap_inprogress == FALSE) {
7051 				DHD_ERROR(("Extended trap data available\n"));
7052 			}
7053 		}
7054 #ifdef BT_OVER_PCIE
7055 		if (data & D2H_DEV_TRAP_DUE_TO_BT) {
7056 			DHD_ERROR(("WLAN Firmware trapped due to BT\n"));
7057 			dhd->dongle_trap_due_to_bt = TRUE;
7058 		}
7059 #endif /* BT_OVER_PCIE */
7060 		return data;
7061 	}
7062 	return 0;
7063 }
7064 
7065 /** called when DHD needs to check for 'ioctl complete' messages from the dongle */
7066 int
7067 BCMFASTPATH(dhd_prot_process_ctrlbuf)(dhd_pub_t *dhd)
7068 {
7069 	dhd_prot_t *prot = dhd->prot;
7070 	msgbuf_ring_t *ring = &prot->d2hring_ctrl_cpln;
7071 	unsigned long flags;
7072 
7073 	/* Process all the messages - DTOH direction */
7074 	while (!dhd_is_device_removed(dhd)) {
7075 		uint8 *msg_addr;
7076 		uint32 msg_len;
7077 
7078 		if (dhd_query_bus_erros(dhd)) {
7079 			break;
7080 		}
7081 
7082 		if (dhd->hang_was_sent) {
7083 			break;
7084 		}
7085 
7086 		if (dhd->smmu_fault_occurred) {
7087 			break;
7088 		}
7089 
7090 		DHD_RING_LOCK(ring->ring_lock, flags);
7091 		/* Get the address of the next message to be read from ring */
7092 		msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
7093 		DHD_RING_UNLOCK(ring->ring_lock, flags);
7094 
7095 		if (msg_addr == NULL) {
7096 			break;
7097 		}
7098 
7099 		/* Prefetch data to populate the cache */
7100 		OSL_PREFETCH(msg_addr);
7101 		if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
7102 			DHD_ERROR(("%s: process %s msg addr %p len %d\n",
7103 				__FUNCTION__, ring->name, msg_addr, msg_len));
7104 		}
7105 
7106 		/* Write to dngl rd ptr */
7107 		dhd_prot_upd_read_idx(dhd, ring);
7108 	}
7109 
7110 	return 0;
7111 }
7112 
7113 /**
7114  * Consume messages out of the D2H ring. Ensure that the message's DMA to host
7115  * memory has completed, before invoking the message handler via a table lookup
7116  * of the cmn_msg_hdr::msg_type.
7117  */
7118 static int
7119 BCMFASTPATH(dhd_prot_process_msgtype)(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len)
7120 {
7121 	uint32 buf_len = len;
7122 	uint16 item_len;
7123 	uint8 msg_type;
7124 	cmn_msg_hdr_t *msg = NULL;
7125 	int ret = BCME_OK;
7126 
7127 	ASSERT(ring);
7128 	item_len = ring->item_len;
7129 	if (item_len == 0) {
7130 		DHD_ERROR(("%s: ringidx %d, item_len %d buf_len %d \n",
7131 			__FUNCTION__, ring->idx, item_len, buf_len));
7132 		return BCME_ERROR;
7133 	}
7134 
7135 	while (buf_len > 0) {
7136 		if (dhd->hang_was_sent) {
7137 			ret = BCME_ERROR;
7138 			goto done;
7139 		}
7140 
7141 		if (dhd->smmu_fault_occurred) {
7142 			ret = BCME_ERROR;
7143 			goto done;
7144 		}
7145 
7146 		msg = (cmn_msg_hdr_t *)buf;
7147 
7148 		/* Wait until DMA completes, then fetch msg_type */
7149 		msg_type = dhd->prot->d2h_sync_cb(dhd, ring, msg, item_len);
7150 
7151 		/*
7152 		 * Update the curr_rd to the current index in the ring, from where
7153 		 * the work item is fetched. This way if the fetched work item
7154 		 * fails in LIVELOCK, we can print the exact read index in the ring
7155 		 * that shows up the corrupted work item.
7156 		 */
7157 		if ((ring->curr_rd + 1) >= ring->max_items) {
7158 			ring->curr_rd = 0;
7159 		} else {
7160 			ring->curr_rd += 1;
7161 		}
7162 
7163 		/* Prefetch data to populate the cache */
7164 		OSL_PREFETCH(buf + item_len);
7165 
7166 		DHD_MSGBUF_INFO(("msg_type %d item_len %d buf_len %d\n",
7167 			msg_type, item_len, buf_len));
7168 
7169 		if (msg_type == MSG_TYPE_LOOPBACK) {
7170 			bcm_print_bytes("LPBK RESP: ", (uint8 *)msg, item_len);
7171 			DHD_ERROR((" MSG_TYPE_LOOPBACK, len %d\n", item_len));
7172 		}
7173 
7174 		ASSERT(msg_type < DHD_PROT_FUNCS);
7175 		if (msg_type >= DHD_PROT_FUNCS) {
7176 			DHD_ERROR(("%s: msg_type %d, item_len %d buf_len %d\n",
7177 				__FUNCTION__, msg_type, item_len, buf_len));
7178 			ret = BCME_ERROR;
7179 			goto done;
7180 		}
7181 
7182 #if !defined(BCM_ROUTER_DHD)
7183 		if (msg_type == MSG_TYPE_INFO_BUF_CMPLT) {
7184 			if (ring == dhd->prot->d2hring_info_cpln) {
7185 				if (!dhd->prot->infobufpost) {
7186 					DHD_ERROR(("infobufs posted are zero,"
7187 						   " but there is a completion\n"));
7188 					goto done;
7189 				}
7190 				dhd->prot->infobufpost--;
7191 				dhd_prot_infobufpost(dhd, dhd->prot->h2dring_info_subn);
7192 				dhd_prot_process_infobuf_complete(dhd, buf);
7193 			}
7194 #ifdef BTLOG
7195 			else if (ring == dhd->prot->d2hring_btlog_cpln) {
7196 				info_buf_resp_t *resp = (info_buf_resp_t *)buf;
7197 
7198 				if (!dhd->prot->btlogbufpost) {
7199 					DHD_ERROR(("btlogbufs posted are zero,"
7200 						   " but there is a completion\n"));
7201 					goto done;
7202 				}
7203 
7204 				dhd->prot->btlogbufpost--;
7205 				if (resp->compl_hdr.status != BCMPCIE_PKT_FLUSH) {
7206 					dhd_prot_infobufpost(dhd, dhd->prot->h2dring_btlog_subn);
7207 				}
7208 				dhd_prot_process_btlog_complete(dhd, buf);
7209 			}
7210 #endif	/* BTLOG */
7211 		} else
7212 #endif	/* !defined(BCM_ROUTER_DHD) */
7213 		if (table_lookup[msg_type]) {
7214 			table_lookup[msg_type](dhd, buf);
7215 		}
7216 
7217 		if (buf_len < item_len) {
7218 			ret = BCME_ERROR;
7219 			goto done;
7220 		}
7221 		buf_len = buf_len - item_len;
7222 		buf = buf + item_len;
7223 	}
7224 
7225 done:
7226 
7227 #ifdef DHD_RX_CHAINING
7228 	dhd_rxchain_commit(dhd);
7229 #endif
7230 
7231 	return ret;
7232 } /* dhd_prot_process_msgtype */
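/*
 * Handlers are dispatched through the table_lookup[] array indexed by
 * msg_type (the table is populated elsewhere in this file); for example,
 * MSG_TYPE_RING_STATUS work items are handled by dhd_prot_ringstatus_process()
 * and MSG_TYPE_IOCTL_CMPLT work items by dhd_prot_ioctcmplt_process(), both
 * defined below.
 */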
7233 
7234 static void
7235 dhd_prot_noop(dhd_pub_t *dhd, void *msg)
7236 {
7237 	return;
7238 }
7239 
7240 /** called on MSG_TYPE_RING_STATUS message received from dongle */
7241 static void
7242 dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg)
7243 {
7244 	pcie_ring_status_t *ring_status = (pcie_ring_status_t *) msg;
7245 	uint32 request_id = ltoh32(ring_status->cmn_hdr.request_id);
7246 	uint16 status = ltoh16(ring_status->compl_hdr.status);
7247 	uint16 ring_id = ltoh16(ring_status->compl_hdr.flow_ring_id);
7248 
7249 	DHD_ERROR(("ring status: request_id %d, status 0x%04x, flow ring %d, write_idx %d \n",
7250 		request_id, status, ring_id, ltoh16(ring_status->write_idx)));
7251 
7252 	if (ltoh16(ring_status->compl_hdr.ring_id) != BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT)
7253 		return;
7254 	if (status == BCMPCIE_BAD_PHASE) {
7255 		/* bad phase reported by the dongle */
7256 		/* XXX: if the request is ioctl request finish the ioctl, rather than timing out */
7257 		DHD_ERROR(("Bad phase\n"));
7258 	}
7259 	if (status != BCMPCIE_BADOPTION)
7260 		return;
7261 
7262 	if (request_id == DHD_H2D_DBGRING_REQ_PKTID) {
7263 		/* XXX: see if the debug ring create is pending */
7264 		if (dhd->prot->h2dring_info_subn != NULL) {
7265 			if (dhd->prot->h2dring_info_subn->create_pending == TRUE) {
7266 				DHD_ERROR(("H2D ring create failed for info ring\n"));
7267 				dhd->prot->h2dring_info_subn->create_pending = FALSE;
7268 			}
7269 			else
7270 				DHD_ERROR(("ring create ID for a ring, create not pending\n"));
7271 		} else {
7272 			DHD_ERROR(("%s info submit ring doesn't exist\n", __FUNCTION__));
7273 		}
7274 	}
7275 	else if (request_id == DHD_D2H_DBGRING_REQ_PKTID) {
7276 		/* XXX: see if the debug ring create is pending */
7277 		if (dhd->prot->d2hring_info_cpln != NULL) {
7278 			if (dhd->prot->d2hring_info_cpln->create_pending == TRUE) {
7279 				DHD_ERROR(("D2H ring create failed for info ring\n"));
7280 				dhd->prot->d2hring_info_cpln->create_pending = FALSE;
7281 			}
7282 			else
7283 				DHD_ERROR(("ring create ID for info ring, create not pending\n"));
7284 		} else {
7285 			DHD_ERROR(("%s info cpl ring doesn't exist\n", __FUNCTION__));
7286 		}
7287 	}
7288 #ifdef BTLOG
7289 	else if (request_id == DHD_H2D_BTLOGRING_REQ_PKTID) {
7290 		/* XXX: see if the debug ring create is pending */
7291 		if (dhd->prot->h2dring_btlog_subn != NULL) {
7292 			if (dhd->prot->h2dring_btlog_subn->create_pending == TRUE) {
7293 				DHD_ERROR(("H2D ring create failed for btlog ring\n"));
7294 				dhd->prot->h2dring_btlog_subn->create_pending = FALSE;
7295 			}
7296 			else
7297 				DHD_ERROR(("ring create ID for a ring, create not pending\n"));
7298 		} else {
7299 			DHD_ERROR(("%s btlog submit ring doesn't exist\n", __FUNCTION__));
7300 		}
7301 	}
7302 	else if (request_id == DHD_D2H_BTLOGRING_REQ_PKTID) {
7303 		/* XXX: see if the debug ring create is pending */
7304 		if (dhd->prot->d2hring_btlog_cpln != NULL) {
7305 			if (dhd->prot->d2hring_btlog_cpln->create_pending == TRUE) {
7306 				DHD_ERROR(("D2H ring create failed for btlog ring\n"));
7307 				dhd->prot->d2hring_btlog_cpln->create_pending = FALSE;
7308 			}
7309 			else
7310 				DHD_ERROR(("ring create ID for btlog ring, create not pending\n"));
7311 		} else {
7312 			DHD_ERROR(("%s btlog cpl ring doesn't exist\n", __FUNCTION__));
7313 		}
7314 	}
7315 #endif	/* BTLOG */
7316 #ifdef DHD_HP2P
7317 	else if (request_id == DHD_D2H_HPPRING_TXREQ_PKTID) {
7318 		/* XXX: see if the HPP txcmpl ring create is pending */
7319 		if (dhd->prot->d2hring_hp2p_txcpl != NULL) {
7320 			if (dhd->prot->d2hring_hp2p_txcpl->create_pending == TRUE) {
7321 				DHD_ERROR(("H2D ring create failed for hp2p ring\n"));
7322 				dhd->prot->d2hring_hp2p_txcpl->create_pending = FALSE;
7323 			}
7324 			else
7325 				DHD_ERROR(("ring create ID for a ring, create not pending\n"));
7326 		} else {
7327 			DHD_ERROR(("%s hp2p txcmpl ring doesn't exist\n", __FUNCTION__));
7328 		}
7329 	}
7330 	else if (request_id == DHD_D2H_HPPRING_RXREQ_PKTID) {
7331 		/* XXX: see if the hp2p rxcmpl ring create is pending */
7332 		if (dhd->prot->d2hring_hp2p_rxcpl != NULL) {
7333 			if (dhd->prot->d2hring_hp2p_rxcpl->create_pending == TRUE) {
7334 				DHD_ERROR(("D2H ring create failed for hp2p rxcmpl ring\n"));
7335 				dhd->prot->d2hring_hp2p_rxcpl->create_pending = FALSE;
7336 			}
7337 			else
7338 				DHD_ERROR(("ring create ID for hp2p rxcmpl ring, not pending\n"));
7339 		} else {
7340 			DHD_ERROR(("%s hp2p rxcpl ring doesn't exist\n", __FUNCTION__));
7341 		}
7342 	}
7343 #endif /* DHD_HP2P */
7344 	else {
7345 		DHD_ERROR(("don't know how to pair with the original request\n"));
7346 	}
7347 	/* How do we track this to pair it with ??? */
7348 	return;
7349 }
7350 
7351 /** called on MSG_TYPE_GEN_STATUS ('general status') message received from dongle */
7352 static void
7353 dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg)
7354 {
7355 	pcie_gen_status_t *gen_status = (pcie_gen_status_t *)msg;
7356 	DHD_ERROR(("ERROR: gen status: request_id %d, STATUS 0x%04x, flow ring %d \n",
7357 		gen_status->cmn_hdr.request_id, gen_status->compl_hdr.status,
7358 		gen_status->compl_hdr.flow_ring_id));
7359 
7360 	/* How do we track this to pair it with ??? */
7361 	return;
7362 }
7363 
7364 /**
7365  * Called on MSG_TYPE_IOCTLPTR_REQ_ACK ('ioctl ack') message received from dongle, meaning that the
7366  * dongle received the ioctl message in dongle memory.
7367  */
7368 static void
7369 dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg)
7370 {
7371 	ioctl_req_ack_msg_t *ioct_ack = (ioctl_req_ack_msg_t *)msg;
7372 	unsigned long flags;
7373 #if defined(DHD_PKTID_AUDIT_RING) && !defined(BCM_ROUTER_DHD)
7374 	uint32 pktid = ltoh32(ioct_ack->cmn_hdr.request_id);
7375 #endif /* DHD_PKTID_AUDIT_RING && !BCM_ROUTER_DHD */
7376 
7377 #if defined(DHD_PKTID_AUDIT_RING) && !defined(BCM_ROUTER_DHD)
7378 	/* Skip audit for DHD_IOCTL_REQ_PKTID = 0xFFFE */
7379 	if (pktid != DHD_IOCTL_REQ_PKTID) {
7380 #ifndef IOCTLRESP_USE_CONSTMEM
7381 		DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid,
7382 			DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
7383 #else
7384 		DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_map_handle_ioctl, pktid,
7385 			DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
7386 #endif /* !IOCTLRESP_USE_CONSTMEM */
7387 	}
7388 #endif /* DHD_PKTID_AUDIT_RING && !BCM_ROUTER_DHD */
7389 
7390 	dhd->prot->ioctl_ack_time = OSL_LOCALTIME_NS();
7391 
7392 	DHD_GENERAL_LOCK(dhd, flags);
7393 	if ((dhd->prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) &&
7394 		(dhd->prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
7395 		dhd->prot->ioctl_state &= ~MSGBUF_IOCTL_ACK_PENDING;
7396 	} else {
7397 		DHD_ERROR(("%s: received ioctl ACK with state %02x trans_id = %d\n",
7398 			__FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
7399 		prhex("dhd_prot_ioctack_process:",
7400 			(uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
7401 	}
7402 	DHD_GENERAL_UNLOCK(dhd, flags);
7403 
7404 	DHD_CTL(("ioctl req ack: request_id %d, status 0x%04x, flow ring %d \n",
7405 		ioct_ack->cmn_hdr.request_id, ioct_ack->compl_hdr.status,
7406 		ioct_ack->compl_hdr.flow_ring_id));
7407 	if (ioct_ack->compl_hdr.status != 0)  {
7408 		DHD_ERROR(("got an error status for the ioctl request...need to handle that\n"));
7409 		/* FIXME: should we fail the pending IOCTL completion wait process... */
7410 	}
7411 #ifdef REPORT_FATAL_TIMEOUTS
7412 	else {
7413 		dhd_stop_bus_timer(dhd);
7414 	}
7415 #endif /* REPORT_FATAL_TIMEOUTS */
7416 }
7417 
7418 /** called on MSG_TYPE_IOCTL_CMPLT message received from dongle */
7419 static void
7420 dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg)
7421 {
7422 	dhd_prot_t *prot = dhd->prot;
7423 	uint32 pkt_id, xt_id;
7424 	ioctl_comp_resp_msg_t *ioct_resp = (ioctl_comp_resp_msg_t *)msg;
7425 	void *pkt;
7426 	unsigned long flags;
7427 	dhd_dma_buf_t retbuf;
7428 #ifdef REPORT_FATAL_TIMEOUTS
7429 	uint16	dhd_xt_id;
7430 #endif
7431 
7432 	/* Check for ioctl timeout induce flag, which is set by firing
7433 	 * dhd iovar to induce IOCTL timeout. If flag is set,
7434 	 * return from here, which results in to IOCTL timeout.
7435 	 */
7436 	if (dhd->dhd_induce_error == DHD_INDUCE_IOCTL_TIMEOUT) {
7437 		DHD_ERROR(("%s: Inducing resumed on timeout\n", __FUNCTION__));
7438 		return;
7439 	}
7440 
7441 	memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
7442 
7443 	pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id);
7444 
7445 #if defined(DHD_PKTID_AUDIT_RING) && !defined(BCM_ROUTER_DHD)
7446 #ifndef IOCTLRESP_USE_CONSTMEM
7447 	DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_ctrl_map, pkt_id,
7448 		DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
7449 #else
7450 	DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_map_handle_ioctl, pkt_id,
7451 		DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
7452 #endif /* !IOCTLRESP_USE_CONSTMEM */
7453 #endif /* DHD_PKTID_AUDIT_RING && !BCM_ROUTER_DHD */
7454 
7455 	DHD_GENERAL_LOCK(dhd, flags);
7456 	if ((prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) ||
7457 		!(prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
7458 		DHD_ERROR(("%s: received ioctl response with state %02x trans_id = %d\n",
7459 			__FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
7460 		prhex("dhd_prot_ioctcmplt_process:",
7461 			(uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
7462 		DHD_GENERAL_UNLOCK(dhd, flags);
7463 		return;
7464 	}
7465 
7466 	dhd->prot->ioctl_cmplt_time = OSL_LOCALTIME_NS();
7467 
7468 	/* Clear Response pending bit */
7469 	prot->ioctl_state &= ~MSGBUF_IOCTL_RESP_PENDING;
7470 	DHD_GENERAL_UNLOCK(dhd, flags);
7471 
7472 #ifndef IOCTLRESP_USE_CONSTMEM
7473 	pkt = dhd_prot_packet_get(dhd, pkt_id, PKTTYPE_IOCTL_RX, TRUE);
7474 #else
7475 	dhd_prot_ioctl_ret_buffer_get(dhd, pkt_id, &retbuf);
7476 	pkt = retbuf.va;
7477 #endif /* !IOCTLRESP_USE_CONSTMEM */
7478 	if (!pkt) {
7479 		DHD_ERROR(("%s: received ioctl response with NULL pkt\n", __FUNCTION__));
7480 		prhex("dhd_prot_ioctcmplt_process:",
7481 			(uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
7482 		return;
7483 	}
7484 
7485 	prot->ioctl_resplen = ltoh16(ioct_resp->resp_len);
7486 	prot->ioctl_status = ltoh16(ioct_resp->compl_hdr.status);
7487 	xt_id = ltoh16(ioct_resp->trans_id);
7488 
7489 	if (xt_id != prot->ioctl_trans_id || prot->curr_ioctl_cmd != ioct_resp->cmd) {
7490 		DHD_ERROR(("%s: transaction id(%d %d) or cmd(%d %d) mismatch\n",
7491 			__FUNCTION__, xt_id, prot->ioctl_trans_id,
7492 			prot->curr_ioctl_cmd, ioct_resp->cmd));
7493 #ifdef REPORT_FATAL_TIMEOUTS
7494 		dhd_stop_cmd_timer(dhd);
7495 #endif /* REPORT_FATAL_TIMEOUTS */
7496 		dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_ERROR);
7497 		dhd_prot_debug_info_print(dhd);
7498 #ifdef DHD_FW_COREDUMP
7499 		if (dhd->memdump_enabled) {
7500 			/* collect core dump */
7501 			dhd->memdump_type = DUMP_TYPE_TRANS_ID_MISMATCH;
7502 			dhd_bus_mem_dump(dhd);
7503 		}
7504 #else
7505 		ASSERT(0);
7506 #endif /* DHD_FW_COREDUMP */
7507 		dhd_schedule_reset(dhd);
7508 		goto exit;
7509 	}
7510 #ifdef REPORT_FATAL_TIMEOUTS
7511 	dhd_xt_id = dhd_get_request_id(dhd);
7512 	if (xt_id == dhd_xt_id) {
7513 		dhd_stop_cmd_timer(dhd);
7514 	} else {
7515 		DHD_ERROR(("%s: Cmd timer not stopped received xt_id %d stored xt_id %d",
7516 			__FUNCTION__, xt_id, dhd_xt_id));
7517 	}
7518 #endif /* REPORT_FATAL_TIMEOUTS */
7519 	DHD_CTL(("IOCTL_COMPLETE: req_id %x transid %d status %x resplen %d\n",
7520 		pkt_id, xt_id, prot->ioctl_status, prot->ioctl_resplen));
7521 
7522 	if (prot->ioctl_resplen > 0) {
7523 #ifndef IOCTLRESP_USE_CONSTMEM
7524 		bcopy(PKTDATA(dhd->osh, pkt), prot->retbuf.va, prot->ioctl_resplen);
7525 #else
7526 		bcopy(pkt, prot->retbuf.va, prot->ioctl_resplen);
7527 #endif /* !IOCTLRESP_USE_CONSTMEM */
7528 	}
7529 
7530 	/* wake up any dhd_os_ioctl_resp_wait() */
7531 	dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_SUCCESS);
7532 
7533 exit:
7534 #ifndef IOCTLRESP_USE_CONSTMEM
7535 	dhd_prot_packet_free(dhd, pkt,
7536 		PKTTYPE_IOCTL_RX, FALSE);
7537 #else
7538 	free_ioctl_return_buffer(dhd, &retbuf);
7539 #endif /* !IOCTLRESP_USE_CONSTMEM */
7540 
7541 	/* Post another ioctl buf to the device */
7542 	if (prot->cur_ioctlresp_bufs_posted > 0) {
7543 		prot->cur_ioctlresp_bufs_posted--;
7544 	}
7545 
7546 	dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
7547 }
7548 
7549 int
7550 dhd_prot_check_tx_resource(dhd_pub_t *dhd)
7551 {
7552 	return dhd->prot->no_tx_resource;
7553 }
7554 
7555 #ifdef DHD_PKTTS
7556 /**
7557  * dhd_msgbuf_get_ip_info - this api finds the following (ipv4 and ipv6 are supported):
7558  * 1. pointer to data portion of pkt
7559  * 2. five tuple checksum of pkt
7560  *   = {src_ip, dst_ip, src_port, dst_port, proto}
7561  * 3. ip_prec
7562  *
7563  * @dhdp: pointer to dhd_pub object
7564  * @pkt: packet pointer
7565  * @ptr: returns pointer to data portion of pkt
7566  * @chksum: returns five tuple checksum of pkt
7567  * @prec: returns ip precedence
7568  * @tcp_seqno: returns tcp sequence number
 * @tcp_ackno: returns tcp ack number
7569  *
7570  * returns packet length remaining after tcp/udp header or BCME_ERROR.
7571  */
7572 static int
7573 dhd_msgbuf_get_ip_info(dhd_pub_t *dhdp, void *pkt, void **ptr, uint32 *chksum,
7574 	uint32 *prec, uint32 *tcp_seqno, uint32 *tcp_ackno)
7575 {
7576 	char *pdata;
7577 	uint plen;
7578 	uint32 type, len;
7579 	uint32 checksum = 0;
7580 	uint8 dscp_prio = 0;
7581 	struct bcmtcp_hdr *tcp = NULL;
7582 
7583 	pdata = PKTDATA(dhdp->osh, pkt);
7584 	plen = PKTLEN(dhdp->osh, pkt);
7585 
7586 	/* Ethernet header */
7587 	if (plen < ETHER_HDR_LEN) {
7588 		return BCME_ERROR;
7589 	}
7590 	type = ntoh16(((struct ether_header *)pdata)->ether_type);
7591 	pdata += ETHER_HDR_LEN;
7592 	plen -= ETHER_HDR_LEN;
7593 
7594 	if ((type == ETHER_TYPE_IP) ||
7595 		(type == ETHER_TYPE_IPV6)) {
7596 		dscp_prio = (IP_TOS46(pdata) >> IPV4_TOS_PREC_SHIFT);
7597 	}
7598 
7599 	/* IP header (v4 or v6) */
7600 	if (type == ETHER_TYPE_IP) {
7601 		struct ipv4_hdr *iph = (struct ipv4_hdr *)pdata;
7602 		if (plen <= sizeof(*iph)) {
7603 			return BCME_ERROR;
7604 		}
7605 
7606 		len = IPV4_HLEN(iph);
7607 		if (plen <= len || IP_VER(iph) != IP_VER_4 || len < IPV4_MIN_HEADER_LEN) {
7608 			return BCME_ERROR;
7609 		}
7610 
7611 		type = IPV4_PROT(iph);
7612 		pdata += len;
7613 		plen -= len;
7614 
7615 		checksum ^= bcm_compute_xor32((volatile uint32 *)iph->src_ip,
7616 			sizeof(iph->src_ip) / sizeof(uint32));
7617 		checksum ^= bcm_compute_xor32((volatile uint32 *)iph->dst_ip,
7618 			sizeof(iph->dst_ip) / sizeof(uint32));
7619 	} else if (type == ETHER_TYPE_IPV6) {
7620 		struct ipv6_hdr *ip6h = (struct ipv6_hdr *)pdata;
7621 
7622 		if (plen <= IPV6_MIN_HLEN || IP_VER(ip6h) != IP_VER_6) {
7623 			return BCME_ERROR;
7624 		}
7625 
7626 		type = IPV6_PROT(ip6h);
7627 		pdata += IPV6_MIN_HLEN;
7628 		plen -= IPV6_MIN_HLEN;
7629 		if (IPV6_EXTHDR(type)) {
7630 			uint8 proto = 0;
7631 			int32 exth_len = ipv6_exthdr_len(pdata, &proto);
7632 			if (exth_len < 0 || (plen <= (uint32)exth_len)) {
7633 				return BCME_ERROR;
7634 			}
7635 			type = proto;
7636 			pdata += exth_len;
7637 			plen -= exth_len;
7638 		}
7639 
7640 		checksum ^= bcm_compute_xor32((volatile uint32 *)&ip6h->saddr,
7641 			sizeof(ip6h->saddr) / sizeof(uint32));
7642 		checksum ^= bcm_compute_xor32((volatile uint32 *)&ip6h->daddr,
7643 			sizeof(ip6h->saddr) / sizeof(uint32));
7644 	}
7645 
7646 	/* return error if not TCP or UDP */
7647 	if ((type != IP_PROT_UDP) && (type != IP_PROT_TCP)) {
7648 		return BCME_ERROR;
7649 	}
7650 
7651 	/* src_port and dst_port (together 32bit) */
7652 	checksum ^= bcm_compute_xor32((volatile uint32 *)pdata, 1);
7653 	checksum ^= bcm_compute_xor32((volatile uint32 *)&type, 1);
7654 
7655 	if (type == IP_PROT_TCP) {
7656 		tcp = (struct bcmtcp_hdr *)pdata;
7657 		len = TCP_HDRLEN(pdata[TCP_HLEN_OFFSET]) << 2;
7658 	} else { /* IP_PROT_UDP */
7659 		len =	sizeof(struct bcmudp_hdr);
7660 	}
7661 
7662 	/* length check */
7663 	if (plen < len) {
7664 		return BCME_ERROR;
7665 	}
7666 
7667 	pdata += len;
7668 	plen -= len;
7669 
7670 	/* update data[0] */
7671 	*ptr = (void *)pdata;
7672 
7673 	/* update fivetuple checksum */
7674 	*chksum = checksum;
7675 
7676 	/* update ip prec */
7677 	*prec = dscp_prio;
7678 
7679 	/* update tcp sequence number */
7680 	if (tcp != NULL) {
7681 		*tcp_seqno = tcp->seq_num;
7682 		*tcp_ackno = tcp->ack_num;
7683 	}
7684 
7685 	return plen;
7686 }
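/*
 * Usage sketch (mirrors the callers below; local names are illustrative):
 *
 *	void *data = NULL;
 *	uint32 csum = 0, prec = 0, seqno = 0, ackno = 0;
 *	int rem = dhd_msgbuf_get_ip_info(dhdp, pkt, &data, &csum, &prec,
 *		&seqno, &ackno);
 *
 * A negative return (BCME_ERROR) means the frame is not an IPv4/IPv6 TCP/UDP
 * packet; otherwise 'rem' is the payload length remaining after the TCP/UDP
 * header and 'csum' is the XOR-folded five tuple that dhd_match_pktts_flow()
 * uses as the flow id.
 */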
7687 
7688 /**
7689  * dhd_msgbuf_send_msg_tx_ts - send pktts tx timestamp to netlink socket
7690  *
7691  * @dhdp: pointer to dhd_pub object
7692  * @pkt: packet pointer
7693  * @fwts: firmware timestamp {fwt1..fwt4}
7694  * @version: pktlat version supported in firmware
7695  */
7696 static void
7697 dhd_msgbuf_send_msg_tx_ts(dhd_pub_t *dhdp, void *pkt, void *fw_ts, uint16 version)
7698 {
7699 	bcm_to_info_tx_ts_t to_tx_info;
7700 	void *ptr = NULL;
7701 	int dlen = 0;
7702 	uint32 checksum = 0;
7703 	uint32 prec = 0;
7704 	pktts_flow_t *flow = NULL;
7705 	uint32 flow_pkt_offset = 0;
7706 	uint32 num_config = 0;
7707 	uint32 tcp_seqno = 0;
7708 	uint32 tcp_ackno = 0;
7709 
7710 	dlen = dhd_msgbuf_get_ip_info(dhdp, pkt, &ptr, &checksum, &prec, &tcp_seqno, &tcp_ackno);
7711 
7712 	flow = dhd_match_pktts_flow(dhdp, checksum, NULL, &num_config);
7713 	if (flow) {
7714 		/* there is valid config for this chksum */
7715 		flow_pkt_offset = flow->pkt_offset;
7716 	} else if (num_config) {
7717 		/* there is valid config + no matching config for this chksum */
7718 		return;
7719 	} else {
7720 		/* there is no valid config. pass all to netlink */
7721 	}
7722 
7723 	memset(&to_tx_info, 0, sizeof(to_tx_info));
7724 	to_tx_info.hdr.type = BCM_TS_TX;
7725 	to_tx_info.hdr.flowid = checksum;
7726 	to_tx_info.hdr.prec = prec;
7727 
7728 	/* special case: if flow is not configured, copy tcp seqno and ackno in xbytes */
7729 	if (!flow && tcp_seqno) {
7730 		uint32 *xbytes = (uint32 *)to_tx_info.hdr.xbytes;
7731 
7732 		(void)memcpy_s(&xbytes[0], sizeof(xbytes[0]),
7733 			((uint8 *)&tcp_seqno), sizeof(tcp_seqno));
7734 		(void)memcpy_s(&xbytes[1], sizeof(xbytes[1]),
7735 			((uint8 *)&tcp_ackno), sizeof(tcp_ackno));
7736 	} else if ((dlen > flow_pkt_offset) &&
7737 		((dlen - flow_pkt_offset) >= sizeof(to_tx_info.hdr.xbytes))) {
7738 		(void)memcpy_s(to_tx_info.hdr.xbytes, sizeof(to_tx_info.hdr.xbytes),
7739 			((uint8 *)ptr + flow_pkt_offset), sizeof(to_tx_info.hdr.xbytes));
7740 	}
7741 
7742 	to_tx_info.dhdt0 = DHD_PKT_GET_QTIME(pkt);
7743 	to_tx_info.dhdt5 = OSL_SYSUPTIME_US();
7744 
7745 	if (version == METADATA_VER_1) {
7746 		struct pktts_fwtx_v1 *fwts = (struct pktts_fwtx_v1 *)fw_ts;
7747 
7748 		to_tx_info.hdr.magic = BCM_TS_MAGIC;
7749 
7750 		to_tx_info.fwts[0] = ntohl(fwts->ts[0]);
7751 		to_tx_info.fwts[1] = ntohl(fwts->ts[1]);
7752 		to_tx_info.fwts[2] = ntohl(fwts->ts[2]);
7753 		to_tx_info.fwts[3] = ntohl(fwts->ts[3]);
7754 
7755 		dhd_send_msg_to_ts(NULL, (void *)&to_tx_info, OFFSETOF(bcm_to_info_tx_ts_t, ucts));
7756 	} else if (version == METADATA_VER_2) {
7757 		struct pktts_fwtx_v2 *fwts = (struct pktts_fwtx_v2 *)fw_ts;
7758 
7759 		to_tx_info.hdr.magic = BCM_TS_MAGIC_V2;
7760 
7761 		to_tx_info.fwts[0] = ntohl(fwts->ts[0]);
7762 		to_tx_info.fwts[1] = ntohl(fwts->ts[1]);
7763 		to_tx_info.fwts[2] = ntohl(fwts->ts[2]);
7764 		to_tx_info.fwts[3] = ntohl(fwts->ts[3]);
7765 
7766 		to_tx_info.ucts[0] = ntohl(fwts->ut[0]);
7767 		to_tx_info.ucts[1] = ntohl(fwts->ut[1]);
7768 		to_tx_info.ucts[2] = ntohl(fwts->ut[2]);
7769 		to_tx_info.ucts[3] = ntohl(fwts->ut[3]);
7770 		to_tx_info.ucts[4] = ntohl(fwts->ut[4]);
7771 
7772 		to_tx_info.uccnt[0] = ntohl(fwts->uc[0]);
7773 		to_tx_info.uccnt[1] = ntohl(fwts->uc[1]);
7774 		to_tx_info.uccnt[2] = ntohl(fwts->uc[2]);
7775 		to_tx_info.uccnt[3] = ntohl(fwts->uc[3]);
7776 		to_tx_info.uccnt[4] = ntohl(fwts->uc[4]);
7777 		to_tx_info.uccnt[5] = ntohl(fwts->uc[5]);
7778 		to_tx_info.uccnt[6] = ntohl(fwts->uc[6]);
7779 		to_tx_info.uccnt[7] = ntohl(fwts->uc[7]);
7780 
7781 		dhd_send_msg_to_ts(NULL, (void *)&to_tx_info, sizeof(to_tx_info));
7782 	}
7783 	return;
7784 }
7785 
7786 /**
7787  * dhd_msgbuf_send_msg_rx_ts - send pktts rx timestamp to netlink socket
7788  *
7789  * @dhdp: pointer to dhd_pub object
7790  * @pkt: packet pointer
7791  * @fwr1: firmware timestamp at probe point 1
7792  * @fwr2: firmware timestamp at probe point 2
7793  */
7794 static void
7795 dhd_msgbuf_send_msg_rx_ts(dhd_pub_t *dhdp, void *pkt, uint fwr1, uint fwr2)
7796 {
7797 	bcm_to_info_rx_ts_t to_rx_info;
7798 	void *ptr = NULL;
7799 	int dlen = 0;
7800 	uint32 checksum = 0;
7801 	uint32 prec = 0;
7802 	pktts_flow_t *flow = NULL;
7803 	uint32 flow_pkt_offset = 0;
7804 	uint32 num_config = 0;
7805 	uint32 tcp_seqno = 0;
7806 	uint32 tcp_ackno = 0;
7807 
7808 	dlen = dhd_msgbuf_get_ip_info(dhdp, pkt, &ptr, &checksum, &prec, &tcp_seqno, &tcp_ackno);
7809 
7810 	flow = dhd_match_pktts_flow(dhdp, checksum, NULL, &num_config);
7811 	if (flow) {
7812 		/* there is valid config for this chksum */
7813 		flow_pkt_offset = flow->pkt_offset;
7814 	} else if (num_config) {
7815 		/* there is valid config + no matching config for this chksum */
7816 		return;
7817 	} else {
7818 		/* there is no valid config. pass all to netlink */
7819 	}
7820 
7821 	memset(&to_rx_info, 0, sizeof(to_rx_info));
7822 	to_rx_info.hdr.magic = BCM_TS_MAGIC;
7823 	to_rx_info.hdr.type = BCM_TS_RX;
7824 	to_rx_info.hdr.flowid = checksum;
7825 	to_rx_info.hdr.prec = prec;
7826 
7827 	/* special case: if flow is not configured, copy tcp seqno and ackno in xbytes */
7828 	if (!flow && tcp_seqno) {
7829 		uint32 *xbytes = (uint32 *)to_rx_info.hdr.xbytes;
7830 
7831 		(void)memcpy_s(&xbytes[0], sizeof(xbytes[0]),
7832 			((uint8 *)&tcp_seqno), sizeof(tcp_seqno));
7833 		(void)memcpy_s(&xbytes[1], sizeof(xbytes[1]),
7834 			((uint8 *)&tcp_ackno), sizeof(tcp_ackno));
7835 	} else if ((dlen > flow_pkt_offset) &&
7836 		((dlen - flow_pkt_offset) >= sizeof(to_rx_info.hdr.xbytes))) {
7837 		(void)memcpy_s(to_rx_info.hdr.xbytes, sizeof(to_rx_info.hdr.xbytes),
7838 			((uint8 *)ptr + flow_pkt_offset), sizeof(to_rx_info.hdr.xbytes));
7839 	}
7840 
7841 	to_rx_info.dhdr3 = OSL_SYSUPTIME_US();
7842 
7843 	to_rx_info.fwts[0] = ntohl(fwr1);
7844 	to_rx_info.fwts[1] = ntohl(fwr2);
7845 
7846 	dhd_send_msg_to_ts(NULL, (void *)&to_rx_info, sizeof(to_rx_info));
7847 	return;
7848 }
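
/*
 * Illustrative sketch (not part of the driver): the flow-match decision shared
 * by the two pktts senders above. A sample is forwarded to the netlink
 * consumer when its checksum matches a configured flow, or when no flows are
 * configured at all; it is dropped only when flows exist but none match.
 * The helper name and bool return value are hypothetical.
 */
#if 0
static bool
dhd_example_pktts_should_forward(dhd_pub_t *dhdp, uint32 chksum, uint32 *pkt_offset)
{
	uint32 num_config = 0;
	pktts_flow_t *flow = dhd_match_pktts_flow(dhdp, chksum, NULL, &num_config);

	if (flow) {
		/* a matching config exists: use its payload offset */
		*pkt_offset = flow->pkt_offset;
		return TRUE;
	}
	if (num_config) {
		/* configs exist but none match this checksum: filter out */
		return FALSE;
	}
	/* no configs at all: pass everything, starting at offset 0 */
	*pkt_offset = 0;
	return TRUE;
}
#endif /* illustrative sketch */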
7849 #endif /* DHD_PKTTS */
7850 
7851 /** called on MSG_TYPE_TX_STATUS message received from dongle */
7852 static void
7853 BCMFASTPATH(dhd_prot_txstatus_process)(dhd_pub_t *dhd, void *msg)
7854 {
7855 	dhd_prot_t *prot = dhd->prot;
7856 	host_txbuf_cmpl_t * txstatus;
7857 	unsigned long flags;
7858 	uint32 pktid;
7859 	void *pkt;
7860 	dmaaddr_t pa;
7861 	uint32 len;
7862 	void *dmah;
7863 	void *secdma;
7864 	bool pkt_fate;
7865 	msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln;
7866 #if defined(TX_STATUS_LATENCY_STATS)
7867 	flow_info_t *flow_info;
7868 	uint64 tx_status_latency;
7869 #endif /* TX_STATUS_LATENCY_STATS */
7870 #ifdef AGG_H2D_DB
7871 	msgbuf_ring_t *flow_ring;
7872 #endif /* AGG_H2D_DB */
7873 #if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
7874 	dhd_awdl_stats_t *awdl_stats;
7875 	if_flow_lkup_t *if_flow_lkup;
7876 	unsigned long awdl_stats_lock_flags;
7877 	uint8 ifindex;
7878 	uint8 role;
7879 #endif /* DHD_AWDL && AWDL_SLOT_STATS */
7880 	flow_ring_node_t *flow_ring_node;
7881 	uint16 flowid;
7882 #ifdef DHD_PKTTS
7883 	struct metadata_txcmpl_v1 meta_ts_v1;
7884 	struct metadata_txcmpl_v2 meta_ts_v2;
7885 	dhd_dma_buf_t meta_data_buf;
7886 	uint64 addr = 0;
7887 
7888 	BCM_REFERENCE(meta_ts_v1);
7889 	BCM_REFERENCE(meta_ts_v2);
7890 	BCM_REFERENCE(meta_data_buf);
7891 	BCM_REFERENCE(addr);
7892 
7893 	if ((dhd->memdump_type == DUMP_TYPE_PKTID_AUDIT_FAILURE) ||
7894 		(dhd->memdump_type == DUMP_TYPE_PKTID_INVALID)) {
7895 		DHD_ERROR_RLMT(("%s: return as invalid pktid detected\n", __FUNCTION__));
7896 		return;
7897 	}
7898 
7899 	memset(&meta_ts_v1, 0, sizeof(meta_ts_v1));
7900 	memset(&meta_ts_v2, 0, sizeof(meta_ts_v2));
7901 	memset(&meta_data_buf, 0, sizeof(meta_data_buf));
7902 #endif /* DHD_PKTTS */
7903 	txstatus = (host_txbuf_cmpl_t *)msg;
7904 
7905 	flowid = txstatus->compl_hdr.flow_ring_id;
7906 	flow_ring_node = DHD_FLOW_RING(dhd, flowid);
7907 #ifdef AGG_H2D_DB
7908 	flow_ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
7909 	OSL_ATOMIC_DEC(dhd->osh, &flow_ring->inflight);
7910 #endif /* AGG_H2D_DB */
7911 
7912 	BCM_REFERENCE(flow_ring_node);
7913 
7914 #ifdef DEVICE_TX_STUCK_DETECT
7915 	/**
7916 	 * Since we got a completion message on this flowid,
7917 	 * update tx_cmpl time stamp
7918 	 */
7919 	flow_ring_node->tx_cmpl = OSL_SYSUPTIME();
7920 	/* update host copy of rd pointer */
7921 #ifdef DHD_HP2P
7922 	if (dhd->prot->d2hring_hp2p_txcpl &&
7923 		flow_ring_node->flow_info.tid == HP2P_PRIO) {
7924 		ring = dhd->prot->d2hring_hp2p_txcpl;
7925 	}
7926 #endif /* DHD_HP2P */
7927 	ring->curr_rd++;
7928 	if (ring->curr_rd >= ring->max_items) {
7929 		ring->curr_rd = 0;
7930 	}
7931 #endif /* DEVICE_TX_STUCK_DETECT */
7932 
7933 	/* locks required to protect circular buffer accesses */
7934 	DHD_RING_LOCK(ring->ring_lock, flags);
7935 	pktid = ltoh32(txstatus->cmn_hdr.request_id);
7936 
7937 	if (dhd->pcie_txs_metadata_enable > 1) {
7938 		/* Return metadata format (little endian):
7939 		 * |<--- txstatus --->|<- metadatalen ->|
7940 		 * |____|____|________|________|________|
7941 		 * |    |    |        |        |> total delay from fetch to report (8-bit 1 = 4ms)
7942 		 * |    |    |        |> ucode delay from enqueue to completion (8-bit 1 = 4ms)
7943 		 * |    |    |> 8-bit reserved (pre-filled with original TX status by caller)
7944 		 * |    |> delay time from the first fetch to the last fetch (4-bit 1 = 32ms)
7945 		 * |> fetch count (4-bit)
7946 		 */
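		/*
		 * Worked example (hypothetical values): tx_status = 0x2300 and
		 * tx_status_ext = 0x0302 decode as 2 fetches spanning 3 * 32 = 96 ms,
		 * a 2 * 4 = 8 ms ucode delay and a 3 * 4 = 12 ms total delay.
		 */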
7947 		printf("TX status[%d] = %04x-%04x -> status = %d (%d/%dms + %d/%dms)\n", pktid,
7948 			ltoh16(txstatus->tx_status_ext), ltoh16(txstatus->tx_status),
7949 			(txstatus->tx_status & WLFC_CTL_PKTFLAG_MASK),
7950 			((txstatus->tx_status >> 12) & 0xf),
7951 			((txstatus->tx_status >> 8) & 0xf) * 32,
7952 			((txstatus->tx_status_ext & 0xff) * 4),
7953 			((txstatus->tx_status_ext >> 8) & 0xff) * 4);
7954 	}
7955 	pkt_fate = TRUE;
7956 
7957 #if defined(DHD_PKTID_AUDIT_RING) && !defined(BCM_ROUTER_DHD)
7958 	DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_tx_map, pktid,
7959 			DHD_DUPLICATE_FREE, msg, D2HRING_TXCMPLT_ITEMSIZE);
7960 #endif /* DHD_PKTID_AUDIT_RING && !BCM_ROUTER_DHD */
7961 
7962 	DHD_MSGBUF_INFO(("txstatus for pktid 0x%04x\n", pktid));
7963 	if (OSL_ATOMIC_DEC_RETURN(dhd->osh, &prot->active_tx_count) < 0) {
7964 		DHD_ERROR(("Extra packets are freed\n"));
7965 	}
7966 	ASSERT(pktid != 0);
7967 
7968 #ifdef DHD_HMAPTEST
7969 
7970 	if ((dhd->prot->hmaptest_tx_active == HMAPTEST_D11_TX_POSTED) &&
7971 		(pktid == dhd->prot->hmaptest_tx_pktid)) {
7972 		DHD_ERROR(("hmaptest: d11read txcpl received sc txbuf pktid=0x%08x\n", pktid));
7973 		DHD_ERROR(("hmaptest: d11read txcpl txstatus=0x%08x\n", txstatus->tx_status));
7974 		DHD_ERROR(("hmaptest: d11read txcpl sc txbuf va=0x%p pa=0x%08x\n",
7975 			dhd->prot->hmap_tx_buf_va, (uint32)PHYSADDRLO(dhd->prot->hmap_tx_buf_pa)));
7976 		dhd->prot->hmaptest_tx_active = HMAPTEST_D11_TX_INACTIVE;
7977 		dhd->prot->hmap_tx_buf_va = NULL;
7978 		dhd->prot->hmap_tx_buf_len = 0;
7979 		PHYSADDRHISET(dhd->prot->hmap_tx_buf_pa, 0);
7980 		PHYSADDRLOSET(dhd->prot->hmap_tx_buf_pa, 0);
7981 		prot->hmaptest.in_progress = FALSE;
7982 	}
7983 	/* original skb is kept as it is because it's going to be freed later in this path */
7984 #endif /* DHD_HMAPTEST */
7985 
7986 #ifdef DHD_PKTTS
7987 	if (dhd_get_pktts_enab(dhd) &&
7988 		dhd->pkt_metadata_buflen) {
7989 		/* Handle the Metadata first */
7990 		meta_data_buf.va = DHD_PKTID_RETREIVE_METADATA(dhd, dhd->prot->pktid_tx_map,
7991 			meta_data_buf.pa, meta_data_buf._alloced, meta_data_buf.dmah, pktid);
7992 		if (meta_data_buf.va) {
7993 			if (dhd->pkt_metadata_version == METADATA_VER_1) {
7994 				memcpy(&meta_ts_v1, meta_data_buf.va, sizeof(meta_ts_v1));
7995 			} else if (dhd->pkt_metadata_version == METADATA_VER_2) {
7996 				memcpy(&meta_ts_v2, meta_data_buf.va, sizeof(meta_ts_v2));
7997 			}
7998 			memcpy(&addr, &meta_data_buf.pa, sizeof(meta_data_buf.pa));
7999 			DHD_TRACE(("%s(): pktid %d retrieved mdata buffer %p "
8000 				"pa: %llx dmah: %p\r\n",  __FUNCTION__,
8001 				pktid, meta_data_buf.va, addr,
8002 				meta_data_buf.dmah));
8003 		}
8004 	}
8005 #endif /* DHD_PKTTS */
8006 
8007 	pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid,
8008 		pa, len, dmah, secdma, PKTTYPE_DATA_TX);
8009 	if (!pkt) {
8010 		DHD_RING_UNLOCK(ring->ring_lock, flags);
8011 #ifdef DHD_PKTTS
8012 		/*
8013 		 * Call the free function after the ring lock is released,
8014 		 * because pcie_free_consistent is not supposed to be
8015 		 * called with interrupts disabled.
8016 		 */
8017 		if (meta_data_buf.va) {
8018 			DMA_FREE_CONSISTENT(dhd->osh, meta_data_buf.va, meta_data_buf._alloced,
8019 				meta_data_buf.pa, meta_data_buf.dmah);
8020 		}
8021 #endif /* DHD_PKTTS */
8022 		DHD_ERROR(("%s: received txstatus with NULL pkt\n", __FUNCTION__));
8023 		prhex("dhd_prot_txstatus_process:", (uchar *)msg, D2HRING_TXCMPLT_ITEMSIZE);
8024 #ifdef DHD_FW_COREDUMP
8025 		if (dhd->memdump_enabled) {
8026 			/* collect core dump */
8027 			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
8028 			dhd_bus_mem_dump(dhd);
8029 		}
8030 #else
8031 		ASSERT(0);
8032 #endif /* DHD_FW_COREDUMP */
8033 		return;
8034 	}
8035 
8036 	if (DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map) == DHD_PKTID_MIN_AVAIL_COUNT) {
8037 		DHD_ERROR_RLMT(("%s: start tx queue as min pktids are available\n",
8038 			__FUNCTION__));
8039 		prot->pktid_txq_stop_cnt--;
8040 		dhd->prot->no_tx_resource = FALSE;
8041 		dhd_bus_start_queue(dhd->bus);
8042 	}
8043 
8044 	DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
8045 
8046 #ifdef TX_STATUS_LATENCY_STATS
8047 	/* update the tx status latency for flowid */
8048 	flow_info = &flow_ring_node->flow_info;
8049 	tx_status_latency = OSL_SYSUPTIME_US() - DHD_PKT_GET_QTIME(pkt);
8050 #if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
8051 	if (dhd->pkt_latency > 0 &&
8052 		tx_status_latency > (dhd->pkt_latency)) {
8053 		DHD_ERROR(("Latency: %llu > %u aw_cnt: %u \n",
8054 			tx_status_latency, dhd->pkt_latency,
8055 			dhd->awdl_aw_counter));
8056 	}
8057 #endif /*  defined(DHD_AWDL) && defined(AWDL_SLOT_STATS) */
8058 	flow_info->cum_tx_status_latency += tx_status_latency;
8059 	flow_info->num_tx_status++;
8060 #endif /* TX_STATUS_LATENCY_STATS */
8061 #if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
8062 	/* update the tx status latency when this AWDL slot is active */
8063 	if_flow_lkup = (if_flow_lkup_t *)dhd->if_flow_lkup;
8064 	ifindex = flow_ring_node->flow_info.ifindex;
8065 	role = if_flow_lkup[ifindex].role;
8066 	if (role == WLC_E_IF_ROLE_AWDL) {
8067 		awdl_stats = &dhd->awdl_stats[dhd->awdl_tx_status_slot];
8068 		DHD_AWDL_STATS_LOCK(dhd->awdl_stats_lock, awdl_stats_lock_flags);
8069 		awdl_stats->cum_tx_status_latency += tx_status_latency;
8070 		awdl_stats->num_tx_status++;
8071 		DHD_AWDL_STATS_UNLOCK(dhd->awdl_stats_lock, awdl_stats_lock_flags);
8072 	}
8073 #endif /* DHD_AWDL && AWDL_SLOT_STATS */
8074 
8075 #ifdef HOST_SFH_LLC
8076 	if (dhd->host_sfhllc_supported) {
8077 		struct ether_header eth;
8078 		if (!memcpy_s(&eth, sizeof(eth),
8079 			PKTDATA(dhd->osh, pkt), sizeof(eth))) {
8080 			if (dhd_8023_llc_to_ether_hdr(dhd->osh,
8081 				&eth, pkt) != BCME_OK) {
8082 				DHD_ERROR_RLMT(("%s: host sfh llc"
8083 					" conversion to ether failed\n",
8084 					__FUNCTION__));
8085 			}
8086 		}
8087 	}
8088 #endif /* HOST_SFH_LLC */
8089 
8090 #ifdef DMAMAP_STATS
8091 	dhd->dma_stats.txdata--;
8092 	dhd->dma_stats.txdata_sz -= len;
8093 #endif /* DMAMAP_STATS */
8094 	pkt_fate = dhd_dbg_process_tx_status(dhd, pkt, pktid,
8095 		ltoh16(txstatus->compl_hdr.status) & WLFC_CTL_PKTFLAG_MASK);
8096 #ifdef DHD_PKT_LOGGING
8097 	if (dhd->d11_tx_status) {
8098 		uint16 status = ltoh16(txstatus->compl_hdr.status) &
8099 			WLFC_CTL_PKTFLAG_MASK;
8100 		dhd_handle_pktdata(dhd, ltoh32(txstatus->cmn_hdr.if_id),
8101 			pkt, (uint8 *)PKTDATA(dhd->osh, pkt), pktid, len,
8102 			&status, NULL, TRUE, FALSE, TRUE);
8103 	}
8104 #endif /* DHD_PKT_LOGGING */
8105 #if defined(BCMPCIE) && (defined(LINUX) || defined(OEM_ANDROID) || defined(DHD_EFI))
8106 	dhd_txcomplete(dhd, pkt, pkt_fate);
8107 #ifdef DHD_4WAYM4_FAIL_DISCONNECT
8108 	dhd_eap_txcomplete(dhd, pkt, pkt_fate, txstatus->cmn_hdr.if_id);
8109 #endif /* DHD_4WAYM4_FAIL_DISCONNECT */
8110 #endif /* BCMPCIE && (LINUX || OEM_ANDROID || DHD_EFI) */
8111 
8112 #ifdef DHD_PKTTS
8113 	if (dhd_get_pktts_enab(dhd) == TRUE) {
8114 		if (dhd->pkt_metadata_buflen) {
8115 			/* firmware marks tx_pktts.tref with 0xFFFFFFFF for errors */
8116 			if ((dhd->pkt_metadata_version == METADATA_VER_1) &&
8117 					(ltoh32(meta_ts_v1.tref) != 0xFFFFFFFF)) {
8118 				struct pktts_fwtx_v1 fwts;
8119 				fwts.ts[0] = (uint32)htonl(ltoh32(meta_ts_v1.tref));
8120 				fwts.ts[1] = (uint32)htonl(ltoh32(meta_ts_v1.tref) +
8121 					ltoh16(meta_ts_v1.d_t2));
8122 				fwts.ts[2] = (uint32)htonl(ltoh32(meta_ts_v1.tref) +
8123 					ltoh16(meta_ts_v1.d_t3));
8124 				fwts.ts[3] = (uint32)htonl(ltoh32(meta_ts_v1.tref) +
8125 					ltoh16(meta_ts_v1.d_t4));
8126 				/* check for overflow */
8127 				if (ntohl(fwts.ts[3]) > ntohl(fwts.ts[0])) {
8128 					/* send tx timestamp to netlink socket */
8129 					dhd_msgbuf_send_msg_tx_ts(dhd, pkt, &fwts,
8130 						dhd->pkt_metadata_version);
8131 				}
8132 			} else if ((dhd->pkt_metadata_version == METADATA_VER_2) &&
8133 					(ltoh32(meta_ts_v2.tref) != 0xFFFFFFFF)) {
8134 				struct pktts_fwtx_v2 fwts;
8135 				fwts.ts[0] = (uint32)htonl(ltoh32(meta_ts_v2.tref));
8136 				fwts.ts[1] = (uint32)htonl(ltoh32(meta_ts_v2.tref) +
8137 					ltoh16(meta_ts_v2.d_t2));
8138 				fwts.ts[2] = (uint32)htonl(ltoh32(meta_ts_v2.tref) +
8139 					ltoh16(meta_ts_v2.d_t3));
8140 				fwts.ts[3] = (uint32)htonl(ltoh32(meta_ts_v2.tref) +
8141 					ltoh16(meta_ts_v2.d_t4));
8142 
8143 				fwts.ut[0] = (uint32)htonl(ltoh32(meta_ts_v2.tref) +
8144 					ltoh16(meta_ts_v2.u_t1));
8145 				fwts.ut[1] = (uint32)htonl(ltoh16(meta_ts_v2.u_t2));
8146 				fwts.ut[2] = (uint32)htonl(ltoh16(meta_ts_v2.u_t3));
8147 				fwts.ut[3] = (uint32)htonl(ltoh16(meta_ts_v2.u_t4));
8148 				fwts.ut[4] = (uint32)htonl(ltoh32(meta_ts_v2.tref) +
8149 					ltoh16(meta_ts_v2.u_t5));
8150 
8151 				fwts.uc[0] = (uint32)htonl(ltoh32(meta_ts_v2.u_c1));
8152 				fwts.uc[1] = (uint32)htonl(ltoh32(meta_ts_v2.u_c2));
8153 				fwts.uc[2] = (uint32)htonl(ltoh32(meta_ts_v2.u_c3));
8154 				fwts.uc[3] = (uint32)htonl(ltoh32(meta_ts_v2.u_c4));
8155 				fwts.uc[4] = (uint32)htonl(ltoh32(meta_ts_v2.u_c5));
8156 				fwts.uc[5] = (uint32)htonl(ltoh32(meta_ts_v2.u_c6));
8157 				fwts.uc[6] = (uint32)htonl(ltoh32(meta_ts_v2.u_c7));
8158 				fwts.uc[7] = (uint32)htonl(ltoh32(meta_ts_v2.u_c8));
8159 
8160 				DHD_INFO(("uct1:%x uct2:%x uct3:%x uct4:%x uct5:%x\n",
8161 					ntohl(fwts.ut[0]), ntohl(fwts.ut[1]), ntohl(fwts.ut[2]),
8162 					ntohl(fwts.ut[3]), ntohl(fwts.ut[4])));
8163 				DHD_INFO(("ucc1:%x ucc2:%x ucc3:%x ucc4:%x"
8164 					" ucc5:%x ucc6:%x ucc7:%x ucc8:%x\n",
8165 					ntohl(fwts.uc[0]), ntohl(fwts.uc[1]), ntohl(fwts.uc[2]),
8166 					ntohl(fwts.uc[3]), ntohl(fwts.uc[4]), ntohl(fwts.uc[5]),
8167 					ntohl(fwts.uc[6]), ntohl(fwts.uc[7])));
8168 				/* check for overflow */
8169 				if (ntohl(fwts.ts[3]) > ntohl(fwts.ts[0])) {
8170 					/* send tx timestamp to netlink socket */
8171 					dhd_msgbuf_send_msg_tx_ts(dhd, pkt, &fwts,
8172 						dhd->pkt_metadata_version);
8173 				}
8174 			}
8175 		} else {
8176 			/* firmware marks tx_pktts.tref with 0xFFFFFFFF for errors */
8177 			if (ltoh32(txstatus->tx_pktts.tref) != 0xFFFFFFFF) {
8178 				struct pktts_fwtx_v1 fwts;
8179 
8180 				fwts.ts[0] = (uint32)htonl(ltoh32(txstatus->tx_pktts.tref));
8181 				fwts.ts[1] = (uint32)htonl(ltoh32(txstatus->tx_pktts.tref) +
8182 					ltoh16(txstatus->tx_pktts.d_t2));
8183 				fwts.ts[2] = (uint32)htonl(ltoh32(txstatus->tx_pktts.tref) +
8184 					ltoh16(txstatus->tx_pktts.d_t3));
8185 				fwts.ts[3] = (uint32)htonl(ltoh32(txstatus->tx_pktts.tref) +
8186 					ltoh16(txstatus->tx_pktts.d_t4));
8187 
8188 				/* check for overflow */
8189 				if (ntohl(fwts.ts[3]) > ntohl(fwts.ts[0])) {
8190 					/* send tx timestamp to netlink socket */
8191 					dhd_msgbuf_send_msg_tx_ts(dhd, pkt, &fwts, METADATA_VER_1);
8192 				}
8193 			}
8194 		}
8195 	}
8196 #endif /* DHD_PKTTS */
8197 
8198 #if DHD_DBG_SHOW_METADATA
8199 	if (dhd->prot->metadata_dbg &&
8200 			dhd->prot->tx_metadata_offset && txstatus->metadata_len) {
8201 		uchar *ptr;
8202 		/* The Ethernet header of TX frame was copied and removed.
8203 		 * Here, move the data pointer forward by Ethernet header size.
8204 		 */
8205 		PKTPULL(dhd->osh, pkt, ETHER_HDR_LEN);
8206 		ptr = PKTDATA(dhd->osh, pkt)  - (dhd->prot->tx_metadata_offset);
8207 		bcm_print_bytes("txmetadata", ptr, txstatus->metadata_len);
8208 		dhd_prot_print_metadata(dhd, ptr, txstatus->metadata_len);
8209 	}
8210 #endif /* DHD_DBG_SHOW_METADATA */
8211 
8212 #ifdef DHD_HP2P
8213 	if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
8214 #ifdef DHD_HP2P_DEBUG
8215 		bcm_print_bytes("txcpl", (uint8 *)txstatus, sizeof(host_txbuf_cmpl_t));
8216 #endif /* DHD_HP2P_DEBUG */
8217 		dhd_update_hp2p_txstats(dhd, txstatus);
8218 	}
8219 #endif /* DHD_HP2P */
8220 
8221 #ifdef DHD_TIMESYNC
8222 	if (dhd->prot->tx_ts_log_enabled) {
8223 		dhd_pkt_parse_t parse;
8224 		ts_timestamp_t *ts = (ts_timestamp_t *)&(txstatus->ts);
8225 
8226 		memset(&parse, 0, sizeof(parse));
8227 		dhd_parse_proto(PKTDATA(dhd->osh, pkt), &parse);
8228 
8229 		if (parse.proto == IP_PROT_ICMP)
8230 			dhd_timesync_log_tx_timestamp(dhd->ts,
8231 				txstatus->compl_hdr.flow_ring_id,
8232 				txstatus->cmn_hdr.if_id,
8233 				ts->low, ts->high, &parse);
8234 	}
8235 #endif /* DHD_TIMESYNC */
8236 
8237 #ifdef DHD_LBUF_AUDIT
8238 	PKTAUDIT(dhd->osh, pkt);
8239 #endif
8240 	DHD_FLOWRING_TXSTATUS_CNT_UPDATE(dhd->bus, txstatus->compl_hdr.flow_ring_id,
8241 		txstatus->tx_status);
8242 	DHD_RING_UNLOCK(ring->ring_lock, flags);
8243 #ifdef DHD_PKTTS
8244 	if (meta_data_buf.va) {
8245 		DMA_FREE_CONSISTENT(dhd->osh, meta_data_buf.va, meta_data_buf._alloced,
8246 			meta_data_buf.pa, meta_data_buf.dmah);
8247 	}
8248 #endif /* DHD_PKTTS */
8249 #ifdef DHD_MEM_STATS
8250 	DHD_MEM_STATS_LOCK(dhd->mem_stats_lock, flags);
8251 	DHD_MSGBUF_INFO(("%s txpath_mem: %llu PKTLEN: %d\n",
8252 		__FUNCTION__, dhd->txpath_mem, PKTLEN(dhd->osh, pkt)));
8253 	dhd->txpath_mem -= PKTLEN(dhd->osh, pkt);
8254 	DHD_MEM_STATS_UNLOCK(dhd->mem_stats_lock, flags);
8255 #endif /* DHD_MEM_STATS */
8256 	PKTFREE(dhd->osh, pkt, TRUE);
8257 
8258 	return;
8259 } /* dhd_prot_txstatus_process */
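
/*
 * Illustrative sketch (not part of the driver): how the v1 firmware tx
 * timestamps above are rebuilt before being pushed to netlink. The dongle
 * reports one absolute reference (tref) plus 16-bit deltas for the later
 * probe points; the host reconstructs absolute values and converts them to
 * network order. The helper name is hypothetical.
 */
#if 0
static void
dhd_example_build_fwts_v1(const struct metadata_txcmpl_v1 *meta, struct pktts_fwtx_v1 *out)
{
	uint32 tref = ltoh32(meta->tref);	/* absolute reference time */

	out->ts[0] = (uint32)htonl(tref);			/* probe point 1 */
	out->ts[1] = (uint32)htonl(tref + ltoh16(meta->d_t2));	/* probe point 2 */
	out->ts[2] = (uint32)htonl(tref + ltoh16(meta->d_t3));	/* probe point 3 */
	out->ts[3] = (uint32)htonl(tref + ltoh16(meta->d_t4));	/* probe point 4 */
}
#endif /* illustrative sketch */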
8260 
8261 /* FIXME: assuming that it is getting inline data related to the event data */
8262 /** called on MSG_TYPE_WL_EVENT message received from dongle */
8263 static void
8264 dhd_prot_event_process(dhd_pub_t *dhd, void *msg)
8265 {
8266 	wlevent_req_msg_t *evnt;
8267 	uint32 bufid;
8268 	uint16 buflen;
8269 	int ifidx = 0;
8270 	void* pkt;
8271 	dhd_prot_t *prot = dhd->prot;
8272 
8273 	/* Event complete header */
8274 	evnt = (wlevent_req_msg_t *)msg;
8275 	bufid = ltoh32(evnt->cmn_hdr.request_id);
8276 
8277 #if defined(DHD_PKTID_AUDIT_RING) && !defined(BCM_ROUTER_DHD)
8278 	DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, bufid,
8279 			DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
8280 #endif /* DHD_PKTID_AUDIT_RING && !BCM_ROUTER_DHD */
8281 
8282 	buflen = ltoh16(evnt->event_data_len);
8283 
8284 	ifidx = BCMMSGBUF_API_IFIDX(&evnt->cmn_hdr);
8285 	/* FIXME: check the event status */
8286 
8287 	/* Post another rxbuf to the device */
8288 	if (prot->cur_event_bufs_posted)
8289 		prot->cur_event_bufs_posted--;
8290 	dhd_msgbuf_rxbuf_post_event_bufs(dhd);
8291 
8292 	pkt = dhd_prot_packet_get(dhd, bufid, PKTTYPE_EVENT_RX, TRUE);
8293 
8294 	if (!pkt) {
8295 		DHD_ERROR(("%s: pkt is NULL for pktid %d\n", __FUNCTION__, bufid));
8296 		return;
8297 	}
8298 
8299 #if !defined(BCM_ROUTER_DHD)
8300 	/* FIXME: make sure the length is more than dataoffset */
8301 	/* DMA RX offset updated through shared area */
8302 	if (dhd->prot->rx_dataoffset)
8303 		PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
8304 #endif /* !BCM_ROUTER_DHD */
8305 
8306 	PKTSETLEN(dhd->osh, pkt, buflen);
8307 #ifdef DHD_LBUF_AUDIT
8308 	PKTAUDIT(dhd->osh, pkt);
8309 #endif
8310 	dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
8311 }
8312 
8313 #if !defined(BCM_ROUTER_DHD)
8314 /** called on MSG_TYPE_INFO_BUF_CMPLT message received from dongle */
8315 static void
8316 BCMFASTPATH(dhd_prot_process_infobuf_complete)(dhd_pub_t *dhd, void* buf)
8317 {
8318 	info_buf_resp_t *resp;
8319 	uint32 pktid;
8320 	uint16 buflen;
8321 	void * pkt;
8322 
8323 	resp = (info_buf_resp_t *)buf;
8324 	pktid = ltoh32(resp->cmn_hdr.request_id);
8325 	buflen = ltoh16(resp->info_data_len);
8326 
8327 #ifdef DHD_PKTID_AUDIT_RING
8328 	DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid,
8329 			DHD_DUPLICATE_FREE, buf, D2HRING_INFO_BUFCMPLT_ITEMSIZE);
8330 #endif /* DHD_PKTID_AUDIT_RING */
8331 
8332 	DHD_MSGBUF_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d, rx_dataoffset %d\n",
8333 		pktid, buflen, resp->cmn_hdr.flags, ltoh16(resp->seqnum),
8334 		dhd->prot->rx_dataoffset));
8335 
8336 	if (dhd->debug_buf_dest_support) {
8337 		if (resp->dest < DEBUG_BUF_DEST_MAX) {
8338 			dhd->debug_buf_dest_stat[resp->dest]++;
8339 		}
8340 	}
8341 
8342 	pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_INFO_RX, TRUE);
8343 	if (!pkt)
8344 		return;
8345 
8346 #if !defined(BCM_ROUTER_DHD)
8347 	/* FIXME: make sure the length is more than dataoffset */
8348 	/* DMA RX offset updated through shared area */
8349 	if (dhd->prot->rx_dataoffset)
8350 		PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
8351 #endif /* !BCM_ROUTER_DHD */
8352 
8353 	PKTSETLEN(dhd->osh, pkt, buflen);
8354 #ifdef DHD_LBUF_AUDIT
8355 	PKTAUDIT(dhd->osh, pkt);
8356 #endif
8357 	/* info ring "debug" data, which is not an 802.3 frame, is sent/hacked with a
8358 	 * special ifidx of -1.  This is just internal to dhd to get the data to
8359 	 * dhd_linux.c:dhd_rx_frame() from here (dhd_prot_process_infobuf_complete).
8360 	 */
8361 	dhd_bus_rx_frame(dhd->bus, pkt, DHD_DUMMY_INFO_IF /* ifidx HACK */, 1);
8362 }
8363 #endif /* !BCM_ROUTER_DHD */
8364 
8365 /** called on MSG_TYPE_SNAPSHOT_CMPLT message received from dongle */
8366 static void
8367 BCMFASTPATH(dhd_prot_process_snapshot_complete)(dhd_pub_t *dhd, void *buf)
8368 {
8369 #ifdef SNAPSHOT_UPLOAD
8370 	dhd_prot_t *prot = dhd->prot;
8371 	snapshot_resp_t *resp;
8372 	uint16 status;
8373 
8374 	resp = (snapshot_resp_t *)buf;
8375 
8376 	/* check completion status */
8377 	status = resp->compl_hdr.status;
8378 	if (status != BCMPCIE_SUCCESS) {
8379 		DHD_ERROR(("%s: failed: %s (%d)\n",
8380 			__FUNCTION__,
8381 			status == BCMPCIE_BT_DMA_ERR ? "DMA_ERR" :
8382 			status == BCMPCIE_BT_DMA_DESCR_FETCH_ERR ?
8383 				"DMA_DESCR_ERR" :
8384 			status == BCMPCIE_SNAPSHOT_ERR ? "SNAPSHOT_ERR" :
8385 			status == BCMPCIE_NOT_READY ? "NOT_READY" :
8386 			status == BCMPCIE_INVALID_DATA ? "INVALID_DATA" :
8387 			status == BCMPCIE_NO_RESPONSE ? "NO_RESPONSE" :
8388 			status == BCMPCIE_NO_CLOCK ? "NO_CLOCK" :
8389 			"", status));
8390 	}
8391 
8392 	/* length may be truncated if error occurred */
8393 	prot->snapshot_upload_len = ltoh32(resp->resp_len);
8394 	prot->snapshot_type = resp->type;
8395 	prot->snapshot_cmpl_pending = FALSE;
8396 
8397 	DHD_INFO(("%s id 0x%04x, phase 0x%02x, resp_len %d, type %d\n",
8398 		__FUNCTION__, ltoh32(resp->cmn_hdr.request_id),
8399 		resp->cmn_hdr.flags,
8400 		prot->snapshot_upload_len, prot->snapshot_type));
8401 #endif	/* SNAPSHOT_UPLOAD */
8402 }
8403 
8404 #ifdef BTLOG
8405 /** called on MSG_TYPE_BT_LOG_CMPLT message received from dongle */
8406 static void
8407 BCMFASTPATH(dhd_prot_process_btlog_complete)(dhd_pub_t *dhd, void* buf)
8408 {
8409 	info_buf_resp_t *resp;
8410 	uint32 pktid;
8411 	uint16 buflen;
8412 	void * pkt;
8413 
8414 	resp = (info_buf_resp_t *)buf;
8415 	pktid = ltoh32(resp->cmn_hdr.request_id);
8416 	buflen = ltoh16(resp->info_data_len);
8417 
8418 	/* check completion status */
8419 	if (resp->compl_hdr.status != BCMPCIE_SUCCESS) {
8420 		DHD_ERROR(("%s: failed completion status %d\n",
8421 			__FUNCTION__, resp->compl_hdr.status));
8422 		return;
8423 	}
8424 
8425 #ifdef DHD_PKTID_AUDIT_RING
8426 	DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid,
8427 			DHD_DUPLICATE_FREE, buf, D2HRING_INFO_BUFCMPLT_ITEMSIZE);
8428 #endif /* DHD_PKTID_AUDIT_RING */
8429 
8430 	DHD_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d, rx_dataoffset %d\n",
8431 		pktid, buflen, resp->cmn_hdr.flags, ltoh16(resp->seqnum),
8432 		dhd->prot->rx_dataoffset));
8433 
8434 	pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_INFO_RX, TRUE);
8435 
8436 	if (!pkt)
8437 		return;
8438 
8439 #if !defined(BCM_ROUTER_DHD)
8440 	/* FIXME: make sure the length is more than dataoffset */
8441 	/* DMA RX offset updated through shared area */
8442 	if (dhd->prot->rx_dataoffset)
8443 		PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
8444 #endif /* !BCM_ROUTER_DHD */
8445 
8446 	PKTSETLEN(dhd->osh, pkt, buflen);
8447 	PKTSETNEXT(dhd->osh, pkt, NULL);
8448 
8449 	dhd_bus_rx_bt_log(dhd->bus, pkt);
8450 }
8451 #endif	/* BTLOG */
8452 
8453 /** Stop protocol: sync w/dongle state. */
8454 void dhd_prot_stop(dhd_pub_t *dhd)
8455 {
8456 	ASSERT(dhd);
8457 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
8458 
8459 #if defined(NDIS)
8460 	if (dhd->prot) {
8461 		DHD_NATIVE_TO_PKTID_RESET(dhd, dhd->prot->pktid_ctrl_map);
8462 		DHD_NATIVE_TO_PKTID_RESET(dhd, dhd->prot->pktid_rx_map);
8463 		DHD_NATIVE_TO_PKTID_RESET(dhd, dhd->prot->pktid_tx_map);
8464 #if defined(IOCTLRESP_USE_CONSTMEM)
8465 		DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, dhd->prot->pktid_map_handle_ioctl);
8466 #endif /* IOCTLRESP_USE_CONSTMEM */
8467 	}
8468 #endif /* NDIS */
8469 }
8470 
8471 /* Add any protocol-specific data header.
8472  * Caller must reserve prot_hdrlen prepend space.
8473  */
8474 void
8475 BCMFASTPATH(dhd_prot_hdrpush)(dhd_pub_t *dhd, int ifidx, void *PKTBUF)
8476 {
8477 	return;
8478 }
8479 
8480 uint
8481 dhd_prot_hdrlen(dhd_pub_t *dhd, void *PKTBUF)
8482 {
8483 	return 0;
8484 }
8485 
8486 #define PKTBUF pktbuf
8487 
8488 /**
8489  * Called when a tx ethernet packet has been dequeued from a flow queue, and has to be inserted in
8490  * the corresponding flow ring.
8491  */
8492 int
8493 BCMFASTPATH(dhd_prot_txdata)(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx)
8494 {
8495 	unsigned long flags;
8496 	dhd_prot_t *prot = dhd->prot;
8497 	host_txbuf_post_t *txdesc = NULL;
8498 	dmaaddr_t pa, meta_pa;
8499 	uint8 *pktdata;
8500 	uint32 pktlen;
8501 	uint32 pktid;
8502 	uint8	prio;
8503 	uint16 flowid = 0;
8504 	uint16 alloced = 0;
8505 	uint16	headroom;
8506 	msgbuf_ring_t *ring;
8507 	flow_ring_table_t *flow_ring_table;
8508 	flow_ring_node_t *flow_ring_node;
8509 #if defined(BCMINTERNAL) && defined(LINUX)
8510 	void *pkt_to_free = NULL;
8511 #endif /* BCMINTERNAL && LINUX */
8512 #ifdef DHD_PKTTS
8513 	dhd_dma_buf_t	meta_data_buf;
8514 	uint16	meta_data_buf_len = dhd->pkt_metadata_buflen;
8515 	uint64 addr = 0;
8516 #endif /* DHD_PKTTS */
8517 	void *big_pktbuf = NULL;
8518 	uint8 dhd_udr = FALSE;
8519 	bool host_sfh_llc_reqd = dhd->host_sfhllc_supported;
8520 	bool llc_inserted = FALSE;
8521 
8522 	BCM_REFERENCE(llc_inserted);
8523 #ifdef PCIE_INB_DW
8524 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) {
8525 		DHD_ERROR(("failed to increment hostactive_devwake\n"));
8526 		return BCME_ERROR;
8527 	}
8528 #endif /* PCIE_INB_DW */
8529 
8530 	if (dhd->flow_ring_table == NULL) {
8531 		DHD_ERROR(("dhd flow_ring_table is NULL\n"));
8532 		goto fail;
8533 	}
8534 
8535 #ifdef DHD_PCIE_PKTID
8536 	if (!DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map)) {
8537 		if (dhd->prot->pktid_depleted_cnt == DHD_PKTID_DEPLETED_MAX_COUNT) {
8538 			DHD_ERROR(("%s: stop tx queue as pktid_depleted_cnt maxed\n",
8539 				__FUNCTION__));
8540 			prot->pktid_txq_stop_cnt++;
8541 			dhd_bus_stop_queue(dhd->bus);
8542 			dhd->prot->no_tx_resource = TRUE;
8543 		}
8544 		dhd->prot->pktid_depleted_cnt++;
8545 		goto fail;
8546 	} else {
8547 		dhd->prot->pktid_depleted_cnt = 0;
8548 	}
8549 #endif /* DHD_PCIE_PKTID */
8550 
8551 	if (dhd->dhd_induce_error == DHD_INDUCE_TX_BIG_PKT) {
8552 		if ((big_pktbuf = PKTGET(dhd->osh, DHD_FLOWRING_TX_BIG_PKT_SIZE, TRUE)) == NULL) {
8553 			DHD_ERROR(("%s:%d: PKTGET for txbuf failed\n", __FUNCTION__, __LINE__));
8554 			goto fail;
8555 		}
8556 
8557 		memset(PKTDATA(dhd->osh, big_pktbuf), 0xff, DHD_FLOWRING_TX_BIG_PKT_SIZE);
8558 		DHD_ERROR(("PKTBUF len = %d big_pktbuf len = %d\n", PKTLEN(dhd->osh, PKTBUF),
8559 				PKTLEN(dhd->osh, big_pktbuf)));
8560 		if (memcpy_s(PKTDATA(dhd->osh, big_pktbuf), DHD_FLOWRING_TX_BIG_PKT_SIZE,
8561 				PKTDATA(dhd->osh, PKTBUF), PKTLEN(dhd->osh, PKTBUF)) != BCME_OK) {
8562 			DHD_ERROR(("%s:%d: memcpy_s big_pktbuf failed\n", __FUNCTION__, __LINE__));
8563 			ASSERT(0);
8564 		}
8565 	}
8566 
8567 	flowid = DHD_PKT_GET_FLOWID(PKTBUF);
8568 	flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
8569 	flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
8570 
8571 	ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
8572 
8573 	/*
8574 	 * XXX:
8575 	 * JIRA SW4349-436:
8576 	 * Copying the TX buffer to an SKB that lives in the DMA zone
8577 	 * is done here. Previously this was done from dhd_start_xmit.
8578 	 * When the host is pumping heavy traffic to the dongle, the
8579 	 * queue backing the flow rings fills up and pins precious DMA
8580 	 * zone memory, leading the host to run out of memory in the
8581 	 * DMA zone. After this change the backup queue keeps holding
8582 	 * the pointers from the network stack, and the copy is done
8583 	 * only just before the physical address is placed into the
8584 	 * flow ring.
8585 	 */
8586 #if defined(BCMINTERNAL) && defined(LINUX)
8587 	if (osl_is_flag_set(dhd->osh, OSL_PHYS_MEM_LESS_THAN_16MB)) {
8588 		struct sk_buff *skb;
8589 		/*
8590 		 * We are about to add the Ethernet header and send out,
8591 		 * copy the skb here.
8592 		 */
8593 		skb = skb_copy(PKTBUF, GFP_DMA);
8594 		if (skb == NULL) {
8595 			/*
8596 			 * Memory allocation failed, the old packet can
8597 			 * live in the queue, return BCME_NORESOURCE so
8598 			 * the caller re-queues this packet
8599 			 */
8600 			DHD_ERROR(("%s: skb_copy(DMA) failed\n", __FUNCTION__));
8601 			goto fail;
8602 		}
8603 
8604 		/*
8605 		 * Now we have copied the SKB to GFP_DMA memory, make the
8606 		 * rest of the code operate on this new SKB. Hold on to
8607 		 * the original SKB. If we don't get the pkt id or flow ring
8608 		 * space we'll free the Zone memory and return "no resource"
8609 		 * so the caller would re-queue the original SKB.
8610 		 */
8611 		pkt_to_free = PKTBUF;
8612 		PKTBUF = skb;
8613 	}
8614 #endif	/* BCMINTERNAL && LINUX */
8615 
8616 	if (dhd->dhd_induce_error == DHD_INDUCE_TX_BIG_PKT && big_pktbuf) {
8617 		PKTFREE(dhd->osh, PKTBUF, TRUE);
8618 		PKTBUF = big_pktbuf;
8619 	}
8620 
8621 	DHD_RING_LOCK(ring->ring_lock, flags);
8622 
8623 	/* Create a unique 32-bit packet id */
8624 	pktid = DHD_NATIVE_TO_PKTID_RSV(dhd, dhd->prot->pktid_tx_map,
8625 		PKTBUF, PKTTYPE_DATA_TX);
8626 #if defined(DHD_PCIE_PKTID)
8627 	if (pktid == DHD_PKTID_INVALID) {
8628 		DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
8629 		/*
8630 		 * If we return error here, the caller would queue the packet
8631 		 * again. So we'll just free the skb allocated in DMA Zone.
8632 		 * Since we have not freed the original SKB yet the caller would
8633 		 * requeue the same.
8634 		 */
8635 		goto err_no_res_pktfree;
8636 	}
8637 #endif /* DHD_PCIE_PKTID */
8638 
8639 	/* Reserve space in the circular buffer */
8640 	txdesc = (host_txbuf_post_t *)
8641 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
8642 	if (txdesc == NULL) {
8643 		DHD_INFO(("%s:%d: HTOD Msgbuf Not available TxCount = %d\n",
8644 			__FUNCTION__, __LINE__, OSL_ATOMIC_READ(dhd->osh, &prot->active_tx_count)));
8645 		goto err_free_pktid;
8646 	}
8647 	txdesc->flags = 0;
8648 
8649 	/* Extract the data pointer and length information */
8650 	pktdata = PKTDATA(dhd->osh, PKTBUF);
8651 	pktlen  = PKTLEN(dhd->osh, PKTBUF);
8652 
8653 	/* TODO: XXX: re-look into dropped packets */
8654 	DHD_DBG_PKT_MON_TX(dhd, PKTBUF, pktid);
8655 
8656 	dhd_handle_pktdata(dhd, ifidx, PKTBUF, pktdata, pktid,
8657 		pktlen, NULL, &dhd_udr, TRUE, FALSE, TRUE);
8658 
8659 #if defined(BCMINTERNAL) && defined(LINUX)
8660 	/*
8661 	 * We have got all the resources, pktid and ring space
8662 	 * so we can safely free the original SKB here.
8663 	 */
8664 	if (osl_is_flag_set(dhd->osh, OSL_PHYS_MEM_LESS_THAN_16MB))
8665 		PKTCFREE(dhd->osh, pkt_to_free, FALSE);
8666 #endif	/* BCMINTERNAL && LINUX */
8667 
8668 	/* Ethernet header - contains ethertype field.
8669 	 * Copy before we cache flush packet using DMA_MAP.
8670 	 */
8671 	bcopy(pktdata, txdesc->txhdr, ETHER_HDR_LEN);
8672 
8673 #ifdef DHD_AWDL
8674 	/* the awdl ifidx will always have a non-zero value
8675 	 * if the awdl iface is created. This is because the
8676 	 * primary iface (usually eth1) will always have ifidx of 0.
8677 	 * Hence we can check for non-zero value of awdl ifidx to
8678 	 * see if awdl iface is created or not
8679 	 */
8680 	if (dhd->awdl_llc_enabled &&
8681 		dhd->awdl_ifidx && ifidx == dhd->awdl_ifidx) {
8682 		if (host_sfh_llc_reqd) {
8683 			/* if FW supports host sfh llc insertion
8684 			 * then BOTH sfh and llc need to be inserted
8685 			 * in which case the host LLC only path
8686 			 * in FW will not be exercised - which is the
8687 			 * objective of this feature. Hence in such a
8688 			 * case disable awdl llc insertion
8689 			 */
8690 			DHD_ERROR_RLMT(("%s: FW supports host sfh + llc, this is"
8691 				" incompatible with awdl llc insertion;"
8692 				" disable host sfh llc support in FW and retry\n",
8693 				__FUNCTION__));
8694 		} else {
8695 			if (dhd_ether_to_awdl_llc_hdr(dhd, (struct ether_header *)pktdata,
8696 				PKTBUF) == BCME_OK) {
8697 				llc_inserted = TRUE;
8698 				/* in work item change ether type to len by
8699 				 * re-copying the ether header
8700 				 */
8701 				memcpy_s(txdesc->txhdr, ETHER_HDR_LEN, PKTDATA(dhd->osh, PKTBUF),
8702 					ETHER_HDR_LEN);
8703 			} else {
8704 				goto err_rollback_idx;
8705 			}
8706 		}
8707 	}
8708 #endif /* DHD_AWDL */
8709 
8710 #ifdef HOST_SFH_LLC
8711 	if (host_sfh_llc_reqd) {
8712 		if (dhd_ether_to_8023_hdr(dhd->osh, (struct ether_header *)pktdata,
8713 				PKTBUF) == BCME_OK) {
8714 			/* adjust the data pointer and length information */
8715 			pktdata = PKTDATA(dhd->osh, PKTBUF);
8716 			pktlen  = PKTLEN(dhd->osh, PKTBUF);
8717 			txdesc->flags |= BCMPCIE_TXPOST_FLAGS_HOST_SFH_LLC;
8718 		} else {
8719 			goto err_rollback_idx;
8720 		}
8721 	} else
8722 #endif /* HOST_SFH_LLC */
8723 	{
8724 		/* Extract the ethernet header and adjust the data pointer and length */
8725 		pktlen = PKTLEN(dhd->osh, PKTBUF) - ETHER_HDR_LEN;
8726 		pktdata = PKTPULL(dhd->osh, PKTBUF, ETHER_HDR_LEN);
8727 	}
8728 
8729 	/* Map the data pointer to a DMA-able address */
8730 	pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, DMA_TX, PKTBUF, 0);
8731 
8732 	if (PHYSADDRISZERO(pa)) {
8733 		DHD_ERROR(("%s: Something really bad, unless 0 is "
8734 			"a valid phyaddr for pa\n", __FUNCTION__));
8735 		ASSERT(0);
8736 		/* XXX if ASSERT() doesn't work like as Android platform,
8737 		 * try to requeue the packet to the backup queue.
8738 		 */
8739 		goto err_rollback_idx;
8740 	}
8741 
8742 #ifdef DMAMAP_STATS
8743 	dhd->dma_stats.txdata++;
8744 	dhd->dma_stats.txdata_sz += pktlen;
8745 #endif /* DMAMAP_STATS */
8746 	/* No need to lock. Save the rest of the packet's metadata */
8747 	DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_tx_map, PKTBUF, pktid,
8748 	    pa, pktlen, DMA_TX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_TX);
8749 
8750 #ifdef TXP_FLUSH_NITEMS
8751 	if (ring->pend_items_count == 0)
8752 		ring->start_addr = (void *)txdesc;
8753 	ring->pend_items_count++;
8754 #endif
8755 #ifdef DHD_HMAPTEST
8756 	if (dhd->prot->hmaptest_tx_active == HMAPTEST_D11_TX_ACTIVE) {
8757 		/* scratch area */
8758 		dhd->prot->hmap_tx_buf_va = (char *)dhd->prot->hmaptest.mem.va
8759 			+ dhd->prot->hmaptest.offset;
8760 		/* replace pa with our pa for txbuf post only */
8761 		dhd->prot->hmap_tx_buf_len = pktlen;
8762 		if ((dhd->prot->hmap_tx_buf_va + dhd->prot->hmap_tx_buf_len) >
8763 			((char *)dhd->prot->hmaptest.mem.va + dhd->prot->hmaptest.mem.len)) {
8764 			DHD_ERROR(("hmaptest: ERROR Txpost outside HMAPTEST buffer\n"));
8765 			DHD_ERROR(("hmaptest: NOT Replacing Tx Buffer\n"));
8766 			dhd->prot->hmaptest_tx_active = HMAPTEST_D11_TX_INACTIVE;
8767 			dhd->prot->hmaptest.in_progress = FALSE;
8768 		} else {
8769 			/* copy pktdata to our va */
8770 			memcpy(dhd->prot->hmap_tx_buf_va, PKTDATA(dhd->osh, PKTBUF), pktlen);
8771 			pa = DMA_MAP(dhd->osh, dhd->prot->hmap_tx_buf_va,
8772 				dhd->prot->hmap_tx_buf_len, DMA_TX, PKTBUF, 0);
8773 
8774 			dhd->prot->hmap_tx_buf_pa = pa;
8775 			/* store pktid for later mapping in txcpl */
8776 			dhd->prot->hmaptest_tx_pktid = pktid;
8777 			dhd->prot->hmaptest_tx_active = HMAPTEST_D11_TX_POSTED;
8778 			DHD_ERROR(("hmaptest: d11read txpost scratch txbuf pktid=0x%08x\n", pktid));
8779 			DHD_ERROR(("hmaptest: d11read txpost txbuf va=0x%p pa.lo=0x%08x len=%d\n",
8780 				dhd->prot->hmap_tx_buf_va, (uint32)PHYSADDRLO(pa), pktlen));
8781 		}
8782 	}
8783 #endif /* DHD_HMAPTEST */
8784 
8785 #ifdef DHD_PKTTS
8786 	memset(&meta_data_buf, 0, sizeof(meta_data_buf));
8787 	if (dhd_get_pktts_enab(dhd) &&
8788 		dhd->pkt_metadata_buflen) {
8789 		/* Allocate memory for Meta data */
8790 		meta_data_buf.va = DMA_ALLOC_CONSISTENT(dhd->osh, meta_data_buf_len,
8791 			DMA_ALIGN_LEN, &meta_data_buf._alloced,
8792 			&meta_data_buf.pa, &meta_data_buf.dmah);
8793 
8794 		if (meta_data_buf.va == NULL) {
8795 			DHD_ERROR_RLMT(("%s: dhd_dma_buf_alloc failed \r\n", __FUNCTION__));
8796 			DHD_ERROR_RLMT((" ... Proceeding without metadata buffer \r\n"));
8797 		} else {
8798 			DHD_PKTID_SAVE_METADATA(dhd, dhd->prot->pktid_tx_map,
8799 				(void *)meta_data_buf.va,
8800 				meta_data_buf.pa,
8801 				(uint16)meta_data_buf._alloced,
8802 				meta_data_buf.dmah,
8803 				pktid);
8804 		}
8805 		memcpy(&addr, &meta_data_buf.pa, sizeof(meta_data_buf.pa));
8806 		DHD_TRACE(("Meta data Buffer VA: %p  PA: %llx dmah: %p\r\n",
8807 			meta_data_buf.va, addr, meta_data_buf.dmah));
8808 
8809 		txdesc->metadata_buf_addr.low = addr & (0xFFFFFFFF);
8810 		txdesc->metadata_buf_addr.high = (addr >> 32) & (0xFFFFFFFF);
8811 		txdesc->metadata_buf_len = meta_data_buf_len;
8812 	}
8813 #endif /* DHD_PKTTS */
8814 
8815 	/* Form the Tx descriptor message buffer */
8816 
8817 	/* Common message hdr */
8818 	txdesc->cmn_hdr.msg_type = MSG_TYPE_TX_POST;
8819 	txdesc->cmn_hdr.if_id = ifidx;
8820 	txdesc->cmn_hdr.flags = ring->current_phase;
8821 
8822 	txdesc->flags |= BCMPCIE_PKT_FLAGS_FRAME_802_3;
8823 	prio = (uint8)PKTPRIO(PKTBUF);
8824 
8825 #ifdef EXT_STA
8826 	txdesc->flags &= ~BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_MASK <<
8827 		BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_SHIFT;
8828 	txdesc->flags |= (WLPKTFLAG_EXEMPT_GET(WLPKTTAG(PKTBUF)) &
8829 		BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_MASK)
8830 		<< BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_SHIFT;
8831 #endif
8832 
8833 	txdesc->flags |= (prio & 0x7) << BCMPCIE_PKT_FLAGS_PRIO_SHIFT;
8834 	txdesc->seg_cnt = 1;
8835 
8836 	txdesc->data_len = htol16((uint16) pktlen);
8837 	txdesc->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
8838 	txdesc->data_buf_addr.low_addr  = htol32(PHYSADDRLO(pa));
8839 
8840 	if (!host_sfh_llc_reqd)
8841 	{
8842 		/* Move data pointer to keep ether header in local PKTBUF for later reference */
8843 		PKTPUSH(dhd->osh, PKTBUF, ETHER_HDR_LEN);
8844 	}
8845 
8846 	txdesc->ext_flags = 0;
8847 
8848 #ifdef DHD_TIMESYNC
8849 	txdesc->rate = 0;
8850 
8851 	if (!llc_inserted && dhd->prot->tx_ts_log_enabled) {
8852 		dhd_pkt_parse_t parse;
8853 
8854 		dhd_parse_proto(PKTDATA(dhd->osh, PKTBUF), &parse);
8855 
8856 		if (parse.proto == IP_PROT_ICMP) {
8857 			if (dhd->prot->no_retry)
8858 				txdesc->ext_flags = BCMPCIE_PKT_FLAGS_FRAME_NORETRY;
8859 			if (dhd->prot->no_aggr)
8860 				txdesc->ext_flags |= BCMPCIE_PKT_FLAGS_FRAME_NOAGGR;
8861 			if (dhd->prot->fixed_rate)
8862 				txdesc->ext_flags |= BCMPCIE_PKT_FLAGS_FRAME_UDR;
8863 		}
8864 	}
8865 #endif /* DHD_TIMESYNC */
8866 
8867 #ifdef DHD_SBN
8868 	if (dhd_udr) {
8869 		txdesc->ext_flags |= BCMPCIE_PKT_FLAGS_FRAME_UDR;
8870 	}
8871 #endif /* DHD_SBN */
8872 
8873 #ifdef DHD_TX_PROFILE
8874 	if (!llc_inserted &&
8875 		dhd->tx_profile_enab && dhd->num_profiles > 0)
8876 	{
8877 		uint8 offset;
8878 
8879 		for (offset = 0; offset < dhd->num_profiles; offset++) {
8880 			if (dhd_protocol_matches_profile((uint8 *)PKTDATA(dhd->osh, PKTBUF),
8881 				PKTLEN(dhd->osh, PKTBUF), &(dhd->protocol_filters[offset]),
8882 				host_sfh_llc_reqd)) {
8883 				/* mask so other reserved bits are not modified. */
8884 				txdesc->rate |=
8885 					(((uint8)dhd->protocol_filters[offset].profile_index) &
8886 					BCMPCIE_TXPOST_RATE_PROFILE_IDX_MASK);
8887 
8888 				/* so we can use the rate field for our purposes */
8889 				txdesc->rate |= BCMPCIE_TXPOST_RATE_EXT_USAGE;
8890 
8891 				break;
8892 			}
8893 		}
8894 	}
8895 #endif /* defined(DHD_TX_PROFILE) */
8896 
8897 	/* Handle Tx metadata */
8898 	headroom = (uint16)PKTHEADROOM(dhd->osh, PKTBUF);
8899 	if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset))
8900 		DHD_ERROR(("No headroom for Metadata tx %d %d\n",
8901 		prot->tx_metadata_offset, headroom));
8902 
8903 	if (prot->tx_metadata_offset && (headroom >= prot->tx_metadata_offset)) {
8904 		DHD_TRACE(("Metadata in tx %d\n", prot->tx_metadata_offset));
8905 
8906 		/* Adjust the data pointer to account for meta data in DMA_MAP */
8907 		PKTPUSH(dhd->osh, PKTBUF, prot->tx_metadata_offset);
8908 
8909 		meta_pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
8910 			prot->tx_metadata_offset, DMA_RX, PKTBUF, 0);
8911 
8912 		if (PHYSADDRISZERO(meta_pa)) {
8913 			/* Unmap the data pointer to a DMA-able address */
8914 			DMA_UNMAP(dhd->osh, pa, pktlen, DMA_TX, 0, DHD_DMAH_NULL);
8915 #ifdef TXP_FLUSH_NITEMS
8916 			/* update pend_items_count */
8917 			ring->pend_items_count--;
8918 #endif /* TXP_FLUSH_NITEMS */
8919 
8920 			DHD_ERROR(("%s: Something really bad, unless 0 is "
8921 				"a valid phyaddr for meta_pa\n", __FUNCTION__));
8922 			ASSERT(0);
8923 			/* XXX if ASSERT() doesn't work like as Android platform,
8924 			 * try to requeue the packet to the backup queue.
8925 			 */
8926 			goto err_rollback_idx;
8927 		}
8928 
8929 		/* Adjust the data pointer back to original value */
8930 		PKTPULL(dhd->osh, PKTBUF, prot->tx_metadata_offset);
8931 
8932 		txdesc->metadata_buf_len = prot->tx_metadata_offset;
8933 		txdesc->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(meta_pa));
8934 		txdesc->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(meta_pa));
8935 	} else {
8936 #ifdef DHD_HP2P
8937 		if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
8938 			dhd_update_hp2p_txdesc(dhd, txdesc);
8939 		} else
8940 #endif /* DHD_HP2P */
8941 #ifdef DHD_PKTTS
8942 		if (!dhd_get_pktts_enab(dhd) || !dhd->pkt_metadata_buflen) {
8943 #else
8944 		if (1) {
8945 #endif /* DHD_PKTTS */
8946 			txdesc->metadata_buf_len = htol16(0);
8947 			txdesc->metadata_buf_addr.high_addr = 0;
8948 			txdesc->metadata_buf_addr.low_addr = 0;
8949 		}
8950 	}
8951 
8952 #ifdef AGG_H2D_DB
8953 	OSL_ATOMIC_INC(dhd->osh, &ring->inflight);
8954 #endif /* AGG_H2D_DB */
8955 
8956 #ifdef DHD_PKTID_AUDIT_RING
8957 	DHD_PKTID_AUDIT(dhd, prot->pktid_tx_map, pktid, DHD_DUPLICATE_ALLOC);
8958 #endif /* DHD_PKTID_AUDIT_RING */
8959 
8960 	txdesc->cmn_hdr.request_id = htol32(pktid);
8961 
8962 	DHD_TRACE(("txpost: data_len %d, pktid 0x%04x\n", txdesc->data_len,
8963 		txdesc->cmn_hdr.request_id));
8964 
8965 #ifdef DHD_LBUF_AUDIT
8966 	PKTAUDIT(dhd->osh, PKTBUF);
8967 #endif
8968 
8969 	/* Update the write pointer in TCM & ring bell */
8970 #if defined(TXP_FLUSH_NITEMS)
8971 #if defined(DHD_HP2P)
8972 	if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
8973 		dhd_calc_hp2p_burst(dhd, ring, flowid);
8974 	} else
8975 #endif /* HP2P */
8976 	{
8977 		if ((ring->pend_items_count == prot->txp_threshold) ||
8978 				((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring))) {
8979 #ifdef AGG_H2D_DB
8980 			if (agg_h2d_db_enab) {
8981 				dhd_prot_txdata_aggr_db_write_flush(dhd, flowid);
8982 				if ((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring)) {
8983 					dhd_prot_aggregate_db_ring_door_bell(dhd, flowid, TRUE);
8984 				}
8985 			} else
8986 #endif /* AGG_H2D_DB */
8987 			{
8988 				dhd_prot_txdata_write_flush(dhd, flowid);
8989 			}
8990 
8991 		}
8992 	}
8993 #else
8994 	/* update ring's WR index and ring doorbell to dongle */
8995 	dhd_prot_ring_write_complete(dhd, ring, txdesc, 1);
8996 #endif /* TXP_FLUSH_NITEMS */
8997 
8998 #ifdef TX_STATUS_LATENCY_STATS
8999 	/* set the time when pkt is queued to flowring */
9000 	DHD_PKT_SET_QTIME(PKTBUF, OSL_SYSUPTIME_US());
9001 #elif defined(DHD_PKTTS)
9002 	if (dhd_get_pktts_enab(dhd) == TRUE) {
9003 		/* set the time when pkt is queued to flowring */
9004 		DHD_PKT_SET_QTIME(PKTBUF, OSL_SYSUPTIME_US());
9005 	}
9006 #endif /* TX_STATUS_LATENCY_STATS */
9007 
9008 	DHD_RING_UNLOCK(ring->ring_lock, flags);
9009 
9010 	OSL_ATOMIC_INC(dhd->osh, &prot->active_tx_count);
9011 
9012 	/*
9013 	 * Take a wake lock, do not sleep if we have at least one packet
9014 	 * to finish.
9015 	 */
9016 	DHD_TXFL_WAKE_LOCK_TIMEOUT(dhd, MAX_TX_TIMEOUT);
9017 
9018 #ifdef PCIE_INB_DW
9019 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
9020 #endif
9021 #ifdef TX_STATUS_LATENCY_STATS
9022 	flow_ring_node->flow_info.num_tx_pkts++;
9023 #endif /* TX_STATUS_LATENCY_STATS */
9024 	return BCME_OK;
9025 
9026 err_rollback_idx:
9027 	/* roll back write pointer for unprocessed message */
9028 	if (ring->wr == 0) {
9029 		ring->wr = ring->max_items - 1;
9030 	} else {
9031 		ring->wr--;
9032 		if (ring->wr == 0) {
9033 			DHD_INFO(("%s: flipping the phase now\n", ring->name));
9034 			ring->current_phase = ring->current_phase ?
9035 				0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
9036 		}
9037 	}
9038 
9039 err_free_pktid:
9040 #if defined(DHD_PCIE_PKTID)
9041 	{
9042 		void *dmah;
9043 		void *secdma;
9044 		/* Free up the PKTID. physaddr and pktlen will be garbage. */
9045 		DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid,
9046 			pa, pktlen, dmah, secdma, PKTTYPE_NO_CHECK);
9047 	}
9048 
9049 err_no_res_pktfree:
9050 #endif /* DHD_PCIE_PKTID */
9051 
9052 #if defined(BCMINTERNAL) && defined(LINUX)
9053 	if (osl_is_flag_set(dhd->osh, OSL_PHYS_MEM_LESS_THAN_16MB))
9054 		PKTCFREE(dhd->osh, PKTBUF, FALSE);
9055 #endif	/* BCMINTERNAL && LINUX */
9056 
9057 	DHD_RING_UNLOCK(ring->ring_lock, flags);
9058 
9059 fail:
9060 #ifdef PCIE_INB_DW
9061 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
9062 #endif
9063 	return BCME_NORESOURCE;
9064 } /* dhd_prot_txdata */
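
/*
 * Illustrative sketch (not part of the driver): the doorbell batching rule
 * applied in dhd_prot_txdata() when TXP_FLUSH_NITEMS is defined. Posted work
 * items are accumulated and only flushed to the dongle when the pending count
 * reaches the threshold or the descriptor just written occupies the last slot
 * of the ring. The helper name is hypothetical.
 */
#if 0
static bool
dhd_example_should_flush_txpost(dhd_prot_t *prot, msgbuf_ring_t *ring, void *txdesc)
{
	return (ring->pend_items_count == prot->txp_threshold) ||
		((uint8 *)txdesc == (uint8 *)DHD_RING_END_VA(ring));
}
#endif /* illustrative sketch */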
9065 
9066 #ifdef AGG_H2D_DB
9067 static void
9068 dhd_prot_txdata_aggr_db_write_flush(dhd_pub_t *dhd, uint16 flowid)
9069 {
9070 	flow_ring_table_t *flow_ring_table;
9071 	flow_ring_node_t *flow_ring_node;
9072 	msgbuf_ring_t *ring;
9073 
9074 	if (dhd->flow_ring_table == NULL) {
9075 		return;
9076 	}
9077 
9078 	flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
9079 	flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
9080 	ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
9081 
9082 	if (ring->pend_items_count) {
9083 		dhd_prot_agg_db_ring_write(dhd, ring, ring->start_addr,
9084 				ring->pend_items_count);
9085 		ring->pend_items_count = 0;
9086 		ring->start_addr = NULL;
9087 	}
9088 
9089 }
9090 #endif /* AGG_H2D_DB */
9091 
9092 /* called with a ring_lock */
9093 /** optimization to write "n" tx items at a time to ring */
9094 void
9095 BCMFASTPATH(dhd_prot_txdata_write_flush)(dhd_pub_t *dhd, uint16 flowid)
9096 {
9097 #ifdef TXP_FLUSH_NITEMS
9098 	flow_ring_table_t *flow_ring_table;
9099 	flow_ring_node_t *flow_ring_node;
9100 	msgbuf_ring_t *ring;
9101 
9102 	if (dhd->flow_ring_table == NULL) {
9103 		return;
9104 	}
9105 
9106 	flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
9107 	flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
9108 	ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
9109 
9110 	if (ring->pend_items_count) {
9111 		/* update ring's WR index and ring doorbell to dongle */
9112 		dhd_prot_ring_write_complete(dhd, ring, ring->start_addr,
9113 			ring->pend_items_count);
9114 		ring->pend_items_count = 0;
9115 		ring->start_addr = NULL;
9116 		dhd->prot->tx_h2d_db_cnt++;
9117 	}
9118 #endif /* TXP_FLUSH_NITEMS */
9119 }
9120 
9121 #undef PKTBUF	/* Only defined in the above routine */
9122 
9123 int
9124 BCMFASTPATH(dhd_prot_hdrpull)(dhd_pub_t *dhd, int *ifidx, void *pkt, uchar *buf, uint *len)
9125 {
9126 	return 0;
9127 }
9128 
9129 /** post a set of receive buffers to the dongle */
9130 static void
9131 BCMFASTPATH(dhd_prot_return_rxbuf)(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 pktid,
9132 	uint32 rxcnt)
9133 /* XXX function name could be more descriptive, eg dhd_prot_post_rxbufs */
9134 {
9135 	dhd_prot_t *prot = dhd->prot;
9136 
9137 	if (prot->rxbufpost >= rxcnt) {
9138 		prot->rxbufpost -= (uint16)rxcnt;
9139 	} else {
9140 		/* XXX: I have seen this assert hitting.
9141 		 * Will be removed once rootcaused.
9142 		 * Will be removed once root-caused.
9143 		/* ASSERT(0); */
9144 		prot->rxbufpost = 0;
9145 	}
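
	/*
	 * Example (hypothetical numbers): with max_rxbufpost = 256 and
	 * RXBUFPOST_THRESHOLD = 32, buffers are replenished once the posted
	 * count drops to 224 or below; otherwise only the updated read index
	 * is signalled to the dongle (when DMA index update is supported and
	 * IDMA is not active).
	 */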
9146 
9147 	if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) {
9148 		dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
9149 	} else if (dhd->dma_h2d_ring_upd_support && !IDMA_ACTIVE(dhd)) {
9150 		/* Ring DoorBell after processing the rx packets,
9151 		 * so that dongle will sync the DMA indices.
9152 		 */
9153 		dhd_prot_ring_doorbell(dhd, DHD_RDPTR_UPDATE_H2D_DB_MAGIC(ring));
9154 	}
9155 
9156 	return;
9157 }
9158 
9159 #ifdef DHD_HMAPTEST
9160 
9161 static void
9162 dhd_msgbuf_hmaptest_cmplt(dhd_pub_t *dhd)
9163 {
9164 	dhd_prot_t *prot = dhd->prot;
9165 	uint64 end_usec;
9166 	char *readbuf;
9167 	uint32 len = dhd->prot->hmaptest.len;
9168 	uint32 i;
9169 
9170 	end_usec = OSL_SYSUPTIME_US();
9171 	end_usec -= prot->hmaptest.start_usec;
9172 	DHD_ERROR(("hmaptest cmplt: %d bytes in %llu usec, %u kBps\n",
9173 		len, end_usec, (len * (1000 * 1000 / 1024) / (uint32)(end_usec + 1))));
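	/*
	 * Example (hypothetical numbers): 4096 bytes completed in 2000 us is
	 * reported as 4096 * (1000 * 1000 / 1024) / (2000 + 1) ~= 1998 kBps
	 * (roughly 2 MB/s).
	 */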
9174 
9175 	prot->hmaptest.in_progress = FALSE;
9176 	if (prot->hmaptest.accesstype == HMAPTEST_ACCESS_M2M) {
9177 			DHD_ERROR(("HMAPTEST_ACCESS_M2M\n"));
9178 	} else if (prot->hmaptest.accesstype == HMAPTEST_ACCESS_ARM) {
9179 			DHD_ERROR(("HMAPTEST_ACCESS_ARM\n"));
9180 	} else {
9181 		return;
9182 	}
9183 	readbuf = (char *)dhd->prot->hmaptest.mem.va + dhd->prot->hmaptest.offset;
9184 	OSL_CACHE_FLUSH(dhd->prot->hmaptest.mem.va,
9185 		dhd->prot->hmaptest.mem.len);
9186 	if (prot->hmaptest.is_write) {
9187 		DHD_ERROR(("hmaptest cmplt: FW has written at 0x%p\n", readbuf));
9188 		DHD_ERROR(("hmaptest cmplt: pattern = \n"));
9189 		len = ALIGN_SIZE(len, (sizeof(int32)));
9190 		for (i = 0; i < len; i += (sizeof(int32))) {
9191 			DHD_ERROR(("0x%08x\n", *(int *)(readbuf + i)));
9192 		}
9193 		DHD_ERROR(("\n\n"));
9194 	}
9195 
9196 }
9197 /* Program the HMAPTEST window and window config registers.
9198  * Reference HMAP implementation for OSes that can easily leverage it:
9199  * this function can be used as a reference for programming the HMAP
9200  * windows and enabling them, and can be called at init time or from
9201  * the hmap iovar.
9202  */
9203 static void
9204 dhdmsgbuf_set_hmaptest_windows(dhd_pub_t *dhd)
9205 {
9206 	uint32 nwindows = 0;
9207 	uint32 scratch_len;
9208 	uint64 scratch_lin, w1_start;
9209 	dmaaddr_t scratch_pa;
9210 	pcie_hmapwindow_t *hmapwindows; /* 8 windows 0-7 */
9211 	dhd_prot_t *prot = dhd->prot;
9212 	uint corerev = dhd->bus->sih->buscorerev;
9213 
9214 	scratch_pa = prot->hmaptest.mem.pa;
9215 	scratch_len = prot->hmaptest.mem.len;
9216 	scratch_lin  = (uint64)(PHYSADDRLO(scratch_pa) & 0xffffffff)
9217 		| (((uint64)PHYSADDRHI(scratch_pa)& 0xffffffff) << 32);
9218 	hmapwindows = (pcie_hmapwindow_t *)((uintptr_t)PCI_HMAP_WINDOW_BASE(corerev));
9219 	/* windows are 4kb aligned and window length is 512 byte aligned
9220 	 * window start ends with 0x1000 and window length ends with 0xe00
9221 	 * make the sandbox buffer 4kb aligned and size also 4kb aligned for hmap test
9222 	 * window0 = 0 - sandbox_start
9223 	 * window1 = sandbox_end + 1 - 0xffffffff
9224 	 * window2 = 0x100000000 - 0x1fffffe00
9225 	 * window 3 is programmed only for valid test cases
9226 	 * window3 = sandbox_start - sandbox_end
9227 	 */
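	/*
	 * Worked example (hypothetical addresses): with a 4 KB sandbox at
	 * physical 0x80400000, window0 spans [0x0, 0x80400000), window1 spans
	 * [0x80401000, 0x100000000) and window2 spans [0x100000000,
	 * 0x1fffffe00); the sandbox itself is reachable only when window3 is
	 * programmed for a valid test case.
	 */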
9228 	w1_start = scratch_lin + scratch_len;
9229 	DHD_ERROR(("hmaptest: window 0 offset lower=0x%p upper=0x%p length=0x%p\n",
9230 		&(hmapwindows[0].baseaddr_lo), &(hmapwindows[0].baseaddr_hi),
9231 		&(hmapwindows[0].windowlength)));
9232 	DHD_ERROR(("hmaptest: window 1 offset lower=0x%p upper=0x%p length=0x%p\n",
9233 		&(hmapwindows[1].baseaddr_lo), &(hmapwindows[1].baseaddr_hi),
9234 		&(hmapwindows[1].windowlength)));
9235 	DHD_ERROR(("hmaptest: window 2 offset lower=0x%p upper=0x%p length=0x%p\n",
9236 		&(hmapwindows[2].baseaddr_lo), &(hmapwindows[2].baseaddr_hi),
9237 		&(hmapwindows[2].windowlength)));
9238 	DHD_ERROR(("hmaptest: window 3 offset lower=0x%p upper=0x%p length=0x%p\n",
9239 		&(hmapwindows[3].baseaddr_lo), &(hmapwindows[3].baseaddr_hi),
9240 		&(hmapwindows[3].windowlength)));
9241 		DHD_ERROR(("hmaptest: w0 base_lo=0x%08x base_hi=0x%08x len=0x%0llx\n",
9242 			0, 0, (uint64) scratch_lin));
9243 		DHD_ERROR(("hmaptest: w1 base_lo=0x%08x base_hi=0x%08x len=0x%0llx\n",
9244 			(uint32)(w1_start & 0xffffffff),
9245 			(uint32)((w1_start >> 32) & 0xffffffff),
9246 			(uint64)(0x100000000 - w1_start)));
9247 		DHD_ERROR(("hmaptest: w2 base_lo=0x%08x base_hi=0x%08x len=0x%0llx\n",
9248 			0, 1, (uint64)0xfffffe00));
9249 		/* setting window0 */
9250 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9251 			(uintptr_t)(&(hmapwindows[0].baseaddr_lo)), ~0, 0x0);
9252 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9253 			(uintptr_t)(&(hmapwindows[0].baseaddr_hi)), ~0, 0x0);
9254 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9255 			(uintptr_t)(&(hmapwindows[0].windowlength)), ~0,
9256 			(uint64)scratch_lin);
9257 		/* setting window1 */
9258 		w1_start  = scratch_lin +  scratch_len;
9259 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9260 			(uintptr_t)(&(hmapwindows[1].baseaddr_lo)), ~0,
9261 			(uint32)(w1_start & 0xffffffff));
9262 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9263 			(uintptr_t)(&(hmapwindows[1].baseaddr_hi)), ~0,
9264 			(uint32)((w1_start >> 32) & 0xffffffff));
9265 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9266 			(uintptr_t)(&(hmapwindows[1].windowlength)), ~0,
9267 			(0x100000000 - w1_start));
9268 		/* setting window2 */
9269 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9270 			(uintptr_t)(&(hmapwindows[2].baseaddr_lo)), ~0, 0x0);
9271 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9272 			(uintptr_t)(&(hmapwindows[2].baseaddr_hi)), ~0, 0x1);
9273 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9274 			(uintptr_t)(&(hmapwindows[2].windowlength)), ~0, 0xfffffe00);
9275 		nwindows = 3;
9276 		/* program only windows 0-2 with section1 +section2 */
9277 		/* setting window config */
9278 		/* set bit 8:15 in windowconfig to enable n windows in order */
9279 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9280 			(uint)PCI_HMAP_WINDOW_CONFIG(corerev), ~0, (nwindows << 8));
9281 }
9282 
9283 /* Stop HMAPTEST. This function does not check corerev;
9284  * the caller has to ensure the corerev check.
9285  */
9286 int
9287 dhdmsgbuf_hmaptest_stop(dhd_pub_t *dhd)
9288 {
9289 	uint32 window_config, nwindows, i;
9290 	pcie_hmapwindow_t *hmapwindows; /* 8 windows 0-7 */
9291 	uint corerev = dhd->bus->sih->buscorerev;
9292 
9293 	hmapwindows = (pcie_hmapwindow_t *)((uintptr_t)PCI_HMAP_WINDOW_BASE(corerev));
9294 	dhd->prot->hmaptest.in_progress = FALSE;
9295 
9296 	/* Reference for the HMAP implementation:
9297 	 * disable the HMAP windows here. Since the windows were programmed
9298 	 * in the bus:hmap set call, they are cleared and disabled
9299 	 * in hmaptest_stop.
9300 	 */
9301 	DHD_ERROR(("hmap: disable hmap windows\n"));
9302 	window_config = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9303 		(uint)PCI_HMAP_WINDOW_CONFIG(corerev), 0, 0);
9304 	nwindows = (window_config & PCI_HMAP_NWINDOWS_MASK) >> PCI_HMAP_NWINDOWS_SHIFT;
9305 	si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9306 		(uint)PCI_HMAP_WINDOW_CONFIG(corerev), ~0, 0);
9307 	/* clear all windows */
9308 	for (i = 0; i < nwindows; i++) {
9309 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9310 			(uintptr_t)(&(hmapwindows[i].baseaddr_lo)), ~0, 0);
9311 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9312 			(uintptr_t)(&(hmapwindows[i].baseaddr_hi)), ~0, 0);
9313 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9314 			(uintptr_t)(&(hmapwindows[i].windowlength)), ~0, 0);
9315 	}
9316 
9317 	return BCME_OK;
9318 }
9319 
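/*
 * A minimal caller-side sketch (illustrative only, kept compiled out, not part
 * of the driver) of the corerev guard that dhdmsgbuf_hmaptest_stop() itself
 * does not perform; dhd_hmaptest_stop_guarded() is a hypothetical helper name.
 */
#if 0
static int
dhd_hmaptest_stop_guarded(dhd_pub_t *dhd)
{
	/* HMAP registers exist only on PCIe core revisions >= 24 */
	if (dhd->bus->sih->buscorerev < 24)
		return BCME_UNSUPPORTED;
	return dhdmsgbuf_hmaptest_stop(dhd);
}
#endif
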
9320 /* HMAP iovar intercept process */
9321 int
9322 dhdmsgbuf_hmap(dhd_pub_t *dhd, pcie_hmap_t *hmap_params, bool set)
9323 {
9324 
9325 	uint32 scratch_len;
9326 	uint64 scratch_lin, w1_start;
9327 	dmaaddr_t scratch_pa;
9328 	uint32 addr_lo, addr_hi, window_length, window_config, nwindows, i;
9329 	pcie_hmapwindow_t *hmapwindows; /* 8 windows 0-7 */
9330 
9331 	dhd_prot_t *prot = dhd->prot;
9332 	dhd_bus_t *bus = dhd->bus;
9333 	uint corerev = bus->sih->buscorerev;
9334 	scratch_pa = prot->hmaptest.mem.pa;
9335 	scratch_len = prot->hmaptest.mem.len;
9336 	scratch_lin  = (uint64)(PHYSADDRLO(scratch_pa) & 0xffffffff)
9337 		| (((uint64)PHYSADDRHI(scratch_pa)& 0xffffffff) << 32);
9338 	w1_start  = scratch_lin +  scratch_len;
9339 	DHD_ERROR(("HMAP:  pcicorerev = %d\n", corerev));
9340 
9341 	if (corerev < 24) {
9342 		DHD_ERROR(("HMAP not available on pci corerev = %d\n", corerev));
9343 		return BCME_UNSUPPORTED;
9344 	}
9345 	if (set) {
9346 		if (hmap_params->enable) {
9347 			dhdmsgbuf_set_hmaptest_windows(dhd);
9348 		} else {
9349 			dhdmsgbuf_hmaptest_stop(dhd); /* stop will clear all programmed windows */
9350 		}
9351 	}
9352 
9353 	OSL_CACHE_FLUSH(dhd->prot->hmaptest.mem.va,
9354 		dhd->prot->hmaptest.mem.len);
9355 
9356 	window_config = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9357 		(uint)PCI_HMAP_WINDOW_CONFIG(corerev), 0, 0);
9358 	nwindows = (window_config & PCI_HMAP_NWINDOWS_MASK) >> PCI_HMAP_NWINDOWS_SHIFT;
9359 	prot->hmap_enabled = nwindows ? TRUE : FALSE;
9360 
9361 	/* getting window config */
9362 	/* set bit 8:15 in windowconfig to enable n windows in order */
9363 	DHD_ERROR(("hmap: hmap status = %s\n", (prot->hmap_enabled ? "Enabled" : "Disabled")));
9364 	DHD_ERROR(("hmap: window config = 0x%08x\n", window_config));
9365 	DHD_ERROR(("hmap: Windows\n"));
9366 
9367 	hmapwindows = (pcie_hmapwindow_t *)((uintptr_t)PCI_HMAP_WINDOW_BASE(corerev));
9368 	/* getting windows */
9369 	if (nwindows > 8)
9370 		return BCME_ERROR;
9371 	for (i = 0; i < nwindows; i++) {
9372 		addr_lo = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9373 			(uintptr_t)(&(hmapwindows[i].baseaddr_lo)), 0, 0);
9374 		addr_hi = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9375 			(uintptr_t)(&(hmapwindows[i].baseaddr_hi)), 0, 0);
9376 		window_length = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9377 			(uintptr_t)(&(hmapwindows[i].windowlength)), 0, 0);
9378 
9379 		DHD_ERROR(("hmap: window %d address lower=0x%08x upper=0x%08x length=0x%08x\n",
9380 			i, addr_lo, addr_hi, window_length));
9381 	}
9382 	addr_hi = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9383 		(uint)(PCI_HMAP_VIOLATION_ADDR_U(corerev)), 0, 0);
9384 	addr_lo = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9385 		(uint)(PCI_HMAP_VIOLATION_ADDR_L(corerev)), 0, 0);
9386 	window_length = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
9387 		(uint)(PCI_HMAP_VIOLATION_INFO(corerev)), 0, 0);
9388 	DHD_ERROR(("hmap: violation regs\n"));
9389 	DHD_ERROR(("hmap: violationaddr_hi =0x%08x\n", addr_hi));
9390 	DHD_ERROR(("hmap: violationaddr_lo =0x%08x\n", addr_lo));
9391 	DHD_ERROR(("hmap: violation_info   =0x%08x\n", window_length));
9392 	DHD_ERROR(("hmap: Buffer allocated for HMAPTEST Start=0x%0llx len =0x%08x End =0x%0llx\n",
9393 		(uint64) scratch_lin, scratch_len, (uint64) w1_start));
9394 
9395 	return BCME_OK;
9396 }
9397 
9398 /* hmaptest iovar processing.
9399  * This iovar triggers HMAPTEST with the given params
9400  * on chips that support HMAP.
9401  * DHD programs the HMAP window registers with host addresses here.
9402  */
9403 int
9404 dhdmsgbuf_hmaptest(dhd_pub_t *dhd, pcie_hmaptest_t *hmaptest_params)
9405 {
9406 
9407 	dhd_prot_t *prot = dhd->prot;
9408 	int ret = BCME_OK;
9409 	uint32 offset = 0;
9410 	uint64 scratch_lin;
9411 	dhd_bus_t *bus = dhd->bus;
9412 	uint corerev = bus->sih->buscorerev;
9413 
9414 	if (prot->hmaptest.in_progress) {
9415 		DHD_ERROR(("HMAPTEST already running. Try again.\n"));
9416 		return BCME_BUSY;
9417 	}
9418 
9419 	if (corerev < 24) {
9420 		DHD_ERROR(("HMAP not available on pci corerev = %d\n", corerev));
9421 		return BCME_UNSUPPORTED;
9422 	}
9423 
9424 	prot->hmaptest.in_progress = TRUE;
9425 	prot->hmaptest.accesstype = hmaptest_params->accesstype;
9426 	prot->hmaptest.is_write = hmaptest_params->is_write;
9427 	prot->hmaptest.len = hmaptest_params->xfer_len;
9428 	prot->hmaptest.offset = hmaptest_params->host_offset;
9429 	offset = prot->hmaptest.offset;
9430 
9431 	DHD_ERROR(("hmaptest: is_write =%d accesstype=%d offset =%d len=%d value=0x%08x\n",
9432 		prot->hmaptest.is_write, prot->hmaptest.accesstype,
9433 		offset, prot->hmaptest.len, hmaptest_params->value));
9434 
9435 	DHD_ERROR(("hmaptest  dma_lo=0x%08x hi=0x%08x pa\n",
9436 		(uint32)PHYSADDRLO(prot->hmaptest.mem.pa),
9437 		(uint32)PHYSADDRHI(prot->hmaptest.mem.pa)));
9438 
9439 	if (prot->hmaptest.accesstype == HMAPTEST_ACCESS_D11) {
9440 		if (prot->hmaptest.is_write) {
9441 			/* if d11 is writing then post rxbuf from scratch area */
9442 			dhd->prot->hmaptest_rx_active = HMAPTEST_D11_RX_ACTIVE;
9443 		} else {
9444 			/* if d11 is reading then post txbuf from scratch area */
9445 			dhd->prot->hmaptest_tx_active = HMAPTEST_D11_TX_ACTIVE;
9446 		}
9447 
9448 	} else {
9449 		uint32 pattern = 0xdeadbeef;
9450 		uint32 i;
9451 		uint32 maxbuflen = MIN(prot->hmaptest.len, (PKTBUFSZ));
9452 		char *fillbuf = (char *)dhd->prot->hmaptest.mem.va
9453 			+ offset;
9454 		if ((fillbuf + maxbuflen) >
9455 			((char *)dhd->prot->hmaptest.mem.va + dhd->prot->hmaptest.mem.len)) {
9456 			DHD_ERROR(("hmaptest: M2m/ARM ERROR offset + len outside buffer\n"));
9457 			dhd->prot->hmaptest.in_progress = FALSE;
9458 			return BCME_BADARG;
9459 		}
9460 
9461 		if (prot->hmaptest.accesstype == HMAPTEST_ACCESS_M2M) {
9462 			DHD_ERROR(("HMAPTEST_ACCESS_M2M\n"));
9463 		} else if (prot->hmaptest.accesstype == HMAPTEST_ACCESS_ARM) {
9464 			DHD_ERROR(("HMAPTEST_ACCESS_ARM\n"));
9465 		} else {
9466 			prot->hmaptest.in_progress = FALSE;
9467 			DHD_ERROR(("hmaptest: accesstype error\n"));
9468 			return BCME_BADARG;
9469 		}
9470 
9471 		/* fill a pattern at offset */
9472 		maxbuflen = ALIGN_SIZE(maxbuflen, (sizeof(uint32)));
9473 		memset(fillbuf, 0, maxbuflen);
9474 		DHD_ERROR(("hmaptest: dhd write pattern at addr=0x%p\n",
9475 			fillbuf));
9476 		DHD_ERROR(("pattern = %08x, %u times",
9477 			pattern, (uint32)(maxbuflen / sizeof(uint32))));
9478 		for (i = 0; i < maxbuflen; i += sizeof(uint32)) {
9479 			*(uint32 *)(fillbuf + i) = pattern;
9480 		}
9481 		OSL_CACHE_FLUSH(dhd->prot->hmaptest.mem.va,
9482 			dhd->prot->hmaptest.mem.len);
9483 		DHD_ERROR(("\n\n"));
9484 
9485 	}
9486 
9487 	/*
9488 	 * Do not calculate address from scratch buffer + offset,
9489 	 * if user supplied absolute address
9490 	 */
9491 	if (hmaptest_params->host_addr_lo || hmaptest_params->host_addr_hi) {
9492 		if (prot->hmaptest.accesstype == HMAPTEST_ACCESS_D11) {
9493 			DHD_ERROR(("hmaptest: accesstype D11 does not support absolute addr\n"));
			/* clear in_progress so that a later hmaptest attempt is not rejected as busy */
			prot->hmaptest.in_progress = FALSE;
9494 			return BCME_UNSUPPORTED;
9495 		}
9496 	} else {
9497 		scratch_lin  = (uint64)(PHYSADDRLO(prot->hmaptest.mem.pa) & 0xffffffff)
9498 			| (((uint64)PHYSADDRHI(prot->hmaptest.mem.pa) & 0xffffffff) << 32);
9499 		scratch_lin += offset;
9500 		hmaptest_params->host_addr_lo = htol32((uint32)(scratch_lin & 0xffffffff));
9501 		hmaptest_params->host_addr_hi = htol32((uint32)((scratch_lin >> 32) & 0xffffffff));
9502 	}
9503 
9504 	DHD_INFO(("HMAPTEST Started...\n"));
9505 	prot->hmaptest.start_usec = OSL_SYSUPTIME_US();
9506 	return ret;
9507 
9508 }
9509 
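/*
 * Call-flow note: this function is reached from dhd_prot_wlioctl_intercept()
 * when the "bus:hmaptest" iovar is set, and the matching completion handler
 * (dhd_msgbuf_hmaptest_cmplt()) is invoked from dhd_prot_wl_ioctl_ret_intercept()
 * once the ioctl returns from the dongle.
 */
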
9510 #endif /* DHD_HMAPTEST */
9511 
9512 /* called before an ioctl is sent to the dongle */
9513 static void
9514 dhd_prot_wlioctl_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf)
9515 {
9516 	dhd_prot_t *prot = dhd->prot;
9517 	int slen = 0;
9518 
9519 	if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "pcie_bus_tput")) {
9520 		pcie_bus_tput_params_t *tput_params;
9521 
9522 		slen = strlen("pcie_bus_tput") + 1;
9523 		tput_params = (pcie_bus_tput_params_t*)((char *)buf + slen);
9524 		bcopy(&prot->host_bus_throughput_buf.pa, &tput_params->host_buf_addr,
9525 			sizeof(tput_params->host_buf_addr));
9526 		tput_params->host_buf_len = DHD_BUS_TPUT_BUF_LEN;
9527 	}
9528 
9529 #ifdef DHD_HMAPTEST
9530 	if (buf != NULL && !strcmp(buf, "bus:hmap")) {
9531 		pcie_hmap_t *hmap_params;
9532 		slen = strlen("bus:hmap") + 1;
9533 		hmap_params = (pcie_hmap_t*)((char *)buf + slen);
9534 		dhdmsgbuf_hmap(dhd, hmap_params, (ioc->cmd == WLC_SET_VAR));
9535 	}
9536 
9537 	if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "bus:hmaptest")) {
9538 		pcie_hmaptest_t *hmaptest_params;
9539 
9540 		slen = strlen("bus:hmaptest") + 1;
9541 		hmaptest_params = (pcie_hmaptest_t*)((char *)buf + slen);
9542 		dhdmsgbuf_hmaptest(dhd, hmaptest_params);
9543 	}
9544 #endif /* DHD_HMAPTEST */
9545 }
9546 
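/*
 * The intercepts above rely on the standard iovar buffer layout: a NUL
 * terminated iovar name followed immediately by its parameter block. The
 * compiled-out sketch below (illustrative only; dhd_iovar_payload() is a
 * hypothetical helper, not a driver API) shows how the payload pointer is
 * derived from that layout, the same way slen is used above.
 */
#if 0
static void *
dhd_iovar_payload(void *buf, const char *name)
{
	/* params start right after the NUL terminated iovar name */
	uint slen = strlen(name) + 1;
	return (void *)((char *)buf + slen);
}
#endif
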
9547 /* called after an ioctl returns from dongle */
9548 static void
9549 dhd_prot_wl_ioctl_ret_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf,
9550 	int ifidx, int ret, int len)
9551 {
9552 
9553 #ifdef DHD_HMAPTEST
9554 	if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "bus:hmaptest")) {
9555 		dhd_msgbuf_hmaptest_cmplt(dhd);
9556 	}
9557 #endif /* DHD_HMAPTEST */
9558 
9559 	if (!ret && ioc->cmd == WLC_SET_VAR && buf != NULL) {
9560 		int slen;
9561 		/* Intercept the wme_dp ioctl here */
9562 		if (!strcmp(buf, "wme_dp")) {
9563 			int val = 0;
9564 			slen = strlen("wme_dp") + 1;
9565 			if (len >= (int)(slen + sizeof(int)))
9566 				bcopy(((char *)buf + slen), &val, sizeof(int));
9567 			dhd->wme_dp = (uint8) ltoh32(val);
9568 		}
9569 
9570 #ifdef DHD_AWDL
9571 		/* Intercept the awdl_peer_op ioctl here */
9572 		if (!strcmp(buf, "awdl_peer_op")) {
9573 			slen = strlen("awdl_peer_op") + 1;
9574 			dhd_awdl_peer_op(dhd, (uint8)ifidx, ((char *)buf + slen), len - slen);
9575 		}
9576 		/* Intercept the awdl ioctl here, delete flow rings if awdl is
9577 		 * disabled
9578 		 */
9579 		if (!strcmp(buf, "awdl")) {
9580 			int val = 0;
9581 			slen = strlen("awdl") + 1;
9582 			if (len >= (int)(slen + sizeof(int))) {
9583 				bcopy(((char *)buf + slen), &val, sizeof(int));
9584 				val = ltoh32(val);
9585 				if (val == TRUE) {
9586 					/**
9587 					 * Though we update the link status when we receive
9588 					 * WLC_E_LINK from the dongle, that is not always guaranteed.
9589 					 * So also intercept the awdl command fired from the app
9590 					 * to update the status.
9591 					 */
9592 					dhd_update_interface_link_status(dhd, (uint8)ifidx, TRUE);
9593 #if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
9594 					/* reset AWDL stats data structures when AWDL is enabled */
9595 					dhd_clear_awdl_stats(dhd);
9596 #endif /* DHD_AWDL && AWDL_SLOT_STATS */
9597 				} else if (val == FALSE) {
9598 					dhd_update_interface_link_status(dhd, (uint8)ifidx, FALSE);
9599 					dhd_del_all_sta(dhd, (uint8)ifidx);
9600 					dhd_awdl_peer_op(dhd, (uint8)ifidx, NULL, 0);
9601 
9602 				}
9603 			}
9604 
9605 		}
9606 
9607 		/* Store the awdl min extension count and presence mode values
9608 		 * set by the user; the same values will be inserted in the LLC
9609 		 * header of each tx packet on the awdl interface.
9610 		 */
9611 		slen = strlen("awdl_extcounts");
9612 		if (!strncmp(buf, "awdl_extcounts", slen)) {
9613 			awdl_extcount_t *extcnt = NULL;
9614 			slen = slen + 1;
9615 			if ((len - slen) >= sizeof(*extcnt)) {
9616 				extcnt = (awdl_extcount_t *)((char *)buf + slen);
9617 				dhd->awdl_minext = extcnt->minExt;
9618 			}
9619 		}
9620 
9621 		slen = strlen("awdl_presencemode");
9622 		if (!strncmp(buf, "awdl_presencemode", slen)) {
9623 			slen = slen + 1;
9624 			if ((len - slen) >= sizeof(uint8)) {
9625 				dhd->awdl_presmode = *((uint8 *)((char *)buf + slen));
9626 			}
9627 		}
9628 #endif /* DHD_AWDL */
9629 	}
9630 
9631 }
9632 
9633 #ifdef DHD_PM_CONTROL_FROM_FILE
9634 extern bool g_pm_control;
9635 #endif /* DHD_PM_CONTROL_FROM_FILE */
9636 
9637 /** Use protocol to issue ioctl to dongle. Only one ioctl may be in transit. */
9638 int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len)
9639 {
9640 	int ret = -1;
9641 	uint8 action;
9642 
9643 	if (dhd->bus->is_linkdown) {
9644 		DHD_ERROR_RLMT(("%s : PCIe link is down. we have nothing to do\n", __FUNCTION__));
9645 		goto done;
9646 	}
9647 
9648 	if (dhd_query_bus_erros(dhd)) {
9649 		DHD_ERROR_RLMT(("%s : some BUS error. we have nothing to do\n", __FUNCTION__));
9650 		goto done;
9651 	}
9652 
9653 	if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) {
9654 		DHD_ERROR_RLMT(("%s : bus is down. we have nothing to do -"
9655 			" bus state: %d, sent hang: %d\n", __FUNCTION__,
9656 			dhd->busstate, dhd->hang_was_sent));
9657 		goto done;
9658 	}
9659 
9660 	if (dhd->busstate == DHD_BUS_SUSPEND) {
9661 		DHD_ERROR(("%s : bus is suspended\n", __FUNCTION__));
9662 		goto done;
9663 	}
9664 
9665 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
9666 
9667 #ifdef DHD_PCIE_REG_ACCESS
9668 #ifdef BOARD_HIKEY
9669 #ifndef PCIE_LNK_SPEED_GEN1
9670 #define PCIE_LNK_SPEED_GEN1		0x1
9671 #endif
9672 	/* BUG_ON if link speed is GEN1 in Hikey for 4389B0 */
9673 	if (dhd->bus->sih->buscorerev == 72) {
9674 		if (dhd_get_pcie_linkspeed(dhd) == PCIE_LNK_SPEED_GEN1) {
9675 			DHD_ERROR(("%s: ******* Link Speed is GEN1 *********\n", __FUNCTION__));
9676 			BUG_ON(1);
9677 		}
9678 	}
9679 #endif /* BOARD_HIKEY */
9680 #endif /* DHD_PCIE_REG_ACCESS */
9681 
9682 	if (ioc->cmd == WLC_SET_PM) {
9683 #ifdef DHD_PM_CONTROL_FROM_FILE
9684 		if (g_pm_control == TRUE) {
9685 			DHD_ERROR(("%s: SET PM ignored!(Requested:%d)\n",
9686 				__FUNCTION__, buf ? *(char *)buf : 0));
9687 			goto done;
9688 		}
9689 #endif /* DHD_PM_CONTROL_FROM_FILE */
9690 #ifdef DHD_PM_OVERRIDE
9691 		{
9692 			extern bool g_pm_override;
9693 			if (g_pm_override == TRUE) {
9694 				DHD_ERROR(("%s: PM override SET PM ignored!(Requested:%d)\n",
9695 					__FUNCTION__, buf ? *(char *)buf : 0));
9696 				goto done;
9697 			}
9698 		}
9699 #endif /* DHD_PM_OVERRIDE */
9700 		DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__, buf ? *(char *)buf : 0));
9701 	}
9702 
9703 	ASSERT(len <= WLC_IOCTL_MAXLEN);
9704 
9705 	if (len > WLC_IOCTL_MAXLEN)
9706 		goto done;
9707 
9708 	action = ioc->set;
9709 
9710 	dhd_prot_wlioctl_intercept(dhd, ioc, buf);
9711 
9712 #if defined(EXT_STA)
9713 	wl_dbglog_ioctl_add(ioc, len, NULL);
9714 #endif
9715 	if (action & WL_IOCTL_ACTION_SET) {
9716 		ret = dhd_msgbuf_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
9717 	} else {
9718 		ret = dhd_msgbuf_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
9719 		if (ret > 0)
9720 			ioc->used = ret;
9721 	}
9722 
9723 	/* Too many programs assume ioctl() returns 0 on success */
9724 	if (ret >= 0) {
9725 		ret = 0;
9726 	} else {
9727 #ifndef DETAIL_DEBUG_LOG_FOR_IOCTL
9728 		DHD_INFO(("%s: status ret value is %d \n", __FUNCTION__, ret));
9729 #endif /* !DETAIL_DEBUG_LOG_FOR_IOCTL */
9730 		dhd->dongle_error = ret;
9731 	}
9732 
9733 	dhd_prot_wl_ioctl_ret_intercept(dhd, ioc, buf, ifidx, ret, len);
9734 
9735 done:
9736 	return ret;
9737 
9738 } /* dhd_prot_ioctl */
9739 
9740 /** test / loopback */
9741 
9742 /*
9743  * XXX: This will fail with new PCIe Split header Full Dongle using fixed
9744  * sized messages in control submission ring. We seem to be sending the lpbk
9745  * data via the control message, wherein the lpbk data may be larger than 1
9746  * control message that is being committed.
9747  */
9748 int
9749 dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len)
9750 {
9751 	unsigned long flags;
9752 	dhd_prot_t *prot = dhd->prot;
9753 	uint16 alloced = 0;
9754 
9755 	ioct_reqst_hdr_t *ioct_rqst;
9756 
9757 	uint16 hdrlen = sizeof(ioct_reqst_hdr_t);
9758 	uint16 msglen = len + hdrlen;
9759 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
9760 
9761 	msglen = ALIGN_SIZE(msglen, DMA_ALIGN_LEN);
9762 	msglen = LIMIT_TO_MAX(msglen, MSGBUF_MAX_MSG_SIZE);
9763 
9764 #ifdef PCIE_INB_DW
9765 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
9766 		return BCME_ERROR;
9767 #endif /* PCIE_INB_DW */
9768 
9769 	DHD_RING_LOCK(ring->ring_lock, flags);
9770 
9771 	ioct_rqst = (ioct_reqst_hdr_t *)
9772 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
9773 
9774 	if (ioct_rqst == NULL) {
9775 		DHD_RING_UNLOCK(ring->ring_lock, flags);
9776 #ifdef PCIE_INB_DW
9777 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
9778 #endif
9779 		return 0;
9780 	}
9781 
9782 	{
9783 		uint8 *ptr;
9784 		uint16 i;
9785 
9786 		ptr = (uint8 *)ioct_rqst; /* XXX: failure!!! */
9787 		for (i = 0; i < msglen; i++) {
9788 			ptr[i] = i % 256;
9789 		}
9790 	}
9791 
9792 	/* Common msg buf hdr */
9793 	ioct_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
9794 	ring->seqnum++;
9795 
9796 	ioct_rqst->msg.msg_type = MSG_TYPE_LOOPBACK;
9797 	ioct_rqst->msg.if_id = 0;
9798 	ioct_rqst->msg.flags = ring->current_phase;
9799 
9800 	bcm_print_bytes("LPBK REQ: ", (uint8 *)ioct_rqst, msglen);
9801 
9802 	/* update ring's WR index and ring doorbell to dongle */
9803 	dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
9804 
9805 	DHD_RING_UNLOCK(ring->ring_lock, flags);
9806 
9807 #ifdef PCIE_INB_DW
9808 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
9809 #endif
9810 
9811 	return 0;
9812 }
9813 
9814 /** test / loopback */
9815 void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dmaxfer)
9816 {
9817 	if (dmaxfer == NULL)
9818 		return;
9819 
9820 	dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
9821 	dhd_dma_buf_free(dhd, &dmaxfer->dstmem);
9822 }
9823 
9824 /** test / loopback */
9825 int
9826 dhd_prepare_schedule_dmaxfer_free(dhd_pub_t *dhdp)
9827 {
9828 	dhd_prot_t *prot = dhdp->prot;
9829 	dhd_dmaxfer_t *dmaxfer = &prot->dmaxfer;
9830 	dmaxref_mem_map_t *dmap = NULL;
9831 
9832 	dmap = MALLOCZ(dhdp->osh, sizeof(dmaxref_mem_map_t));
9833 	if (!dmap) {
9834 		DHD_ERROR(("%s: dmap alloc failed\n", __FUNCTION__));
9835 		goto mem_alloc_fail;
9836 	}
9837 	dmap->srcmem = &(dmaxfer->srcmem);
9838 	dmap->dstmem = &(dmaxfer->dstmem);
9839 
9840 	DMAXFER_FREE(dhdp, dmap);
9841 	return BCME_OK;
9842 
9843 mem_alloc_fail:
9844 	if (dmap) {
9845 		MFREE(dhdp->osh, dmap, sizeof(dmaxref_mem_map_t));
9846 	}
9847 	return BCME_NOMEM;
9848 } /* dhd_prepare_schedule_dmaxfer_free */
9849 
9850 /** test / loopback */
9851 void
9852 dmaxfer_free_prev_dmaaddr(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap)
9853 {
9854 
9855 	dhd_dma_buf_free(dhdp, dmmap->srcmem);
9856 	dhd_dma_buf_free(dhdp, dmmap->dstmem);
9857 
9858 	MFREE(dhdp->osh, dmmap, sizeof(dmaxref_mem_map_t));
9859 
9860 	dhdp->bus->dmaxfer_complete = TRUE;
9861 	dhd_os_dmaxfer_wake(dhdp);
9862 } /* dmaxfer_free_prev_dmaaddr */
9863 
9864 /** test / loopback */
9865 int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len,
9866 	uint srcdelay, uint destdelay, dhd_dmaxfer_t *dmaxfer)
9867 {
9868 	uint i = 0, j = 0;
9869 	if (!dmaxfer)
9870 		return BCME_ERROR;
9871 
9872 	/* First free up existing buffers */
9873 	dmaxfer_free_dmaaddr(dhd, dmaxfer);
9874 
9875 	if (dhd_dma_buf_alloc(dhd, &dmaxfer->srcmem, len)) {
9876 		return BCME_NOMEM;
9877 	}
9878 
9879 	if (dhd_dma_buf_alloc(dhd, &dmaxfer->dstmem, len + 8)) {
9880 		dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
9881 		return BCME_NOMEM;
9882 	}
9883 
9884 	dmaxfer->len = len;
9885 
9886 	/* Populate source with a pattern like below
9887 	 * 0x00000000
9888 	 * 0x01010101
9889 	 * 0x02020202
9890 	 * 0x03030303
9891 	 * 0x04040404
9892 	 * 0x05050505
9893 	 * ...
9894 	 * 0xFFFFFFFF
9895 	 */
9896 	while (i < dmaxfer->len) {
9897 		((uint8*)dmaxfer->srcmem.va)[i] = j % 256;
9898 		i++;
9899 		if (i % 4 == 0) {
9900 			j++;
9901 		}
9902 	}
9903 
9904 	OSL_CACHE_FLUSH(dmaxfer->srcmem.va, dmaxfer->len);
9905 
9906 	dmaxfer->srcdelay = srcdelay;
9907 	dmaxfer->destdelay = destdelay;
9908 
9909 	return BCME_OK;
9910 } /* dmaxfer_prepare_dmaaddr */
9911 
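/*
 * A compiled-out sketch (illustrative only, not called anywhere in the driver)
 * of how a destination buffer could be checked against the source pattern
 * written by dmaxfer_prepare_dmaaddr(): byte i of the source is ((i / 4) % 256).
 */
#if 0
static int
dmaxfer_check_pattern(const uint8 *buf, uint len)
{
	uint i;

	for (i = 0; i < len; i++) {
		if (buf[i] != (uint8)((i / 4) % 256))
			return BCME_ERROR; /* first mismatch is at offset i */
	}
	return BCME_OK;
}
#endif
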
9912 static void
9913 dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg)
9914 {
9915 	dhd_prot_t *prot = dhd->prot;
9916 	uint64 end_usec;
9917 	pcie_dmaxfer_cmplt_t *cmplt = (pcie_dmaxfer_cmplt_t *)msg;
9918 	int buf_free_scheduled;
9919 	int err = 0;
9920 
9921 	BCM_REFERENCE(cmplt);
9922 	end_usec = OSL_SYSUPTIME_US();
9923 
9924 #if defined(DHD_EFI) && defined(DHD_INTR_POLL_PERIOD_DYNAMIC)
9925 	/* restore interrupt poll period to the previous existing value */
9926 	dhd_os_set_intr_poll_period(dhd->bus, dhd->cur_intr_poll_period);
9927 #endif /* DHD_EFI && DHD_INTR_POLL_PERIOD_DYNAMIC */
9928 
9929 	DHD_ERROR(("DMA loopback status: %d\n", cmplt->compl_hdr.status));
9930 	prot->dmaxfer.status = cmplt->compl_hdr.status;
9931 	OSL_CACHE_INV(prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
9932 	if (prot->dmaxfer.d11_lpbk != M2M_WRITE_TO_RAM &&
9933 		prot->dmaxfer.d11_lpbk != M2M_READ_FROM_RAM &&
9934 		prot->dmaxfer.d11_lpbk != D11_WRITE_TO_RAM &&
9935 		prot->dmaxfer.d11_lpbk != D11_READ_FROM_RAM) {
9936 		err = memcmp(prot->dmaxfer.srcmem.va,
9937 			prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
9938 	}
9939 	if (prot->dmaxfer.srcmem.va && prot->dmaxfer.dstmem.va) {
9940 		if (err ||
9941 		        cmplt->compl_hdr.status != BCME_OK) {
9942 		        DHD_ERROR(("DMA loopback failed\n"));
9943 			/* It is observed that sometimes the completion
9944 			 * header status is set to OK even though the memcmp fails;
9945 			 * hence always explicitly set the dmaxfer status
9946 			 * to error when this happens.
9947 			 */
9948 			prot->dmaxfer.status = BCME_ERROR;
9949 			prhex("XFER SRC: ",
9950 			    prot->dmaxfer.srcmem.va, prot->dmaxfer.len);
9951 			prhex("XFER DST: ",
9952 			    prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
9953 		}
9954 		else {
9955 			switch (prot->dmaxfer.d11_lpbk) {
9956 			case M2M_DMA_LPBK: {
9957 				DHD_ERROR(("DMA successful pcie m2m DMA loopback\n"));
9958 				} break;
9959 			case D11_LPBK: {
9960 				DHD_ERROR(("DMA successful with d11 loopback\n"));
9961 				} break;
9962 			case BMC_LPBK: {
9963 				DHD_ERROR(("DMA successful with bmc loopback\n"));
9964 				} break;
9965 			case M2M_NON_DMA_LPBK: {
9966 				DHD_ERROR(("DMA successful pcie m2m NON DMA loopback\n"));
9967 				} break;
9968 			case D11_HOST_MEM_LPBK: {
9969 				DHD_ERROR(("DMA successful d11 host mem loopback\n"));
9970 				} break;
9971 			case BMC_HOST_MEM_LPBK: {
9972 				DHD_ERROR(("DMA successful bmc host mem loopback\n"));
9973 				} break;
9974 			case M2M_WRITE_TO_RAM: {
9975 				DHD_ERROR(("DMA successful pcie m2m write to ram\n"));
9976 				} break;
9977 			case M2M_READ_FROM_RAM: {
9978 				DHD_ERROR(("DMA successful pcie m2m read from ram\n"));
9979 				prhex("XFER DST: ",
9980 					prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
9981 				} break;
9982 			case D11_WRITE_TO_RAM: {
9983 				DHD_ERROR(("DMA successful D11 write to ram\n"));
9984 				} break;
9985 			case D11_READ_FROM_RAM: {
9986 				DHD_ERROR(("DMA successful D11 read from ram\n"));
9987 				prhex("XFER DST: ",
9988 					prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
9989 				} break;
9990 			default: {
9991 				DHD_ERROR(("Invalid loopback option\n"));
9992 				} break;
9993 			}
9994 
9995 			if (DHD_LPBKDTDUMP_ON()) {
9996 				/* debug info print of the Tx and Rx buffers */
9997 				dhd_prhex("XFER SRC: ", prot->dmaxfer.srcmem.va,
9998 					prot->dmaxfer.len, DHD_INFO_VAL);
9999 				dhd_prhex("XFER DST: ", prot->dmaxfer.dstmem.va,
10000 					prot->dmaxfer.len, DHD_INFO_VAL);
10001 			}
10002 		}
10003 	}
10004 
10005 	buf_free_scheduled = dhd_prepare_schedule_dmaxfer_free(dhd);
10006 	end_usec -= prot->dmaxfer.start_usec;
10007 	if (end_usec) {
10008 		prot->dmaxfer.time_taken = end_usec;
10009 		DHD_ERROR(("DMA loopback %d bytes in %lu usec, %u kBps\n",
10010 			prot->dmaxfer.len, (unsigned long)end_usec,
10011 			(prot->dmaxfer.len * (1000 * 1000 / 1024) / (uint32)end_usec)));
10012 	}
10013 	dhd->prot->dmaxfer.in_progress = FALSE;
10014 
10015 	if (buf_free_scheduled != BCME_OK) {
10016 		dhd->bus->dmaxfer_complete = TRUE;
10017 		dhd_os_dmaxfer_wake(dhd);
10018 	}
10019 }
10020 
10021 /** Test functionality.
10022  * Transfers bytes from host to dongle and to host again using DMA
10023  * This function is not reentrant, as prot->dmaxfer.in_progress is not protected
10024  * by a spinlock.
10025  */
10026 int
10027 dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay,
10028 	uint d11_lpbk, uint core_num, uint32 mem_addr)
10029 {
10030 	unsigned long flags;
10031 	int ret = BCME_OK;
10032 	dhd_prot_t *prot = dhd->prot;
10033 	pcie_dma_xfer_params_t *dmap;
10034 	uint32 xferlen = LIMIT_TO_MAX(len, DMA_XFER_LEN_LIMIT);
10035 	uint16 alloced = 0;
10036 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
10037 
10038 	/* XXX: prot->dmaxfer.in_progress is not protected by lock */
10039 	if (prot->dmaxfer.in_progress) {
10040 		DHD_ERROR(("DMA is in progress...\n"));
10041 		return BCME_ERROR;
10042 	}
10043 
10044 	if (d11_lpbk >= MAX_LPBK) {
10045 		DHD_ERROR(("loopback mode should be either"
10046 			" 0-PCIE_M2M_DMA, 1-D11, 2-BMC or 3-PCIE_M2M_NonDMA\n"));
10047 		return BCME_ERROR;
10048 	}
10049 
10050 #ifdef PCIE_INB_DW
10051 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) {
10052 		return BCME_ERROR;
10053 	}
10054 #endif /* PCIE_INB_DW */
10055 
10056 	prot->dmaxfer.in_progress = TRUE;
10057 	if ((ret = dmaxfer_prepare_dmaaddr(dhd, xferlen, srcdelay, destdelay,
10058 	        &prot->dmaxfer)) != BCME_OK) {
10059 		prot->dmaxfer.in_progress = FALSE;
10060 #ifdef PCIE_INB_DW
10061 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
10062 #endif
10063 		return ret;
10064 	}
10065 	DHD_RING_LOCK(ring->ring_lock, flags);
10066 	dmap = (pcie_dma_xfer_params_t *)
10067 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
10068 
10069 	if (dmap == NULL) {
10070 		dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
10071 		prot->dmaxfer.in_progress = FALSE;
10072 		DHD_RING_UNLOCK(ring->ring_lock, flags);
10073 #ifdef PCIE_INB_DW
10074 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
10075 #endif
10076 		return BCME_NOMEM;
10077 	}
10078 
10079 	/* Common msg buf hdr */
10080 	dmap->cmn_hdr.msg_type = MSG_TYPE_LPBK_DMAXFER;
10081 	dmap->cmn_hdr.request_id = htol32(DHD_FAKE_PKTID);
10082 	dmap->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
10083 	dmap->cmn_hdr.flags = ring->current_phase;
10084 	ring->seqnum++;
10085 
10086 	dmap->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.srcmem.pa));
10087 	dmap->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.srcmem.pa));
10088 	dmap->host_ouput_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.dstmem.pa));
10089 	dmap->host_ouput_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.dstmem.pa));
10090 	dmap->xfer_len = htol32(prot->dmaxfer.len);
10091 	dmap->srcdelay = htol32(prot->dmaxfer.srcdelay);
10092 	dmap->destdelay = htol32(prot->dmaxfer.destdelay);
10093 	prot->dmaxfer.d11_lpbk = d11_lpbk;
10094 	if (d11_lpbk == M2M_WRITE_TO_RAM) {
10095 		dmap->host_ouput_buf_addr.high = 0x0;
10096 		dmap->host_ouput_buf_addr.low = mem_addr;
10097 	} else if (d11_lpbk == M2M_READ_FROM_RAM) {
10098 		dmap->host_input_buf_addr.high = 0x0;
10099 		dmap->host_input_buf_addr.low = mem_addr;
10100 	} else if (d11_lpbk == D11_WRITE_TO_RAM) {
10101 		dmap->host_ouput_buf_addr.high = 0x0;
10102 		dmap->host_ouput_buf_addr.low = mem_addr;
10103 	} else if (d11_lpbk == D11_READ_FROM_RAM) {
10104 		dmap->host_input_buf_addr.high = 0x0;
10105 		dmap->host_input_buf_addr.low = mem_addr;
10106 	}
10107 	dmap->flags = (((core_num & PCIE_DMA_XFER_FLG_CORE_NUMBER_MASK)
10108 			<< PCIE_DMA_XFER_FLG_CORE_NUMBER_SHIFT) |
10109 			((prot->dmaxfer.d11_lpbk & PCIE_DMA_XFER_FLG_D11_LPBK_MASK)
10110 			 << PCIE_DMA_XFER_FLG_D11_LPBK_SHIFT));
10111 	prot->dmaxfer.start_usec = OSL_SYSUPTIME_US();
10112 
10113 	/* update ring's WR index and ring doorbell to dongle */
10114 	dhd_prot_ring_write_complete(dhd, ring, dmap, 1);
10115 
10116 	DHD_RING_UNLOCK(ring->ring_lock, flags);
10117 
10118 	DHD_ERROR(("DMA loopback Started... on core[%d]\n", core_num));
10119 #ifdef PCIE_INB_DW
10120 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
10121 #endif
10122 
10123 	return BCME_OK;
10124 } /* dhdmsgbuf_dmaxfer_req */
10125 
10126 int
10127 dhdmsgbuf_dmaxfer_status(dhd_pub_t *dhd, dma_xfer_info_t *result)
10128 {
10129 	dhd_prot_t *prot = dhd->prot;
10130 
10131 	if (prot->dmaxfer.in_progress)
10132 		result->status = DMA_XFER_IN_PROGRESS;
10133 	else if (prot->dmaxfer.status == 0)
10134 		result->status = DMA_XFER_SUCCESS;
10135 	else
10136 		result->status = DMA_XFER_FAILED;
10137 
10138 	result->type = prot->dmaxfer.d11_lpbk;
10139 	result->error_code = prot->dmaxfer.status;
10140 	result->num_bytes = prot->dmaxfer.len;
10141 	result->time_taken = prot->dmaxfer.time_taken;
10142 	if (prot->dmaxfer.time_taken) {
10143 		/* throughput in kBps */
10144 		result->tput =
10145 			(prot->dmaxfer.len * (1000 * 1000 / 1024)) /
10146 			(uint32)prot->dmaxfer.time_taken;
10147 	}
10148 
10149 	return BCME_OK;
10150 }
10151 
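/*
 * Typical usage sketch (illustrative only, kept compiled out): dhdmsgbuf_dmaxfer_req()
 * only queues the loopback request, so the result can be polled afterwards through
 * dhdmsgbuf_dmaxfer_status(). The tput field is reported in kBps as
 * len * (1000 * 1000 / 1024) / time_taken_us; for example 65536 bytes moved in
 * 1000 usec reports 65536 * 976 / 1000 = 63963 kBps (about 62.5 MB/s; the exact
 * value 64000 KiB/s differs slightly due to integer truncation of the factor).
 */
#if 0
static int
dmaxfer_run_and_wait(dhd_pub_t *dhd, uint len)
{
	dma_xfer_info_t result;
	int ret = dhdmsgbuf_dmaxfer_req(dhd, len, 0, 0, M2M_DMA_LPBK, 0, 0);

	if (ret != BCME_OK)
		return ret;
	do {
		OSL_DELAY(1000); /* poll every 1 ms */
		dhdmsgbuf_dmaxfer_status(dhd, &result);
	} while (result.status == DMA_XFER_IN_PROGRESS);
	return (result.status == DMA_XFER_SUCCESS) ? BCME_OK : BCME_ERROR;
}
#endif
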
10152 /** Called in the process of submitting an ioctl to the dongle */
10153 static int
10154 dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
10155 {
10156 	int ret = 0;
10157 	uint copylen = 0;
10158 
10159 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
10160 
10161 	if (dhd->bus->is_linkdown) {
10162 		DHD_ERROR(("%s : PCIe link is down. we have nothing to do\n",
10163 			__FUNCTION__));
10164 		return -EIO;
10165 	}
10166 
10167 	if (dhd->busstate == DHD_BUS_DOWN) {
10168 		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
10169 		return -EIO;
10170 	}
10171 
10172 	/* don't talk to the dongle if fw is about to be reloaded */
10173 	if (dhd->hang_was_sent) {
10174 		DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
10175 			__FUNCTION__));
10176 		return -EIO;
10177 	}
10178 
10179 	if (cmd == WLC_GET_VAR && buf)
10180 	{
10181 		if (!len || !*(uint8 *)buf) {
10182 			DHD_ERROR(("%s(): Zero length bailing\n", __FUNCTION__));
10183 			ret = BCME_BADARG;
10184 			goto done;
10185 		}
10186 
10187 		/* Respond "bcmerror" and "bcmerrorstr" with local cache */
10188 		copylen = MIN(len, BCME_STRLEN);
10189 
10190 		if ((len >= strlen("bcmerrorstr")) &&
10191 			(!strcmp((char *)buf, "bcmerrorstr"))) {
10192 			strlcpy((char *)buf, bcmerrorstr(dhd->dongle_error), copylen);
10193 			goto done;
10194 		} else if ((len >= strlen("bcmerror")) &&
10195 			!strcmp((char *)buf, "bcmerror")) {
10196 			*(uint32 *)buf = dhd->dongle_error;
10197 			goto done;
10198 		}
10199 	}
10200 
10201 	DHD_CTL(("query_ioctl: ACTION %d ifidx %d cmd %d len %d \n",
10202 	    action, ifidx, cmd, len));
10203 #ifdef REPORT_FATAL_TIMEOUTS
10204 	/*
10205 	 * These timers "should" be started before sending H2D interrupt.
10206 	 * Think of the scenario where H2D interrupt is fired and the Dongle
10207 	 * responds back immediately. From the DPC we would stop the cmd, bus
10208 	 * timers. But the process context could have switched out leading to
10209 	 * a situation where the timers get stopped before they have actually been started.
10210 	 *
10211 	 * Disable preemption from the time we start the timer until we are done
10212 	 * with sending the H2D interrupts.
10213 	 */
10214 	OSL_DISABLE_PREEMPTION(dhd->osh);
10215 	dhd_set_request_id(dhd, dhd->prot->ioctl_trans_id+1, cmd);
10216 	dhd_start_cmd_timer(dhd);
10217 	dhd_start_bus_timer(dhd);
10218 #endif /* REPORT_FATAL_TIMEOUTS */
10219 
10220 	ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
10221 
10222 #ifdef REPORT_FATAL_TIMEOUTS
10223 	/* For some reason if we fail to ring door bell, stop the timers */
10224 	if (ret < 0) {
10225 		DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
10226 		dhd_stop_cmd_timer(dhd);
10227 		dhd_stop_bus_timer(dhd);
10228 		OSL_ENABLE_PREEMPTION(dhd->osh);
10229 		goto done;
10230 	}
10231 	OSL_ENABLE_PREEMPTION(dhd->osh);
10232 #else
10233 	if (ret < 0) {
10234 		DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
10235 		goto done;
10236 	}
10237 #endif /* REPORT_FATAL_TIMEOUTS */
10238 
10239 	/* wait for IOCTL completion message from dongle and get first fragment */
10240 	ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
10241 
10242 done:
10243 	return ret;
10244 }
10245 
10246 void
10247 dhd_msgbuf_iovar_timeout_dump(dhd_pub_t *dhd)
10248 {
10249 	uint32 intstatus;
10250 	dhd_prot_t *prot = dhd->prot;
10251 	dhd->rxcnt_timeout++;
10252 	dhd->rx_ctlerrs++;
10253 	DHD_ERROR(("%s: resumed on timeout rxcnt_timeout%s %d ioctl_cmd %d "
10254 		"trans_id %d state %d busstate=%d ioctl_received=%d\n", __FUNCTION__,
10255 		dhd->is_sched_error ? " due to scheduling problem" : "",
10256 		dhd->rxcnt_timeout, prot->curr_ioctl_cmd, prot->ioctl_trans_id,
10257 		prot->ioctl_state, dhd->busstate, prot->ioctl_received));
10258 #if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
10259 		/* XXX DHD triggers a kernel panic if the resume-on-timeout occurs
10260 		 * due to tasklet or workqueue scheduling problems in the Linux kernel.
10261 		 * The customer reports that it is hard to find any clue in the
10262 		 * host memory dump, since the important tasklet or workqueue information
10263 		 * has already disappeared due to the latency of printing out the timestamp
10264 		 * logs while debugging the scan timeout issue.
10265 		 * For this reason, the customer requested that we trigger a kernel panic
10266 		 * rather than taking a SOCRAM dump.
10267 		 */
10268 		if (dhd->is_sched_error && dhd->memdump_enabled == DUMP_MEMFILE_BUGON) {
10269 			/* change g_assert_type to trigger Kernel panic */
10270 			g_assert_type = 2;
10271 			/* use ASSERT() to trigger panic */
10272 			ASSERT(0);
10273 		}
10274 #endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */
10275 
10276 	if (prot->curr_ioctl_cmd == WLC_SET_VAR ||
10277 			prot->curr_ioctl_cmd == WLC_GET_VAR) {
10278 		char iovbuf[32];
10279 		int dump_size = 128;
10280 		uint8 *ioctl_buf = (uint8 *)prot->ioctbuf.va;
10281 		memset(iovbuf, 0, sizeof(iovbuf));
10282 		strncpy(iovbuf, ioctl_buf, sizeof(iovbuf) - 1);
10283 		iovbuf[sizeof(iovbuf) - 1] = '\0';
10284 		DHD_ERROR(("Current IOVAR (%s): %s\n",
10285 			prot->curr_ioctl_cmd == WLC_SET_VAR ?
10286 			"WLC_SET_VAR" : "WLC_GET_VAR", iovbuf));
10287 		DHD_ERROR(("========== START IOCTL REQBUF DUMP ==========\n"));
10288 		prhex("ioctl_buf", (const u8 *) ioctl_buf, dump_size);
10289 		DHD_ERROR(("\n========== END IOCTL REQBUF DUMP ==========\n"));
10290 	}
10291 
10292 	/* Check the PCIe link status by reading intstatus register */
10293 	intstatus = si_corereg(dhd->bus->sih,
10294 		dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0);
10295 	if (intstatus == (uint32)-1) {
10296 		DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
10297 		dhd->bus->is_linkdown = TRUE;
10298 	}
10299 
10300 	dhd_bus_dump_console_buffer(dhd->bus);
10301 	dhd_prot_debug_info_print(dhd);
10302 }
10303 
10304 /**
10305  * Waits for IOCTL completion message from the dongle, copies this into caller
10306  * provided parameter 'buf'.
10307  */
10308 static int
10309 dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf)
10310 {
10311 	dhd_prot_t *prot = dhd->prot;
10312 	int timeleft;
10313 	unsigned long flags;
10314 	int ret = 0;
10315 	static uint cnt = 0;
10316 
10317 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
10318 
10319 	if (dhd_query_bus_erros(dhd)) {
10320 		ret = -EIO;
10321 		goto out;
10322 	}
10323 #ifdef GDB_PROXY
10324 	/* Loop while timeout is caused by firmware stop in GDB */
10325 	{
10326 		uint32 prev_stop_count;
10327 		do {
10328 			prev_stop_count = dhd->gdb_proxy_stop_count;
10329 			timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
10330 		} while ((timeleft == 0) && ((dhd->gdb_proxy_stop_count != prev_stop_count) ||
10331 			(dhd->gdb_proxy_stop_count & GDB_PROXY_STOP_MASK)));
10332 	}
10333 #else
10334 	timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
10335 #endif /* GDB_PROXY */
10336 
10337 #ifdef DHD_RECOVER_TIMEOUT
10338 	if (prot->ioctl_received == 0) {
10339 		uint32 intstatus = si_corereg(dhd->bus->sih,
10340 			dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0);
10341 		int host_irq_disabled = dhdpcie_irq_disabled(dhd->bus);
10342 		if ((intstatus) && (intstatus != (uint32)-1) &&
10343 			(timeleft == 0) && (!dhd_query_bus_erros(dhd))) {
10344 			DHD_ERROR(("%s: iovar timeout trying again intstatus=%x"
10345 				" host_irq_disabled=%d\n",
10346 				__FUNCTION__, intstatus, host_irq_disabled));
10347 			dhd_pcie_intr_count_dump(dhd);
10348 			dhd_print_tasklet_status(dhd);
10349 			dhd_prot_process_ctrlbuf(dhd);
10350 			timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
10351 			/* Clear Interrupts */
10352 			dhdpcie_bus_clear_intstatus(dhd->bus);
10353 		}
10354 	}
10355 #endif /* DHD_RECOVER_TIMEOUT */
10356 
10357 	if (dhd->conf->ctrl_resched > 0 && timeleft == 0 && (!dhd_query_bus_erros(dhd))) {
10358 		cnt++;
10359 		if (cnt <= dhd->conf->ctrl_resched) {
10360 			uint buscorerev = dhd->bus->sih->buscorerev;
10361 			uint32 intstatus = 0, intmask = 0;
10362 			intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxInt(buscorerev), 0, 0);
10363 			intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxMask(buscorerev), 0, 0);
10364 			if (intstatus) {
10365 				DHD_ERROR(("%s: reschedule dhd_dpc, cnt=%d, intstatus=0x%x, intmask=0x%x\n",
10366 					__FUNCTION__, cnt, intstatus, intmask));
10367 				dhd->bus->intstatus = intstatus;
10368 				dhd->bus->ipend = TRUE;
10369 				dhd->bus->dpc_sched = TRUE;
10370 				dhd_sched_dpc(dhd);
10371 				timeleft = dhd_os_ioctl_resp_wait(dhd, &prot->ioctl_received);
10372 			}
10373 		}
10374 	} else {
10375 		cnt = 0;
10376 	}
10377 
10378 	if (timeleft == 0 && (!dhd_query_bus_erros(dhd))) {
10379 		if (dhd->check_trap_rot) {
10380 			/* check dongle trap first */
10381 			DHD_ERROR(("Check dongle trap in the case of iovar timeout\n"));
10382 			dhd_bus_checkdied(dhd->bus, NULL, 0);
10383 
10384 			if (dhd->dongle_trap_occured) {
10385 #ifdef SUPPORT_LINKDOWN_RECOVERY
10386 #ifdef CONFIG_ARCH_MSM
10387 				dhd->bus->no_cfg_restore = 1;
10388 #endif /* CONFIG_ARCH_MSM */
10389 #endif /* SUPPORT_LINKDOWN_RECOVERY */
10390 				ret = -EREMOTEIO;
10391 				goto out;
10392 			}
10393 		}
10394 		/* check if resumed on time out related to scheduling issue */
10395 		dhd->is_sched_error = dhd_bus_query_dpc_sched_errors(dhd);
10396 
10397 		dhd->iovar_timeout_occured = TRUE;
10398 		dhd_msgbuf_iovar_timeout_dump(dhd);
10399 
10400 #ifdef DHD_FW_COREDUMP
10401 		/* Collect socram dump */
10402 		if (dhd->memdump_enabled) {
10403 			/* collect core dump */
10404 			dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT;
10405 			dhd_bus_mem_dump(dhd);
10406 		}
10407 #endif /* DHD_FW_COREDUMP */
10408 
10409 #ifdef DHD_EFI
10410 		/*
10411 		* For an ioctl timeout, recovery is triggered only in the EFI case because,
10412 		* on Linux, the dhd daemon will itself trap the FW;
10413 		* if recovery were also triggered here, there would be a race
10414 		* between the FLR and the daemon-initiated trap.
10415 		*/
10416 		dhd_schedule_reset(dhd);
10417 #endif /* DHD_EFI */
10418 
10419 #ifdef SUPPORT_LINKDOWN_RECOVERY
10420 #ifdef CONFIG_ARCH_MSM
10421 		dhd->bus->no_cfg_restore = 1;
10422 #endif /* CONFIG_ARCH_MSM */
10423 #endif /* SUPPORT_LINKDOWN_RECOVERY */
10424 		ret = -ETIMEDOUT;
10425 		goto out;
10426 	} else {
10427 		if (prot->ioctl_received != IOCTL_RETURN_ON_SUCCESS) {
10428 			DHD_ERROR(("%s: IOCTL failure due to ioctl_received = %d\n",
10429 				__FUNCTION__, prot->ioctl_received));
10430 			ret = -EINVAL;
10431 			goto out;
10432 		}
10433 		dhd->rxcnt_timeout = 0;
10434 		dhd->rx_ctlpkts++;
10435 		DHD_CTL(("%s: ioctl resp resumed, got %d\n",
10436 			__FUNCTION__, prot->ioctl_resplen));
10437 	}
10438 
10439 	if (dhd->prot->ioctl_resplen > len)
10440 		dhd->prot->ioctl_resplen = (uint16)len;
10441 	if (buf)
10442 		bcopy(dhd->prot->retbuf.va, buf, dhd->prot->ioctl_resplen);
10443 
10444 	ret = (int)(dhd->prot->ioctl_status);
10445 
10446 out:
10447 	DHD_GENERAL_LOCK(dhd, flags);
10448 	dhd->prot->ioctl_state = 0;
10449 	dhd->prot->ioctl_resplen = 0;
10450 	dhd->prot->ioctl_received = IOCTL_WAIT;
10451 	dhd->prot->curr_ioctl_cmd = 0;
10452 	DHD_GENERAL_UNLOCK(dhd, flags);
10453 
10454 	return ret;
10455 } /* dhd_msgbuf_wait_ioctl_cmplt */
10456 
10457 static int
10458 dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
10459 {
10460 	int ret = 0;
10461 
10462 	DHD_TRACE(("%s: Enter \n", __FUNCTION__));
10463 
10464 	if (dhd->bus->is_linkdown) {
10465 		DHD_ERROR(("%s : PCIe link is down. we have nothing to do\n",
10466 			__FUNCTION__));
10467 		return -EIO;
10468 	}
10469 
10470 	if (dhd->busstate == DHD_BUS_DOWN) {
10471 		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
10472 		return -EIO;
10473 	}
10474 
10475 	/* don't talk to the dongle if fw is about to be reloaded */
10476 	if (dhd->hang_was_sent) {
10477 		DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
10478 			__FUNCTION__));
10479 		return -EIO;
10480 	}
10481 
10482 	DHD_CTL(("ACTION %d ifidx %d cmd %d len %d \n",
10483 		action, ifidx, cmd, len));
10484 
10485 #ifdef REPORT_FATAL_TIMEOUTS
10486 	/*
10487 	 * These timers "should" be started before sending H2D interrupt.
10488 	 * Think of the scenario where H2D interrupt is fired and the Dongle
10489 	 * responds back immediately. From the DPC we would stop the cmd, bus
10490 	 * timers. But the process context could have switched out leading to
10491 	 * a situation where the timers get stopped before they have actually been started.
10492 	 *
10493 	 * Disable preemption from the time we start the timer until we are done
10494 	 * with sending the H2D interrupts.
10495 	 */
10496 	OSL_DISABLE_PREEMPTION(dhd->osh);
10497 	dhd_set_request_id(dhd, dhd->prot->ioctl_trans_id+1, cmd);
10498 	dhd_start_cmd_timer(dhd);
10499 	dhd_start_bus_timer(dhd);
10500 #endif /* REPORT_FATAL_TIMEOUTS */
10501 
10502 	/* Fill up msgbuf for ioctl req */
10503 	ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
10504 
10505 #ifdef REPORT_FATAL_TIMEOUTS
10506 	/* For some reason if we fail to ring door bell, stop the timers */
10507 	if (ret < 0) {
10508 		DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
10509 		dhd_stop_cmd_timer(dhd);
10510 		dhd_stop_bus_timer(dhd);
10511 		OSL_ENABLE_PREEMPTION(dhd->osh);
10512 		goto done;
10513 	}
10514 
10515 	OSL_ENABLE_PREEMPTION(dhd->osh);
10516 #else
10517 	if (ret < 0) {
10518 		DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
10519 		goto done;
10520 	}
10521 #endif /* REPORT_FATAL_TIMEOUTS */
10522 
10523 	ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
10524 
10525 done:
10526 	return ret;
10527 }
10528 
10529 /** Called by upper DHD layer. Handles a protocol control response asynchronously. */
10530 int dhd_prot_ctl_complete(dhd_pub_t *dhd)
10531 {
10532 	return 0;
10533 }
10534 
10535 /** Called by upper DHD layer. Check for and handle local prot-specific iovar commands */
10536 int dhd_prot_iovar_op(dhd_pub_t *dhd, const char *name,
10537                              void *params, int plen, void *arg, int len, bool set)
10538 {
10539 	return BCME_UNSUPPORTED;
10540 }
10541 
10542 #ifdef DHD_DUMP_PCIE_RINGS
10543 int dhd_d2h_h2d_ring_dump(dhd_pub_t *dhd, void *file, const void *user_buf,
10544 	unsigned long *file_posn, bool file_write)
10545 {
10546 	dhd_prot_t *prot;
10547 	msgbuf_ring_t *ring;
10548 	int ret = 0;
10549 	uint16 h2d_flowrings_total;
10550 	uint16 flowid;
10551 
10552 	if (!(dhd) || !(dhd->prot)) {
10553 		goto exit;
10554 	}
10555 	prot = dhd->prot;
10556 
10557 	/* The ring dump sequence below matches the sequence followed by the parser. */
10558 	ring = &prot->h2dring_ctrl_subn;
10559 	if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
10560 		goto exit;
10561 
10562 	ring = &prot->h2dring_rxp_subn;
10563 	if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
10564 		goto exit;
10565 
10566 	ring = &prot->d2hring_ctrl_cpln;
10567 	if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
10568 		goto exit;
10569 
10570 	ring = &prot->d2hring_tx_cpln;
10571 	if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
10572 		goto exit;
10573 
10574 	ring = &prot->d2hring_rx_cpln;
10575 	if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
10576 		goto exit;
10577 
10578 	h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
10579 	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
10580 		if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0) {
10581 			goto exit;
10582 		}
10583 	}
10584 
10585 #ifdef EWP_EDL
10586 	if (dhd->dongle_edl_support) {
10587 		ring = prot->d2hring_edl;
10588 		if ((ret = dhd_edl_ring_hdr_write(dhd, ring, file, user_buf, file_posn)) < 0)
10589 			goto exit;
10590 	}
10591 	else if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !dhd->dongle_edl_support)
10592 #else
10593 	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
10594 #endif /* EWP_EDL */
10595 	{
10596 		ring = prot->h2dring_info_subn;
10597 		if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
10598 			goto exit;
10599 
10600 		ring = prot->d2hring_info_cpln;
10601 		if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
10602 			goto exit;
10603 	}
10604 
10605 exit:
10606 	return ret;
10607 }
10608 
10609 /* Write to file */
10610 static
10611 int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file,
10612 	const void *user_buf, unsigned long *file_posn)
10613 {
10614 	int ret = 0;
10615 
10616 	if (ring == NULL) {
10617 		DHD_ERROR(("%s: Ring not initialised, failed to dump ring contents\n",
10618 			__FUNCTION__));
10619 		return BCME_ERROR;
10620 	}
10621 	if (file) {
10622 		ret = dhd_os_write_file_posn(file, file_posn, (char *)(ring->dma_buf.va),
10623 				((unsigned long)(ring->max_items) * (ring->item_len)));
10624 		if (ret < 0) {
10625 			DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
10626 			ret = BCME_ERROR;
10627 		}
10628 	} else if (user_buf) {
10629 		ret = dhd_export_debug_data((char *)(ring->dma_buf.va), NULL, user_buf,
10630 			((unsigned long)(ring->max_items) * (ring->item_len)), (int *)file_posn);
10631 	}
10632 	return ret;
10633 }
10634 
10635 #ifdef EWP_EDL
10636 /* Write to file */
10637 static
10638 int dhd_edl_ring_hdr_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, const void *user_buf,
10639 	unsigned long *file_posn)
10640 {
10641 	int ret = 0, nitems = 0;
10642 	char *buf = NULL, *ptr = NULL;
10643 	uint8 *msg_addr = NULL;
10644 	uint16	rd = 0;
10645 
10646 	if (ring == NULL) {
10647 		DHD_ERROR(("%s: Ring not initialised, failed to dump ring contents\n",
10648 			__FUNCTION__));
10649 		ret = BCME_ERROR;
10650 		goto done;
10651 	}
10652 
10653 	buf = MALLOCZ(dhd->osh, (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_HDR_SIZE));
10654 	if (buf == NULL) {
10655 		DHD_ERROR(("%s: buffer allocation failed\n", __FUNCTION__));
10656 		ret = BCME_ERROR;
10657 		goto done;
10658 	}
10659 	ptr = buf;
10660 
10661 	for (; nitems < D2HRING_EDL_MAX_ITEM; nitems++, rd++) {
10662 		msg_addr = (uint8 *)ring->dma_buf.va + (rd * ring->item_len);
10663 		memcpy(ptr, (char *)msg_addr, D2HRING_EDL_HDR_SIZE);
10664 		ptr += D2HRING_EDL_HDR_SIZE;
10665 	}
10666 	if (file) {
10667 		ret = dhd_os_write_file_posn(file, file_posn, buf,
10668 				(D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM));
10669 		if (ret < 0) {
10670 			DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
10671 			goto done;
10672 		}
10673 	}
10674 	else {
10675 		ret = dhd_export_debug_data(buf, NULL, user_buf,
10676 			(D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM), file_posn);
10677 	}
10678 
10679 done:
10680 	if (buf) {
10681 		MFREE(dhd->osh, buf, (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_HDR_SIZE));
10682 	}
10683 	return ret;
10684 }
10685 #endif /* EWP_EDL */
10686 #endif /* DHD_DUMP_PCIE_RINGS */
10687 
10688 /** Add prot dump output to a buffer */
10689 void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
10690 {
10691 #if defined(BCM_ROUTER_DHD)
10692 	bcm_bprintf(b, "DHD Router: 1GMAC HotBRC forwarding mode\n");
10693 #endif /* BCM_ROUTER_DHD */
10694 
10695 	if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM)
10696 		bcm_bprintf(b, "\nd2h_sync: SEQNUM:");
10697 	else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM)
10698 		bcm_bprintf(b, "\nd2h_sync: XORCSUM:");
10699 	else
10700 		bcm_bprintf(b, "\nd2h_sync: NONE:");
10701 	bcm_bprintf(b, " d2h_sync_wait max<%lu> tot<%lu>\n",
10702 		dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot);
10703 
10704 	bcm_bprintf(b, "\nDongle DMA Indices: h2d %d  d2h %d index size %d bytes\n",
10705 		dhd->dma_h2d_ring_upd_support,
10706 		dhd->dma_d2h_ring_upd_support,
10707 		dhd->prot->rw_index_sz);
10708 	bcm_bprintf(b, "h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
10709 		h2d_max_txpost, dhd->prot->h2d_max_txpost);
10710 #if defined(DHD_HTPUT_TUNABLES)
10711 	bcm_bprintf(b, "h2d_htput_max_txpost: %d, prot->h2d_htput_max_txpost: %d\n",
10712 		h2d_htput_max_txpost, dhd->prot->h2d_htput_max_txpost);
10713 #endif /* DHD_HTPUT_TUNABLES */
10714 	bcm_bprintf(b, "pktid_txq_start_cnt: %d\n", dhd->prot->pktid_txq_start_cnt);
10715 	bcm_bprintf(b, "pktid_txq_stop_cnt: %d\n", dhd->prot->pktid_txq_stop_cnt);
10716 	bcm_bprintf(b, "pktid_depleted_cnt: %d\n", dhd->prot->pktid_depleted_cnt);
10717 	bcm_bprintf(b, "txcpl_db_cnt: %d\n", dhd->prot->txcpl_db_cnt);
10718 #ifdef DHD_DMA_INDICES_SEQNUM
10719 	bcm_bprintf(b, "host_seqnum %u dngl_seqnum %u\n", dhd_prot_read_seqnum(dhd, TRUE),
10720 		dhd_prot_read_seqnum(dhd, FALSE));
10721 #endif /* DHD_DMA_INDICES_SEQNUM */
10722 	bcm_bprintf(b, "tx_h2d_db_cnt:%llu\n", dhd->prot->tx_h2d_db_cnt);
10723 #ifdef AGG_H2D_DB
10724 	bcm_bprintf(b, "agg_h2d_db_enab:%d agg_h2d_db_timeout:%d agg_h2d_db_inflight_thresh:%d\n",
10725 		agg_h2d_db_enab, agg_h2d_db_timeout, agg_h2d_db_inflight_thresh);
10726 	bcm_bprintf(b, "agg_h2d_db: timer_db_cnt:%d direct_db_cnt:%d\n",
10727 		dhd->prot->agg_h2d_db_info.timer_db_cnt, dhd->prot->agg_h2d_db_info.direct_db_cnt);
10728 	dhd_agg_inflight_stats_dump(dhd, b);
10729 #endif /* AGG_H2D_DB */
10730 }
10731 
10732 /* Update local copy of dongle statistics */
10733 void dhd_prot_dstats(dhd_pub_t *dhd)
10734 {
10735 	return;
10736 }
10737 
10738 /** Called by upper DHD layer */
10739 int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf,
10740 	uint reorder_info_len, void **pkt, uint32 *free_buf_count)
10741 {
10742 	return 0;
10743 }
10744 
10745 /** Debug related, post a dummy message to interrupt dongle. Used to process cons commands. */
10746 int
10747 dhd_post_dummy_msg(dhd_pub_t *dhd)
10748 {
10749 	unsigned long flags;
10750 	hostevent_hdr_t *hevent = NULL;
10751 	uint16 alloced = 0;
10752 
10753 	dhd_prot_t *prot = dhd->prot;
10754 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
10755 
10756 #ifdef PCIE_INB_DW
10757 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
10758 		return BCME_ERROR;
10759 #endif /* PCIE_INB_DW */
10760 	DHD_RING_LOCK(ring->ring_lock, flags);
10761 
10762 	hevent = (hostevent_hdr_t *)
10763 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
10764 
10765 	if (hevent == NULL) {
10766 		DHD_RING_UNLOCK(ring->ring_lock, flags);
10767 #ifdef PCIE_INB_DW
10768 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
10769 #endif
10770 		return -1;
10771 	}
10772 
10773 	/* CMN msg header */
10774 	hevent->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
10775 	ring->seqnum++;
10776 	hevent->msg.msg_type = MSG_TYPE_HOST_EVNT;
10777 	hevent->msg.if_id = 0;
10778 	hevent->msg.flags = ring->current_phase;
10779 
10780 	/* Event payload */
10781 	hevent->evnt_pyld = htol32(HOST_EVENT_CONS_CMD);
10782 
10783 	/* Since, we are filling the data directly into the bufptr obtained
10784 	 * from the msgbuf, we can directly call the write_complete
10785 	 */
10786 	dhd_prot_ring_write_complete(dhd, ring, hevent, 1);
10787 
10788 	DHD_RING_UNLOCK(ring->ring_lock, flags);
10789 
10790 #ifdef PCIE_INB_DW
10791 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
10792 #endif
10793 
10794 	return 0;
10795 }
10796 
10797 /**
10798  * If exactly_nitems is true, this function will allocate space for nitems or fail
10799  * If exactly_nitems is false, this function will allocate space for nitems or less
10800  */
10801 static void *
10802 BCMFASTPATH(dhd_prot_alloc_ring_space)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
10803 	uint16 nitems, uint16 * alloced, bool exactly_nitems)
10804 {
10805 	void * ret_buf;
10806 
10807 	if (nitems == 0) {
10808 		DHD_ERROR(("%s: nitems is 0 - ring(%s)\n", __FUNCTION__, ring->name));
10809 		return NULL;
10810 	}
10811 
10812 	/* Alloc space for nitems in the ring */
10813 	ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
10814 
10815 	if (ret_buf == NULL) {
10816 		/* if alloc failed , invalidate cached read ptr */
10817 		if (dhd->dma_d2h_ring_upd_support) {
10818 			ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
10819 		} else {
10820 			dhd_bus_cmn_readshared(dhd->bus, &(ring->rd), RING_RD_UPD, ring->idx);
10821 #ifdef SUPPORT_LINKDOWN_RECOVERY
10822 			/* Check if ring->rd is valid */
10823 			if (ring->rd >= ring->max_items) {
10824 				DHD_ERROR(("%s: Invalid rd idx=%d\n", ring->name, ring->rd));
10825 				dhd->bus->read_shm_fail = TRUE;
10826 				return NULL;
10827 			}
10828 #endif /* SUPPORT_LINKDOWN_RECOVERY */
10829 		}
10830 
10831 		/* Try allocating once more */
10832 		ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
10833 
10834 		if (ret_buf == NULL) {
10835 			DHD_INFO(("%s: Ring space not available  \n", ring->name));
10836 			return NULL;
10837 		}
10838 	}
10839 
10840 	if (ret_buf == HOST_RING_BASE(ring)) {
10841 		DHD_MSGBUF_INFO(("%s: setting the phase now\n", ring->name));
10842 		ring->current_phase = ring->current_phase ? 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
10843 	}
10844 
10845 	/* Return alloced space */
10846 	return ret_buf;
10847 }
10848 
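/*
 * Canonical usage pattern for dhd_prot_alloc_ring_space(), as followed by
 * dhd_post_dummy_msg() and the other submitters above: take the ring lock,
 * allocate a slot, fill the common message header (epoch/seqnum, msg_type,
 * flags = current_phase), then ring the doorbell via dhd_prot_ring_write_complete()
 * before dropping the lock. The compiled-out sketch below is illustrative only
 * and assumes the cmn_msg_hdr_t layout from bcmmsgbuf.h.
 */
#if 0
static int
dhd_prot_submit_one(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 msg_type)
{
	unsigned long flags;
	uint16 alloced = 0;
	cmn_msg_hdr_t *msg;

	DHD_RING_LOCK(ring->ring_lock, flags);
	msg = (cmn_msg_hdr_t *)dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
	if (msg == NULL) {
		DHD_RING_UNLOCK(ring->ring_lock, flags);
		return BCME_NORESOURCE;
	}
	msg->msg_type = msg_type;
	msg->if_id = 0;
	msg->flags = ring->current_phase;
	msg->epoch = ring->seqnum % H2D_EPOCH_MODULO;
	ring->seqnum++;

	/* update ring's WR index and ring doorbell to dongle */
	dhd_prot_ring_write_complete(dhd, ring, msg, 1);
	DHD_RING_UNLOCK(ring->ring_lock, flags);
	return BCME_OK;
}
#endif
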
10849 /**
10850  * Non-inline ioctl request.
10851  * First form an ioctl request, as per the ioctptr_reqst_hdr_t header, in the circular buffer.
10852  * Then form a separate request buffer with a 4-byte common header added at the front;
10853  * the buf contents from the parent function are copied into the remaining section of this buffer.
10854  */
10855 static int
10856 dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf, int ifidx)
10857 {
10858 	dhd_prot_t *prot = dhd->prot;
10859 	ioctl_req_msg_t *ioct_rqst;
10860 	void * ioct_buf;	/* For ioctl payload */
10861 	uint16  rqstlen, resplen;
10862 	unsigned long flags;
10863 	uint16 alloced = 0;
10864 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
10865 #ifdef DBG_DW_CHK_PCIE_READ_LATENCY
10866 	ulong addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
10867 	ktime_t begin_time, end_time;
10868 	s64 diff_ns;
10869 #endif /* DBG_DW_CHK_PCIE_READ_LATENCY */
10870 
10871 	if (dhd_query_bus_erros(dhd)) {
10872 		return -EIO;
10873 	}
10874 
10875 	rqstlen = len;
10876 	resplen = len;
10877 
10878 	/* Limit the ioctl request to MSGBUF_MAX_MSG_SIZE bytes including headers:
10879 	 * an 8K allocation of the dongle buffer fails, and dhd doesn't provide separate
10880 	 * input and output buffer lengths, so assume the input length never exceeds 2K.
10881 	 */
10882 	rqstlen = MIN(rqstlen, MSGBUF_IOCTL_MAX_RQSTLEN);
10883 
10884 #ifdef PCIE_INB_DW
10885 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
10886 		return BCME_ERROR;
10887 
10888 #ifdef DBG_DW_CHK_PCIE_READ_LATENCY
10889 	preempt_disable();
10890 	begin_time = ktime_get();
10891 	R_REG(dhd->osh, (volatile uint16 *)(dhd->bus->tcm + addr));
10892 	end_time = ktime_get();
10893 	preempt_enable();
10894 	diff_ns = ktime_to_ns(ktime_sub(end_time, begin_time));
10895 	/* Check if the delta is greater than 1 msec */
10896 	if (diff_ns > (1 * NSEC_PER_MSEC)) {
10897 		DHD_ERROR(("%s: found latency over 1ms (%lld ns), ds state=%d\n", __func__,
10898 		       diff_ns, dhdpcie_bus_get_pcie_inband_dw_state(dhd->bus)));
10899 	}
10900 #endif /* DBG_DW_CHK_PCIE_READ_LATENCY */
10901 #endif /* PCIE_INB_DW */
10902 
10903 	DHD_RING_LOCK(ring->ring_lock, flags);
10904 
10905 	if (prot->ioctl_state) {
10906 		DHD_ERROR(("%s: pending ioctl %02x\n", __FUNCTION__, prot->ioctl_state));
10907 		DHD_RING_UNLOCK(ring->ring_lock, flags);
10908 #ifdef PCIE_INB_DW
10909 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
10910 #endif
10911 		return BCME_BUSY;
10912 	} else {
10913 		prot->ioctl_state = MSGBUF_IOCTL_ACK_PENDING | MSGBUF_IOCTL_RESP_PENDING;
10914 	}
10915 
10916 	/* Request for circular buffer space */
10917 	ioct_rqst = (ioctl_req_msg_t*)
10918 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
10919 	if (ioct_rqst == NULL) {
10920 		DHD_ERROR(("couldn't allocate space on msgring to send ioctl request\n"));
10921 		prot->ioctl_state = 0;
10922 		prot->curr_ioctl_cmd = 0;
10923 		prot->ioctl_received = IOCTL_WAIT;
10924 		DHD_RING_UNLOCK(ring->ring_lock, flags);
10925 #ifdef PCIE_INB_DW
10926 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
10927 #endif
10928 		return -1;
10929 	}
10930 
10931 	/* Common msg buf hdr */
10932 	ioct_rqst->cmn_hdr.msg_type = MSG_TYPE_IOCTLPTR_REQ;
10933 	ioct_rqst->cmn_hdr.if_id = (uint8)ifidx;
10934 	ioct_rqst->cmn_hdr.flags = ring->current_phase;
10935 	ioct_rqst->cmn_hdr.request_id = htol32(DHD_IOCTL_REQ_PKTID);
10936 	ioct_rqst->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
10937 	ring->seqnum++;
10938 
10939 	ioct_rqst->cmd = htol32(cmd);
10940 	prot->curr_ioctl_cmd = cmd;
10941 	ioct_rqst->output_buf_len = htol16(resplen);
10942 	prot->ioctl_trans_id++;
10943 	ioct_rqst->trans_id = prot->ioctl_trans_id;
10944 
10945 	/* populate ioctl buffer info */
10946 	ioct_rqst->input_buf_len = htol16(rqstlen);
10947 	ioct_rqst->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->ioctbuf.pa));
10948 	ioct_rqst->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->ioctbuf.pa));
10949 	/* copy ioct payload */
10950 	ioct_buf = (void *) prot->ioctbuf.va;
10951 
10952 	prot->ioctl_fillup_time = OSL_LOCALTIME_NS();
10953 
10954 	if (buf)
10955 		memcpy(ioct_buf, buf, len);
10956 
10957 	OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, len);
10958 
10959 	if (!ISALIGNED(ioct_buf, DMA_ALIGN_LEN))
10960 		DHD_ERROR(("host ioct address unaligned !!!!! \n"));
10961 
10962 	DHD_CTL(("submitted IOCTL request request_id %d, cmd %d, output_buf_len %d, tx_id %d\n",
10963 		ioct_rqst->cmn_hdr.request_id, cmd, ioct_rqst->output_buf_len,
10964 		ioct_rqst->trans_id));
10965 
10966 #if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP)
10967 	dhd_prot_ioctl_trace(dhd, ioct_rqst, buf, len);
10968 #endif /* defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) */
10969 
10970 	/* update ring's WR index and ring doorbell to dongle */
10971 	dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
10972 
10973 	DHD_RING_UNLOCK(ring->ring_lock, flags);
10974 
10975 #ifdef PCIE_INB_DW
10976 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
10977 #endif
10978 
10979 	return 0;
10980 } /* dhd_fillup_ioct_reqst */
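
/*
 * Illustrative caller sketch (not part of the original driver): the ioctl
 * query/set paths are expected to stage the payload in 'buf' and call
 * dhd_fillup_ioct_reqst(). WLC_GET_VAR is used only as an example command;
 * the guard macro is hypothetical and never defined.
 */
#ifdef DHD_MSGBUF_USAGE_EXAMPLE /* hypothetical guard; never defined */
static int
dhd_example_send_ioctl(dhd_pub_t *dhd, void *buf, uint len, int ifidx)
{
	/* The request length is clamped internally to MSGBUF_IOCTL_MAX_RQSTLEN and
	 * the payload is copied into prot->ioctbuf before the doorbell is rung.
	 */
	return dhd_fillup_ioct_reqst(dhd, (uint16)len, WLC_GET_VAR, buf, ifidx);
}
#endif /* DHD_MSGBUF_USAGE_EXAMPLE */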
10981 
10982 /**
10983  * dhd_prot_ring_attach - Initialize the msgbuf_ring object and attach a
10984  * DMA-able buffer to it. The ring is NOT tagged as inited until all the ring
10985  * information is posted to the dongle.
10986  *
10987  * Invoked in dhd_prot_attach for the common rings, and in dhd_prot_init for
10988  * each flowring in pool of flowrings.
10989  *
10990  * returns BCME_OK=0 on success
10991  * returns non-zero negative error value on failure.
10992  */
10993 static int
10994 dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring, const char *name,
10995 	uint16 max_items, uint16 item_len, uint16 ringid)
10996 {
10997 	int dma_buf_alloced = BCME_NOMEM;
10998 	uint32 dma_buf_len;
10999 	dhd_prot_t *prot = dhd->prot;
11000 	uint16 max_flowrings = dhd->bus->max_tx_flowrings;
11001 	dhd_dma_buf_t *dma_buf = NULL;
11002 
11003 	ASSERT(ring);
11004 	ASSERT(name);
11005 	ASSERT((max_items < 0xFFFF) && (item_len < 0xFFFF) && (ringid < 0xFFFF));
11006 
11007 	/* Init name */
11008 	strlcpy((char *)ring->name, name, sizeof(ring->name));
11009 
11010 	ring->idx = ringid;
11011 
11012 #if defined(DHD_HTPUT_TUNABLES)
11013 	/* Use HTPUT max items */
11014 	if (DHD_IS_FLOWRING(ringid, max_flowrings) &&
11015 		DHD_IS_FLOWID_HTPUT(dhd, DHD_RINGID_TO_FLOWID(ringid))) {
11016 		max_items = prot->h2d_htput_max_txpost;
11017 	}
11018 #endif /* DHD_HTPUT_TUNABLES */
11019 
11020 	dma_buf_len = max_items * item_len;
11021 
11022 	ring->max_items = max_items;
11023 	ring->item_len = item_len;
11024 
11025 	/* A contiguous space may be reserved for all flowrings */
11026 	if (DHD_IS_FLOWRING(ringid, max_flowrings) && (prot->flowrings_dma_buf.va)) {
11027 		/* Carve out from the contiguous DMA-able flowring buffer */
11028 		uint16 flowid;
11029 		uint32 base_offset;
11030 		dhd_dma_buf_t *rsv_buf = &prot->flowrings_dma_buf;
11031 
11032 		dma_buf = &ring->dma_buf;
11033 
11034 		flowid = DHD_RINGID_TO_FLOWID(ringid);
11035 		base_offset = (flowid - BCMPCIE_H2D_COMMON_MSGRINGS) * dma_buf_len;
11036 
11037 		ASSERT(base_offset + dma_buf_len <= rsv_buf->len);
11038 
11039 		dma_buf->len = dma_buf_len;
11040 		dma_buf->va = (void *)((uintptr)rsv_buf->va + base_offset);
11041 		PHYSADDRHISET(dma_buf->pa, PHYSADDRHI(rsv_buf->pa));
11042 		PHYSADDRLOSET(dma_buf->pa, PHYSADDRLO(rsv_buf->pa) + base_offset);
11043 
11044 		/* On 64bit, contiguous space may not span across 0x00000000FFFFFFFF */
11045 		ASSERT(PHYSADDRLO(dma_buf->pa) >= PHYSADDRLO(rsv_buf->pa));
11046 
11047 		dma_buf->dmah   = rsv_buf->dmah;
11048 		dma_buf->secdma = rsv_buf->secdma;
11049 
11050 		(void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
11051 	} else {
11052 #ifdef EWP_EDL
11053 		if (ring == dhd->prot->d2hring_edl) {
11054 			/* For the EDL ring, memory is allocated during attach,
11055 			* so we just need to copy the dma_buf to the ring's dma_buf
11056 			*/
11057 			memcpy(&ring->dma_buf, &dhd->edl_ring_mem, sizeof(ring->dma_buf));
11058 			dma_buf = &ring->dma_buf;
11059 			if (dma_buf->va == NULL) {
11060 				return BCME_NOMEM;
11061 			}
11062 		} else
11063 #endif /* EWP_EDL */
11064 		{
11065 			/* Allocate a dhd_dma_buf */
11066 			dma_buf_alloced = dhd_dma_buf_alloc(dhd, &ring->dma_buf, dma_buf_len);
11067 			if (dma_buf_alloced != BCME_OK) {
11068 				return BCME_NOMEM;
11069 			}
11070 		}
11071 	}
11072 
11073 	/* CAUTION: Save ring::base_addr in little endian format! */
11074 	dhd_base_addr_htolpa(&ring->base_addr, ring->dma_buf.pa);
11075 
11076 	ring->ring_lock = osl_spin_lock_init(dhd->osh);
11077 
11078 	DHD_INFO(("RING_ATTACH : %s Max item %d len item %d total size %d "
11079 		"ring start %p buf phys addr  %x:%x \n",
11080 		ring->name, ring->max_items, ring->item_len,
11081 		dma_buf_len, ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
11082 		ltoh32(ring->base_addr.low_addr)));
11083 
11084 	return BCME_OK;
11085 } /* dhd_prot_ring_attach */
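
/*
 * Illustrative lifecycle sketch (not part of the original driver): a common
 * ring is attached once (DMA-able buffer allocated), its parameters are
 * posted to the dongle with dhd_prot_ring_init(), and it is reset/detached
 * on the way down. The name, item count and the size/id constants below are
 * illustrative values; the guard macro is hypothetical and never defined.
 */
#ifdef DHD_MSGBUF_USAGE_EXAMPLE /* hypothetical guard; never defined */
static int
dhd_example_common_ring_lifecycle(dhd_pub_t *dhd, msgbuf_ring_t *ring)
{
	if (dhd_prot_ring_attach(dhd, ring, "h2dctrl_example", 64,
	        H2DRING_CTRL_SUB_ITEMSIZE, BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT) != BCME_OK) {
		return BCME_NOMEM;
	}
	dhd_prot_ring_init(dhd, ring);   /* post base addr, max_items, item_len, WR/RD */
	/* ... ring in use ... */
	dhd_prot_ring_reset(dhd, ring);  /* zero the DMA-able buffer, WR/RD back to 0 */
	dhd_prot_ring_detach(dhd, ring); /* free the DMA-able buffer and the ring lock */
	return BCME_OK;
}
#endif /* DHD_MSGBUF_USAGE_EXAMPLE */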
11086 
11087 /**
11088  * dhd_prot_ring_init - Post the common ring information to dongle.
11089  *
11090  * Used only for common rings.
11091  *
11092  * The flowrings information is passed via the create flowring control message
11093  * (tx_flowring_create_request_t) sent over the H2D control submission common
11094  * ring.
11095  */
11096 static void
11097 dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring)
11098 {
11099 	ring->wr = 0;
11100 	ring->rd = 0;
11101 	ring->curr_rd = 0;
11102 
11103 	/* CAUTION: ring::base_addr already in Little Endian */
11104 	dhd_bus_cmn_writeshared(dhd->bus, &ring->base_addr,
11105 		sizeof(sh_addr_t), RING_BUF_ADDR, ring->idx);
11106 	dhd_bus_cmn_writeshared(dhd->bus, &ring->max_items,
11107 		sizeof(uint16), RING_MAX_ITEMS, ring->idx);
11108 	dhd_bus_cmn_writeshared(dhd->bus, &ring->item_len,
11109 		sizeof(uint16), RING_ITEM_LEN, ring->idx);
11110 
11111 	dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
11112 		sizeof(uint16), RING_WR_UPD, ring->idx);
11113 	dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
11114 		sizeof(uint16), RING_RD_UPD, ring->idx);
11115 
11116 	/* ring inited */
11117 	ring->inited = TRUE;
11118 
11119 } /* dhd_prot_ring_init */
11120 
11121 /**
11122  * dhd_prot_ring_reset - bzero a ring's DMA-able buffer and flush the cache.
11123  * Reset the WR and RD indices to 0.
11124  */
11125 static void
11126 dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring)
11127 {
11128 	DHD_TRACE(("%s\n", __FUNCTION__));
11129 
11130 	dhd_dma_buf_reset(dhd, &ring->dma_buf);
11131 
11132 	ring->rd = ring->wr = 0;
11133 	ring->curr_rd = 0;
11134 	ring->inited = FALSE;
11135 	ring->create_pending = FALSE;
11136 }
11137 
11138 /**
11139  * dhd_prot_ring_detach - Detach the DMA-able buffer and any other objects
11140  * hanging off the msgbuf_ring.
11141  */
11142 static void
11143 dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring)
11144 {
11145 	dhd_prot_t *prot = dhd->prot;
11146 	uint16 max_flowrings = dhd->bus->max_tx_flowrings;
11147 	ASSERT(ring);
11148 
11149 	ring->inited = FALSE;
11150 	/* rd = ~0, wr = ring->rd - 1, max_items = 0, len_item = ~0 */
11151 
11152 	/* If the DMA-able buffer was carved out of a pre-reserved contiguous
11153 	 * memory, then simply stop using it.
11154 	 */
11155 	if (DHD_IS_FLOWRING(ring->idx, max_flowrings) && (prot->flowrings_dma_buf.va)) {
11156 		(void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
11157 		memset(&ring->dma_buf, 0, sizeof(dhd_dma_buf_t));
11158 	} else {
11159 #ifdef EWP_EDL
11160 		if (ring == dhd->prot->d2hring_edl) {
11161 			/* For EDL ring, do not free ring mem here,
11162 			* it is done in dhd_detach
11163 			*/
11164 			memset(&ring->dma_buf, 0, sizeof(ring->dma_buf));
11165 		} else
11166 #endif /* EWP_EDL */
11167 		{
11168 			dhd_dma_buf_free(dhd, &ring->dma_buf);
11169 		}
11170 	}
11171 
11172 	osl_spin_lock_deinit(dhd->osh, ring->ring_lock);
11173 
11174 } /* dhd_prot_ring_detach */
11175 
11176 /* Fetch number of H2D flowrings given the total number of h2d rings */
11177 uint16
11178 dhd_get_max_flow_rings(dhd_pub_t *dhd)
11179 {
11180 	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
11181 		return dhd->bus->max_tx_flowrings;
11182 	else
11183 		return (dhd->bus->max_tx_flowrings - BCMPCIE_H2D_COMMON_MSGRINGS);
11184 }
11185 
11186 /**
11187  * dhd_prot_flowrings_pool_attach - Initialize a pool of flowring msgbuf_ring_t.
11188  *
11189  * Allocate a pool of msgbuf_ring along with DMA-able buffers for flowrings.
11190  * The dongle includes common rings when it advertises the number of H2D rings.
11191  * Allocates a pool of msgbuf_ring_t and invokes dhd_prot_ring_attach to
11192  * allocate the DMA-able buffer and initialize each msgbuf_ring_t object.
11193  *
11194  * dhd_prot_ring_attach is invoked to perform the actual initialization and
11195  * attaching the DMA-able buffer.
11196  *
11197  * Later dhd_prot_flowrings_pool_fetch() may be used to fetch a preallocated and
11198  * initialized msgbuf_ring_t object.
11199  *
11200  * returns BCME_OK=0 on success
11201  * returns non-zero negative error value on failure.
11202  */
11203 static int
11204 dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd)
11205 {
11206 	uint16 flowid;
11207 	msgbuf_ring_t *ring;
11208 	uint16 h2d_flowrings_total; /* exclude H2D common rings */
11209 	dhd_prot_t *prot = dhd->prot;
11210 	char ring_name[RING_NAME_MAX_LENGTH];
11211 
11212 	if (prot->h2d_flowrings_pool != NULL)
11213 		return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
11214 
11215 	ASSERT(prot->h2d_rings_total == 0);
11216 
11217 	/* h2d_rings_total includes H2D common rings: ctrl and rxbuf subn */
11218 	prot->h2d_rings_total = (uint16)dhd_bus_max_h2d_queues(dhd->bus);
11219 
11220 	if (prot->h2d_rings_total < BCMPCIE_H2D_COMMON_MSGRINGS) {
11221 		DHD_ERROR(("%s: h2d_rings_total advertised as %u\n",
11222 			__FUNCTION__, prot->h2d_rings_total));
11223 		return BCME_ERROR;
11224 	}
11225 
11226 	/* Subtract number of H2D common rings, to determine number of flowrings */
11227 	h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
11228 
11229 	DHD_ERROR(("Attach flowrings pool for %d rings\n", h2d_flowrings_total));
11230 
11231 	/* Allocate pool of msgbuf_ring_t objects for all flowrings */
11232 	prot->h2d_flowrings_pool = (msgbuf_ring_t *)MALLOCZ(prot->osh,
11233 		(h2d_flowrings_total * sizeof(msgbuf_ring_t)));
11234 
11235 	if (prot->h2d_flowrings_pool == NULL) {
11236 		DHD_ERROR(("%s: flowrings pool for %d flowrings, alloc failure\n",
11237 			__FUNCTION__, h2d_flowrings_total));
11238 		goto fail;
11239 	}
11240 
11241 	/* Setup & Attach a DMA-able buffer to each flowring in the flowring pool */
11242 	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
11243 		snprintf(ring_name, sizeof(ring_name), "h2dflr_%03u", flowid);
11244 		/* For HTPUT case max_items will be changed inside dhd_prot_ring_attach */
11245 		if (dhd_prot_ring_attach(dhd, ring, ring_name,
11246 		        prot->h2d_max_txpost, H2DRING_TXPOST_ITEMSIZE,
11247 		        DHD_FLOWID_TO_RINGID(flowid)) != BCME_OK) {
11248 			goto attach_fail;
11249 		}
11250 	}
11251 
11252 	return BCME_OK;
11253 
11254 attach_fail:
11255 	/* XXX: On a per project basis, one may decide whether to continue with
11256 	 * "fewer" flowrings, and what value of fewer suffices.
11257 	 */
11258 	dhd_prot_flowrings_pool_detach(dhd); /* Free entire pool of flowrings */
11259 
11260 fail:
11261 	prot->h2d_rings_total = 0;
11262 	return BCME_NOMEM;
11263 
11264 } /* dhd_prot_flowrings_pool_attach */
11265 
11266 /**
11267  * dhd_prot_flowrings_pool_reset - Reset all msgbuf_ring_t objects in the pool.
11268  * Invokes dhd_prot_ring_reset to perform the actual reset.
11269  *
11270  * The DMA-able buffer is not freed during reset and neither is the flowring
11271  * pool freed.
11272  *
11273  * dhd_prot_flowrings_pool_reset will be invoked in dhd_prot_reset. Following
11274  * the dhd_prot_reset, dhd_prot_init will be re-invoked, and the flowring pool
11275  * from a previous flowring pool instantiation will be reused.
11276  *
11277  * This avoids a fragmented DMA-able memory condition if dhd_prot_reset is
11278  * invoked multiple times to reboot the dongle without a full detach/attach
11279  * cycle.
11280  */
11281 static void
11282 dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd)
11283 {
11284 	uint16 flowid, h2d_flowrings_total;
11285 	msgbuf_ring_t *ring;
11286 	dhd_prot_t *prot = dhd->prot;
11287 
11288 	if (prot->h2d_flowrings_pool == NULL) {
11289 		ASSERT(prot->h2d_rings_total == 0);
11290 		return;
11291 	}
11292 	h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
11293 	/* Reset each flowring in the flowring pool */
11294 	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
11295 		dhd_prot_ring_reset(dhd, ring);
11296 		ring->inited = FALSE;
11297 	}
11298 
11299 	/* Flowring pool state must be as-if dhd_prot_flowrings_pool_attach */
11300 }
11301 
11302 /**
11303  * dhd_prot_flowrings_pool_detach - Free pool of msgbuf_ring along with
11304  * DMA-able buffers for flowrings.
11305  * dhd_prot_ring_detach is invoked to free the DMA-able buffer and perform any
11306  * de-initialization of each msgbuf_ring_t.
11307  */
11308 static void
11309 dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd)
11310 {
11311 	int flowid;
11312 	msgbuf_ring_t *ring;
11313 	uint16 h2d_flowrings_total; /* exclude H2D common rings */
11314 	dhd_prot_t *prot = dhd->prot;
11315 
11316 	if (prot->h2d_flowrings_pool == NULL) {
11317 		ASSERT(prot->h2d_rings_total == 0);
11318 		return;
11319 	}
11320 
11321 	h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
11322 	/* Detach the DMA-able buffer for each flowring in the flowring pool */
11323 	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
11324 		dhd_prot_ring_detach(dhd, ring);
11325 	}
11326 
11327 	MFREE(prot->osh, prot->h2d_flowrings_pool,
11328 		(h2d_flowrings_total * sizeof(msgbuf_ring_t)));
11329 
11330 	prot->h2d_rings_total = 0;
11331 
11332 } /* dhd_prot_flowrings_pool_detach */
11333 
11334 /**
11335  * dhd_prot_flowrings_pool_fetch - Fetch a preallocated and initialized
11336  * msgbuf_ring from the flowring pool, and assign it.
11337  *
11338  * Unlike common rings, which use dhd_prot_ring_init() to pass the common
11339  * ring information to the dongle, a flowring's information is passed via a
11340  * flowring create control message.
11341  *
11342  * Only the ring state (WR, RD) indices are initialized.
11343  */
11344 static msgbuf_ring_t *
11345 dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd, uint16 flowid)
11346 {
11347 	msgbuf_ring_t *ring;
11348 	dhd_prot_t *prot = dhd->prot;
11349 
11350 	ASSERT(flowid >= DHD_FLOWRING_START_FLOWID);
11351 	ASSERT(flowid < prot->h2d_rings_total);
11352 	ASSERT(prot->h2d_flowrings_pool != NULL);
11353 
11354 	ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
11355 
11356 	/* ASSERT flow_ring->inited == FALSE */
11357 
11358 	ring->wr = 0;
11359 	ring->rd = 0;
11360 	ring->curr_rd = 0;
11361 	ring->inited = TRUE;
11362 	/**
11363 	 * Every time a flowring starts dynamically, initialize current_phase with 0
11364 	 * then flip to BCMPCIE_CMNHDR_PHASE_BIT_INIT
11365 	 */
11366 	ring->current_phase = 0;
11367 	return ring;
11368 }
11369 
11370 /**
11371  * dhd_prot_flowrings_pool_release - release a previously fetched flowring's
11372  * msgbuf_ring back to the flow_ring pool.
11373  */
11374 void
11375 dhd_prot_flowrings_pool_release(dhd_pub_t *dhd, uint16 flowid, void *flow_ring)
11376 {
11377 	msgbuf_ring_t *ring;
11378 	dhd_prot_t *prot = dhd->prot;
11379 
11380 	ASSERT(flowid >= DHD_FLOWRING_START_FLOWID);
11381 	ASSERT(flowid < prot->h2d_rings_total);
11382 	ASSERT(prot->h2d_flowrings_pool != NULL);
11383 
11384 	ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
11385 
11386 	ASSERT(ring == (msgbuf_ring_t*)flow_ring);
11387 	/* ASSERT flow_ring->inited == TRUE */
11388 
11389 	(void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
11390 
11391 	ring->wr = 0;
11392 	ring->rd = 0;
11393 	ring->inited = FALSE;
11394 
11395 	ring->curr_rd = 0;
11396 }
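
/*
 * Illustrative sketch (not part of the original driver): the flowring create
 * path fetches a pre-attached msgbuf_ring from the pool and returns it to the
 * pool on failure or on flow ring teardown. 'flowid' is assumed to be a valid,
 * already assigned flow id; the guard macro is hypothetical and never defined.
 */
#ifdef DHD_MSGBUF_USAGE_EXAMPLE /* hypothetical guard; never defined */
static int
dhd_example_flowring_fetch_release(dhd_pub_t *dhd, uint16 flowid)
{
	msgbuf_ring_t *flow_ring;

	flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flowid);
	if (flow_ring == NULL)
		return BCME_NOMEM;

	/* ... send MSG_TYPE_FLOW_RING_CREATE over the control submission ring,
	 * see dhd_prot_flow_ring_create() further below ...
	 */

	/* On failure or teardown, give the ring back to the pool */
	dhd_prot_flowrings_pool_release(dhd, flowid, flow_ring);
	return BCME_OK;
}
#endif /* DHD_MSGBUF_USAGE_EXAMPLE */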
11397 
11398 #ifdef AGG_H2D_DB
11399 void
11400 dhd_prot_schedule_aggregate_h2d_db(dhd_pub_t *dhd, uint16 flowid)
11401 {
11402 	dhd_prot_t *prot = dhd->prot;
11403 	msgbuf_ring_t *ring;
11404 	uint16 inflight;
11405 	bool db_req = FALSE;
11406 	bool flush;
11407 
11408 	ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
11409 	flush = !!ring->pend_items_count;
11410 	dhd_prot_txdata_aggr_db_write_flush(dhd, flowid);
11411 
11412 	inflight = OSL_ATOMIC_READ(dhd->osh, &ring->inflight);
11413 	if (flush && inflight) {
11414 		if (inflight <= agg_h2d_db_inflight_thresh) {
11415 			db_req = TRUE;
11416 		}
11417 		dhd_agg_inflights_stats_update(dhd, inflight);
11418 		dhd_prot_aggregate_db_ring_door_bell(dhd, flowid, db_req);
11419 	}
11420 }
11421 #endif /* AGG_H2D_DB */
11422 
11423 /* Assumes only one index is updated at a time. */
11424 /* FIXME: needs rework. */
11425 /* If exactly_nitems is true, this function will allocate space for nitems or fail. */
11426 /*    Exception: when wrap-around is encountered, to prevent a hangup (last nitems of the ring buffer). */
11427 /* If exactly_nitems is false, this function will allocate space for nitems or less. */
11428 static void *
11429 BCMFASTPATH(dhd_prot_get_ring_space)(msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced,
11430 	bool exactly_nitems)
11431 {
11432 	void *ret_ptr = NULL;
11433 	uint16 ring_avail_cnt;
11434 
11435 	ASSERT(nitems <= ring->max_items);
11436 
11437 	ring_avail_cnt = CHECK_WRITE_SPACE(ring->rd, ring->wr, ring->max_items);
11438 
11439 	if ((ring_avail_cnt == 0) ||
11440 	       (exactly_nitems && (ring_avail_cnt < nitems) &&
11441 	       ((ring->max_items - ring->wr) >= nitems))) {
11442 		DHD_MSGBUF_INFO(("Space not available: ring %s items %d write %d read %d\n",
11443 			ring->name, nitems, ring->wr, ring->rd));
11444 		return NULL;
11445 	}
11446 	*alloced = MIN(nitems, ring_avail_cnt);
11447 
11448 	/* Return next available space */
11449 	ret_ptr = (char *)DHD_RING_BGN_VA(ring) + (ring->wr * ring->item_len);
11450 
11451 	/* Update write index */
11452 	if ((ring->wr + *alloced) == ring->max_items)
11453 		ring->wr = 0;
11454 	else if ((ring->wr + *alloced) < ring->max_items)
11455 		ring->wr += *alloced;
11456 	else {
11457 		/* Should never hit this */
11458 		ASSERT(0);
11459 		return NULL;
11460 	}
11461 
11462 	return ret_ptr;
11463 } /* dhd_prot_get_ring_space */
11464 
11465 #ifdef AGG_H2D_DB
11466 
11467 static void
11468 dhd_prot_agg_db_ring_write(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
11469 		uint16 nitems)
11470 {
11471 	uint16 max_flowrings = dhd->bus->max_tx_flowrings;
11472 	unsigned long flags_bus;
11473 
11474 #ifdef DHD_FAKE_TX_STATUS
11475 	/* if fake tx status is enabled, we should not update
11476 	 * dongle side rd/wr index for the tx flowring
11477 	 * and also should not ring the doorbell
11478 	 */
11479 	if (DHD_IS_FLOWRING(ring->idx, max_flowrings)) {
11480 		return;
11481 	}
11482 #endif /* DHD_FAKE_TX_STATUS */
11483 
11484 	DHD_BUS_LP_STATE_LOCK(dhd->bus->bus_lp_state_lock, flags_bus);
11485 
11486 	/* cache flush */
11487 	OSL_CACHE_FLUSH(p, ring->item_len * nitems);
11488 
11489 	if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
11490 			dhd_prot_dma_indx_set(dhd, ring->wr,
11491 			                      H2D_DMA_INDX_WR_UPD, ring->idx);
11492 	} else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings)) {
11493 			dhd_prot_dma_indx_set(dhd, ring->wr,
11494 			H2D_IFRM_INDX_WR_UPD, ring->idx);
11495 	} else {
11496 			dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
11497 				sizeof(uint16), RING_WR_UPD, ring->idx);
11498 	}
11499 
11500 	DHD_BUS_LP_STATE_UNLOCK(dhd->bus->bus_lp_state_lock, flags_bus);
11501 }
11502 
11503 static void
11504 dhd_prot_aggregate_db_ring_door_bell(dhd_pub_t *dhd, uint16 flowid, bool ring_db)
11505 {
11506 	dhd_prot_t *prot = dhd->prot;
11507 	flow_ring_table_t *flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
11508 	flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
11509 	msgbuf_ring_t *ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
11510 	uint32 db_index;
11511 	uint corerev;
11512 
11513 	if (ring_db == TRUE) {
11514 		dhd_msgbuf_agg_h2d_db_timer_cancel(dhd);
11515 		prot->agg_h2d_db_info.direct_db_cnt++;
11516 		/* raise h2d interrupt */
11517 		if (IDMA_ACTIVE(dhd) || (IFRM_ACTIVE(dhd))) {
11518 			db_index = IDMA_IDX0;
11519 			/* this API is called in the wl down path; in that case sih is freed already */
11520 			if (dhd->bus->sih) {
11521 				corerev = dhd->bus->sih->buscorerev;
11522 				/* We need to explicitly configure the type of DMA for
11523 				 * core rev >= 24
11524 				 */
11525 				if (corerev >= 24) {
11526 					db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
11527 				}
11528 			}
11529 			prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
11530 		} else {
11531 			prot->mb_ring_fn(dhd->bus, DHD_WRPTR_UPDATE_H2D_DB_MAGIC(ring));
11532 		}
11533 	} else {
11534 		dhd_msgbuf_agg_h2d_db_timer_start(prot);
11535 	}
11536 }
11537 
11538 #endif /* AGG_H2D_DB */
11539 
11540 /**
11541  * dhd_prot_ring_write_complete - Host updates the new WR index on producing
11542  * new messages in a H2D ring. The messages are flushed from cache prior to
11543  * posting the new WR index. The new WR index will be updated in the DMA index
11544  * array or directly in the dongle's ring state memory.
11545  * A PCIE doorbell will be generated to wake up the dongle.
11546  * This is a non-atomic function, make sure the callers
11547  * always hold appropriate locks.
11548  */
11549 static void
11550 BCMFASTPATH(__dhd_prot_ring_write_complete)(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
11551 	uint16 nitems)
11552 {
11553 	dhd_prot_t *prot = dhd->prot;
11554 	uint32 db_index;
11555 	uint16 max_flowrings = dhd->bus->max_tx_flowrings;
11556 	uint corerev;
11557 
11558 	/* cache flush */
11559 	OSL_CACHE_FLUSH(p, ring->item_len * nitems);
11560 
11561 	if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
11562 			dhd_prot_dma_indx_set(dhd, ring->wr,
11563 			                      H2D_DMA_INDX_WR_UPD, ring->idx);
11564 	} else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings)) {
11565 			dhd_prot_dma_indx_set(dhd, ring->wr,
11566 			H2D_IFRM_INDX_WR_UPD, ring->idx);
11567 	} else {
11568 			dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
11569 				sizeof(uint16), RING_WR_UPD, ring->idx);
11570 	}
11571 
11572 	/* raise h2d interrupt */
11573 	if (IDMA_ACTIVE(dhd) ||
11574 		(IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings))) {
11575 		db_index = IDMA_IDX0;
11576 		/* this API is called in the wl down path; in that case sih is freed already */
11577 		if (dhd->bus->sih) {
11578 			corerev = dhd->bus->sih->buscorerev;
11579 			/* We need to explicitly configure the type of DMA for core rev >= 24 */
11580 			if (corerev >= 24) {
11581 				db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
11582 			}
11583 		}
11584 		prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
11585 	} else {
11586 		prot->mb_ring_fn(dhd->bus, DHD_WRPTR_UPDATE_H2D_DB_MAGIC(ring));
11587 	}
11588 }
11589 
11590 static void
11591 BCMFASTPATH(dhd_prot_ring_write_complete)(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
11592 	uint16 nitems)
11593 {
11594 	unsigned long flags_bus;
11595 	DHD_BUS_LP_STATE_LOCK(dhd->bus->bus_lp_state_lock, flags_bus);
11596 	__dhd_prot_ring_write_complete(dhd, ring, p, nitems);
11597 	DHD_BUS_LP_STATE_UNLOCK(dhd->bus->bus_lp_state_lock, flags_bus);
11598 }
11599 
11600 static void
11601 BCMFASTPATH(dhd_prot_ring_doorbell)(dhd_pub_t *dhd, uint32 value)
11602 {
11603 	unsigned long flags_bus;
11604 	DHD_BUS_LP_STATE_LOCK(dhd->bus->bus_lp_state_lock, flags_bus);
11605 	dhd->prot->mb_ring_fn(dhd->bus, value);
11606 	DHD_BUS_LP_STATE_UNLOCK(dhd->bus->bus_lp_state_lock, flags_bus);
11607 }
11608 
11609 /**
11610  * dhd_prot_ring_write_complete_mbdata - will be called from dhd_prot_h2d_mbdata_send_ctrlmsg,
11611  * which will hold DHD_BUS_LP_STATE_LOCK to update WR pointer, Ring DB and also update
11612  * bus_low_power_state to indicate D3_INFORM sent in the same BUS_LP_STATE_LOCK.
11613  */
11614 static void
11615 BCMFASTPATH(dhd_prot_ring_write_complete_mbdata)(dhd_pub_t *dhd, msgbuf_ring_t * ring, void *p,
11616 	uint16 nitems, uint32 mb_data)
11617 {
11618 	unsigned long flags_bus;
11619 
11620 	DHD_BUS_LP_STATE_LOCK(dhd->bus->bus_lp_state_lock, flags_bus);
11621 
11622 	__dhd_prot_ring_write_complete(dhd, ring, p, nitems);
11623 
11624 	/* Mark D3_INFORM in the same context to skip ringing H2D DB after D3_INFORM */
11625 	if (mb_data == H2D_HOST_D3_INFORM) {
11626 		__DHD_SET_BUS_LPS_D3_INFORMED(dhd->bus);
11627 	}
11628 
11629 	DHD_BUS_LP_STATE_UNLOCK(dhd->bus->bus_lp_state_lock, flags_bus);
11630 }
11631 
11632 /**
11633  * dhd_prot_upd_read_idx - Host updates the new RD index on consuming messages
11634  * from a D2H ring. The new RD index will be updated in the DMA Index array or
11635  * directly in dongle's ring state memory.
11636  */
11637 static void
11638 dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring)
11639 {
11640 	dhd_prot_t *prot = dhd->prot;
11641 	uint32 db_index;
11642 	uint corerev;
11643 
11644 	/* Update the read index.
11645 	 * If DMA'ing of h2d indices is supported,
11646 	 * update the read indices in
11647 	 * host memory, otherwise in TCM.
11648 	 */
11649 	if (IDMA_ACTIVE(dhd)) {
11650 		dhd_prot_dma_indx_set(dhd, ring->rd,
11651 			D2H_DMA_INDX_RD_UPD, ring->idx);
11652 		db_index = IDMA_IDX1;
11653 		if (dhd->bus->sih) {
11654 			corerev = dhd->bus->sih->buscorerev;
11655 			/* We need to explicitly configure the type of DMA for core rev >= 24 */
11656 			if (corerev >= 24) {
11657 				db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
11658 			}
11659 		}
11660 		prot->mb_2_ring_fn(dhd->bus, db_index, FALSE);
11661 	} else if (dhd->dma_h2d_ring_upd_support) {
11662 		dhd_prot_dma_indx_set(dhd, ring->rd,
11663 		                      D2H_DMA_INDX_RD_UPD, ring->idx);
11664 	} else {
11665 		dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
11666 			sizeof(uint16), RING_RD_UPD, ring->idx);
11667 	}
11668 }
11669 
11670 static int
11671 dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
11672 	uint16 ring_type, uint32 req_id)
11673 {
11674 	unsigned long flags;
11675 	d2h_ring_create_req_t  *d2h_ring;
11676 	uint16 alloced = 0;
11677 	int ret = BCME_OK;
11678 	uint16 max_h2d_rings = dhd->bus->max_submission_rings;
11679 	msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
11680 
11681 #ifdef PCIE_INB_DW
11682 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
11683 		return BCME_ERROR;
11684 #endif /* PCIE_INB_DW */
11685 	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
11686 
11687 	DHD_TRACE(("%s trying to send D2H ring create Req\n", __FUNCTION__));
11688 
11689 	if (ring_to_create == NULL) {
11690 		DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__));
11691 		ret = BCME_ERROR;
11692 		goto err;
11693 	}
11694 
11695 	/* Request for ring buffer space */
11696 	d2h_ring = (d2h_ring_create_req_t *) dhd_prot_alloc_ring_space(dhd,
11697 		ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
11698 		&alloced, FALSE);
11699 
11700 	if (d2h_ring == NULL) {
11701 		DHD_ERROR(("%s: FATAL: No space in control ring to send D2H ring create\n",
11702 			__FUNCTION__));
11703 		ret = BCME_NOMEM;
11704 		goto err;
11705 	}
11706 	ring_to_create->create_req_id = (uint16)req_id;
11707 	ring_to_create->create_pending = TRUE;
11708 
11709 	/* Common msg buf hdr */
11710 	d2h_ring->msg.msg_type = MSG_TYPE_D2H_RING_CREATE;
11711 	d2h_ring->msg.if_id = 0;
11712 	d2h_ring->msg.flags = ctrl_ring->current_phase;
11713 	d2h_ring->msg.request_id = htol32(ring_to_create->create_req_id);
11714 	d2h_ring->ring_id = htol16(DHD_D2H_RING_OFFSET(ring_to_create->idx, max_h2d_rings));
11715 	DHD_ERROR(("%s ringid: %d idx: %d max_h2d: %d\n", __FUNCTION__, d2h_ring->ring_id,
11716 			ring_to_create->idx, max_h2d_rings));
11717 
11718 	d2h_ring->ring_type = ring_type;
11719 	d2h_ring->max_items = htol16(ring_to_create->max_items);
11720 	d2h_ring->len_item = htol16(ring_to_create->item_len);
11721 	d2h_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr;
11722 	d2h_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr;
11723 
11724 	d2h_ring->flags = 0;
11725 	d2h_ring->msg.epoch =
11726 		ctrl_ring->seqnum % H2D_EPOCH_MODULO;
11727 	ctrl_ring->seqnum++;
11728 
11729 #ifdef EWP_EDL
11730 	if (ring_type == BCMPCIE_D2H_RING_TYPE_EDL) {
11731 		DHD_ERROR(("%s: sending d2h EDL ring create: "
11732 			"\n max items=%u; len_item=%u; ring_id=%u; low_addr=0x%x; high_addr=0x%x\n",
11733 			__FUNCTION__, ltoh16(d2h_ring->max_items),
11734 			ltoh16(d2h_ring->len_item),
11735 			ltoh16(d2h_ring->ring_id),
11736 			d2h_ring->ring_ptr.low_addr,
11737 			d2h_ring->ring_ptr.high_addr));
11738 	}
11739 #endif /* EWP_EDL */
11740 
11741 	/* Update the control ring's WRITE index and ring the doorbell */
11742 	dhd_prot_ring_write_complete(dhd, ctrl_ring, d2h_ring,
11743 		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
11744 
11745 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
11746 
11747 #ifdef PCIE_INB_DW
11748 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
11749 #endif
11750 
11751 	return ret;
11752 err:
11753 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
11754 
11755 #ifdef PCIE_INB_DW
11756 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
11757 #endif
11758 	return ret;
11759 }
11760 
11761 static int
11762 dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create, uint8 ring_type, uint32 id)
11763 {
11764 	unsigned long flags;
11765 	h2d_ring_create_req_t  *h2d_ring;
11766 	uint16 alloced = 0;
11767 	uint8 i = 0;
11768 	int ret = BCME_OK;
11769 	msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
11770 
11771 #ifdef PCIE_INB_DW
11772 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
11773 		return BCME_ERROR;
11774 #endif /* PCIE_INB_DW */
11775 	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
11776 
11777 	DHD_TRACE(("%s trying to send H2D ring create Req\n", __FUNCTION__));
11778 
11779 	if (ring_to_create == NULL) {
11780 		DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__));
11781 		ret = BCME_ERROR;
11782 		goto err;
11783 	}
11784 
11785 	/* Request for ring buffer space */
11786 	h2d_ring = (h2d_ring_create_req_t *)dhd_prot_alloc_ring_space(dhd,
11787 		ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
11788 		&alloced, FALSE);
11789 
11790 	if (h2d_ring == NULL) {
11791 		DHD_ERROR(("%s: FATAL: No space in control ring to send H2D ring create\n",
11792 			__FUNCTION__));
11793 		ret = BCME_NOMEM;
11794 		goto err;
11795 	}
11796 	ring_to_create->create_req_id = (uint16)id;
11797 	ring_to_create->create_pending = TRUE;
11798 
11799 	/* Common msg buf hdr */
11800 	h2d_ring->msg.msg_type = MSG_TYPE_H2D_RING_CREATE;
11801 	h2d_ring->msg.if_id = 0;
11802 	h2d_ring->msg.request_id = htol32(ring_to_create->create_req_id);
11803 	h2d_ring->msg.flags = ctrl_ring->current_phase;
11804 	h2d_ring->ring_id = htol16(DHD_H2D_RING_OFFSET(ring_to_create->idx));
11805 	h2d_ring->ring_type = ring_type;
11806 	h2d_ring->max_items = htol16(H2DRING_DYNAMIC_INFO_MAX_ITEM);
11807 	h2d_ring->n_completion_ids = ring_to_create->n_completion_ids;
11808 	h2d_ring->len_item = htol16(H2DRING_INFO_BUFPOST_ITEMSIZE);
11809 	h2d_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr;
11810 	h2d_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr;
11811 
11812 	for (i = 0; i < ring_to_create->n_completion_ids; i++) {
11813 		h2d_ring->completion_ring_ids[i] = htol16(ring_to_create->compeltion_ring_ids[i]);
11814 	}
11815 
11816 	h2d_ring->flags = 0;
11817 	h2d_ring->msg.epoch =
11818 		ctrl_ring->seqnum % H2D_EPOCH_MODULO;
11819 	ctrl_ring->seqnum++;
11820 
11821 	/* Update the control ring's WRITE index and ring the doorbell */
11822 	dhd_prot_ring_write_complete(dhd, ctrl_ring, h2d_ring,
11823 		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
11824 
11825 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
11826 
11827 #ifdef PCIE_INB_DW
11828 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
11829 #endif
11830 	return ret;
11831 err:
11832 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
11833 
11834 #ifdef PCIE_INB_DW
11835 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
11836 #endif
11837 	return ret;
11838 }
11839 
11840 /**
11841  * dhd_prot_dma_indx_set - set a new WR or RD index in the DMA index array.
11842  * Dongle will DMA the entire array (if DMA_INDX feature is enabled).
11843  * See dhd_prot_dma_indx_init()
11844  */
11845 void
11846 dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type, uint16 ringid)
11847 {
11848 	uint8 *ptr;
11849 	uint16 offset;
11850 	dhd_prot_t *prot = dhd->prot;
11851 	uint16 max_h2d_rings = dhd->bus->max_submission_rings;
11852 
11853 	switch (type) {
11854 		case H2D_DMA_INDX_WR_UPD:
11855 			ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
11856 			offset = DHD_H2D_RING_OFFSET(ringid);
11857 			break;
11858 
11859 		case D2H_DMA_INDX_RD_UPD:
11860 			ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
11861 			offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
11862 			break;
11863 
11864 		case H2D_IFRM_INDX_WR_UPD:
11865 			ptr = (uint8 *)(prot->h2d_ifrm_indx_wr_buf.va);
11866 			offset = DHD_H2D_FRM_FLOW_RING_OFFSET(ringid);
11867 			break;
11868 
11869 		default:
11870 			DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
11871 				__FUNCTION__));
11872 			return;
11873 	}
11874 
11875 	ASSERT(prot->rw_index_sz != 0);
11876 	ptr += offset * prot->rw_index_sz;
11877 
11878 	/* XXX: Test casting ptr to uint16* for 32bit indices case on Big Endian */
11879 	*(uint16*)ptr = htol16(new_index);
11880 
11881 	OSL_CACHE_FLUSH((void *)ptr, prot->rw_index_sz);
11882 
11883 	DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n",
11884 		__FUNCTION__, new_index, type, ringid, ptr, offset));
11885 
11886 } /* dhd_prot_dma_indx_set */
11887 
11888 /**
11889  * dhd_prot_dma_indx_get - Fetch a WR or RD index from the dongle DMA-ed index
11890  * array.
11891  * Dongle DMAes an entire array to host memory (if the feature is enabled).
11892  * See dhd_prot_dma_indx_init()
11893  */
11894 static uint16
11895 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid)
11896 {
11897 	uint8 *ptr;
11898 	uint16 data;
11899 	uint16 offset;
11900 	dhd_prot_t *prot = dhd->prot;
11901 	uint16 max_h2d_rings = dhd->bus->max_submission_rings;
11902 
11903 	switch (type) {
11904 		case H2D_DMA_INDX_WR_UPD:
11905 			ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
11906 			offset = DHD_H2D_RING_OFFSET(ringid);
11907 			break;
11908 
11909 		case H2D_DMA_INDX_RD_UPD:
11910 #ifdef DHD_DMA_INDICES_SEQNUM
11911 			if (prot->h2d_dma_indx_rd_copy_buf) {
11912 				ptr = (uint8 *)(prot->h2d_dma_indx_rd_copy_buf);
11913 			} else
11914 #endif /* DHD_DMA_INDICES_SEQNUM */
11915 			{
11916 				ptr = (uint8 *)(prot->h2d_dma_indx_rd_buf.va);
11917 			}
11918 			offset = DHD_H2D_RING_OFFSET(ringid);
11919 			break;
11920 
11921 		case D2H_DMA_INDX_WR_UPD:
11922 #ifdef DHD_DMA_INDICES_SEQNUM
11923 			if (prot->d2h_dma_indx_wr_copy_buf) {
11924 				ptr = (uint8 *)(prot->d2h_dma_indx_wr_copy_buf);
11925 			} else
11926 #endif /* DHD_DMA_INDICES_SEQNUM */
11927 			{
11928 				ptr = (uint8 *)(prot->d2h_dma_indx_wr_buf.va);
11929 			}
11930 			offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
11931 			break;
11932 
11933 		case D2H_DMA_INDX_RD_UPD:
11934 			ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
11935 			offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
11936 			break;
11937 
11938 		default:
11939 			DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
11940 				__FUNCTION__));
11941 			return 0;
11942 	}
11943 
11944 	ASSERT(prot->rw_index_sz != 0);
11945 	ptr += offset * prot->rw_index_sz;
11946 
11947 	OSL_CACHE_INV((void *)ptr, prot->rw_index_sz);
11948 
11949 	/* XXX: Test casting ptr to uint16* for 32bit indices case on Big Endian */
11950 	data = LTOH16(*((uint16*)ptr));
11951 
11952 	DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n",
11953 		__FUNCTION__, data, type, ringid, ptr, offset));
11954 
11955 	return (data);
11956 
11957 } /* dhd_prot_dma_indx_get */
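
/*
 * Illustrative sketch (not part of the original driver): with DMA indices
 * enabled, the host publishes its WR index and picks up the dongle-updated RD
 * index through the shared index arrays instead of reading TCM. Both helpers
 * compute 'array base + ring offset * prot->rw_index_sz' internally. The
 * guard macro is hypothetical and never defined.
 */
#ifdef DHD_MSGBUF_USAGE_EXAMPLE /* hypothetical guard; never defined */
static void
dhd_example_dma_indx_sync(dhd_pub_t *dhd, msgbuf_ring_t *ring)
{
	/* Publish the host's new write index for this H2D ring */
	dhd_prot_dma_indx_set(dhd, ring->wr, H2D_DMA_INDX_WR_UPD, ring->idx);

	/* Refresh the cached read index that the dongle DMA'ed into host memory */
	ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
}
#endif /* DHD_MSGBUF_USAGE_EXAMPLE */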
11958 
11959 #ifdef DHD_DMA_INDICES_SEQNUM
11960 void
11961 dhd_prot_write_host_seqnum(dhd_pub_t *dhd, uint32 seq_num)
11962 {
11963 	uint8 *ptr;
11964 	dhd_prot_t *prot = dhd->prot;
11965 
11966 	/* Update host sequence number in first four bytes of scratchbuf */
11967 	ptr = (uint8 *)(prot->d2h_dma_scratch_buf.va);
11968 	*(uint32*)ptr = htol32(seq_num);
11969 	OSL_CACHE_FLUSH((void *)ptr, prot->d2h_dma_scratch_buf.len);
11970 
11971 	DHD_TRACE(("%s: data %d ptr 0x%p\n", __FUNCTION__, seq_num, ptr));
11972 
11973 } /* dhd_prot_write_host_seqnum */
11974 
11975 uint32
11976 dhd_prot_read_seqnum(dhd_pub_t *dhd, bool host)
11977 {
11978 	uint8 *ptr;
11979 	dhd_prot_t *prot = dhd->prot;
11980 	uint32 data;
11981 
11982 	OSL_CACHE_INV((void *)prot->d2h_dma_scratch_buf.va, prot->d2h_dma_scratch_buf.len);
11983 
11984 	/* First four bytes of scratchbuf contains the host sequence number.
11985 	 * Next four bytes of scratchbuf contains the Dongle sequence number.
11986 	 */
11987 	if (host) {
11988 		ptr = (uint8 *)(prot->d2h_dma_scratch_buf.va);
11989 		data = LTOH32(*((uint32*)ptr));
11990 	} else {
11991 		ptr = ((uint8 *)(prot->d2h_dma_scratch_buf.va) + sizeof(uint32));
11992 		data = LTOH32(*((uint32*)ptr));
11993 	}
11994 	DHD_TRACE(("%s: data %d ptr 0x%p\n", __FUNCTION__, data, ptr));
11995 	return data;
11996 } /* dhd_prot_read_seqnum */
11997 
11998 void
11999 dhd_prot_save_dmaidx(dhd_pub_t *dhd)
12000 {
12001 	dhd_prot_t *prot = dhd->prot;
12002 	uint32 dngl_seqnum;
12003 
12004 	dngl_seqnum = dhd_prot_read_seqnum(dhd, FALSE);
12005 
12006 	DHD_TRACE(("%s: host_seqnum %u dngl_seqnum %u\n", __FUNCTION__,
12007 			prot->host_seqnum, dngl_seqnum));
12008 	if (prot->d2h_dma_indx_wr_copy_buf && prot->h2d_dma_indx_rd_copy_buf) {
12009 		if (prot->host_seqnum == dngl_seqnum) {
12010 			memcpy_s(prot->d2h_dma_indx_wr_copy_buf, prot->d2h_dma_indx_wr_copy_bufsz,
12011 				prot->d2h_dma_indx_wr_buf.va, prot->d2h_dma_indx_wr_copy_bufsz);
12012 			memcpy_s(prot->h2d_dma_indx_rd_copy_buf, prot->h2d_dma_indx_rd_copy_bufsz,
12013 				prot->h2d_dma_indx_rd_buf.va, prot->h2d_dma_indx_rd_copy_bufsz);
12014 			dhd_prot_write_host_seqnum(dhd, prot->host_seqnum);
12015 			/* Ring DoorBell */
12016 			dhd_prot_ring_doorbell(dhd, DHD_DMA_INDX_SEQ_H2D_DB_MAGIC);
12017 			prot->host_seqnum++;
12018 			prot->host_seqnum %= D2H_EPOCH_MODULO;
12019 		}
12020 	}
12021 }
12022 
12023 int
12024 dhd_prot_dma_indx_copybuf_init(dhd_pub_t *dhd, uint32 buf_sz, uint8 type)
12025 {
12026 	dhd_prot_t *prot = dhd->prot;
12027 
12028 	switch (type) {
12029 		case D2H_DMA_INDX_WR_BUF:
12030 			prot->d2h_dma_indx_wr_copy_buf = MALLOCZ(dhd->osh, buf_sz);
12031 			if (prot->d2h_dma_indx_wr_copy_buf == NULL) {
12032 				DHD_ERROR(("%s: MALLOC failed for size %d\n",
12033 					__FUNCTION__, buf_sz));
12034 				goto ret_no_mem;
12035 			}
12036 			prot->d2h_dma_indx_wr_copy_bufsz = buf_sz;
12037 		break;
12038 
12039 		case H2D_DMA_INDX_RD_BUF:
12040 			prot->h2d_dma_indx_rd_copy_buf = MALLOCZ(dhd->osh, buf_sz);
12041 			if (prot->h2d_dma_indx_rd_copy_buf == NULL) {
12042 				DHD_ERROR(("%s: MALLOC failed for size %d\n",
12043 					__FUNCTION__, buf_sz));
12044 				goto ret_no_mem;
12045 			}
12046 			prot->h2d_dma_indx_rd_copy_bufsz = buf_sz;
12047 			break;
12048 
12049 		default:
12050 			break;
12051 	}
12052 	return BCME_OK;
12053 ret_no_mem:
12054 	return BCME_NOMEM;
12055 
12056 }
12057 #endif /* DHD_DMA_INDICES_SEQNUM */
12058 
12059 /**
12060  * An array of DMA read/write indices, containing information about host rings, can be maintained
12061  * either in host memory or in device memory, depending on preprocessor options. Depending on these
12062  * options, this function is called during driver initialization. It reserves and initializes
12063  * blocks of DMA'able host memory containing an array of DMA read or DMA write indices. The physical
12064  * addresses of these host memory blocks are communicated to the dongle later on. By reading this host
12065  * memory, the dongle learns about the state of the host rings.
12066  */
12067 
12068 static INLINE int
12069 dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
12070 	dhd_dma_buf_t *dma_buf, uint32 bufsz)
12071 {
12072 	int rc;
12073 
12074 	if ((dma_buf->len == bufsz) || (dma_buf->va != NULL))
12075 		return BCME_OK;
12076 
12077 	rc = dhd_dma_buf_alloc(dhd, dma_buf, bufsz);
12078 
12079 	return rc;
12080 }
12081 
12082 int
12083 dhd_prot_dma_indx_init(dhd_pub_t *dhd, uint32 rw_index_sz, uint8 type, uint32 length)
12084 {
12085 	uint32 bufsz;
12086 	dhd_prot_t *prot = dhd->prot;
12087 	dhd_dma_buf_t *dma_buf;
12088 
12089 	if (prot == NULL) {
12090 		DHD_ERROR(("prot is not inited\n"));
12091 		return BCME_ERROR;
12092 	}
12093 
12094 	/* Dongle advertises 2B or 4B RW index size */
12095 	ASSERT(rw_index_sz != 0);
12096 	prot->rw_index_sz = rw_index_sz;
12097 
12098 	bufsz = rw_index_sz * length;
12099 
12100 	switch (type) {
12101 		case H2D_DMA_INDX_WR_BUF:
12102 			dma_buf = &prot->h2d_dma_indx_wr_buf;
12103 			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
12104 				goto ret_no_mem;
12105 			DHD_ERROR(("H2D DMA WR INDX : array size %d = %d * %d\n",
12106 				dma_buf->len, rw_index_sz, length));
12107 			break;
12108 
12109 		case H2D_DMA_INDX_RD_BUF:
12110 			dma_buf = &prot->h2d_dma_indx_rd_buf;
12111 			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
12112 				goto ret_no_mem;
12113 			DHD_ERROR(("H2D DMA RD INDX : array size %d = %d * %d\n",
12114 				dma_buf->len, rw_index_sz, length));
12115 			break;
12116 
12117 		case D2H_DMA_INDX_WR_BUF:
12118 			dma_buf = &prot->d2h_dma_indx_wr_buf;
12119 			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
12120 				goto ret_no_mem;
12121 			DHD_ERROR(("D2H DMA WR INDX : array size %d = %d * %d\n",
12122 				dma_buf->len, rw_index_sz, length));
12123 			break;
12124 
12125 		case D2H_DMA_INDX_RD_BUF:
12126 			dma_buf = &prot->d2h_dma_indx_rd_buf;
12127 			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
12128 				goto ret_no_mem;
12129 			DHD_ERROR(("D2H DMA RD INDX : array size %d = %d * %d\n",
12130 				dma_buf->len, rw_index_sz, length));
12131 			break;
12132 
12133 		case H2D_IFRM_INDX_WR_BUF:
12134 			dma_buf = &prot->h2d_ifrm_indx_wr_buf;
12135 			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
12136 				goto ret_no_mem;
12137 			DHD_ERROR(("H2D IFRM WR INDX : array size %d = %d * %d\n",
12138 				dma_buf->len, rw_index_sz, length));
12139 			break;
12140 
12141 		default:
12142 			DHD_ERROR(("%s: Unexpected option\n", __FUNCTION__));
12143 			return BCME_BADOPTION;
12144 	}
12145 
12146 	return BCME_OK;
12147 
12148 ret_no_mem:
12149 	DHD_ERROR(("%s: dhd_prot_dma_indx_alloc type %d buf_sz %d failure\n",
12150 		__FUNCTION__, type, bufsz));
12151 	return BCME_NOMEM;
12152 
12153 } /* dhd_prot_dma_indx_init */
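
/*
 * Illustrative sketch (not part of the original driver): during protocol init
 * one DMA index array is typically set up per direction and type. The ring
 * counts passed in below are placeholders; the real caller derives them from
 * the counts advertised by the dongle. The guard macro is hypothetical and
 * never defined.
 */
#ifdef DHD_MSGBUF_USAGE_EXAMPLE /* hypothetical guard; never defined */
static int
dhd_example_dma_indx_arrays_init(dhd_pub_t *dhd, uint32 rw_index_sz,
	uint32 max_h2d_rings, uint32 max_d2h_rings)
{
	/* Host-updated H2D write indices, DMA'ed by the dongle */
	if (dhd_prot_dma_indx_init(dhd, rw_index_sz,
	        H2D_DMA_INDX_WR_BUF, max_h2d_rings) != BCME_OK)
		return BCME_NOMEM;

	/* Host-updated D2H read indices, DMA'ed by the dongle */
	if (dhd_prot_dma_indx_init(dhd, rw_index_sz,
	        D2H_DMA_INDX_RD_BUF, max_d2h_rings) != BCME_OK)
		return BCME_NOMEM;

	return BCME_OK;
}
#endif /* DHD_MSGBUF_USAGE_EXAMPLE */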
12154 
12155 /**
12156  * Called on checking for 'completion' messages from the dongle. Returns next host buffer to read
12157  * from, or NULL if there are no more messages to read.
12158  */
12159 static uint8*
12160 dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 *available_len)
12161 {
12162 	uint16 wr;
12163 	uint16 rd;
12164 	uint16 depth;
12165 	uint16 items;
12166 	void  *read_addr = NULL; /* address of next msg to be read in ring */
12167 	uint16 d2h_wr = 0;
12168 
12169 	DHD_TRACE(("%s: d2h_dma_indx_rd_buf %p, d2h_dma_indx_wr_buf %p\n",
12170 		__FUNCTION__, (uint32 *)(dhd->prot->d2h_dma_indx_rd_buf.va),
12171 		(uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va)));
12172 
12173 	/* Remember the read index in a variable.
12174 	 * This is because ring->rd gets updated at the end of this function,
12175 	 * so otherwise it would not be possible to print the exact read
12176 	 * index from which the message was read.
12177 	 */
12178 	ring->curr_rd = ring->rd;
12179 
12180 	/* update write pointer */
12181 	if (dhd->dma_d2h_ring_upd_support) {
12182 		/* DMAing write/read indices supported */
12183 		d2h_wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
12184 		ring->wr = d2h_wr;
12185 	} else {
12186 		dhd_bus_cmn_readshared(dhd->bus, &(ring->wr), RING_WR_UPD, ring->idx);
12187 	}
12188 
12189 	wr = ring->wr;
12190 	rd = ring->rd;
12191 	depth = ring->max_items;
12192 
12193 	/* check for avail space, in number of ring items */
12194 	items = READ_AVAIL_SPACE(wr, rd, depth);
12195 	if (items == 0)
12196 		return NULL;
12197 
12198 	/*
12199 	 * Note that there are builds where ASSERT translates to just a printk,
12200 	 * so even if we hit this condition we would never halt. Then
12201 	 * dhd_prot_process_msgtype can get into a big loop if this
12202 	 * happens.
12203 	 */
12204 	if (items > ring->max_items) {
12205 		DHD_ERROR(("\r\n======================= \r\n"));
12206 		DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n",
12207 			__FUNCTION__, ring, ring->name, ring->max_items, items));
12208 		DHD_ERROR(("wr: %d,  rd: %d,  depth: %d  \r\n", wr, rd, depth));
12209 		DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n",
12210 			dhd->busstate, dhd->bus->wait_for_d3_ack));
12211 		DHD_ERROR(("\r\n======================= \r\n"));
12212 #ifdef SUPPORT_LINKDOWN_RECOVERY
12213 		if (wr >= ring->max_items) {
12214 			dhd->bus->read_shm_fail = TRUE;
12215 		}
12216 #else
12217 #ifdef DHD_FW_COREDUMP
12218 		if (dhd->memdump_enabled) {
12219 			/* collect core dump */
12220 			dhd->memdump_type = DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR;
12221 			dhd_bus_mem_dump(dhd);
12222 
12223 		}
12224 #endif /* DHD_FW_COREDUMP */
12225 #endif /* SUPPORT_LINKDOWN_RECOVERY */
12226 
12227 		*available_len = 0;
12228 		dhd_schedule_reset(dhd);
12229 
12230 		return NULL;
12231 	}
12232 
12233 	/* if space is available, calculate address to be read */
12234 	read_addr = (char*)ring->dma_buf.va + (rd * ring->item_len);
12235 
12236 	/* update read pointer */
12237 	if ((ring->rd + items) >= ring->max_items)
12238 		ring->rd = 0;
12239 	else
12240 		ring->rd += items;
12241 
12242 	ASSERT(ring->rd < ring->max_items);
12243 
12244 	/* convert items to bytes : available_len must be 32bits */
12245 	*available_len = (uint32)(items * ring->item_len);
12246 
12247 	/* XXX Double cache invalidate for ARM with L2 cache/prefetch */
12248 	OSL_CACHE_INV(read_addr, *available_len);
12249 
12250 	/* return read address */
12251 	return read_addr;
12252 
12253 } /* dhd_prot_get_read_addr */
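
/*
 * Illustrative sketch (not part of the original driver): the D2H completion
 * processing path reads a contiguous span of new messages, hands them to the
 * message-type dispatcher (elided here, it lives elsewhere in this file), and
 * then publishes the new RD index back to the dongle. The guard macro is
 * hypothetical and never defined.
 */
#ifdef DHD_MSGBUF_USAGE_EXAMPLE /* hypothetical guard; never defined */
static void
dhd_example_consume_d2h_ring(dhd_pub_t *dhd, msgbuf_ring_t *ring)
{
	uint32 msg_len = 0;
	uint8 *msg_addr;

	msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
	if (msg_addr == NULL)
		return; /* nothing new to consume */

	/* ... process 'msg_len' bytes of completion messages at 'msg_addr' ... */

	/* Tell the dongle how far the host has consumed (new RD index) */
	dhd_prot_upd_read_idx(dhd, ring);
}
#endif /* DHD_MSGBUF_USAGE_EXAMPLE */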
12254 
12255 /**
12256  * dhd_prot_h2d_mbdata_send_ctrlmsg is a non-atomic function,
12257  * make sure the callers always hold appropriate locks.
12258  */
12259 int dhd_prot_h2d_mbdata_send_ctrlmsg(dhd_pub_t *dhd, uint32 mb_data)
12260 {
12261 	h2d_mailbox_data_t *h2d_mb_data;
12262 	uint16 alloced = 0;
12263 	msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
12264 	unsigned long flags;
12265 	int num_post = 1;
12266 	int i;
12267 
12268 	DHD_MSGBUF_INFO(("%s Sending H2D MB data Req data 0x%04x\n",
12269 		__FUNCTION__, mb_data));
12270 	if (!ctrl_ring->inited) {
12271 		DHD_ERROR(("%s: Ctrl Submit Ring: not inited\n", __FUNCTION__));
12272 		return BCME_ERROR;
12273 	}
12274 
12275 #ifdef PCIE_INB_DW
12276 	if ((INBAND_DW_ENAB(dhd->bus)) &&
12277 		(dhdpcie_bus_get_pcie_inband_dw_state(dhd->bus) ==
12278 			DW_DEVICE_DS_DEV_SLEEP)) {
12279 		if (mb_data == H2D_HOST_CONS_INT) {
12280 			/* One additional device_wake post needed */
12281 			num_post = 2;
12282 		}
12283 	}
12284 #endif /* PCIE_INB_DW */
12285 
12286 	for (i = 0; i < num_post; i ++) {
12287 		DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
12288 		/* Request for ring buffer space */
12289 		h2d_mb_data = (h2d_mailbox_data_t *)dhd_prot_alloc_ring_space(dhd,
12290 			ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
12291 			&alloced, FALSE);
12292 
12293 		if (h2d_mb_data == NULL) {
12294 			DHD_ERROR(("%s: FATAL: No space in control ring to send H2D Mb data\n",
12295 				__FUNCTION__));
12296 			DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
12297 			return BCME_NOMEM;
12298 		}
12299 
12300 		memset(h2d_mb_data, 0, sizeof(h2d_mailbox_data_t));
12301 		/* Common msg buf hdr */
12302 		h2d_mb_data->msg.msg_type = MSG_TYPE_H2D_MAILBOX_DATA;
12303 		h2d_mb_data->msg.flags = ctrl_ring->current_phase;
12304 
12305 		h2d_mb_data->msg.epoch =
12306 			ctrl_ring->seqnum % H2D_EPOCH_MODULO;
12307 		ctrl_ring->seqnum++;
12308 
12309 		/* Fill in the mailbox data */
12310 		h2d_mb_data->mail_box_data = htol32(mb_data);
12311 #ifdef PCIE_INB_DW
12312 		/* post device_wake first */
12313 		if ((num_post == 2) && (i == 0)) {
12314 			h2d_mb_data->mail_box_data = htol32(H2DMB_DS_DEVICE_WAKE);
12315 		} else
12316 #endif /* PCIE_INB_DW */
12317 		{
12318 			h2d_mb_data->mail_box_data = htol32(mb_data);
12319 		}
12320 
12321 		DHD_MSGBUF_INFO(("%s Send H2D MB data Req data 0x%04x\n", __FUNCTION__, mb_data));
12322 
12323 		/* upd wrt ptr and raise interrupt */
12324 		dhd_prot_ring_write_complete_mbdata(dhd, ctrl_ring, h2d_mb_data,
12325 			DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, mb_data);
12326 
12327 		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
12328 
12329 #ifdef PCIE_INB_DW
12330 		/* Add a delay if device_wake is posted */
12331 		if ((num_post == 2) && (i == 0)) {
12332 			OSL_DELAY(1000);
12333 		}
12334 #endif /* PCIE_INB_DW */
12335 	}
12336 	return 0;
12337 }
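
/*
 * Illustrative sketch (not part of the original driver): a caller announcing
 * D3 entry over the control submission ring. The locking and low-power-state
 * checks performed by the real bus-layer caller are omitted; the guard macro
 * is hypothetical and never defined.
 */
#ifdef DHD_MSGBUF_USAGE_EXAMPLE /* hypothetical guard; never defined */
static int
dhd_example_send_d3_inform(dhd_pub_t *dhd)
{
	return dhd_prot_h2d_mbdata_send_ctrlmsg(dhd, H2D_HOST_D3_INFORM);
}
#endif /* DHD_MSGBUF_USAGE_EXAMPLE */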
12338 
12339 /** Creates a flow ring and informs dongle of this event */
12340 int
12341 dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
12342 {
12343 	tx_flowring_create_request_t *flow_create_rqst;
12344 	msgbuf_ring_t *flow_ring;
12345 	dhd_prot_t *prot = dhd->prot;
12346 	unsigned long flags;
12347 	uint16 alloced = 0;
12348 	msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
12349 	uint16 max_flowrings = dhd->bus->max_tx_flowrings;
12350 
12351 	/* Fetch a pre-initialized msgbuf_ring from the flowring pool */
12352 	flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
12353 	if (flow_ring == NULL) {
12354 		DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n",
12355 			__FUNCTION__, flow_ring_node->flowid));
12356 		return BCME_NOMEM;
12357 	}
12358 
12359 #ifdef PCIE_INB_DW
12360 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
12361 		return BCME_ERROR;
12362 #endif /* PCIE_INB_DW */
12363 	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
12364 
12365 	/* Request for ctrl_ring buffer space */
12366 	flow_create_rqst = (tx_flowring_create_request_t *)
12367 		dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE);
12368 
12369 	if (flow_create_rqst == NULL) {
12370 		dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
12371 		DHD_ERROR(("%s: Flow Create Req flowid %d - failure ring space\n",
12372 			__FUNCTION__, flow_ring_node->flowid));
12373 		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
12374 #ifdef PCIE_INB_DW
12375 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
12376 #endif
12377 		return BCME_NOMEM;
12378 	}
12379 
12380 	flow_ring_node->prot_info = (void *)flow_ring;
12381 
12382 	/* Common msg buf hdr */
12383 	flow_create_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_CREATE;
12384 	flow_create_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
12385 	flow_create_rqst->msg.request_id = htol32(0); /* TBD */
12386 	flow_create_rqst->msg.flags = ctrl_ring->current_phase;
12387 
12388 	flow_create_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
12389 	ctrl_ring->seqnum++;
12390 
12391 	/* Update flow create message */
12392 	flow_create_rqst->tid = flow_ring_node->flow_info.tid;
12393 	flow_create_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
12394 	memcpy(flow_create_rqst->sa, flow_ring_node->flow_info.sa, sizeof(flow_create_rqst->sa));
12395 	memcpy(flow_create_rqst->da, flow_ring_node->flow_info.da, sizeof(flow_create_rqst->da));
12396 	/* CAUTION: ring::base_addr already in Little Endian */
12397 	flow_create_rqst->flow_ring_ptr.low_addr = flow_ring->base_addr.low_addr;
12398 	flow_create_rqst->flow_ring_ptr.high_addr = flow_ring->base_addr.high_addr;
12399 	flow_create_rqst->max_items = htol16(flow_ring->max_items);
12400 	flow_create_rqst->len_item = htol16(H2DRING_TXPOST_ITEMSIZE);
12401 	flow_create_rqst->if_flags = 0;
12402 
12403 #ifdef DHD_HP2P
12404 	/* Create an HP2P flow ring if HP2P is enabled, TID == 7, the interface is AWDL, */
12405 	/* and the traffic is not multicast. */
12406 	/* Allow the infra interface only if the user enabled hp2p_infra_enable through the iovar. */
12407 	if (dhd->hp2p_capable && dhd->hp2p_ring_more &&
12408 		flow_ring_node->flow_info.tid == HP2P_PRIO &&
12409 		(dhd->hp2p_infra_enable || flow_create_rqst->msg.if_id) &&
12410 		!ETHER_ISMULTI(flow_create_rqst->da)) {
12411 		flow_create_rqst->if_flags |= BCMPCIE_FLOW_RING_INTF_HP2P;
12412 		flow_ring_node->hp2p_ring = TRUE;
12413 		/* Allow multiple HP2P Flow if mf override is enabled */
12414 		if (!dhd->hp2p_mf_enable) {
12415 			dhd->hp2p_ring_more = FALSE;
12416 		}
12417 
12418 		DHD_ERROR(("%s: flow ring for HP2P tid = %d flowid = %d\n",
12419 				__FUNCTION__, flow_ring_node->flow_info.tid,
12420 				flow_ring_node->flowid));
12421 	}
12422 #endif /* DHD_HP2P */
12423 
12424 	/* Definition of the ifrm mask: bit0 = d11ac core, bit1 = d11ad core.
12425 	 * It is currently not used for priority, so it is used solely as the ifrm mask.
12426 	 */
12427 	if (IFRM_ACTIVE(dhd))
12428 		flow_create_rqst->priority_ifrmmask = (1 << IFRM_DEV_0);
12429 
12430 	DHD_ERROR(("%s: Send Flow Create Req flow ID %d for peer " MACDBG
12431 		" prio %d ifindex %d items %d\n", __FUNCTION__, flow_ring_node->flowid,
12432 		MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid,
12433 		flow_ring_node->flow_info.ifindex, flow_ring->max_items));
12434 
12435 	/* Update the flow_ring's WRITE index */
12436 	if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
12437 		dhd_prot_dma_indx_set(dhd, flow_ring->wr,
12438 		                      H2D_DMA_INDX_WR_UPD, flow_ring->idx);
12439 	} else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(flow_ring->idx, max_flowrings)) {
12440 		dhd_prot_dma_indx_set(dhd, flow_ring->wr,
12441 			H2D_IFRM_INDX_WR_UPD, flow_ring->idx);
12442 	} else {
12443 		dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
12444 			sizeof(uint16), RING_WR_UPD, flow_ring->idx);
12445 	}
12446 
12447 	/* update control subn ring's WR index and ring doorbell to dongle */
12448 	dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_create_rqst, 1);
12449 
12450 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
12451 
12452 #ifdef PCIE_INB_DW
12453 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
12454 #endif
12455 	return BCME_OK;
12456 } /* dhd_prot_flow_ring_create */
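/*
 * Illustrative sketch only (not part of the driver): a bus/flowring-layer caller
 * would typically treat BCME_NOMEM from dhd_prot_flow_ring_create() as a
 * transient "control submission ring full" condition and retry later, roughly:
 *
 *	if (dhd_prot_flow_ring_create(dhdp, flow_ring_node) != BCME_OK) {
 *		DHD_ERROR(("flow ring %d create deferred\n", flow_ring_node->flowid));
 *		// leave the node in its pending state; the caller retries later
 *	}
 *
 * The retry policy above is an assumption; only the function name, argument
 * types and BCME_* return codes are taken from this file.
 */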
12457 
12458 /** called on receiving MSG_TYPE_FLOW_RING_CREATE_CMPLT message from dongle */
12459 static void
12460 dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg)
12461 {
12462 	tx_flowring_create_response_t *flow_create_resp = (tx_flowring_create_response_t *)msg;
12463 
12464 	DHD_ERROR(("%s: Flow Create Response status = %d Flow %d\n", __FUNCTION__,
12465 		ltoh16(flow_create_resp->cmplt.status),
12466 		ltoh16(flow_create_resp->cmplt.flow_ring_id)));
12467 
12468 	dhd_bus_flow_ring_create_response(dhd->bus,
12469 		ltoh16(flow_create_resp->cmplt.flow_ring_id),
12470 		ltoh16(flow_create_resp->cmplt.status));
12471 }
12472 
12473 #if !defined(BCM_ROUTER_DHD)
12474 static void
12475 dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf)
12476 {
12477 	h2d_ring_create_response_t *resp = (h2d_ring_create_response_t *)buf;
12478 	DHD_INFO(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__,
12479 		ltoh16(resp->cmplt.status),
12480 		ltoh16(resp->cmplt.ring_id),
12481 		ltoh32(resp->cmn_hdr.request_id)));
12482 	if ((ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_DBGRING_REQ_PKTID) &&
12483 		(ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_BTLOGRING_REQ_PKTID)) {
12484 		DHD_ERROR(("invalid request ID with h2d ring create complete\n"));
12485 		return;
12486 	}
12487 	if (dhd->prot->h2dring_info_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id) &&
12488 		!dhd->prot->h2dring_info_subn->create_pending) {
12489 		DHD_ERROR(("info ring create status for not pending submit ring\n"));
12490 	}
12491 #ifdef BTLOG
12492 	if (dhd->prot->h2dring_btlog_subn &&
12493 		dhd->prot->h2dring_btlog_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id) &&
12494 		!dhd->prot->h2dring_btlog_subn->create_pending) {
12495 		DHD_ERROR(("btlog ring create status for not pending submit ring\n"));
12496 	}
12497 #endif	/* BTLOG */
12498 
12499 	if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
12500 		DHD_ERROR(("info/btlog ring create failed with status %d\n",
12501 			ltoh16(resp->cmplt.status)));
12502 		return;
12503 	}
12504 	if (dhd->prot->h2dring_info_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id)) {
12505 		dhd->prot->h2dring_info_subn->create_pending = FALSE;
12506 		dhd->prot->h2dring_info_subn->inited = TRUE;
12507 		DHD_ERROR(("info buffer post after ring create\n"));
12508 		dhd_prot_infobufpost(dhd, dhd->prot->h2dring_info_subn);
12509 	}
12510 #ifdef BTLOG
12511 	if (dhd->prot->h2dring_btlog_subn &&
12512 		dhd->prot->h2dring_btlog_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id)) {
12513 		dhd->prot->h2dring_btlog_subn->create_pending = FALSE;
12514 		dhd->prot->h2dring_btlog_subn->inited = TRUE;
12515 		DHD_ERROR(("btlog buffer post after ring create\n"));
12516 		dhd_prot_infobufpost(dhd, dhd->prot->h2dring_btlog_subn);
12517 	}
12518 #endif	/* BTLOG */
12519 }
12520 #endif /* !BCM_ROUTER_DHD */
12521 
12522 static void
12523 dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf)
12524 {
12525 	d2h_ring_create_response_t *resp = (d2h_ring_create_response_t *)buf;
12526 	DHD_ERROR(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__,
12527 		ltoh16(resp->cmplt.status),
12528 		ltoh16(resp->cmplt.ring_id),
12529 		ltoh32(resp->cmn_hdr.request_id)));
12530 	if ((ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_DBGRING_REQ_PKTID) &&
12531 		(ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_BTLOGRING_REQ_PKTID) &&
12532 #ifdef DHD_HP2P
12533 		(ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_HPPRING_TXREQ_PKTID) &&
12534 		(ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_HPPRING_RXREQ_PKTID) &&
12535 #endif /* DHD_HP2P */
12536 		TRUE) {
12537 		DHD_ERROR(("invalid request ID with d2h ring create complete\n"));
12538 		return;
12539 	}
12540 	if (ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_DBGRING_REQ_PKTID) {
12541 #ifdef EWP_EDL
12542 		if (!dhd->dongle_edl_support)
12543 #endif
12544 		{
12545 
12546 			if (!dhd->prot->d2hring_info_cpln->create_pending) {
12547 				DHD_ERROR(("info ring create status for not pending cpl ring\n"));
12548 				return;
12549 			}
12550 
12551 			if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
12552 				DHD_ERROR(("info cpl ring create failed with status %d\n",
12553 					ltoh16(resp->cmplt.status)));
12554 				return;
12555 			}
12556 			dhd->prot->d2hring_info_cpln->create_pending = FALSE;
12557 			dhd->prot->d2hring_info_cpln->inited = TRUE;
12558 		}
12559 #ifdef EWP_EDL
12560 		else {
12561 			if (!dhd->prot->d2hring_edl->create_pending) {
12562 				DHD_ERROR(("edl ring create status for not pending cpl ring\n"));
12563 				return;
12564 			}
12565 
12566 			if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
12567 				DHD_ERROR(("edl cpl ring create failed with status %d\n",
12568 					ltoh16(resp->cmplt.status)));
12569 				return;
12570 			}
12571 			dhd->prot->d2hring_edl->create_pending = FALSE;
12572 			dhd->prot->d2hring_edl->inited = TRUE;
12573 		}
12574 #endif /* EWP_EDL */
12575 	}
12576 
12577 #ifdef BTLOG
12578 	if (ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_BTLOGRING_REQ_PKTID) {
12579 		if (!dhd->prot->d2hring_btlog_cpln->create_pending) {
12580 			DHD_ERROR(("btlog ring create status for not pending cpl ring\n"));
12581 			return;
12582 		}
12583 
12584 		if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
12585 			DHD_ERROR(("btlog cpl ring create failed with status %d\n",
12586 				ltoh16(resp->cmplt.status)));
12587 			return;
12588 		}
12589 		dhd->prot->d2hring_btlog_cpln->create_pending = FALSE;
12590 		dhd->prot->d2hring_btlog_cpln->inited = TRUE;
12591 	}
12592 #endif	/* BTLOG */
12593 #ifdef DHD_HP2P
12594 	if (dhd->prot->d2hring_hp2p_txcpl &&
12595 		ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_HPPRING_TXREQ_PKTID) {
12596 		if (!dhd->prot->d2hring_hp2p_txcpl->create_pending) {
12597 			DHD_ERROR(("HPP tx ring create status for not pending cpl ring\n"));
12598 			return;
12599 		}
12600 
12601 		if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
12602 			DHD_ERROR(("HPP tx cpl ring create failed with status %d\n",
12603 				ltoh16(resp->cmplt.status)));
12604 			return;
12605 		}
12606 		dhd->prot->d2hring_hp2p_txcpl->create_pending = FALSE;
12607 		dhd->prot->d2hring_hp2p_txcpl->inited = TRUE;
12608 	}
12609 	if (dhd->prot->d2hring_hp2p_rxcpl &&
12610 		ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_HPPRING_RXREQ_PKTID) {
12611 		if (!dhd->prot->d2hring_hp2p_rxcpl->create_pending) {
12612 			DHD_ERROR(("HPP rx ring create status for not pending cpl ring\n"));
12613 			return;
12614 		}
12615 
12616 		if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
12617 			DHD_ERROR(("HPP rx cpl ring create failed with status %d\n",
12618 				ltoh16(resp->cmplt.status)));
12619 			return;
12620 		}
12621 		dhd->prot->d2hring_hp2p_rxcpl->create_pending = FALSE;
12622 		dhd->prot->d2hring_hp2p_rxcpl->inited = TRUE;
12623 	}
12624 #endif /* DHD_HP2P */
12625 }
12626 
12627 static void
12628 dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf)
12629 {
12630 	d2h_mailbox_data_t *d2h_data;
12631 
12632 	d2h_data = (d2h_mailbox_data_t *)buf;
12633 	DHD_MSGBUF_INFO(("%s dhd_prot_process_d2h_mb_data, 0x%04x\n", __FUNCTION__,
12634 		d2h_data->d2h_mailbox_data));
12635 	dhd_bus_handle_mb_data(dhd->bus, d2h_data->d2h_mailbox_data);
12636 }
12637 
12638 static void
12639 dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf)
12640 {
12641 #ifdef DHD_TIMESYNC
12642 	host_timestamp_msg_cpl_t  *host_ts_cpl;
12643 	uint32 pktid;
12644 	dhd_prot_t *prot = dhd->prot;
12645 
12646 	host_ts_cpl = (host_timestamp_msg_cpl_t *)buf;
12647 	DHD_INFO(("%s host TS cpl: status %d, req_ID: 0x%04x, xt_id %d \n", __FUNCTION__,
12648 		host_ts_cpl->cmplt.status, host_ts_cpl->msg.request_id, host_ts_cpl->xt_id));
12649 
12650 	pktid = ltoh32(host_ts_cpl->msg.request_id);
12651 	if (prot->hostts_req_buf_inuse == FALSE) {
12652 		DHD_ERROR(("No Pending Host TS req, but completion\n"));
12653 		return;
12654 	}
12655 	prot->hostts_req_buf_inuse = FALSE;
12656 	if (pktid != DHD_H2D_HOSTTS_REQ_PKTID) {
12657 		DHD_ERROR(("Host TS req CPL, but req ID different 0x%04x, exp 0x%04x\n",
12658 			pktid, DHD_H2D_HOSTTS_REQ_PKTID));
12659 		return;
12660 	}
12661 	dhd_timesync_handle_host_ts_complete(dhd->ts, host_ts_cpl->xt_id,
12662 		host_ts_cpl->cmplt.status);
12663 #else /* DHD_TIMESYNC */
12664 	DHD_ERROR(("Timesync feature not compiled in but GOT HOST_TS_COMPLETE\n"));
12665 #endif /* DHD_TIMESYNC */
12666 
12667 }
12668 
12669 /** called on e.g. flow ring delete */
12670 void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info)
12671 {
12672 	msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
12673 	dhd_prot_ring_detach(dhd, flow_ring);
12674 	DHD_INFO(("%s Cleaning up Flow \n", __FUNCTION__));
12675 }
12676 
12677 void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info, bool h2d,
12678 	struct bcmstrbuf *strbuf, const char * fmt)
12679 {
12680 	const char *default_fmt =
12681 		"TRD:%d HLRD:%d HDRD:%d TWR:%d HLWR:%d HDWR:%d  BASE(VA) %p BASE(PA) %x:%x "
12682 		"WORK_ITEM_SIZE %d MAX_WORK_ITEMS %d TOTAL_SIZE %d\n";
12683 	msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
12684 	uint16 rd, wr, drd = 0, dwr = 0;
12685 	uint32 dma_buf_len = flow_ring->max_items * flow_ring->item_len;
12686 
12687 	if (fmt == NULL) {
12688 		fmt = default_fmt;
12689 	}
12690 
12691 	if (dhd->bus->is_linkdown) {
12692 		DHD_ERROR(("%s: Skip dumping flowring due to Link down\n", __FUNCTION__));
12693 		return;
12694 	}
12695 
12696 	dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
12697 	dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
12698 	if (dhd->dma_d2h_ring_upd_support) {
12699 		if (h2d) {
12700 			drd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, flow_ring->idx);
12701 			dwr = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, flow_ring->idx);
12702 		} else {
12703 			drd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, flow_ring->idx);
12704 			dwr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, flow_ring->idx);
12705 		}
12706 	}
12707 	bcm_bprintf(strbuf, fmt, rd, flow_ring->rd, drd, wr, flow_ring->wr, dwr,
12708 		flow_ring->dma_buf.va,
12709 		ltoh32(flow_ring->base_addr.high_addr),
12710 		ltoh32(flow_ring->base_addr.low_addr),
12711 		flow_ring->item_len, flow_ring->max_items,
12712 		dma_buf_len);
12713 }
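/*
 * Note on the triplets printed above: for each of RD and WR the first value is
 * read back from the dongle shared area via dhd_bus_cmn_readshared(), the second
 * is the host-local copy kept in msgbuf_ring_t (flow_ring->rd / flow_ring->wr),
 * and the third is the host-memory DMA index from dhd_prot_dma_indx_get()
 * (printed as 0 when dma_d2h_ring_upd_support is not set).
 */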
12714 
12715 void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
12716 {
12717 	dhd_prot_t *prot = dhd->prot;
12718 	bcm_bprintf(strbuf, "IPCrevs: Dev %d, \t Host %d, \tactive %d\n",
12719 		dhd->prot->device_ipc_version,
12720 		dhd->prot->host_ipc_version,
12721 		dhd->prot->active_ipc_version);
12722 
12723 	bcm_bprintf(strbuf, "max Host TS bufs to post: %d, \t posted %d \n",
12724 		dhd->prot->max_tsbufpost, dhd->prot->cur_ts_bufs_posted);
12725 	bcm_bprintf(strbuf, "max INFO bufs to post: %d, \t posted %d \n",
12726 		dhd->prot->max_infobufpost, dhd->prot->infobufpost);
12727 #ifdef BTLOG
12728 	bcm_bprintf(strbuf, "max BTLOG bufs to post: %d, \t posted %d \n",
12729 		dhd->prot->max_btlogbufpost, dhd->prot->btlogbufpost);
12730 #endif	/* BTLOG */
12731 	bcm_bprintf(strbuf, "max event bufs to post: %d, \t posted %d \n",
12732 		dhd->prot->max_eventbufpost, dhd->prot->cur_event_bufs_posted);
12733 	bcm_bprintf(strbuf, "max ioctlresp bufs to post: %d, \t posted %d \n",
12734 		dhd->prot->max_ioctlrespbufpost, dhd->prot->cur_ioctlresp_bufs_posted);
12735 	bcm_bprintf(strbuf, "max RX bufs to post: %d, \t posted %d \n",
12736 		dhd->prot->max_rxbufpost, dhd->prot->rxbufpost);
12737 
12738 	bcm_bprintf(strbuf, "Total RX bufs posted: %d, \t RX cpl got %d \n",
12739 		dhd->prot->tot_rxbufpost, dhd->prot->tot_rxcpl);
12740 
12741 	bcm_bprintf(strbuf, "Total TX packets: %lu, \t TX cpl got %lu \n",
12742 		dhd->actual_tx_pkts, dhd->tot_txcpl);
12743 
12744 	bcm_bprintf(strbuf,
12745 		"%14s %18s %18s %17s %17s %14s %14s %10s\n",
12746 		"Type", "TRD: HLRD: HDRD", "TWR: HLWR: HDWR", "BASE(VA)", "BASE(PA)",
12747 		"WORK_ITEM_SIZE", "MAX_WORK_ITEMS", "TOTAL_SIZE");
12748 	bcm_bprintf(strbuf, "%14s", "H2DCtrlPost");
12749 	dhd_prot_print_flow_ring(dhd, &prot->h2dring_ctrl_subn, TRUE, strbuf,
12750 		" %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n");
12751 	bcm_bprintf(strbuf, "%14s", "D2HCtrlCpl");
12752 	dhd_prot_print_flow_ring(dhd, &prot->d2hring_ctrl_cpln, FALSE, strbuf,
12753 		" %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n");
12754 	bcm_bprintf(strbuf, "%14s", "H2DRxPost");
12755 	dhd_prot_print_flow_ring(dhd, &prot->h2dring_rxp_subn, TRUE, strbuf,
12756 		" %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n");
12757 	bcm_bprintf(strbuf, "%14s", "D2HRxCpl");
12758 	dhd_prot_print_flow_ring(dhd, &prot->d2hring_rx_cpln, FALSE, strbuf,
12759 		" %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n");
12760 	bcm_bprintf(strbuf, "%14s", "D2HTxCpl");
12761 	dhd_prot_print_flow_ring(dhd, &prot->d2hring_tx_cpln, FALSE, strbuf,
12762 		" %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n");
12763 	if (dhd->prot->h2dring_info_subn != NULL && dhd->prot->d2hring_info_cpln != NULL) {
12764 		bcm_bprintf(strbuf, "%14s", "H2DRingInfoSub");
12765 		dhd_prot_print_flow_ring(dhd, prot->h2dring_info_subn, TRUE, strbuf,
12766 			" %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n");
12767 		bcm_bprintf(strbuf, "%14s", "D2HRingInfoCpl");
12768 		dhd_prot_print_flow_ring(dhd, prot->d2hring_info_cpln, FALSE, strbuf,
12769 			" %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n");
12770 	}
12771 	if (dhd->prot->d2hring_edl != NULL) {
12772 		bcm_bprintf(strbuf, "%14s", "D2HRingEDL");
12773 		dhd_prot_print_flow_ring(dhd, prot->d2hring_edl, FALSE, strbuf,
12774 			" %5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d\n");
12775 	}
12776 
12777 	bcm_bprintf(strbuf, "active_tx_count %d	 pktidmap_avail(ctrl/rx/tx) %d %d %d\n",
12778 		OSL_ATOMIC_READ(dhd->osh, &dhd->prot->active_tx_count),
12779 		DHD_PKTID_AVAIL(dhd->prot->pktid_ctrl_map),
12780 		DHD_PKTID_AVAIL(dhd->prot->pktid_rx_map),
12781 		DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map));
12782 
12783 #if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP)
12784 	dhd_prot_ioctl_dump(dhd->prot, strbuf);
12785 #endif /* defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) */
12786 #ifdef DHD_MMIO_TRACE
12787 	dhd_dump_bus_mmio_trace(dhd->bus, strbuf);
12788 #endif /* DHD_MMIO_TRACE */
12789 	dhd_dump_bus_ds_trace(dhd->bus, strbuf);
12790 #ifdef DHD_FLOW_RING_STATUS_TRACE
12791 	dhd_dump_bus_flow_ring_status_isr_trace(dhd->bus, strbuf);
12792 	dhd_dump_bus_flow_ring_status_dpc_trace(dhd->bus, strbuf);
12793 #endif /* DHD_FLOW_RING_STATUS_TRACE */
12794 }
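/*
 * Illustrative usage sketch only: dhd_prot_print_info() expects a
 * caller-initialized bcmstrbuf, typically set up with the standard bcmutils
 * helper, e.g.:
 *
 *	char buf[4096];            // buffer sizing is the caller's choice (assumption)
 *	struct bcmstrbuf b;
 *	bcm_binit(&b, buf, sizeof(buf));
 *	dhd_prot_print_info(dhdp, &b);
 *
 * bcm_binit()/bcm_bprintf() come from bcmutils; nothing beyond that is implied
 * by this file.
 */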
12795 
12796 int
12797 dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
12798 {
12799 	tx_flowring_delete_request_t *flow_delete_rqst;
12800 	dhd_prot_t *prot = dhd->prot;
12801 	unsigned long flags;
12802 	uint16 alloced = 0;
12803 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
12804 
12805 #ifdef PCIE_INB_DW
12806 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
12807 		return BCME_ERROR;
12808 #endif /* PCIE_INB_DW */
12809 
12810 	DHD_RING_LOCK(ring->ring_lock, flags);
12811 
12812 	/* Request for ring buffer space */
12813 	flow_delete_rqst = (tx_flowring_delete_request_t *)
12814 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
12815 
12816 	if (flow_delete_rqst == NULL) {
12817 		DHD_RING_UNLOCK(ring->ring_lock, flags);
12818 		DHD_ERROR(("%s: Flow Delete Req - failure ring space\n", __FUNCTION__));
12819 #ifdef PCIE_INB_DW
12820 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
12821 #endif
12822 		return BCME_NOMEM;
12823 	}
12824 
12825 	/* Common msg buf hdr */
12826 	flow_delete_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_DELETE;
12827 	flow_delete_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
12828 	flow_delete_rqst->msg.request_id = htol32(0); /* TBD */
12829 	flow_delete_rqst->msg.flags = ring->current_phase;
12830 
12831 	flow_delete_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
12832 	ring->seqnum++;
12833 
12834 	/* Update Delete info */
12835 	flow_delete_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
12836 	flow_delete_rqst->reason = htol16(BCME_OK);
12837 
12838 	DHD_ERROR(("%s: Send Flow Delete Req RING ID %d for peer %pM"
12839 		" prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid,
12840 		flow_ring_node->flow_info.da, flow_ring_node->flow_info.tid,
12841 		flow_ring_node->flow_info.ifindex));
12842 
12843 	/* update ring's WR index and ring doorbell to dongle */
12844 	dhd_prot_ring_write_complete(dhd, ring, flow_delete_rqst, 1);
12845 
12846 	DHD_RING_UNLOCK(ring->ring_lock, flags);
12847 
12848 #ifdef PCIE_INB_DW
12849 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
12850 #endif
12851 	return BCME_OK;
12852 }
12853 
12854 static void
12855 BCMFASTPATH(dhd_prot_flow_ring_fastdelete)(dhd_pub_t *dhd, uint16 flowid, uint16 rd_idx)
12856 {
12857 	flow_ring_node_t *flow_ring_node = DHD_FLOW_RING(dhd, flowid);
12858 	msgbuf_ring_t *ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
12859 	host_txbuf_cmpl_t txstatus;
12860 	host_txbuf_post_t *txdesc;
12861 	uint16 wr_idx;
12862 
12863 	DHD_INFO(("%s: FAST delete ring, flowid=%d, rd_idx=%d, wr_idx=%d\n",
12864 		__FUNCTION__, flowid, rd_idx, ring->wr));
12865 
12866 	memset(&txstatus, 0, sizeof(txstatus));
12867 	txstatus.compl_hdr.flow_ring_id = flowid;
12868 	txstatus.cmn_hdr.if_id = flow_ring_node->flow_info.ifindex;
12869 	wr_idx = ring->wr;
12870 
12871 	while (wr_idx != rd_idx) {
12872 		if (wr_idx)
12873 			wr_idx--;
12874 		else
12875 			wr_idx = ring->max_items - 1;
12876 		txdesc = (host_txbuf_post_t *)((char *)DHD_RING_BGN_VA(ring) +
12877 			(wr_idx * ring->item_len));
12878 		txstatus.cmn_hdr.request_id = txdesc->cmn_hdr.request_id;
12879 		dhd_prot_txstatus_process(dhd, &txstatus);
12880 	}
12881 }
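/*
 * Worked example (illustrative numbers only): with ring->max_items == 1024,
 * ring->wr == 3 and rd_idx == 1021, the loop above steps wr_idx backwards
 * through 2, 1, 0, 1023, 1022 and finally 1021, wrapping past slot 0, and feeds
 * each skipped tx descriptor's request_id into dhd_prot_txstatus_process() so
 * its packet is reclaimed before the ring is deleted.
 */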
12882 
12883 static void
12884 dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg)
12885 {
12886 	tx_flowring_delete_response_t *flow_delete_resp = (tx_flowring_delete_response_t *)msg;
12887 
12888 	DHD_ERROR(("%s: Flow Delete Response status = %d Flow %d\n", __FUNCTION__,
12889 		flow_delete_resp->cmplt.status, flow_delete_resp->cmplt.flow_ring_id));
12890 
12891 	if (dhd->fast_delete_ring_support) {
12892 		dhd_prot_flow_ring_fastdelete(dhd, flow_delete_resp->cmplt.flow_ring_id,
12893 			flow_delete_resp->read_idx);
12894 	}
12895 	dhd_bus_flow_ring_delete_response(dhd->bus, flow_delete_resp->cmplt.flow_ring_id,
12896 		flow_delete_resp->cmplt.status);
12897 }
12898 
12899 static void
12900 dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg)
12901 {
12902 #ifdef IDLE_TX_FLOW_MGMT
12903 	tx_idle_flowring_resume_response_t	*flow_resume_resp =
12904 		(tx_idle_flowring_resume_response_t *)msg;
12905 
12906 	DHD_ERROR(("%s Flow resume Response status = %d Flow %d\n", __FUNCTION__,
12907 		flow_resume_resp->cmplt.status, flow_resume_resp->cmplt.flow_ring_id));
12908 
12909 	dhd_bus_flow_ring_resume_response(dhd->bus, flow_resume_resp->cmplt.flow_ring_id,
12910 		flow_resume_resp->cmplt.status);
12911 #endif /* IDLE_TX_FLOW_MGMT */
12912 }
12913 
12914 static void
12915 dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg)
12916 {
12917 #ifdef IDLE_TX_FLOW_MGMT
12918 	int16 status;
12919 	tx_idle_flowring_suspend_response_t	*flow_suspend_resp =
12920 		(tx_idle_flowring_suspend_response_t *)msg;
12921 	status = flow_suspend_resp->cmplt.status;
12922 
12923 	DHD_ERROR(("%s Flow id %d suspend Response status = %d\n",
12924 		__FUNCTION__, flow_suspend_resp->cmplt.flow_ring_id,
12925 		status));
12926 
12927 	if (status != BCME_OK) {
12928 
12929 		DHD_ERROR(("%s Error in suspending flow rings!!"
12930 			" Dongle will still be polling idle rings!! Status = %d\n",
12931 			__FUNCTION__, status));
12932 	}
12933 #endif /* IDLE_TX_FLOW_MGMT */
12934 }
12935 
12936 int
12937 dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
12938 {
12939 	tx_flowring_flush_request_t *flow_flush_rqst;
12940 	dhd_prot_t *prot = dhd->prot;
12941 	unsigned long flags;
12942 	uint16 alloced = 0;
12943 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
12944 
12945 #ifdef PCIE_INB_DW
12946 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
12947 		return BCME_ERROR;
12948 #endif /* PCIE_INB_DW */
12949 
12950 	DHD_RING_LOCK(ring->ring_lock, flags);
12951 
12952 	/* Request for ring buffer space */
12953 	flow_flush_rqst = (tx_flowring_flush_request_t *)
12954 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
12955 	if (flow_flush_rqst == NULL) {
12956 		DHD_RING_UNLOCK(ring->ring_lock, flags);
12957 		DHD_ERROR(("%s: Flow Flush Req - failure ring space\n", __FUNCTION__));
12958 #ifdef PCIE_INB_DW
12959 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
12960 #endif
12961 		return BCME_NOMEM;
12962 	}
12963 
12964 	/* Common msg buf hdr */
12965 	flow_flush_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_FLUSH;
12966 	flow_flush_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
12967 	flow_flush_rqst->msg.request_id = htol32(0); /* TBD */
12968 	flow_flush_rqst->msg.flags = ring->current_phase;
12969 	flow_flush_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
12970 	ring->seqnum++;
12971 
12972 	flow_flush_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
12973 	flow_flush_rqst->reason = htol16(BCME_OK);
12974 
12975 	DHD_INFO(("%s: Send Flow Flush Req\n", __FUNCTION__));
12976 
12977 	/* update ring's WR index and ring doorbell to dongle */
12978 	dhd_prot_ring_write_complete(dhd, ring, flow_flush_rqst, 1);
12979 
12980 	DHD_RING_UNLOCK(ring->ring_lock, flags);
12981 
12982 #ifdef PCIE_INB_DW
12983 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
12984 #endif
12985 	return BCME_OK;
12986 } /* dhd_prot_flow_ring_flush */
12987 
12988 static void
12989 dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg)
12990 {
12991 	tx_flowring_flush_response_t *flow_flush_resp = (tx_flowring_flush_response_t *)msg;
12992 
12993 	DHD_INFO(("%s: Flow Flush Response status = %d\n", __FUNCTION__,
12994 		flow_flush_resp->cmplt.status));
12995 
12996 	dhd_bus_flow_ring_flush_response(dhd->bus, flow_flush_resp->cmplt.flow_ring_id,
12997 		flow_flush_resp->cmplt.status);
12998 }
12999 
13000 /**
13001  * Request dongle to configure soft doorbells for D2H rings. Host populated soft
13002  * doorbell information is transferred to dongle via the d2h ring config control
13003  * message.
13004  */
13005 void
13006 dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd)
13007 {
13008 #if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
13009 	uint16 ring_idx;
13010 	uint8 *msg_next;
13011 	void *msg_start;
13012 	uint16 alloced = 0;
13013 	unsigned long flags;
13014 	dhd_prot_t *prot = dhd->prot;
13015 	ring_config_req_t *ring_config_req;
13016 	bcmpcie_soft_doorbell_t *soft_doorbell;
13017 	msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
13018 	const uint16 d2h_rings = BCMPCIE_D2H_COMMON_MSGRINGS;
13019 
13020 #ifdef PCIE_INB_DW
13021 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
13022 		return;
13023 #endif /* PCIE_INB_DW */
13024 	/* Claim space for d2h_ring number of d2h_ring_config_req_t messages */
13025 	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
13026 	msg_start = dhd_prot_alloc_ring_space(dhd, ctrl_ring, d2h_rings, &alloced, TRUE);
13027 
13028 	if (msg_start == NULL) {
13029 		DHD_ERROR(("%s Msgbuf no space for %d D2H ring config soft doorbells\n",
13030 			__FUNCTION__, d2h_rings));
13031 		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
13032 #ifdef PCIE_INB_DW
13033 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
13034 #endif
13035 		return;
13036 	}
13037 
13038 	msg_next = (uint8*)msg_start;
13039 
13040 	for (ring_idx = 0; ring_idx < d2h_rings; ring_idx++) {
13041 
13042 		/* position the ring_config_req into the ctrl subm ring */
13043 		ring_config_req = (ring_config_req_t *)msg_next;
13044 
13045 		/* Common msg header */
13046 		ring_config_req->msg.msg_type = MSG_TYPE_D2H_RING_CONFIG;
13047 		ring_config_req->msg.if_id = 0;
13048 		ring_config_req->msg.flags = 0;
13049 
13050 		ring_config_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
13051 		ctrl_ring->seqnum++;
13052 
13053 		ring_config_req->msg.request_id = htol32(DHD_FAKE_PKTID); /* unused */
13054 
13055 		/* Ring Config subtype and d2h ring_id */
13056 		ring_config_req->subtype = htol16(D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL);
13057 		ring_config_req->ring_id = htol16(DHD_D2H_RINGID(ring_idx));
13058 
13059 		/* Host soft doorbell configuration */
13060 		soft_doorbell = &prot->soft_doorbell[ring_idx];
13061 
13062 		ring_config_req->soft_doorbell.value = htol32(soft_doorbell->value);
13063 		ring_config_req->soft_doorbell.haddr.high =
13064 			htol32(soft_doorbell->haddr.high);
13065 		ring_config_req->soft_doorbell.haddr.low =
13066 			htol32(soft_doorbell->haddr.low);
13067 		ring_config_req->soft_doorbell.items = htol16(soft_doorbell->items);
13068 		ring_config_req->soft_doorbell.msecs = htol16(soft_doorbell->msecs);
13069 
13070 		DHD_INFO(("%s: Soft doorbell haddr 0x%08x 0x%08x value 0x%08x\n",
13071 			__FUNCTION__, ring_config_req->soft_doorbell.haddr.high,
13072 			ring_config_req->soft_doorbell.haddr.low,
13073 			ring_config_req->soft_doorbell.value));
13074 
13075 		msg_next = msg_next + ctrl_ring->item_len;
13076 	}
13077 
13078 	/* update control subn ring's WR index and ring doorbell to dongle */
13079 	dhd_prot_ring_write_complete(dhd, ctrl_ring, msg_start, d2h_rings);
13080 
13081 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
13082 
13083 #ifdef PCIE_INB_DW
13084 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
13085 #endif
13086 #endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
13087 }
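/*
 * Illustrative sketch only: when DHD_D2H_SOFT_DOORBELL_SUPPORT is enabled, a
 * platform is expected to populate prot->soft_doorbell[] before this function
 * runs, using the fields consumed above. All values below are hypothetical:
 *
 *	bcmpcie_soft_doorbell_t *db = &prot->soft_doorbell[ring_idx];
 *	db->haddr.high = 0x00000000;   // upper 32 bits of the host register address
 *	db->haddr.low  = 0x1fe00040;   // hypothetical wakeup register address
 *	db->value      = 0x00000001;   // value the dongle writes to that address
 *	db->items      = 16;           // per the field name: likely a work-item threshold (assumption)
 *	db->msecs      = 2;            // per the field name: likely a time threshold in ms (assumption)
 *
 * Only the structure and field names are taken from the code above; the
 * interpretation of items/msecs is inferred from their names.
 */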
13088 
13089 static void
13090 dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg)
13091 {
13092 	DHD_INFO(("%s: Ring Config Response - status %d ringid %d\n",
13093 		__FUNCTION__, ltoh16(((ring_config_resp_t *)msg)->compl_hdr.status),
13094 		ltoh16(((ring_config_resp_t *)msg)->compl_hdr.flow_ring_id)));
13095 }
13096 
13097 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
13098 void
13099 copy_ext_trap_sig(dhd_pub_t *dhd, trap_t *tr)
13100 {
13101 	uint32 *ext_data = dhd->extended_trap_data;
13102 	hnd_ext_trap_hdr_t *hdr;
13103 	const bcm_tlv_t *tlv;
13104 
13105 	if (ext_data == NULL) {
13106 		return;
13107 	}
13108 	/* First word is original trap_data */
13109 	ext_data++;
13110 
13111 	/* Followed by the extended trap data header */
13112 	hdr = (hnd_ext_trap_hdr_t *)ext_data;
13113 
13114 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
13115 	if (tlv) {
13116 		memcpy(tr, &tlv->data, sizeof(struct _trap_struct));
13117 	}
13118 }
13119 #define TRAP_T_NAME_OFFSET(var) {#var, OFFSETOF(trap_t, var)}
13120 
13121 typedef struct {
13122 	char name[HANG_INFO_TRAP_T_NAME_MAX];
13123 	uint32 offset;
13124 } hang_info_trap_t;
13125 
13126 #ifdef DHD_EWPR_VER2
13127 static hang_info_trap_t hang_info_trap_tbl[] = {
13128 	{"reason", 0},
13129 	{"ver", VENDOR_SEND_HANG_EXT_INFO_VER},
13130 	{"stype", 0},
13131 	TRAP_T_NAME_OFFSET(type),
13132 	TRAP_T_NAME_OFFSET(epc),
13133 	{"resrvd", 0},
13134 	{"resrvd", 0},
13135 	{"resrvd", 0},
13136 	{"resrvd", 0},
13137 	{"", 0}
13138 };
13139 #else
13140 static hang_info_trap_t hang_info_trap_tbl[] = {
13141 	{"reason", 0},
13142 	{"ver", VENDOR_SEND_HANG_EXT_INFO_VER},
13143 	{"stype", 0},
13144 	TRAP_T_NAME_OFFSET(type),
13145 	TRAP_T_NAME_OFFSET(epc),
13146 	TRAP_T_NAME_OFFSET(cpsr),
13147 	TRAP_T_NAME_OFFSET(spsr),
13148 	TRAP_T_NAME_OFFSET(r0),
13149 	TRAP_T_NAME_OFFSET(r1),
13150 	TRAP_T_NAME_OFFSET(r2),
13151 	TRAP_T_NAME_OFFSET(r3),
13152 	TRAP_T_NAME_OFFSET(r4),
13153 	TRAP_T_NAME_OFFSET(r5),
13154 	TRAP_T_NAME_OFFSET(r6),
13155 	TRAP_T_NAME_OFFSET(r7),
13156 	TRAP_T_NAME_OFFSET(r8),
13157 	TRAP_T_NAME_OFFSET(r9),
13158 	TRAP_T_NAME_OFFSET(r10),
13159 	TRAP_T_NAME_OFFSET(r11),
13160 	TRAP_T_NAME_OFFSET(r12),
13161 	TRAP_T_NAME_OFFSET(r13),
13162 	TRAP_T_NAME_OFFSET(r14),
13163 	TRAP_T_NAME_OFFSET(pc),
13164 	{"", 0}
13165 };
13166 #endif /* DHD_EWPR_VER2 */
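/*
 * For clarity: TRAP_T_NAME_OFFSET(epc) in the tables above expands to
 * {"epc", OFFSETOF(trap_t, epc)}, i.e. each entry pairs a register name with
 * its byte offset inside trap_t, which lets copy_hang_info_trap_t() (non-EWPR2
 * builds) read every register value generically via (ptr + offset).
 */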
13167 
13168 #define TAG_TRAP_IS_STATE(tag) \
13169 	((tag == TAG_TRAP_MEMORY) || (tag == TAG_TRAP_PCIE_Q) || \
13170 	(tag == TAG_TRAP_WLC_STATE) || (tag == TAG_TRAP_LOG_DATA) || \
13171 	(tag == TAG_TRAP_CODE))
13172 
13173 static void
13174 copy_hang_info_head(char *dest, trap_t *src, int len, int field_name,
13175 		int *bytes_written, int *cnt, char *cookie)
13176 {
13177 	uint8 *ptr;
13178 	int remain_len;
13179 	int i;
13180 
13181 	ptr = (uint8 *)src;
13182 
13183 	memset(dest, 0, len);
13184 	remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13185 
13186 	/* hang reason, hang info ver */
13187 	for (i = 0; (i < HANG_INFO_TRAP_T_SUBTYPE_IDX) && (*cnt < HANG_FIELD_CNT_MAX);
13188 			i++, (*cnt)++) {
13189 		if (field_name) {
13190 			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13191 			*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
13192 					hang_info_trap_tbl[i].name, HANG_KEY_DEL);
13193 		}
13194 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13195 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%d%c",
13196 				hang_info_trap_tbl[i].offset, HANG_KEY_DEL);
13197 
13198 	}
13199 
13200 	if (*cnt < HANG_FIELD_CNT_MAX) {
13201 		if (field_name) {
13202 			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13203 			*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
13204 					"cookie", HANG_KEY_DEL);
13205 		}
13206 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13207 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s%c",
13208 				cookie, HANG_KEY_DEL);
13209 		(*cnt)++;
13210 	}
13211 
13212 	if (*cnt < HANG_FIELD_CNT_MAX) {
13213 		if (field_name) {
13214 			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13215 			*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
13216 					hang_info_trap_tbl[HANG_INFO_TRAP_T_SUBTYPE_IDX].name,
13217 					HANG_KEY_DEL);
13218 		}
13219 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13220 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%08x%c",
13221 				hang_info_trap_tbl[HANG_INFO_TRAP_T_SUBTYPE_IDX].offset,
13222 				HANG_KEY_DEL);
13223 		(*cnt)++;
13224 	}
13225 
13226 	if (*cnt < HANG_FIELD_CNT_MAX) {
13227 		if (field_name) {
13228 			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13229 			*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
13230 					hang_info_trap_tbl[HANG_INFO_TRAP_T_EPC_IDX].name,
13231 					HANG_KEY_DEL);
13232 		}
13233 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13234 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%08x%c",
13235 				*(uint32 *)
13236 				(ptr + hang_info_trap_tbl[HANG_INFO_TRAP_T_EPC_IDX].offset),
13237 				HANG_KEY_DEL);
13238 		(*cnt)++;
13239 	}
13240 #ifdef DHD_EWPR_VER2
13241 	/* put 0 for HG03 ~ HG06 (reserved for future use) */
13242 	for (i = 0; (i < HANG_INFO_BIGDATA_EXTRA_KEY) && (*cnt < HANG_FIELD_CNT_MAX);
13243 			i++, (*cnt)++) {
13244 		if (field_name) {
13245 			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13246 			*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
13247 				hang_info_trap_tbl[HANG_INFO_TRAP_T_EXTRA_KEY_IDX+i].name,
13248 				HANG_KEY_DEL);
13249 		}
13250 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13251 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%d%c",
13252 			hang_info_trap_tbl[HANG_INFO_TRAP_T_EXTRA_KEY_IDX+i].offset,
13253 			HANG_KEY_DEL);
13254 	}
13255 #endif /* DHD_EWPR_VER2 */
13256 }
13257 #ifndef DHD_EWPR_VER2
13258 static void
13259 copy_hang_info_trap_t(char *dest, trap_t *src, int len, int field_name,
13260 		int *bytes_written, int *cnt, char *cookie)
13261 {
13262 	uint8 *ptr;
13263 	int remain_len;
13264 	int i;
13265 
13266 	ptr = (uint8 *)src;
13267 
13268 	remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13269 
13270 	for (i = HANG_INFO_TRAP_T_OFFSET_IDX;
13271 			(hang_info_trap_tbl[i].name[0] != 0) && (*cnt < HANG_FIELD_CNT_MAX);
13272 			i++, (*cnt)++) {
13273 		if (field_name) {
13274 			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13275 			*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%s:",
13276 					HANG_RAW_DEL, hang_info_trap_tbl[i].name);
13277 		}
13278 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13279 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
13280 				HANG_RAW_DEL, *(uint32 *)(ptr + hang_info_trap_tbl[i].offset));
13281 	}
13282 }
13283 
13284 static void
13285 copy_hang_info_stack(dhd_pub_t *dhd, char *dest, int *bytes_written, int *cnt)
13286 {
13287 	int remain_len;
13288 	int i = 0;
13289 	const uint32 *stack;
13290 	uint32 *ext_data = dhd->extended_trap_data;
13291 	hnd_ext_trap_hdr_t *hdr;
13292 	const bcm_tlv_t *tlv;
13293 	int remain_stack_cnt = 0;
13294 	uint32 dummy_data = 0;
13295 	int bigdata_key_stack_cnt = 0;
13296 
13297 	if (ext_data == NULL) {
13298 		return;
13299 	}
13300 	/* First word is original trap_data */
13301 	ext_data++;
13302 
13303 	/* Followed by the extended trap data header */
13304 	hdr = (hnd_ext_trap_hdr_t *)ext_data;
13305 
13306 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
13307 
13308 	remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13309 
13310 	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
13311 	if (tlv) {
13312 		stack = (const uint32 *)tlv->data;
13313 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len,
13314 				"%08x", *(uint32 *)(stack++));
13315 		(*cnt)++;
13316 		if (*cnt >= HANG_FIELD_CNT_MAX) {
13317 			return;
13318 		}
13319 		for (i = 1; i < (uint32)(tlv->len / sizeof(uint32)); i++, bigdata_key_stack_cnt++) {
13320 			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13321 			/* Raw data for bigdata use '_' and Key data for bigdata use space */
13322 			*bytes_written += scnprintf(&dest[*bytes_written], remain_len,
13323 				"%c%08x",
13324 				i <= HANG_INFO_BIGDATA_KEY_STACK_CNT ? HANG_KEY_DEL : HANG_RAW_DEL,
13325 				*(uint32 *)(stack++));
13326 
13327 			(*cnt)++;
13328 			if ((*cnt >= HANG_FIELD_CNT_MAX) ||
13329 					(i >= HANG_FIELD_TRAP_T_STACK_CNT_MAX)) {
13330 				return;
13331 			}
13332 		}
13333 	}
13334 
13335 	remain_stack_cnt = HANG_FIELD_TRAP_T_STACK_CNT_MAX - i;
13336 
13337 	for (i = 0; i < remain_stack_cnt; i++) {
13338 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13339 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
13340 				HANG_RAW_DEL, dummy_data);
13341 		(*cnt)++;
13342 		if (*cnt >= HANG_FIELD_CNT_MAX) {
13343 			return;
13344 		}
13345 	}
13346 	GCC_DIAGNOSTIC_POP();
13347 
13348 }
13349 
13350 static void
13351 copy_hang_info_specific(dhd_pub_t *dhd, char *dest, int *bytes_written, int *cnt)
13352 {
13353 	int remain_len;
13354 	int i;
13355 	const uint32 *data;
13356 	uint32 *ext_data = dhd->extended_trap_data;
13357 	hnd_ext_trap_hdr_t *hdr;
13358 	const bcm_tlv_t *tlv;
13359 	int remain_trap_data = 0;
13360 	uint8 buf_u8[sizeof(uint32)] = { 0, };
13361 	const uint8 *p_u8;
13362 
13363 	if (ext_data == NULL) {
13364 		return;
13365 	}
13366 	/* First word is original trap_data */
13367 	ext_data++;
13368 
13369 	/* Followed by the extended trap data header */
13370 	hdr = (hnd_ext_trap_hdr_t *)ext_data;
13371 
13372 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
13373 	if (tlv) {
13374 		/* subtract the tlv payload and its header (tag + len) */
13375 		remain_trap_data = (hdr->len - tlv->len - sizeof(uint16));
13376 	}
13377 
13378 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
13379 	if (tlv) {
13380 		/* subtract the tlv payload and its header (tag + len) */
13381 		remain_trap_data -= (tlv->len + sizeof(uint16));
13382 	}
13383 
13384 	data = (const uint32 *)(hdr->data + (hdr->len  - remain_trap_data));
13385 
13386 	remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13387 
13388 	for (i = 0; i < (uint32)(remain_trap_data / sizeof(uint32)) && *cnt < HANG_FIELD_CNT_MAX;
13389 			i++, (*cnt)++) {
13390 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13391 		GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
13392 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
13393 				HANG_RAW_DEL, *(uint32 *)(data++));
13394 		GCC_DIAGNOSTIC_POP();
13395 	}
13396 
13397 	if (*cnt >= HANG_FIELD_CNT_MAX) {
13398 		return;
13399 	}
13400 
13401 	remain_trap_data -= (sizeof(uint32) * i);
13402 
13403 	if (remain_trap_data > sizeof(buf_u8)) {
13404 		DHD_ERROR(("%s: resize remain_trap_data\n", __FUNCTION__));
13405 		remain_trap_data =  sizeof(buf_u8);
13406 	}
13407 
13408 	if (remain_trap_data) {
13409 		p_u8 = (const uint8 *)data;
13410 		for (i = 0; i < remain_trap_data; i++) {
13411 			buf_u8[i] = *(const uint8 *)(p_u8++);
13412 		}
13413 
13414 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13415 		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
13416 				HANG_RAW_DEL, ltoh32_ua(buf_u8));
13417 		(*cnt)++;
13418 	}
13419 }
13420 #endif /* DHD_EWPR_VER2 */
13421 
13422 static void
13423 get_hang_info_trap_subtype(dhd_pub_t *dhd, uint32 *subtype)
13424 {
13425 	uint32 i;
13426 	uint32 *ext_data = dhd->extended_trap_data;
13427 	hnd_ext_trap_hdr_t *hdr;
13428 	const bcm_tlv_t *tlv;
13429 
13430 	/* First word is original trap_data */
13431 	ext_data++;
13432 
13433 	/* Followed by the extended trap data header */
13434 	hdr = (hnd_ext_trap_hdr_t *)ext_data;
13435 
13436 	/* Dump a list of all tags found  before parsing data */
13437 	for (i = TAG_TRAP_DEEPSLEEP; i < TAG_TRAP_LAST; i++) {
13438 		tlv = bcm_parse_tlvs(hdr->data, hdr->len, i);
13439 		if (tlv) {
13440 			if (!TAG_TRAP_IS_STATE(i)) {
13441 				*subtype = i;
13442 				return;
13443 			}
13444 		}
13445 	}
13446 }
13447 #ifdef DHD_EWPR_VER2
13448 static void
13449 copy_hang_info_etd_base64(dhd_pub_t *dhd, char *dest, int *bytes_written, int *cnt)
13450 {
13451 	int remain_len;
13452 	uint32 *ext_data = dhd->extended_trap_data;
13453 	hnd_ext_trap_hdr_t *hdr;
13454 	char *base64_out = NULL;
13455 	int base64_cnt;
13456 	int max_base64_len = HANG_INFO_BASE64_BUFFER_SIZE;
13457 
13458 	if (ext_data == NULL) {
13459 		return;
13460 	}
13461 	/* First word is original trap_data */
13462 	ext_data++;
13463 
13464 	/* Followed by the extended trap data header */
13465 	hdr = (hnd_ext_trap_hdr_t *)ext_data;
13466 
13467 	remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
13468 
13469 	if (remain_len <= 0) {
13470 		DHD_ERROR(("%s: no space to put etd\n", __FUNCTION__));
13471 		return;
13472 	}
13473 
13474 	if (remain_len < max_base64_len) {
13475 		DHD_ERROR(("%s: change max base64 length to remain length %d\n", __FUNCTION__,
13476 			remain_len));
13477 		max_base64_len = remain_len;
13478 	}
13479 
13480 	base64_out = MALLOCZ(dhd->osh, HANG_INFO_BASE64_BUFFER_SIZE);
13481 	if (base64_out == NULL) {
13482 		DHD_ERROR(("%s: MALLOC failed for size %d\n",
13483 			__FUNCTION__, HANG_INFO_BASE64_BUFFER_SIZE));
13484 		return;
13485 	}
13486 
13487 	if (hdr->len > 0) {
13488 		base64_cnt = dhd_base64_encode(hdr->data, hdr->len, base64_out, max_base64_len);
13489 		if (base64_cnt == 0) {
13490 			DHD_ERROR(("%s: base64 encoding error\n", __FUNCTION__));
13491 		}
13492 	}
13493 
13494 	*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s",
13495 			base64_out);
13496 	(*cnt)++;
13497 	MFREE(dhd->osh, base64_out, HANG_INFO_BASE64_BUFFER_SIZE);
13498 }
13499 #endif /* DHD_EWPR_VER2 */
13500 
13501 void
13502 copy_hang_info_trap(dhd_pub_t *dhd)
13503 {
13504 	trap_t tr;
13505 	int bytes_written;
13506 	int trap_subtype = 0;
13507 
13508 	if (!dhd || !dhd->hang_info) {
13509 		DHD_ERROR(("%s dhd=%p hang_info=%p\n", __FUNCTION__,
13510 			dhd, (dhd ? dhd->hang_info : NULL)));
13511 		return;
13512 	}
13513 
13514 	if (!dhd->dongle_trap_occured) {
13515 		DHD_ERROR(("%s: dongle_trap_occured is FALSE\n", __FUNCTION__));
13516 		return;
13517 	}
13518 
13519 	memset(&tr, 0x00, sizeof(struct _trap_struct));
13520 
13521 	copy_ext_trap_sig(dhd, &tr);
13522 	get_hang_info_trap_subtype(dhd, &trap_subtype);
13523 
13524 	hang_info_trap_tbl[HANG_INFO_TRAP_T_REASON_IDX].offset = HANG_REASON_DONGLE_TRAP;
13525 	hang_info_trap_tbl[HANG_INFO_TRAP_T_SUBTYPE_IDX].offset = trap_subtype;
13526 
13527 	bytes_written = 0;
13528 	dhd->hang_info_cnt = 0;
13529 	get_debug_dump_time(dhd->debug_dump_time_hang_str);
13530 	copy_debug_dump_time(dhd->debug_dump_time_str, dhd->debug_dump_time_hang_str);
13531 
13532 	copy_hang_info_head(dhd->hang_info, &tr, VENDOR_SEND_HANG_EXT_INFO_LEN, FALSE,
13533 			&bytes_written, &dhd->hang_info_cnt, dhd->debug_dump_time_hang_str);
13534 
13535 	DHD_INFO(("hang info head cnt: %d len: %d data: %s\n",
13536 		dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
13537 
13538 	clear_debug_dump_time(dhd->debug_dump_time_hang_str);
13539 
13540 #ifdef DHD_EWPR_VER2
13541 	/* stack info & trap info are included in etd data */
13542 
13543 	/* extended trap data dump */
13544 	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
13545 		copy_hang_info_etd_base64(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt);
13546 		DHD_INFO(("hang info specific cnt: %d len: %d data: %s\n",
13547 			dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
13548 	}
13549 #else
13550 	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
13551 		copy_hang_info_stack(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt);
13552 		DHD_INFO(("hang info stack cnt: %d len: %d data: %s\n",
13553 			dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
13554 	}
13555 
13556 	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
13557 		copy_hang_info_trap_t(dhd->hang_info, &tr, VENDOR_SEND_HANG_EXT_INFO_LEN, FALSE,
13558 				&bytes_written, &dhd->hang_info_cnt, dhd->debug_dump_time_hang_str);
13559 		DHD_INFO(("hang info trap_t cnt: %d len: %d data: %s\n",
13560 			dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
13561 	}
13562 
13563 	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
13564 		copy_hang_info_specific(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt);
13565 		DHD_INFO(("hang info specific cnt: %d len: %d data: %s\n",
13566 			dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
13567 	}
13568 #endif /* DHD_EWPR_VER2 */
13569 }
13570 
13571 void
13572 copy_hang_info_linkdown(dhd_pub_t *dhd)
13573 {
13574 	int bytes_written = 0;
13575 	int remain_len;
13576 
13577 	if (!dhd || !dhd->hang_info) {
13578 		DHD_ERROR(("%s dhd=%p hang_info=%p\n", __FUNCTION__,
13579 			dhd, (dhd ? dhd->hang_info : NULL)));
13580 		return;
13581 	}
13582 
13583 	if (!dhd->bus->is_linkdown) {
13584 		DHD_ERROR(("%s: link down has not happened\n", __FUNCTION__));
13585 		return;
13586 	}
13587 
13588 	dhd->hang_info_cnt = 0;
13589 
13590 	get_debug_dump_time(dhd->debug_dump_time_hang_str);
13591 	copy_debug_dump_time(dhd->debug_dump_time_str, dhd->debug_dump_time_hang_str);
13592 
13593 	/* hang reason code (0x8808) */
13594 	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
13595 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
13596 		bytes_written += scnprintf(&dhd->hang_info[bytes_written], remain_len, "%d%c",
13597 				HANG_REASON_PCIE_LINK_DOWN_EP_DETECT, HANG_KEY_DEL);
13598 		dhd->hang_info_cnt++;
13599 	}
13600 
13601 	/* EWP version */
13602 	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
13603 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
13604 		bytes_written += scnprintf(&dhd->hang_info[bytes_written], remain_len, "%d%c",
13605 				VENDOR_SEND_HANG_EXT_INFO_VER, HANG_KEY_DEL);
13606 		dhd->hang_info_cnt++;
13607 	}
13608 
13609 	/* cookie - dump time stamp */
13610 	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
13611 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
13612 		bytes_written += scnprintf(&dhd->hang_info[bytes_written], remain_len, "%s%c",
13613 				dhd->debug_dump_time_hang_str, HANG_KEY_DEL);
13614 		dhd->hang_info_cnt++;
13615 	}
13616 
13617 	clear_debug_dump_time(dhd->debug_dump_time_hang_str);
13618 
13619 	/* dump PCIE RC registers */
13620 	dhd_dump_pcie_rc_regs_for_linkdown(dhd, &bytes_written);
13621 
13622 	DHD_INFO(("hang info head cnt: %d len: %d data: %s\n",
13623 		dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
13624 
13625 }
13626 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
13627 
13628 int
13629 dhd_prot_debug_info_print(dhd_pub_t *dhd)
13630 {
13631 	dhd_prot_t *prot = dhd->prot;
13632 	msgbuf_ring_t *ring;
13633 	uint16 rd, wr, drd, dwr;
13634 	uint32 dma_buf_len;
13635 	uint64 current_time;
13636 	ulong ring_tcm_rd_addr; /* dongle address */
13637 	ulong ring_tcm_wr_addr; /* dongle address */
13638 
13639 	DHD_ERROR(("\n ------- DUMPING VERSION INFORMATION ------- \r\n"));
13640 	DHD_ERROR(("DHD: %s\n", dhd_version));
13641 	DHD_ERROR(("Firmware: %s\n", fw_version));
13642 
13643 #ifdef DHD_FW_COREDUMP
13644 	DHD_ERROR(("\n ------- DUMPING CONFIGURATION INFORMATION ------ \r\n"));
13645 	DHD_ERROR(("memdump mode: %d\n", dhd->memdump_enabled));
13646 #endif /* DHD_FW_COREDUMP */
13647 
13648 	DHD_ERROR(("\n ------- DUMPING PROTOCOL INFORMATION ------- \r\n"));
13649 	DHD_ERROR(("IPCrevs: Dev %d, Host %d, active %d\n",
13650 		prot->device_ipc_version,
13651 		prot->host_ipc_version,
13652 		prot->active_ipc_version));
13653 	DHD_ERROR(("d2h_intr_method -> %s\n",
13654 			dhd->bus->d2h_intr_method ? "PCIE_MSI" : "PCIE_INTX"));
13655 	DHD_ERROR(("max Host TS bufs to post: %d, posted %d\n",
13656 		prot->max_tsbufpost, prot->cur_ts_bufs_posted));
13657 	DHD_ERROR(("max INFO bufs to post: %d, posted %d\n",
13658 		prot->max_infobufpost, prot->infobufpost));
13659 	DHD_ERROR(("max event bufs to post: %d, posted %d\n",
13660 		prot->max_eventbufpost, prot->cur_event_bufs_posted));
13661 	DHD_ERROR(("max ioctlresp bufs to post: %d, posted %d\n",
13662 		prot->max_ioctlrespbufpost, prot->cur_ioctlresp_bufs_posted));
13663 	DHD_ERROR(("max RX bufs to post: %d, posted %d\n",
13664 		prot->max_rxbufpost, prot->rxbufpost));
13665 	DHD_ERROR(("h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
13666 		h2d_max_txpost, prot->h2d_max_txpost));
13667 #if defined(DHD_HTPUT_TUNABLES)
13668 	DHD_ERROR(("h2d_htput_max_txpost: %d, prot->h2d_htput_max_txpost: %d\n",
13669 		h2d_htput_max_txpost, prot->h2d_htput_max_txpost));
13670 #endif /* DHD_HTPUT_TUNABLES */
13671 
13672 	current_time = OSL_LOCALTIME_NS();
13673 	DHD_ERROR(("current_time="SEC_USEC_FMT"\n", GET_SEC_USEC(current_time)));
13674 	DHD_ERROR(("ioctl_fillup_time="SEC_USEC_FMT
13675 		" ioctl_ack_time="SEC_USEC_FMT
13676 		" ioctl_cmplt_time="SEC_USEC_FMT"\n",
13677 		GET_SEC_USEC(prot->ioctl_fillup_time),
13678 		GET_SEC_USEC(prot->ioctl_ack_time),
13679 		GET_SEC_USEC(prot->ioctl_cmplt_time)));
13680 
13681 	/* Check PCIe INT registers */
13682 	if (!dhd_pcie_dump_int_regs(dhd)) {
13683 		DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
13684 		dhd->bus->is_linkdown = TRUE;
13685 	}
13686 
13687 	DHD_ERROR(("\n ------- DUMPING IOCTL RING RD WR Pointers ------- \r\n"));
13688 
13689 	ring = &prot->h2dring_ctrl_subn;
13690 	dma_buf_len = ring->max_items * ring->item_len;
13691 	ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
13692 	ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
13693 	DHD_ERROR(("CtrlPost: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
13694 		"SIZE %d \r\n",
13695 		ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
13696 		ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, dma_buf_len));
13697 	DHD_ERROR(("CtrlPost: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
13698 	if (dhd->dma_d2h_ring_upd_support) {
13699 		drd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
13700 		dwr = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, ring->idx);
13701 		DHD_ERROR(("CtrlPost: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr));
13702 	}
13703 	if (dhd->bus->is_linkdown) {
13704 		DHD_ERROR(("CtrlPost: From Shared Mem: RD and WR are invalid"
13705 			" due to PCIe link down\r\n"));
13706 	} else {
13707 		dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
13708 		dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
13709 		DHD_ERROR(("CtrlPost: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
13710 	}
13711 	DHD_ERROR(("CtrlPost: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
13712 
13713 	ring = &prot->d2hring_ctrl_cpln;
13714 	dma_buf_len = ring->max_items * ring->item_len;
13715 	ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
13716 	ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
13717 	DHD_ERROR(("CtrlCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
13718 		"SIZE %d \r\n",
13719 		ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
13720 		ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, dma_buf_len));
13721 	DHD_ERROR(("CtrlCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
13722 	if (dhd->dma_d2h_ring_upd_support) {
13723 		drd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
13724 		dwr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
13725 		DHD_ERROR(("CtrlCpl: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr));
13726 	}
13727 	if (dhd->bus->is_linkdown) {
13728 		DHD_ERROR(("CtrlCpl: From Shared Mem: RD and WR are invalid"
13729 			" due to PCIe link down\r\n"));
13730 	} else {
13731 		dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
13732 		dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
13733 		DHD_ERROR(("CtrlCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
13734 	}
13735 	DHD_ERROR(("CtrlCpl: Expected seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
13736 
13737 	ring = prot->h2dring_info_subn;
13738 	if (ring) {
13739 		dma_buf_len = ring->max_items * ring->item_len;
13740 		ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
13741 		ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
13742 		DHD_ERROR(("InfoSub: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
13743 			"SIZE %d \r\n",
13744 			ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
13745 			ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
13746 			dma_buf_len));
13747 		DHD_ERROR(("InfoSub: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
13748 		if (dhd->dma_d2h_ring_upd_support) {
13749 			drd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
13750 			dwr = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, ring->idx);
13751 			DHD_ERROR(("InfoSub: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr));
13752 		}
13753 		if (dhd->bus->is_linkdown) {
13754 			DHD_ERROR(("InfoSub: From Shared Mem: RD and WR are invalid"
13755 				" due to PCIe link down\r\n"));
13756 		} else {
13757 			dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
13758 			dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
13759 			DHD_ERROR(("InfoSub: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
13760 		}
13761 		DHD_ERROR(("InfoSub: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
13762 	}
13763 	ring = prot->d2hring_info_cpln;
13764 	if (ring) {
13765 		dma_buf_len = ring->max_items * ring->item_len;
13766 		ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
13767 		ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
13768 		DHD_ERROR(("InfoCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
13769 			"SIZE %d \r\n",
13770 			ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
13771 			ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
13772 			dma_buf_len));
13773 		DHD_ERROR(("InfoCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
13774 		if (dhd->dma_d2h_ring_upd_support) {
13775 			drd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
13776 			dwr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
13777 			DHD_ERROR(("InfoCpl: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr));
13778 		}
13779 		if (dhd->bus->is_linkdown) {
13780 			DHD_ERROR(("InfoCpl: From Shared Mem: RD and WR are invalid"
13781 				" due to PCIe link down\r\n"));
13782 		} else {
13783 			dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
13784 			dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
13785 			DHD_ERROR(("InfoCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
13786 		}
13787 		DHD_ERROR(("InfoCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
13788 	}
13789 #ifdef EWP_EDL
13790 	ring = prot->d2hring_edl;
13791 	if (ring) {
13792 		ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
13793 		ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
13794 		dma_buf_len = ring->max_items * ring->item_len;
13795 		DHD_ERROR(("EdlRing: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
13796 			"SIZE %d \r\n",
13797 			ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
13798 			ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
13799 			dma_buf_len));
13800 		DHD_ERROR(("EdlRing: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
13801 		if (dhd->dma_d2h_ring_upd_support) {
13802 			drd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
13803 			dwr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
13804 			DHD_ERROR(("EdlRing: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr));
13805 		}
13806 		if (dhd->bus->is_linkdown) {
13807 			DHD_ERROR(("EdlRing: From Shared Mem: RD and WR are invalid"
13808 				" due to PCIe link down\r\n"));
13809 		} else {
13810 			dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
13811 			dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
13812 			DHD_ERROR(("EdlRing: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
13813 		}
13814 		DHD_ERROR(("EdlRing: Expected seq num: %d \r\n",
13815 			ring->seqnum % D2H_EPOCH_MODULO));
13816 	}
13817 #endif /* EWP_EDL */
13818 
13819 	ring = &prot->d2hring_tx_cpln;
13820 	if (ring) {
13821 		ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
13822 		ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
13823 		dma_buf_len = ring->max_items * ring->item_len;
13824 		DHD_ERROR(("TxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
13825 			"SIZE %d \r\n",
13826 			ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
13827 			ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
13828 			dma_buf_len));
13829 		DHD_ERROR(("TxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
13830 		if (dhd->dma_d2h_ring_upd_support) {
13831 			drd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
13832 			dwr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
13833 			DHD_ERROR(("TxCpl: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr));
13834 		}
13835 		if (dhd->bus->is_linkdown) {
13836 			DHD_ERROR(("TxCpl: From Shared Mem: RD and WR are invalid"
13837 				" due to PCIe link down\r\n"));
13838 		} else {
13839 			dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
13840 			dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
13841 			DHD_ERROR(("TxCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
13842 		}
13843 		DHD_ERROR(("TxCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
13844 	}
13845 
13846 	ring = &prot->d2hring_rx_cpln;
13847 	if (ring) {
13848 		ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
13849 		ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
13850 		dma_buf_len = ring->max_items * ring->item_len;
13851 		DHD_ERROR(("RxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
13852 			"SIZE %d \r\n",
13853 			ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
13854 			ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
13855 			dma_buf_len));
13856 		DHD_ERROR(("RxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
13857 		if (dhd->dma_d2h_ring_upd_support) {
13858 			drd = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
13859 			dwr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
13860 			DHD_ERROR(("RxCpl: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr));
13861 		}
13862 		if (dhd->bus->is_linkdown) {
13863 			DHD_ERROR(("RxCpl: From Shared Mem: RD and WR are invalid"
13864 				" due to PCIe link down\r\n"));
13865 		} else {
13866 			dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
13867 			dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
13868 			DHD_ERROR(("RxCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
13869 		}
13870 		DHD_ERROR(("RxCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
13871 	}
13872 
13873 	ring = &prot->h2dring_rxp_subn;
13874 	if (ring) {
13875 		ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
13876 		ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
13877 		dma_buf_len = ring->max_items * ring->item_len;
13878 		DHD_ERROR(("RxSub: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
13879 			"SIZE %d \r\n",
13880 			ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
13881 			ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
13882 			dma_buf_len));
13883 		DHD_ERROR(("RxSub: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
13884 		if (dhd->dma_d2h_ring_upd_support) {
13885 			drd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
13886 			dwr = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, ring->idx);
13887 			DHD_ERROR(("RxSub: From Host DMA mem: RD: %d WR %d \r\n", drd, dwr));
13888 		}
13889 		if (dhd->bus->is_linkdown) {
13890 			DHD_ERROR(("RxSub: From Shared Mem: RD and WR are invalid"
13891 				" due to PCIe link down\r\n"));
13892 		} else {
13893 			dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
13894 			dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
13895 			DHD_ERROR(("RxSub: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
13896 		}
13897 		DHD_ERROR(("RxSub: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
13898 	}
13899 
13900 	DHD_ERROR(("%s: cur_ioctlresp_bufs_posted %d cur_event_bufs_posted %d\n",
13901 		__FUNCTION__, prot->cur_ioctlresp_bufs_posted, prot->cur_event_bufs_posted));
13902 #ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
13903 	DHD_ERROR(("%s: multi_client_flow_rings:%d max_multi_client_flow_rings:%d\n",
13904 		__FUNCTION__, dhd->multi_client_flow_rings, dhd->max_multi_client_flow_rings));
13905 #endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */
13906 
13907 	DHD_ERROR(("pktid_txq_start_cnt: %d\n", prot->pktid_txq_start_cnt));
13908 	DHD_ERROR(("pktid_txq_stop_cnt: %d\n", prot->pktid_txq_stop_cnt));
13909 	DHD_ERROR(("pktid_depleted_cnt: %d\n", prot->pktid_depleted_cnt));
13910 	dhd_pcie_debug_info_dump(dhd);
13911 #ifdef DHD_LB_STATS
13912 	DHD_ERROR(("\nlb_rxp_stop_thr_hitcnt: %llu lb_rxp_strt_thr_hitcnt: %llu\n",
13913 		dhd->lb_rxp_stop_thr_hitcnt, dhd->lb_rxp_strt_thr_hitcnt));
13914 	DHD_ERROR(("\nlb_rxp_napi_sched_cnt: %llu lb_rxp_napi_complete_cnt: %llu\n",
13915 		dhd->lb_rxp_napi_sched_cnt, dhd->lb_rxp_napi_complete_cnt));
13916 #endif /* DHD_LB_STATS */
13917 #ifdef DHD_TIMESYNC
13918 	dhd_timesync_debug_info_print(dhd);
13919 #endif /* DHD_TIMESYNC */
13920 	return 0;
13921 }
13922 
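/** Dump the host memory blocks used for DMA'd ring index updates (ring read/write pointers) */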
13923 int
13924 dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
13925 {
13926 	uint32 *ptr;
13927 	uint32 value;
13928 
13929 	if (dhd->prot->d2h_dma_indx_wr_buf.va) {
13930 		uint32 i;
13931 		uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus);
13932 
13933 		OSL_CACHE_INV((void *)dhd->prot->d2h_dma_indx_wr_buf.va,
13934 			dhd->prot->d2h_dma_indx_wr_buf.len);
13935 
13936 		ptr = (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va);
13937 
13938 		bcm_bprintf(b, "\n max_h2d_queues %d\n", max_h2d_queues);
13939 
13940 		bcm_bprintf(b, "\nRPTR block H2D common rings, 0x%4p\n", ptr);
13941 		value = ltoh32(*ptr);
13942 		bcm_bprintf(b, "\tH2D CTRL: value 0x%04x\n", value);
13943 		ptr++;
13944 		value = ltoh32(*ptr);
13945 		bcm_bprintf(b, "\tH2D RXPOST: value 0x%04x\n", value);
13946 
13947 		ptr++;
13948 		bcm_bprintf(b, "RPTR block Flow rings , 0x%4p\n", ptr);
13949 		for (i = BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) {
13950 			value = ltoh32(*ptr);
13951 			bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value);
13952 			ptr++;
13953 		}
13954 	}
13955 
13956 	if (dhd->prot->h2d_dma_indx_rd_buf.va) {
13957 		OSL_CACHE_INV((void *)dhd->prot->h2d_dma_indx_rd_buf.va,
13958 			dhd->prot->h2d_dma_indx_rd_buf.len);
13959 
13960 		ptr = (uint32 *)(dhd->prot->h2d_dma_indx_rd_buf.va);
13961 
13962 		bcm_bprintf(b, "\nWPTR block D2H common rings, 0x%4p\n", ptr);
13963 		value = ltoh32(*ptr);
13964 		bcm_bprintf(b, "\tD2H CTRLCPLT: value 0x%04x\n", value);
13965 		ptr++;
13966 		value = ltoh32(*ptr);
13967 		bcm_bprintf(b, "\tD2H TXCPLT: value 0x%04x\n", value);
13968 		ptr++;
13969 		value = ltoh32(*ptr);
13970 		bcm_bprintf(b, "\tD2H RXCPLT: value 0x%04x\n", value);
13971 	}
13972 
13973 	return 0;
13974 }
13975 
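/** Enable or disable metadata debug dumps; takes effect only when DHD_DBG_SHOW_METADATA is compiled in */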
13976 uint32
13977 dhd_prot_metadata_dbg_set(dhd_pub_t *dhd, bool val)
13978 {
13979 	dhd_prot_t *prot = dhd->prot;
13980 #if DHD_DBG_SHOW_METADATA
13981 	prot->metadata_dbg = val;
13982 #endif
13983 	return (uint32)prot->metadata_dbg;
13984 }
13985 
13986 uint32
13987 dhd_prot_metadata_dbg_get(dhd_pub_t *dhd)
13988 {
13989 	dhd_prot_t *prot = dhd->prot;
13990 	return (uint32)prot->metadata_dbg;
13991 }
13992 
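/** Set the rx or tx metadata offset/length and return the value now in effect */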
13993 uint32
13994 dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx)
13995 {
13996 #if !(defined(BCM_ROUTER_DHD))
13997 	dhd_prot_t *prot = dhd->prot;
13998 	if (rx)
13999 		prot->rx_metadata_offset = (uint16)val;
14000 	else
14001 		prot->tx_metadata_offset = (uint16)val;
14002 #endif /* ! BCM_ROUTER_DHD */
14003 	return dhd_prot_metadatalen_get(dhd, rx);
14004 }
14005 
14006 uint32
14007 dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx)
14008 {
14009 	dhd_prot_t *prot = dhd->prot;
14010 	if (rx)
14011 		return prot->rx_metadata_offset;
14012 	else
14013 		return prot->tx_metadata_offset;
14014 }
14015 
14016 /** optimization to write "n" tx items at a time to ring */
14017 uint32
14018 dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val)
14019 {
14020 	dhd_prot_t *prot = dhd->prot;
14021 	if (set)
14022 		prot->txp_threshold = (uint16)val;
14023 	val = prot->txp_threshold;
14024 	return val;
14025 }
14026 
14027 #ifdef DHD_RX_CHAINING
14028 
14029 static INLINE void
14030 BCMFASTPATH(dhd_rxchain_reset)(rxchain_info_t *rxchain)
14031 {
14032 	rxchain->pkt_count = 0;
14033 }
14034 
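/** Add a received packet to the rx chain; commits the existing chain when the flow changes, the packet is not chainable, or the chain reaches its maximum length */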
14035 static void
14036 BCMFASTPATH(dhd_rxchain_frame)(dhd_pub_t *dhd, void *pkt, uint ifidx)
14037 {
14038 	uint8 *eh;
14039 	uint8 prio;
14040 	dhd_prot_t *prot = dhd->prot;
14041 	rxchain_info_t *rxchain = &prot->rxchain;
14042 
14043 	ASSERT(!PKTISCHAINED(pkt));
14044 	ASSERT(PKTCLINK(pkt) == NULL);
14045 	ASSERT(PKTCGETATTR(pkt) == 0);
14046 
14047 	eh = PKTDATA(dhd->osh, pkt);
14048 	prio = IP_TOS46(eh + ETHER_HDR_LEN) >> IPV4_TOS_PREC_SHIFT;
14049 
14050 	if (rxchain->pkt_count && !(PKT_CTF_CHAINABLE(dhd, ifidx, eh, prio, rxchain->h_sa,
14051 		rxchain->h_da, rxchain->h_prio))) {
14052 		/* Different flow - First release the existing chain */
14053 		dhd_rxchain_commit(dhd);
14054 	}
14055 
14056 	/* For routers, with HNDCTF, link the packets using PKTSETCLINK, */
14057 	/* so that the chain can be handed off to CTF bridge as is. */
14058 	if (rxchain->pkt_count == 0) {
14059 		/* First packet in chain */
14060 		rxchain->pkthead = rxchain->pkttail = pkt;
14061 
14062 		/* Keep a copy of ptr to ether_da, ether_sa and prio */
14063 		rxchain->h_da = ((struct ether_header *)eh)->ether_dhost;
14064 		rxchain->h_sa = ((struct ether_header *)eh)->ether_shost;
14065 		rxchain->h_prio = prio;
14066 		rxchain->ifidx = ifidx;
14067 		rxchain->pkt_count++;
14068 	} else {
14069 		/* Same flow - keep chaining */
14070 		PKTSETCLINK(rxchain->pkttail, pkt);
14071 		rxchain->pkttail = pkt;
14072 		rxchain->pkt_count++;
14073 	}
14074 
14075 	if ((dhd_rx_pkt_chainable(dhd, ifidx)) && (!ETHER_ISMULTI(rxchain->h_da)) &&
14076 		((((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IP)) ||
14077 		(((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IPV6)))) {
14078 		PKTSETCHAINED(dhd->osh, pkt);
14079 		PKTCINCRCNT(rxchain->pkthead);
14080 		PKTCADDLEN(rxchain->pkthead, PKTLEN(dhd->osh, pkt));
14081 	} else {
14082 		dhd_rxchain_commit(dhd);
14083 		return;
14084 	}
14085 
14086 	/* If we have hit the max chain length, dispatch the chain and reset */
14087 	if (rxchain->pkt_count >= DHD_PKT_CTF_MAX_CHAIN_LEN) {
14088 		dhd_rxchain_commit(dhd);
14089 	}
14090 }
14091 
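/** Hand the accumulated rx packet chain up to the bus layer and reset the chain */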
14092 static void
14093 BCMFASTPATH(dhd_rxchain_commit)(dhd_pub_t *dhd)
14094 {
14095 	dhd_prot_t *prot = dhd->prot;
14096 	rxchain_info_t *rxchain = &prot->rxchain;
14097 
14098 	if (rxchain->pkt_count == 0)
14099 		return;
14100 
14101 	/* Release the packets to dhd_linux */
14102 	dhd_bus_rx_frame(dhd->bus, rxchain->pkthead, rxchain->ifidx, rxchain->pkt_count);
14103 
14104 	/* Reset the chain */
14105 	dhd_rxchain_reset(rxchain);
14106 }
14107 
14108 #endif /* DHD_RX_CHAINING */
14109 
14110 #ifdef IDLE_TX_FLOW_MGMT
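/** Request the dongle to resume an idle tx flowring (MSG_TYPE_FLOW_RING_RESUME) */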
14111 int
14112 dhd_prot_flow_ring_resume(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
14113 {
14114 	tx_idle_flowring_resume_request_t *flow_resume_rqst;
14115 	msgbuf_ring_t *flow_ring;
14116 	dhd_prot_t *prot = dhd->prot;
14117 	unsigned long flags;
14118 	uint16 alloced = 0;
14119 	msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
14120 
14121 	/* Fetch a pre-initialized msgbuf_ring from the flowring pool */
14122 	flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
14123 	if (flow_ring == NULL) {
14124 		DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n",
14125 			__FUNCTION__, flow_ring_node->flowid));
14126 		return BCME_NOMEM;
14127 	}
14128 
14129 #ifdef PCIE_INB_DW
14130 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
14131 		return BCME_ERROR;
14132 #endif /* PCIE_INB_DW */
14133 
14134 	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
14135 
14136 	/* Request for ctrl_ring buffer space */
14137 	flow_resume_rqst = (tx_idle_flowring_resume_request_t *)
14138 		dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE);
14139 
14140 	if (flow_resume_rqst == NULL) {
14141 		dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
14142 		DHD_ERROR(("%s: Flow resume Req flowid %d - failure ring space\n",
14143 			__FUNCTION__, flow_ring_node->flowid));
14144 		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
14145 #ifdef PCIE_INB_DW
14146 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
14147 #endif
14148 		return BCME_NOMEM;
14149 	}
14150 
14151 	flow_ring_node->prot_info = (void *)flow_ring;
14152 
14153 	/* Common msg buf hdr */
14154 	flow_resume_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_RESUME;
14155 	flow_resume_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
14156 	flow_resume_rqst->msg.request_id = htol32(0); /* TBD */
14157 
14158 	flow_resume_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
14159 	ctrl_ring->seqnum++;
14160 
14161 	flow_resume_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
14162 	DHD_ERROR(("%s Send Flow resume Req flow ID %d\n",
14163 		__FUNCTION__, flow_ring_node->flowid));
14164 
14165 	/* Update the flow_ring's WRITE index */
14166 	if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
14167 		dhd_prot_dma_indx_set(dhd, flow_ring->wr,
14168 		                      H2D_DMA_INDX_WR_UPD, flow_ring->idx);
14169 	} else if (IFRM_ACTIVE(dhd) && (flow_ring->idx >= BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START)) {
14170 		dhd_prot_dma_indx_set(dhd, flow_ring->wr,
14171 			H2D_IFRM_INDX_WR_UPD,
14172 			(flow_ring->idx - BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START));
14173 	} else {
14174 		dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
14175 			sizeof(uint16), RING_WR_UPD, flow_ring->idx);
14176 	}
14177 
14178 	/* update control subn ring's WR index and ring doorbell to dongle */
14179 	dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_resume_rqst, 1);
14180 
14181 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
14182 
14183 #ifdef PCIE_INB_DW
14184 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
14185 #endif
14186 	return BCME_OK;
14187 } /* dhd_prot_flow_ring_resume */
14188 
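/** Request the dongle to suspend a batch of idle tx flowrings (MSG_TYPE_FLOW_RING_SUSPEND) */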
14189 int
14190 dhd_prot_flow_ring_batch_suspend_request(dhd_pub_t *dhd, uint16 *ringid, uint16 count)
14191 {
14192 	tx_idle_flowring_suspend_request_t *flow_suspend_rqst;
14193 	dhd_prot_t *prot = dhd->prot;
14194 	unsigned long flags;
14195 	uint16 index;
14196 	uint16 alloced = 0;
14197 	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
14198 
14199 #ifdef PCIE_INB_DW
14200 	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
14201 		return BCME_ERROR;
14202 #endif /* PCIE_INB_DW */
14203 
14204 	DHD_RING_LOCK(ring->ring_lock, flags);
14205 
14206 	/* Request for ring buffer space */
14207 	flow_suspend_rqst = (tx_idle_flowring_suspend_request_t *)
14208 		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
14209 
14210 	if (flow_suspend_rqst == NULL) {
14211 		DHD_RING_UNLOCK(ring->ring_lock, flags);
14212 		DHD_ERROR(("%s: Flow suspend Req - failure ring space\n", __FUNCTION__));
14213 #ifdef PCIE_INB_DW
14214 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
14215 #endif
14216 		return BCME_NOMEM;
14217 	}
14218 
14219 	/* Common msg buf hdr */
14220 	flow_suspend_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_SUSPEND;
14221 	/* flow_suspend_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; */
14222 	flow_suspend_rqst->msg.request_id = htol32(0); /* TBD */
14223 
14224 	flow_suspend_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
14225 	ring->seqnum++;
14226 
14227 	/* Update flow id  info */
14228 	for (index = 0; index < count; index++)
14229 	{
14230 		flow_suspend_rqst->ring_id[index] = ringid[index];
14231 	}
14232 	flow_suspend_rqst->num = count;
14233 
14234 	DHD_ERROR(("%s sending batch suspend!! count is %d\n", __FUNCTION__, count));
14235 
14236 	/* update ring's WR index and ring doorbell to dongle */
14237 	dhd_prot_ring_write_complete(dhd, ring, flow_suspend_rqst, 1);
14238 
14239 	DHD_RING_UNLOCK(ring->ring_lock, flags);
14240 
14241 #ifdef PCIE_INB_DW
14242 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
14243 #endif
14244 
14245 	return BCME_OK;
14246 }
14247 #endif /* IDLE_TX_FLOW_MGMT */
14248 
14249 #if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP)
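/** Record an ioctl in the circular trace: cmd, transaction id and, for cmds 262/263 (iovar get/set), the leading bytes of the request buffer */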
14250 static void
14251 dhd_prot_ioctl_trace(dhd_pub_t *dhd, ioctl_req_msg_t *ioct_rqst, uchar *buf, int len)
14252 {
14253 	struct dhd_prot *prot = dhd->prot;
14254 	uint32 cnt = prot->ioctl_trace_count % MAX_IOCTL_TRACE_SIZE;
14255 
14256 	prot->ioctl_trace[cnt].cmd = ioct_rqst->cmd;
14257 	prot->ioctl_trace[cnt].transid = ioct_rqst->trans_id;
14258 	if ((ioct_rqst->cmd == 262 || ioct_rqst->cmd == 263) && buf)
14259 		memcpy(prot->ioctl_trace[cnt].ioctl_buf, buf,
14260 			len > MAX_IOCTL_BUF_SIZE ? MAX_IOCTL_BUF_SIZE : len);
14261 	else
14262 		memset(prot->ioctl_trace[cnt].ioctl_buf, 0, MAX_IOCTL_BUF_SIZE);
14263 	prot->ioctl_trace[cnt].timestamp = OSL_SYSUPTIME_US();
14264 	prot->ioctl_trace_count++;
14265 }
14266 
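/** Print the recorded ioctl trace entries (timestamp, cmd, transaction id, iovar name) */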
14267 static void
14268 dhd_prot_ioctl_dump(dhd_prot_t *prot, struct bcmstrbuf *strbuf)
14269 {
14270 	int dumpsz;
14271 	int i;
14272 
14273 	dumpsz = prot->ioctl_trace_count < MAX_IOCTL_TRACE_SIZE ?
14274 		prot->ioctl_trace_count : MAX_IOCTL_TRACE_SIZE;
14275 	if (dumpsz == 0) {
14276 		bcm_bprintf(strbuf, "\nEmpty IOCTL TRACE\n");
14277 		return;
14278 	}
14279 	bcm_bprintf(strbuf, "----------- IOCTL TRACE --------------\n");
14280 	bcm_bprintf(strbuf, "Timestamp us\t\tCMD\tTransID\tIOVAR\n");
14281 	for (i = 0; i < dumpsz; i++) {
14282 		bcm_bprintf(strbuf, "%llu\t%d\t%d\t%s\n",
14283 			prot->ioctl_trace[i].timestamp,
14284 			prot->ioctl_trace[i].cmd,
14285 			prot->ioctl_trace[i].transid,
14286 			prot->ioctl_trace[i].ioctl_buf);
14287 	}
14288 }
14289 #endif /* defined(BCMINTERNAL) && defined(DHD_DBG_DUMP) */
14290 
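/** Print the MAC/PSM registers captured in a version 1 PSM watchdog trap TLV */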
14291 static void dump_psmwd_v1(const bcm_tlv_t *tlv, struct bcmstrbuf *b)
14292 {
14293 	const hnd_ext_trap_psmwd_v1_t* psmwd = NULL;
14294 	uint32 i;
14295 	psmwd = (const hnd_ext_trap_psmwd_v1_t *)tlv;
14296 	for (i = 0; i < PSMDBG_REG_READ_CNT_FOR_PSMWDTRAP_V1; i++) {
14297 		bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, psmwd->i32_psmdebug[i]);
14298 	}
14299 	bcm_bprintf(b, " gated clock en: 0x%x\n", psmwd->i16_0x1a8);
14300 	bcm_bprintf(b, " Rcv Fifo Ctrl: 0x%x\n", psmwd->i16_0x406);
14301 	bcm_bprintf(b, " Rx ctrl 1: 0x%x\n", psmwd->i16_0x408);
14302 	bcm_bprintf(b, " Rxe Status 1: 0x%x\n", psmwd->i16_0x41a);
14303 	bcm_bprintf(b, " Rxe Status 2: 0x%x\n", psmwd->i16_0x41c);
14304 	bcm_bprintf(b, " rcv wrd count 0: 0x%x\n", psmwd->i16_0x424);
14305 	bcm_bprintf(b, " rcv wrd count 1: 0x%x\n", psmwd->i16_0x426);
14306 	bcm_bprintf(b, " RCV_LFIFO_STS: 0x%x\n", psmwd->i16_0x456);
14307 	bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", psmwd->i16_0x480);
14308 	bcm_bprintf(b, " TXE CTRL: 0x%x\n", psmwd->i16_0x500);
14309 	bcm_bprintf(b, " TXE Status: 0x%x\n", psmwd->i16_0x50e);
14310 	bcm_bprintf(b, " TXE_xmtdmabusy: 0x%x\n", psmwd->i16_0x55e);
14311 	bcm_bprintf(b, " TXE_XMTfifosuspflush: 0x%x\n", psmwd->i16_0x566);
14312 	bcm_bprintf(b, " IFS Stat: 0x%x\n", psmwd->i16_0x690);
14313 	bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", psmwd->i16_0x692);
14314 	bcm_bprintf(b, " IFS_TX_DUR: 0x%x\n", psmwd->i16_0x694);
14315 	bcm_bprintf(b, " Slow_CTL: 0x%x\n", psmwd->i16_0x6a0);
14316 	bcm_bprintf(b, " PSM BRC: 0x%x\n", psmwd->i16_0x490);
14317 	bcm_bprintf(b, " TXE_AQM fifo Ready: 0x%x\n", psmwd->i16_0x838);
14318 	bcm_bprintf(b, " Dagg ctrl: 0x%x\n", psmwd->i16_0x8c0);
14319 	bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", psmwd->shm_prewds_cnt);
14320 	bcm_bprintf(b, " shm_txtplufl_cnt: 0x%x\n", psmwd->shm_txtplufl_cnt);
14321 	bcm_bprintf(b, " shm_txphyerr_cnt: 0x%x\n", psmwd->shm_txphyerr_cnt);
14322 
14323 }
14324 
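/** Print the MAC/PSM registers captured in a version 2 PSM watchdog trap TLV */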
14325 static void dump_psmwd_v2(const bcm_tlv_t *tlv, struct bcmstrbuf *b)
14326 {
14327 	const hnd_ext_trap_psmwd_t* psmwd = NULL;
14328 	uint32 i;
14329 	psmwd = (const hnd_ext_trap_psmwd_t *)tlv;
14330 	for (i = 0; i < PSMDBG_REG_READ_CNT_FOR_PSMWDTRAP_V2; i++) {
14331 		bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, psmwd->i32_psmdebug[i]);
14332 	}
14333 
14334 	bcm_bprintf(b, " psm_brwk0: 0x%x\n", psmwd->i16_0x4b8);
14335 	bcm_bprintf(b, " psm_brwk1: 0x%x\n", psmwd->i16_0x4ba);
14336 	bcm_bprintf(b, " psm_brwk2: 0x%x\n", psmwd->i16_0x4bc);
14337 	bcm_bprintf(b, " psm_brwk3: 0x%x\n", psmwd->i16_0x4be);
14338 	bcm_bprintf(b, " PSM BRC_1: 0x%x\n", psmwd->i16_0x4da);
14339 	bcm_bprintf(b, " gated clock en: 0x%x\n", psmwd->i16_0x1a8);
14340 	bcm_bprintf(b, " Rcv Fifo Ctrl: 0x%x\n", psmwd->i16_0x406);
14341 	bcm_bprintf(b, " Rx ctrl 1: 0x%x\n", psmwd->i16_0x408);
14342 	bcm_bprintf(b, " Rxe Status 1: 0x%x\n", psmwd->i16_0x41a);
14343 	bcm_bprintf(b, " Rxe Status 2: 0x%x\n", psmwd->i16_0x41c);
14344 	bcm_bprintf(b, " rcv wrd count 0: 0x%x\n", psmwd->i16_0x424);
14345 	bcm_bprintf(b, " rcv wrd count 1: 0x%x\n", psmwd->i16_0x426);
14346 	bcm_bprintf(b, " RCV_LFIFO_STS: 0x%x\n", psmwd->i16_0x456);
14347 	bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", psmwd->i16_0x480);
14348 	bcm_bprintf(b, " TXE CTRL: 0x%x\n", psmwd->i16_0x500);
14349 	bcm_bprintf(b, " TXE Status: 0x%x\n", psmwd->i16_0x50e);
14350 	bcm_bprintf(b, " TXE_xmtdmabusy: 0x%x\n", psmwd->i16_0x55e);
14351 	bcm_bprintf(b, " TXE_XMTfifosuspflush: 0x%x\n", psmwd->i16_0x566);
14352 	bcm_bprintf(b, " IFS Stat: 0x%x\n", psmwd->i16_0x690);
14353 	bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", psmwd->i16_0x692);
14354 	bcm_bprintf(b, " IFS_TX_DUR: 0x%x\n", psmwd->i16_0x694);
14355 	bcm_bprintf(b, " Slow_CTL: 0x%x\n", psmwd->i16_0x6a0);
14356 	bcm_bprintf(b, " PSM BRC: 0x%x\n", psmwd->i16_0x490);
14357 	bcm_bprintf(b, " TXE_AQM fifo Ready: 0x%x\n", psmwd->i16_0x838);
14358 	bcm_bprintf(b, " Dagg ctrl: 0x%x\n", psmwd->i16_0x8c0);
14359 	bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", psmwd->shm_prewds_cnt);
14360 	bcm_bprintf(b, " shm_txtplufl_cnt: 0x%x\n", psmwd->shm_txtplufl_cnt);
14361 	bcm_bprintf(b, " shm_txphyerr_cnt: 0x%x\n", psmwd->shm_txphyerr_cnt);
14362 }
14363 
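/** Map an extended trap data (ETD) TLV tag to a printable name */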
14364 static const char* etd_trap_name(hnd_ext_tag_trap_t tag)
14365 {
14366 	switch (tag) {
14367 	case TAG_TRAP_SIGNATURE: return "TAG_TRAP_SIGNATURE";
14368 	case TAG_TRAP_STACK: return "TAG_TRAP_STACK";
14369 	case TAG_TRAP_MEMORY: return "TAG_TRAP_MEMORY";
14370 	case TAG_TRAP_DEEPSLEEP: return "TAG_TRAP_DEEPSLEEP";
14371 	case TAG_TRAP_PSM_WD: return "TAG_TRAP_PSM_WD";
14372 	case TAG_TRAP_PHY: return "TAG_TRAP_PHY";
14373 	case TAG_TRAP_BUS: return "TAG_TRAP_BUS";
14374 	case TAG_TRAP_MAC_SUSP: return "TAG_TRAP_MAC_SUSP";
14375 	case TAG_TRAP_BACKPLANE: return "TAG_TRAP_BACKPLANE";
14376 	case TAG_TRAP_PCIE_Q: return "TAG_TRAP_PCIE_Q";
14377 	case TAG_TRAP_WLC_STATE: return "TAG_TRAP_WLC_STATE";
14378 	case TAG_TRAP_MAC_WAKE: return "TAG_TRAP_MAC_WAKE";
14379 	case TAG_TRAP_HMAP: return "TAG_TRAP_HMAP";
14380 	case TAG_TRAP_PHYTXERR_THRESH: return "TAG_TRAP_PHYTXERR_THRESH";
14381 	case TAG_TRAP_HC_DATA: return "TAG_TRAP_HC_DATA";
14382 	case TAG_TRAP_LOG_DATA: return "TAG_TRAP_LOG_DATA";
14383 	case TAG_TRAP_CODE: return "TAG_TRAP_CODE";
14384 	case TAG_TRAP_MEM_BIT_FLIP: return "TAG_TRAP_MEM_BIT_FLIP";
14385 	case TAG_TRAP_LAST:
14386 	default:
14387 		return "Unknown";
14388 	}
14389 	return "Unknown";
14390 }
14391 
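/** Decode and print the extended trap data TLVs reported by the dongle; when 'raw' is set, dump the undecoded words instead */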
14392 int dhd_prot_dump_extended_trap(dhd_pub_t *dhdp, struct bcmstrbuf *b, bool raw)
14393 {
14394 	uint32 i;
14395 	uint32 *ext_data;
14396 	hnd_ext_trap_hdr_t *hdr;
14397 	const bcm_tlv_t *tlv;
14398 	const trap_t *tr;
14399 	const uint32 *stack;
14400 	const hnd_ext_trap_bp_err_t *bpe;
14401 	uint32 raw_len;
14402 
14403 	ext_data = dhdp->extended_trap_data;
14404 
14405 	/* return if there is no extended trap data */
14406 	if (!ext_data || !(dhdp->dongle_trap_data & D2H_DEV_EXT_TRAP_DATA)) {
14407 		bcm_bprintf(b, "%d (0x%x)", dhdp->dongle_trap_data, dhdp->dongle_trap_data);
14408 		return BCME_OK;
14409 	}
14410 
14411 	bcm_bprintf(b, "Extended trap data\n");
14412 
14413 	/* First word is original trap_data */
14414 	bcm_bprintf(b, "trap_data = 0x%08x\n", *ext_data);
14415 	ext_data++;
14416 
14417 	/* Followed by the extended trap data header */
14418 	hdr = (hnd_ext_trap_hdr_t *)ext_data;
14419 	bcm_bprintf(b, "version: %d, len: %d\n", hdr->version, hdr->len);
14420 
14421 	/* Dump a list of all tags found  before parsing data */
14422 	bcm_bprintf(b, "\nTags Found:\n");
14423 	for (i = 0; i < TAG_TRAP_LAST; i++) {
14424 		tlv = bcm_parse_tlvs(hdr->data, hdr->len, i);
14425 		if (tlv)
14426 			bcm_bprintf(b, "Tag: %d (%s), Length: %d\n", i, etd_trap_name(i), tlv->len);
14427 	}
14428 
14429 	/* XXX debug dump */
14430 	if (raw) {
14431 		raw_len = sizeof(hnd_ext_trap_hdr_t) + (hdr->len / 4) + (hdr->len % 4 ? 1 : 0);
14432 		for (i = 0; i < raw_len; i++)
14433 		{
14434 			bcm_bprintf(b, "0x%08x ", ext_data[i]);
14435 			if (i % 4 == 3)
14436 				bcm_bprintf(b, "\n");
14437 		}
14438 		return BCME_OK;
14439 	}
14440 
14441 	/* Extract the various supported TLVs from the extended trap data */
14442 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_CODE);
14443 	if (tlv) {
14444 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_CODE), tlv->len);
14445 		bcm_bprintf(b, "ETD TYPE: %d\n", tlv->data[0]);
14446 	}
14447 
14448 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
14449 	if (tlv) {
14450 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_SIGNATURE), tlv->len);
14451 		tr = (const trap_t *)tlv->data;
14452 
14453 		bcm_bprintf(b, "TRAP %x: pc %x, lr %x, sp %x, cpsr %x, spsr %x\n",
14454 		       tr->type, tr->pc, tr->r14, tr->r13, tr->cpsr, tr->spsr);
14455 		bcm_bprintf(b, "  r0 %x, r1 %x, r2 %x, r3 %x, r4 %x, r5 %x, r6 %x\n",
14456 		       tr->r0, tr->r1, tr->r2, tr->r3, tr->r4, tr->r5, tr->r6);
14457 		bcm_bprintf(b, "  r7 %x, r8 %x, r9 %x, r10 %x, r11 %x, r12 %x\n",
14458 		       tr->r7, tr->r8, tr->r9, tr->r10, tr->r11, tr->r12);
14459 	}
14460 
14461 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
14462 	if (tlv) {
14463 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_STACK), tlv->len);
14464 		stack = (const uint32 *)tlv->data;
14465 		for (i = 0; i < (uint32)(tlv->len / 4); i++)
14466 		{
14467 			bcm_bprintf(b, "  0x%08x\n", *stack);
14468 			stack++;
14469 		}
14470 	}
14471 
14472 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BACKPLANE);
14473 	if (tlv) {
14474 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_BACKPLANE), tlv->len);
14475 		bpe = (const hnd_ext_trap_bp_err_t *)tlv->data;
14476 		bcm_bprintf(b, " error: %x\n", bpe->error);
14477 		bcm_bprintf(b, " coreid: %x\n", bpe->coreid);
14478 		bcm_bprintf(b, " baseaddr: %x\n", bpe->baseaddr);
14479 		bcm_bprintf(b, " ioctrl: %x\n", bpe->ioctrl);
14480 		bcm_bprintf(b, " iostatus: %x\n", bpe->iostatus);
14481 		bcm_bprintf(b, " resetctrl: %x\n", bpe->resetctrl);
14482 		bcm_bprintf(b, " resetstatus: %x\n", bpe->resetstatus);
14483 		bcm_bprintf(b, " errlogctrl: %x\n", bpe->errlogctrl);
14484 		bcm_bprintf(b, " errlogdone: %x\n", bpe->errlogdone);
14485 		bcm_bprintf(b, " errlogstatus: %x\n", bpe->errlogstatus);
14486 		bcm_bprintf(b, " errlogaddrlo: %x\n", bpe->errlogaddrlo);
14487 		bcm_bprintf(b, " errlogaddrhi: %x\n", bpe->errlogaddrhi);
14488 		bcm_bprintf(b, " errlogid: %x\n", bpe->errlogid);
14489 		bcm_bprintf(b, " errloguser: %x\n", bpe->errloguser);
14490 		bcm_bprintf(b, " errlogflags: %x\n", bpe->errlogflags);
14491 	}
14492 
14493 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MEMORY);
14494 	if (tlv) {
14495 		const hnd_ext_trap_heap_err_t* hme;
14496 
14497 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MEMORY), tlv->len);
14498 		hme = (const hnd_ext_trap_heap_err_t *)tlv->data;
14499 		bcm_bprintf(b, " arena total: %d\n", hme->arena_total);
14500 		bcm_bprintf(b, " heap free: %d\n", hme->heap_free);
14501 		bcm_bprintf(b, " heap in use: %d\n", hme->heap_inuse);
14502 		bcm_bprintf(b, " mf count: %d\n", hme->mf_count);
14503 		bcm_bprintf(b, " stack LWM: %x\n", hme->stack_lwm);
14504 
14505 		bcm_bprintf(b, " Histogram:\n");
14506 		for (i = 0; i < (HEAP_HISTOGRAM_DUMP_LEN * 2); i += 2) {
14507 			if (hme->heap_histogm[i] == 0xfffe)
14508 				bcm_bprintf(b, " Others\t%d\t?\n", hme->heap_histogm[i + 1]);
14509 			else if (hme->heap_histogm[i] == 0xffff)
14510 				bcm_bprintf(b, " >= 256K\t%d\t?\n", hme->heap_histogm[i + 1]);
14511 			else
14512 				bcm_bprintf(b, " %d\t%d\t%d\n", hme->heap_histogm[i] << 2,
14513 					hme->heap_histogm[i + 1], (hme->heap_histogm[i] << 2)
14514 					* hme->heap_histogm[i + 1]);
14515 		}
14516 
14517 		bcm_bprintf(b, " Max free block: %d\n", hme->max_sz_free_blk[0] << 2);
14518 		for (i = 1; i < HEAP_MAX_SZ_BLKS_LEN; i++) {
14519 			bcm_bprintf(b, " Next lgst free block: %d\n", hme->max_sz_free_blk[i] << 2);
14520 		}
14521 	}
14522 
14523 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PCIE_Q);
14524 	if (tlv) {
14525 		const hnd_ext_trap_pcie_mem_err_t* pqme;
14526 
14527 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PCIE_Q), tlv->len);
14528 		pqme = (const hnd_ext_trap_pcie_mem_err_t *)tlv->data;
14529 		bcm_bprintf(b, " d2h queue len: %x\n", pqme->d2h_queue_len);
14530 		bcm_bprintf(b, " d2h req queue len: %x\n", pqme->d2h_req_queue_len);
14531 	}
14532 
14533 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_WLC_STATE);
14534 	if (tlv) {
14535 		const hnd_ext_trap_wlc_mem_err_t* wsme;
14536 
14537 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_WLC_STATE), tlv->len);
14538 		wsme = (const hnd_ext_trap_wlc_mem_err_t *)tlv->data;
14539 		bcm_bprintf(b, " instance: %d\n", wsme->instance);
14540 		bcm_bprintf(b, " associated: %d\n", wsme->associated);
14541 		bcm_bprintf(b, " peer count: %d\n", wsme->peer_cnt);
14542 		bcm_bprintf(b, " client count: %d\n", wsme->soft_ap_client_cnt);
14543 		bcm_bprintf(b, " TX_AC_BK_FIFO: %d\n", wsme->txqueue_len[0]);
14544 		bcm_bprintf(b, " TX_AC_BE_FIFO: %d\n", wsme->txqueue_len[1]);
14545 		bcm_bprintf(b, " TX_AC_VI_FIFO: %d\n", wsme->txqueue_len[2]);
14546 		bcm_bprintf(b, " TX_AC_VO_FIFO: %d\n", wsme->txqueue_len[3]);
14547 
14548 		if (tlv->len >= (sizeof(*wsme) * 2)) {
14549 			wsme++;
14550 			bcm_bprintf(b, "\n instance: %d\n", wsme->instance);
14551 			bcm_bprintf(b, " associated: %d\n", wsme->associated);
14552 			bcm_bprintf(b, " peer count: %d\n", wsme->peer_cnt);
14553 			bcm_bprintf(b, " client count: %d\n", wsme->soft_ap_client_cnt);
14554 			bcm_bprintf(b, " TX_AC_BK_FIFO: %d\n", wsme->txqueue_len[0]);
14555 			bcm_bprintf(b, " TX_AC_BE_FIFO: %d\n", wsme->txqueue_len[1]);
14556 			bcm_bprintf(b, " TX_AC_VI_FIFO: %d\n", wsme->txqueue_len[2]);
14557 			bcm_bprintf(b, " TX_AC_VO_FIFO: %d\n", wsme->txqueue_len[3]);
14558 		}
14559 	}
14560 
14561 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PHY);
14562 	if (tlv) {
14563 		const hnd_ext_trap_phydbg_t* phydbg;
14564 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PHY), tlv->len);
14565 		phydbg = (const hnd_ext_trap_phydbg_t *)tlv->data;
14566 		bcm_bprintf(b, " err: 0x%x\n", phydbg->err);
14567 		bcm_bprintf(b, " RxFeStatus: 0x%x\n", phydbg->RxFeStatus);
14568 		bcm_bprintf(b, " TxFIFOStatus0: 0x%x\n", phydbg->TxFIFOStatus0);
14569 		bcm_bprintf(b, " TxFIFOStatus1: 0x%x\n", phydbg->TxFIFOStatus1);
14570 		bcm_bprintf(b, " RfseqMode: 0x%x\n", phydbg->RfseqMode);
14571 		bcm_bprintf(b, " RfseqStatus0: 0x%x\n", phydbg->RfseqStatus0);
14572 		bcm_bprintf(b, " RfseqStatus1: 0x%x\n", phydbg->RfseqStatus1);
14573 		bcm_bprintf(b, " RfseqStatus_Ocl: 0x%x\n", phydbg->RfseqStatus_Ocl);
14574 		bcm_bprintf(b, " RfseqStatus_Ocl1: 0x%x\n", phydbg->RfseqStatus_Ocl1);
14575 		bcm_bprintf(b, " OCLControl1: 0x%x\n", phydbg->OCLControl1);
14576 		bcm_bprintf(b, " TxError: 0x%x\n", phydbg->TxError);
14577 		bcm_bprintf(b, " bphyTxError: 0x%x\n", phydbg->bphyTxError);
14578 		bcm_bprintf(b, " TxCCKError: 0x%x\n", phydbg->TxCCKError);
14579 		bcm_bprintf(b, " TxCtrlWrd0: 0x%x\n", phydbg->TxCtrlWrd0);
14580 		bcm_bprintf(b, " TxCtrlWrd1: 0x%x\n", phydbg->TxCtrlWrd1);
14581 		bcm_bprintf(b, " TxCtrlWrd2: 0x%x\n", phydbg->TxCtrlWrd2);
14582 		bcm_bprintf(b, " TxLsig0: 0x%x\n", phydbg->TxLsig0);
14583 		bcm_bprintf(b, " TxLsig1: 0x%x\n", phydbg->TxLsig1);
14584 		bcm_bprintf(b, " TxVhtSigA10: 0x%x\n", phydbg->TxVhtSigA10);
14585 		bcm_bprintf(b, " TxVhtSigA11: 0x%x\n", phydbg->TxVhtSigA11);
14586 		bcm_bprintf(b, " TxVhtSigA20: 0x%x\n", phydbg->TxVhtSigA20);
14587 		bcm_bprintf(b, " TxVhtSigA21: 0x%x\n", phydbg->TxVhtSigA21);
14588 		bcm_bprintf(b, " txPktLength: 0x%x\n", phydbg->txPktLength);
14589 		bcm_bprintf(b, " txPsdulengthCtr: 0x%x\n", phydbg->txPsdulengthCtr);
14590 		bcm_bprintf(b, " gpioClkControl: 0x%x\n", phydbg->gpioClkControl);
14591 		bcm_bprintf(b, " gpioSel: 0x%x\n", phydbg->gpioSel);
14592 		bcm_bprintf(b, " pktprocdebug: 0x%x\n", phydbg->pktprocdebug);
14593 		for (i = 0; i < 3; i++)
14594 			bcm_bprintf(b, " gpioOut[%d]: 0x%x\n", i, phydbg->gpioOut[i]);
14595 	}
14596 
14597 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PSM_WD);
14598 	if (tlv) {
14599 		const hnd_ext_trap_psmwd_t* psmwd;
14600 
14601 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PSM_WD), tlv->len);
14602 		psmwd = (const hnd_ext_trap_psmwd_t *)tlv->data;
14603 		bcm_bprintf(b, " version: 0x%x\n", psmwd->version);
14604 		bcm_bprintf(b, " maccontrol: 0x%x\n", psmwd->i32_maccontrol);
14605 		bcm_bprintf(b, " maccommand: 0x%x\n", psmwd->i32_maccommand);
14606 		bcm_bprintf(b, " macintstatus: 0x%x\n", psmwd->i32_macintstatus);
14607 		bcm_bprintf(b, " phydebug: 0x%x\n", psmwd->i32_phydebug);
14608 		bcm_bprintf(b, " clk_ctl_st: 0x%x\n", psmwd->i32_clk_ctl_st);
14609 		if (psmwd->version == 1) {
14610 			dump_psmwd_v1(tlv, b);
14611 		}
14612 		if (psmwd->version == 2) {
14613 			dump_psmwd_v2(tlv, b);
14614 		}
14615 	}
14616 	/* PHY TxErr MacDump */
14617 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PHYTXERR_THRESH);
14618 	if (tlv) {
14619 		const hnd_ext_trap_macphytxerr_t* phytxerr = NULL;
14620 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PHYTXERR_THRESH), tlv->len);
14621 		phytxerr = (const hnd_ext_trap_macphytxerr_t *)tlv->data;
14622 		bcm_bprintf(b, " version: 0x%x\n", phytxerr->version);
14623 		bcm_bprintf(b, " trap_reason: %d\n", phytxerr->trap_reason);
14624 		bcm_bprintf(b, " Tsf_rx_ts_0x63E: 0x%x\n", phytxerr->i16_0x63E);
14625 		bcm_bprintf(b, " Tsf_tx_ts_0x640: 0x%x\n", phytxerr->i16_0x640);
14626 		bcm_bprintf(b, " tsf_tmr_rx_end_ts_0x642: 0x%x\n", phytxerr->i16_0x642);
14627 		bcm_bprintf(b, " TDC_FrmLen0_0x846: 0x%x\n", phytxerr->i16_0x846);
14628 		bcm_bprintf(b, " TDC_FrmLen1_0x848: 0x%x\n", phytxerr->i16_0x848);
14629 		bcm_bprintf(b, " TDC_Txtime_0x84a: 0x%x\n", phytxerr->i16_0x84a);
14630 		bcm_bprintf(b, " TXE_BytCntInTxFrmLo_0xa5a: 0x%x\n", phytxerr->i16_0xa5a);
14631 		bcm_bprintf(b, " TXE_BytCntInTxFrmHi_0xa5c: 0x%x\n", phytxerr->i16_0xa5c);
14632 		bcm_bprintf(b, " TDC_VhtPsduLen0_0x856: 0x%x\n", phytxerr->i16_0x856);
14633 		bcm_bprintf(b, " TDC_VhtPsduLen1_0x858: 0x%x\n", phytxerr->i16_0x858);
14634 		bcm_bprintf(b, " PSM_BRC: 0x%x\n", phytxerr->i16_0x490);
14635 		bcm_bprintf(b, " PSM_BRC_1: 0x%x\n", phytxerr->i16_0x4d8);
14636 		bcm_bprintf(b, " shm_txerr_reason: 0x%x\n", phytxerr->shm_txerr_reason);
14637 		bcm_bprintf(b, " shm_pctl0: 0x%x\n", phytxerr->shm_pctl0);
14638 		bcm_bprintf(b, " shm_pctl1: 0x%x\n", phytxerr->shm_pctl1);
14639 		bcm_bprintf(b, " shm_pctl2: 0x%x\n", phytxerr->shm_pctl2);
14640 		bcm_bprintf(b, " shm_lsig0: 0x%x\n", phytxerr->shm_lsig0);
14641 		bcm_bprintf(b, " shm_lsig1: 0x%x\n", phytxerr->shm_lsig1);
14642 		bcm_bprintf(b, " shm_plcp0: 0x%x\n", phytxerr->shm_plcp0);
14643 		bcm_bprintf(b, " shm_plcp1: 0x%x\n", phytxerr->shm_plcp1);
14644 		bcm_bprintf(b, " shm_plcp2: 0x%x\n", phytxerr->shm_plcp2);
14645 		bcm_bprintf(b, " shm_vht_sigb0: 0x%x\n", phytxerr->shm_vht_sigb0);
14646 		bcm_bprintf(b, " shm_vht_sigb1: 0x%x\n", phytxerr->shm_vht_sigb1);
14647 		bcm_bprintf(b, " shm_tx_tst: 0x%x\n", phytxerr->shm_tx_tst);
14648 		bcm_bprintf(b, " shm_txerr_tm: 0x%x\n", phytxerr->shm_txerr_tm);
14649 		bcm_bprintf(b, " shm_curchannel: 0x%x\n", phytxerr->shm_curchannel);
14650 		bcm_bprintf(b, " shm_blk_crx_rxtsf_pos: 0x%x\n", phytxerr->shm_crx_rxtsf_pos);
14651 		bcm_bprintf(b, " shm_lasttx_tsf: 0x%x\n", phytxerr->shm_lasttx_tsf);
14652 		bcm_bprintf(b, " shm_s_rxtsftmrval: 0x%x\n", phytxerr->shm_s_rxtsftmrval);
14653 		bcm_bprintf(b, " Phy_0x29: 0x%x\n", phytxerr->i16_0x29);
14654 		bcm_bprintf(b, " Phy_0x2a: 0x%x\n", phytxerr->i16_0x2a);
14655 	}
14656 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MAC_SUSP);
14657 	if (tlv) {
14658 		const hnd_ext_trap_macsusp_t* macsusp;
14659 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MAC_SUSP), tlv->len);
14660 		macsusp = (const hnd_ext_trap_macsusp_t *)tlv->data;
14661 		bcm_bprintf(b, " version: %d\n", macsusp->version);
14662 		bcm_bprintf(b, " trap_reason: %d\n", macsusp->trap_reason);
14663 		bcm_bprintf(b, " maccontrol: 0x%x\n", macsusp->i32_maccontrol);
14664 		bcm_bprintf(b, " maccommand: 0x%x\n", macsusp->i32_maccommand);
14665 		bcm_bprintf(b, " macintstatus: 0x%x\n", macsusp->i32_macintstatus);
14666 		for (i = 0; i < 4; i++)
14667 			bcm_bprintf(b, " phydebug[%d]: 0x%x\n", i, macsusp->i32_phydebug[i]);
14668 		for (i = 0; i < 8; i++)
14669 			bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, macsusp->i32_psmdebug[i]);
14670 		bcm_bprintf(b, " Rxe Status_1: 0x%x\n", macsusp->i16_0x41a);
14671 		bcm_bprintf(b, " Rxe Status_2: 0x%x\n", macsusp->i16_0x41c);
14672 		bcm_bprintf(b, " PSM BRC: 0x%x\n", macsusp->i16_0x490);
14673 		bcm_bprintf(b, " TXE Status: 0x%x\n", macsusp->i16_0x50e);
14674 		bcm_bprintf(b, " TXE xmtdmabusy: 0x%x\n", macsusp->i16_0x55e);
14675 		bcm_bprintf(b, " TXE XMTfifosuspflush: 0x%x\n", macsusp->i16_0x566);
14676 		bcm_bprintf(b, " IFS Stat: 0x%x\n", macsusp->i16_0x690);
14677 		bcm_bprintf(b, " IFS MEDBUSY CTR: 0x%x\n", macsusp->i16_0x692);
14678 		bcm_bprintf(b, " IFS TX DUR: 0x%x\n", macsusp->i16_0x694);
14679 		bcm_bprintf(b, " WEP CTL: 0x%x\n", macsusp->i16_0x7c0);
14680 		bcm_bprintf(b, " TXE AQM fifo Ready: 0x%x\n", macsusp->i16_0x838);
14681 		bcm_bprintf(b, " MHP status: 0x%x\n", macsusp->i16_0x880);
14682 		bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", macsusp->shm_prewds_cnt);
14683 		bcm_bprintf(b, " shm_ucode_dbgst: 0x%x\n", macsusp->shm_ucode_dbgst);
14684 	}
14685 
14686 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MAC_WAKE);
14687 	if (tlv) {
14688 		const hnd_ext_trap_macenab_t* macwake;
14689 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MAC_WAKE), tlv->len);
14690 		macwake = (const hnd_ext_trap_macenab_t *)tlv->data;
14691 		bcm_bprintf(b, " version: 0x%x\n", macwake->version);
14692 		bcm_bprintf(b, " trap_reason: 0x%x\n", macwake->trap_reason);
14693 		bcm_bprintf(b, " maccontrol: 0x%x\n", macwake->i32_maccontrol);
14694 		bcm_bprintf(b, " maccommand: 0x%x\n", macwake->i32_maccommand);
14695 		bcm_bprintf(b, " macintstatus: 0x%x\n", macwake->i32_macintstatus);
14696 		for (i = 0; i < 8; i++)
14697 			bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, macwake->i32_psmdebug[i]);
14698 		bcm_bprintf(b, " clk_ctl_st: 0x%x\n", macwake->i32_clk_ctl_st);
14699 		bcm_bprintf(b, " powerctl: 0x%x\n", macwake->i32_powerctl);
14700 		bcm_bprintf(b, " gated clock en: 0x%x\n", macwake->i16_0x1a8);
14701 		bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", macwake->i16_0x480);
14702 		bcm_bprintf(b, " PSM BRC: 0x%x\n", macwake->i16_0x490);
14703 		bcm_bprintf(b, " TSF CTL: 0x%x\n", macwake->i16_0x600);
14704 		bcm_bprintf(b, " IFS Stat: 0x%x\n", macwake->i16_0x690);
14705 		bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", macwake->i16_0x692);
14706 		bcm_bprintf(b, " Slow_CTL: 0x%x\n", macwake->i16_0x6a0);
14707 		bcm_bprintf(b, " Slow_FRAC: 0x%x\n", macwake->i16_0x6a6);
14708 		bcm_bprintf(b, " fast power up delay: 0x%x\n", macwake->i16_0x6a8);
14709 		bcm_bprintf(b, " Slow_PER: 0x%x\n", macwake->i16_0x6aa);
14710 		bcm_bprintf(b, " shm_ucode_dbgst: 0x%x\n", macwake->shm_ucode_dbgst);
14711 	}
14712 
14713 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BUS);
14714 	if (tlv) {
14715 		const bcm_dngl_pcie_hc_t* hc;
14716 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_BUS), tlv->len);
14717 		hc = (const bcm_dngl_pcie_hc_t *)tlv->data;
14718 		bcm_bprintf(b, " version: 0x%x\n", hc->version);
14719 		bcm_bprintf(b, " reserved: 0x%x\n", hc->reserved);
14720 		bcm_bprintf(b, " pcie_err_ind_type: 0x%x\n", hc->pcie_err_ind_type);
14721 		bcm_bprintf(b, " pcie_flag: 0x%x\n", hc->pcie_flag);
14722 		bcm_bprintf(b, " pcie_control_reg: 0x%x\n", hc->pcie_control_reg);
14723 		for (i = 0; i < HC_PCIEDEV_CONFIG_REGLIST_MAX; i++)
14724 			bcm_bprintf(b, " pcie_config_regs[%d]: 0x%x\n", i, hc->pcie_config_regs[i]);
14725 	}
14726 
14727 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_HMAP);
14728 	if (tlv) {
14729 		const pcie_hmapviolation_t* hmap;
14730 		hmap = (const pcie_hmapviolation_t *)tlv->data;
14731 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_HMAP), tlv->len);
14732 		bcm_bprintf(b, " HMAP Vio Addr Low: 0x%x\n", hmap->hmap_violationaddr_lo);
14733 		bcm_bprintf(b, " HMAP Vio Addr Hi: 0x%x\n", hmap->hmap_violationaddr_hi);
14734 		bcm_bprintf(b, " HMAP Vio Info: 0x%x\n", hmap->hmap_violation_info);
14735 	}
14736 
14737 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MEM_BIT_FLIP);
14738 	if (tlv) {
14739 		const hnd_ext_trap_fb_mem_err_t* fbit;
14740 		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MEM_BIT_FLIP), tlv->len);
14741 		fbit = (const hnd_ext_trap_fb_mem_err_t *)tlv->data;
14742 		bcm_bprintf(b, " version: %d\n", fbit->version);
14743 		bcm_bprintf(b, " flip_bit_err_time: %d\n", fbit->flip_bit_err_time);
14744 	}
14745 
14746 	return BCME_OK;
14747 }
14748 
14749 #ifdef BCMPCIE
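/** Send a host timestamp request (MSG_TYPE_HOSTTIMSTAMP) carrying the given TLVs via the control submission ring */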
14750 int
14751 dhd_prot_send_host_timestamp(dhd_pub_t *dhdp, uchar *tlvs, uint16 tlv_len,
14752 	uint16 seqnum, uint16 xt_id)
14753 {
14754 	dhd_prot_t *prot = dhdp->prot;
14755 	host_timestamp_msg_t *ts_req;
14756 	unsigned long flags;
14757 	uint16 alloced = 0;
14758 	uchar *ts_tlv_buf;
14759 	msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
14760 
14761 	if ((tlvs == NULL) || (tlv_len == 0)) {
14762 		DHD_ERROR(("%s: argument error tlv: %p, tlv_len %d\n",
14763 			__FUNCTION__, tlvs, tlv_len));
14764 		return -1;
14765 	}
14766 
14767 #ifdef PCIE_INB_DW
14768 	if (dhd_prot_inc_hostactive_devwake_assert(dhdp->bus) != BCME_OK)
14769 		return BCME_ERROR;
14770 #endif /* PCIE_INB_DW */
14771 
14772 	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
14773 
14774 	/* if Host TS req already pending go away */
14775 	if (prot->hostts_req_buf_inuse == TRUE) {
14776 		DHD_ERROR(("one host TS request already pending at device\n"));
14777 		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
14778 #ifdef PCIE_INB_DW
14779 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
14780 #endif
14781 		return -1;
14782 	}
14783 
14784 	/* Request for cbuf space */
14785 	ts_req = (host_timestamp_msg_t*)dhd_prot_alloc_ring_space(dhdp, ctrl_ring,
14786 		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced, FALSE);
14787 	if (ts_req == NULL) {
14788 		DHD_ERROR(("couldn't allocate space on msgring to send host TS request\n"));
14789 		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
14790 #ifdef PCIE_INB_DW
14791 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
14792 #endif
14793 		return -1;
14794 	}
14795 
14796 	/* Common msg buf hdr */
14797 	ts_req->msg.msg_type = MSG_TYPE_HOSTTIMSTAMP;
14798 	ts_req->msg.if_id = 0;
14799 	ts_req->msg.flags = ctrl_ring->current_phase;
14800 	ts_req->msg.request_id = DHD_H2D_HOSTTS_REQ_PKTID;
14801 
14802 	ts_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
14803 	ctrl_ring->seqnum++;
14804 
14805 	ts_req->xt_id = xt_id;
14806 	ts_req->seqnum = seqnum;
14807 	/* populate TS req buffer info */
14808 	ts_req->input_data_len = htol16(tlv_len);
14809 	ts_req->host_buf_addr.high = htol32(PHYSADDRHI(prot->hostts_req_buf.pa));
14810 	ts_req->host_buf_addr.low = htol32(PHYSADDRLO(prot->hostts_req_buf.pa));
14811 	/* copy ioct payload */
14812 	ts_tlv_buf = (void *) prot->hostts_req_buf.va;
14813 	prot->hostts_req_buf_inuse = TRUE;
14814 	memcpy(ts_tlv_buf, tlvs, tlv_len);
14815 
14816 	OSL_CACHE_FLUSH((void *) prot->hostts_req_buf.va, tlv_len);
14817 
14818 	if (ISALIGNED(ts_tlv_buf, DMA_ALIGN_LEN) == FALSE) {
14819 		DHD_ERROR(("host TS req buffer address unaligned !!!!! \n"));
14820 	}
14821 
14822 	DHD_CTL(("submitted Host TS request: request_id %d, data_len %d, xt_id %d, seq %d\n",
14823 		ts_req->msg.request_id, ts_req->input_data_len,
14824 		ts_req->xt_id, ts_req->seqnum));
14825 
14826 	/* upd wrt ptr and raise interrupt */
14827 	dhd_prot_ring_write_complete(dhdp, ctrl_ring, ts_req,
14828 		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
14829 
14830 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
14831 
14832 #ifdef PCIE_INB_DW
14833 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
14834 #endif
14835 	return 0;
14836 } /* dhd_prot_send_host_timestamp */
14837 
14838 bool
14839 dhd_prot_data_path_tx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set)
14840 {
14841 	if (set)
14842 		dhd->prot->tx_ts_log_enabled = enable;
14843 
14844 	return dhd->prot->tx_ts_log_enabled;
14845 }
14846 
14847 bool
14848 dhd_prot_data_path_rx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set)
14849 {
14850 	if (set)
14851 		dhd->prot->rx_ts_log_enabled = enable;
14852 
14853 	return dhd->prot->rx_ts_log_enabled;
14854 }
14855 
14856 bool
14857 dhd_prot_pkt_noretry(dhd_pub_t *dhd, bool enable, bool set)
14858 {
14859 	if (set)
14860 		dhd->prot->no_retry = enable;
14861 
14862 	return dhd->prot->no_retry;
14863 }
14864 
14865 bool
14866 dhd_prot_pkt_noaggr(dhd_pub_t *dhd, bool enable, bool set)
14867 {
14868 	if (set)
14869 		dhd->prot->no_aggr = enable;
14870 
14871 	return dhd->prot->no_aggr;
14872 }
14873 
14874 bool
14875 dhd_prot_pkt_fixed_rate(dhd_pub_t *dhd, bool enable, bool set)
14876 {
14877 	if (set)
14878 		dhd->prot->fixed_rate = enable;
14879 
14880 	return dhd->prot->fixed_rate;
14881 }
14882 #endif /* BCMPCIE */
14883 
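/** Free the DMA-able host buffers used for H2D write and D2H read index updates */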
14884 void
14885 dhd_prot_dma_indx_free(dhd_pub_t *dhd)
14886 {
14887 	dhd_prot_t *prot = dhd->prot;
14888 
14889 	dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
14890 	dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
14891 }
14892 
14893 void
14894 dhd_msgbuf_delay_post_ts_bufs(dhd_pub_t *dhd)
14895 {
14896 	if (dhd->prot->max_tsbufpost > 0)
14897 		dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
14898 }
14899 
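/** Process a firmware timestamp event: reclaim the posted timestamp buffer and hand the payload to the timesync module */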
14900 static void
14901 BCMFASTPATH(dhd_prot_process_fw_timestamp)(dhd_pub_t *dhd, void* buf)
14902 {
14903 #ifdef DHD_TIMESYNC
14904 	fw_timestamp_event_msg_t *resp;
14905 	uint32 pktid;
14906 	uint16 buflen, seqnum;
14907 	void * pkt;
14908 
14909 	resp = (fw_timestamp_event_msg_t *)buf;
14910 	pktid = ltoh32(resp->msg.request_id);
14911 	buflen = ltoh16(resp->buf_len);
14912 	seqnum = ltoh16(resp->seqnum);
14913 
14914 #if defined(DHD_PKTID_AUDIT_RING)
14915 	DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_ctrl_map, pktid,
14916 		DHD_DUPLICATE_FREE);
14917 #endif /* DHD_PKTID_AUDIT_RING */
14918 
14919 	DHD_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d\n",
14920 		pktid, buflen, resp->msg.flags, seqnum));
14921 
14922 	if (!dhd->prot->cur_ts_bufs_posted) {
14923 		DHD_ERROR(("tsbuf posted are zero, but there is a completion\n"));
14924 		return;
14925 	}
14926 
14927 	dhd->prot->cur_ts_bufs_posted--;
14928 
14929 	if (!dhd_timesync_delay_post_bufs(dhd)) {
14930 		if (dhd->prot->max_tsbufpost > 0) {
14931 			dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
14932 		}
14933 	}
14934 
14935 	pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_TSBUF_RX, TRUE);
14936 
14937 	if (!pkt) {
14938 		DHD_ERROR(("no ts buffer associated with pktid 0x%04x\n", pktid));
14939 		return;
14940 	}
14941 
14942 	PKTSETLEN(dhd->osh, pkt, buflen);
14943 	dhd_timesync_handle_fw_timestamp(dhd->ts, PKTDATA(dhd->osh, pkt), buflen, seqnum);
14944 #ifdef DHD_USE_STATIC_CTRLBUF
14945 	PKTFREE_STATIC(dhd->osh, pkt, TRUE);
14946 #else
14947 	PKTFREE(dhd->osh, pkt, TRUE);
14948 #endif /* DHD_USE_STATIC_CTRLBUF */
14949 #else /* DHD_TIMESYNC */
14950 	DHD_ERROR(("Timesync feature not compiled in but received FW TS message\n"));
14951 #endif /* DHD_TIMESYNC */
14952 
14953 }
14954 
14955 uint16
14956 dhd_prot_get_ioctl_trans_id(dhd_pub_t *dhdp)
14957 {
14958 	return dhdp->prot->ioctl_trans_id;
14959 }
14960 
14961 #ifdef SNAPSHOT_UPLOAD
14962 /* send request to take snapshot */
14963 int
14964 dhd_prot_send_snapshot_request(dhd_pub_t *dhdp, uint8 snapshot_type, uint8 snapshot_param)
14965 {
14966 	dhd_prot_t *prot = dhdp->prot;
14967 	dhd_dma_buf_t *dma_buf = &prot->snapshot_upload_buf;
14968 	snapshot_upload_request_msg_t *snap_req;
14969 	unsigned long flags;
14970 	uint16 alloced = 0;
14971 	msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
14972 
14973 #ifdef PCIE_INB_DW
14974 	if (dhd_prot_inc_hostactive_devwake_assert(dhdp->bus) != BCME_OK)
14975 		return BCME_ERROR;
14976 #endif /* PCIE_INB_DW */
14977 
14978 	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
14979 
14980 	/* Request for cbuf space */
14981 	snap_req = (snapshot_upload_request_msg_t *)dhd_prot_alloc_ring_space(dhdp,
14982 		ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
14983 		&alloced, FALSE);
14984 	if (snap_req == NULL) {
14985 		DHD_ERROR(("couldn't allocate space on msgring to send snapshot request\n"));
14986 		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
14987 #ifdef PCIE_INB_DW
14988 		dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
14989 #endif
14990 		return BCME_ERROR;
14991 	}
14992 
14993 	/* Common msg buf hdr */
14994 	snap_req->cmn_hdr.msg_type = MSG_TYPE_SNAPSHOT_UPLOAD;
14995 	snap_req->cmn_hdr.if_id = 0;
14996 	snap_req->cmn_hdr.flags = ctrl_ring->current_phase;
14997 	snap_req->cmn_hdr.request_id = DHD_H2D_SNAPSHOT_UPLOAD_REQ_PKTID;
14998 	snap_req->cmn_hdr.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
14999 	ctrl_ring->seqnum++;
15000 
15001 	/* snapshot request msg */
15002 	snap_req->snapshot_buf_len = htol32(dma_buf->len);
15003 	snap_req->snapshot_type = snapshot_type;
15004 	snap_req->snapshot_param = snapshot_param;
15005 	snap_req->host_buf_addr.high = htol32(PHYSADDRHI(dma_buf->pa));
15006 	snap_req->host_buf_addr.low = htol32(PHYSADDRLO(dma_buf->pa));
15007 
15008 	if (ISALIGNED(dma_buf->va, DMA_ALIGN_LEN) == FALSE) {
15009 		DHD_ERROR(("snapshot req buffer address unaligned !!!!! \n"));
15010 	}
15011 
15012 	/* clear previous snapshot upload */
15013 	memset(dma_buf->va, 0, dma_buf->len);
15014 	prot->snapshot_upload_len = 0;
15015 	prot->snapshot_type = snapshot_type;
15016 	prot->snapshot_cmpl_pending = TRUE;
15017 
15018 	DHD_CTL(("submitted snapshot request: request_id %d, buf_len %d, type %d, param %d\n",
15019 		snap_req->cmn_hdr.request_id, snap_req->snapshot_buf_len,
15020 		snap_req->snapshot_type, snap_req->snapshot_param));
15021 
15022 	/* upd wrt ptr and raise interrupt */
15023 	dhd_prot_ring_write_complete(dhdp, ctrl_ring, snap_req,
15024 		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
15025 
15026 	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
15027 
15028 #ifdef PCIE_INB_DW
15029 	dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
15030 #endif
15031 
15032 	return BCME_OK;
15033 } /* dhd_prot_send_snapshot_request */
15034 
15035 /* get uploaded snapshot */
15036 int
15037 dhd_prot_get_snapshot(dhd_pub_t *dhdp, uint8 snapshot_type, uint32 offset,
15038 	uint32 dst_buf_size, uint8 *dst_buf, uint32 *dst_size, bool *is_more)
15039 {
15040 	dhd_prot_t *prot = dhdp->prot;
15041 	uint8 *buf = prot->snapshot_upload_buf.va;
15042 	uint8 *buf_end = buf + prot->snapshot_upload_len;
15043 	uint32 copy_size;
15044 
15045 	/* snapshot type must match */
15046 	if (prot->snapshot_type != snapshot_type) {
15047 		return BCME_DATA_NOTFOUND;
15048 	}
15049 
15050 	/* snapshot not completed */
15051 	if (prot->snapshot_cmpl_pending) {
15052 		return BCME_NOTREADY;
15053 	}
15054 
15055 	/* offset within the buffer */
15056 	if (buf + offset >= buf_end) {
15057 		return BCME_BADARG;
15058 	}
15059 
15060 	/* copy dst buf size or remaining size */
15061 	copy_size = MIN(dst_buf_size, buf_end - (buf + offset));
15062 	memcpy(dst_buf, buf + offset, copy_size);
15063 
15064 	/* return size and is_more */
15065 	*dst_size = copy_size;
15066 	*is_more = (offset + copy_size < prot->snapshot_upload_len) ?
15067 		TRUE : FALSE;
15068 	return BCME_OK;
15069 } /* dhd_prot_get_snapshot */
15070 
15071 #endif	/* SNAPSHOT_UPLOAD */
15072 
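/** Return the host SCB buffer virtual address and length; reports length 0 when HSCB is not enabled */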
15073 int dhd_get_hscb_info(dhd_pub_t *dhd, void ** va, uint32 *len)
15074 {
15075 	if (!dhd->hscb_enable) {
15076 		if (len) {
15077 			/* prevent "Operation not supported" dhd message */
15078 			*len = 0;
15079 			return BCME_OK;
15080 		}
15081 		return BCME_UNSUPPORTED;
15082 	}
15083 
15084 	if (va) {
15085 		*va = dhd->prot->host_scb_buf.va;
15086 	}
15087 	if (len) {
15088 		*len = dhd->prot->host_scb_buf.len;
15089 	}
15090 
15091 	return BCME_OK;
15092 }
15093 
15094 #ifdef DHD_BUS_MEM_ACCESS
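/** Copy 'length' bytes starting at 'offset' from the host SCB buffer into 'buff' */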
15095 int dhd_get_hscb_buff(dhd_pub_t *dhd, uint32 offset, uint32 length, void * buff)
15096 {
15097 	if (!dhd->hscb_enable) {
15098 		return BCME_UNSUPPORTED;
15099 	}
15100 
15101 	if (dhd->prot->host_scb_buf.va == NULL ||
15102 		((uint64)offset + length > (uint64)dhd->prot->host_scb_buf.len)) {
15103 		return BCME_BADADDR;
15104 	}
15105 
15106 	memcpy(buff, (char*)dhd->prot->host_scb_buf.va + offset, length);
15107 
15108 	return BCME_OK;
15109 }
15110 #endif /* DHD_BUS_MEM_ACCESS */
15111 
15112 #ifdef DHD_HP2P
15113 uint32
15114 dhd_prot_pkt_threshold(dhd_pub_t *dhd, bool set, uint32 val)
15115 {
15116 	if (set)
15117 		dhd->pkt_thresh = (uint16)val;
15118 
15119 	val = dhd->pkt_thresh;
15120 
15121 	return val;
15122 }
15123 
15124 uint32
15125 dhd_prot_time_threshold(dhd_pub_t *dhd, bool set, uint32 val)
15126 {
15127 	if (set)
15128 		dhd->time_thresh = (uint16)val;
15129 
15130 	val = dhd->time_thresh;
15131 
15132 	return val;
15133 }
15134 
15135 uint32
15136 dhd_prot_pkt_expiry(dhd_pub_t *dhd, bool set, uint32 val)
15137 {
15138 	if (set)
15139 		dhd->pkt_expiry = (uint16)val;
15140 
15141 	val = dhd->pkt_expiry;
15142 
15143 	return val;
15144 }
15145 
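/** Get/set HP2P enable state: the low nibble of 'enable' controls HP2P, the next nibble controls infra HP2P; the flow priority map is updated accordingly */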
15146 uint8
15147 dhd_prot_hp2p_enable(dhd_pub_t *dhd, bool set, int enable)
15148 {
15149 	uint8 ret = 0;
15150 	if (set) {
15151 		dhd->hp2p_enable = (enable & 0xf) ? TRUE : FALSE;
15152 		dhd->hp2p_infra_enable = ((enable >> 4) & 0xf) ? TRUE : FALSE;
15153 
15154 		if (enable) {
15155 			dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_TID_MAP);
15156 		} else {
15157 			dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_AC_MAP);
15158 		}
15159 	}
15160 	ret = dhd->hp2p_infra_enable ? 0x1:0x0;
15161 	ret <<= 4;
15162 	ret |= dhd->hp2p_enable ? 0x1:0x0;
15163 
15164 	return ret;
15165 }
15166 
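/** Bin the rx completion timestamp into the HP2P rx latency histogram */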
15167 static void
15168 dhd_update_hp2p_rxstats(dhd_pub_t *dhd, host_rxbuf_cmpl_t *rxstatus)
15169 {
15170 	ts_timestamp_t *ts = (ts_timestamp_t *)&rxstatus->ts;
15171 	hp2p_info_t *hp2p_info;
15172 	uint32 dur1;
15173 
15174 	hp2p_info = &dhd->hp2p_info[0];
15175 	dur1 = ((ts->high & 0x3FF) * HP2P_TIME_SCALE) / 100;
15176 
15177 	if (dur1 > (MAX_RX_HIST_BIN - 1)) {
15178 		dur1 = MAX_RX_HIST_BIN - 1;
15179 		DHD_INFO(("%s: 0x%x 0x%x\n",
15180 			__FUNCTION__, ts->low, ts->high));
15181 	}
15182 
15183 	hp2p_info->rx_t0[dur1 % MAX_RX_HIST_BIN]++;
15184 	return;
15185 }
15186 
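/** Bin the tx completion timestamps into the HP2P tx latency histograms (t0 and t1) */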
15187 static void
15188 dhd_update_hp2p_txstats(dhd_pub_t *dhd, host_txbuf_cmpl_t *txstatus)
15189 {
15190 	ts_timestamp_t *ts = (ts_timestamp_t *)&txstatus->ts;
15191 	uint16 flowid = txstatus->compl_hdr.flow_ring_id;
15192 	uint32 hp2p_flowid, dur1, dur2;
15193 	hp2p_info_t *hp2p_info;
15194 
15195 	hp2p_flowid = dhd->bus->max_submission_rings -
15196 		dhd->bus->max_cmn_rings - flowid + 1;
15197 	hp2p_info = &dhd->hp2p_info[hp2p_flowid];
15198 	ts = (ts_timestamp_t *)&(txstatus->ts);
15199 
15200 	dur1 = ((ts->high & 0x3FF) * HP2P_TIME_SCALE) / 1000;
15201 	if (dur1 > (MAX_TX_HIST_BIN - 1)) {
15202 		dur1 = MAX_TX_HIST_BIN - 1;
15203 		DHD_INFO(("%s: 0x%x 0x%x\n", __FUNCTION__, ts->low, ts->high));
15204 	}
15205 	hp2p_info->tx_t0[dur1 % MAX_TX_HIST_BIN]++;
15206 
15207 	dur2 = (((ts->high >> 10) & 0x3FF) * HP2P_TIME_SCALE) / 1000;
15208 	if (dur2 > (MAX_TX_HIST_BIN - 1)) {
15209 		dur2 = MAX_TX_HIST_BIN - 1;
15210 		DHD_INFO(("%s: 0x%x 0x%x\n", __FUNCTION__, ts->low, ts->high));
15211 	}
15212 
15213 	hp2p_info->tx_t1[dur2 % MAX_TX_HIST_BIN]++;
15214 	return;
15215 }
15216 
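/** hrtimer callback: flush any pending HP2P tx descriptors for the flow once the time threshold expires */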
15217 enum hrtimer_restart dhd_hp2p_write(struct hrtimer *timer)
15218 {
15219 	hp2p_info_t *hp2p_info;
15220 	unsigned long flags;
15221 	dhd_pub_t *dhdp;
15222 
15223 	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
15224 	hp2p_info = container_of(timer, hp2p_info_t, timer);
15225 	GCC_DIAGNOSTIC_POP();
15226 
15227 	dhdp = hp2p_info->dhd_pub;
15228 	if (!dhdp) {
15229 		goto done;
15230 	}
15231 
15232 	DHD_INFO(("%s: pend_item = %d flowid = %d\n",
15233 		__FUNCTION__, ((msgbuf_ring_t *)hp2p_info->ring)->pend_items_count,
15234 		hp2p_info->flowid));
15235 
15236 	flags = dhd_os_hp2plock(dhdp);
15237 
15238 	dhd_prot_txdata_write_flush(dhdp, hp2p_info->flowid);
15239 	hp2p_info->hrtimer_init = FALSE;
15240 	hp2p_info->num_timer_limit++;
15241 
15242 	dhd_os_hp2punlock(dhdp, flags);
15243 done:
15244 	return HRTIMER_NORESTART;
15245 }
15246 
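/** Flush the HP2P flowring once pkt_thresh descriptors are pending; otherwise arm the hrtimer to flush after time_thresh microseconds */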
15247 static void
15248 dhd_calc_hp2p_burst(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 flowid)
15249 {
15250 	hp2p_info_t *hp2p_info;
15251 	uint16 hp2p_flowid;
15252 
15253 	hp2p_flowid = dhd->bus->max_submission_rings -
15254 		dhd->bus->max_cmn_rings - flowid + 1;
15255 	hp2p_info = &dhd->hp2p_info[hp2p_flowid];
15256 
15257 	if (ring->pend_items_count == dhd->pkt_thresh) {
15258 		dhd_prot_txdata_write_flush(dhd, flowid);
15259 
15260 		hp2p_info->hrtimer_init = FALSE;
15261 		hp2p_info->ring = NULL;
15262 		hp2p_info->num_pkt_limit++;
15263 		hrtimer_cancel(&hp2p_info->timer);
15264 
15265 		DHD_INFO(("%s: cancel hrtimer for flowid = %d \n"
15266 			"hp2p_flowid = %d pkt_thresh = %d\n",
15267 			__FUNCTION__, flowid, hp2p_flowid, dhd->pkt_thresh));
15268 	} else {
15269 		if (hp2p_info->hrtimer_init == FALSE) {
15270 			hp2p_info->hrtimer_init = TRUE;
15271 			hp2p_info->flowid = flowid;
15272 			hp2p_info->dhd_pub = dhd;
15273 			hp2p_info->ring = ring;
15274 			hp2p_info->num_timer_start++;
15275 
15276 			hrtimer_start(&hp2p_info->timer,
15277 				ktime_set(0, dhd->time_thresh * 1000), HRTIMER_MODE_REL);
15278 
15279 			DHD_INFO(("%s: start hrtimer for flowid = %d hp2p_flowid = %d\n",
15280 					__FUNCTION__, flowid, hp2p_flowid));
15281 		}
15282 	}
15283 	return;
15284 }
15285 
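/*
 * Stamp an HP2P txpost before it is submitted: the (otherwise unused)
 * metadata buffer address fields are repurposed to carry the current host
 * time in microseconds, and exp_time is set to the configured packet expiry.
 */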
15286 static void
15287 dhd_update_hp2p_txdesc(dhd_pub_t *dhd, host_txbuf_post_t *txdesc)
15288 {
15289 	uint64 ts;
15290 
15291 	ts = local_clock();
15292 	do_div(ts, 1000);
15293 
15294 	txdesc->metadata_buf_len = 0;
15295 	txdesc->metadata_buf_addr.high_addr = htol32((ts >> 32) & 0xFFFFFFFF);
15296 	txdesc->metadata_buf_addr.low_addr = htol32(ts & 0xFFFFFFFF);
15297 	txdesc->exp_time = dhd->pkt_expiry;
15298 
15299 	DHD_INFO(("%s: metadata_high = 0x%x metadata_low = 0x%x exp_time = %x\n",
15300 		__FUNCTION__, txdesc->metadata_buf_addr.high_addr,
15301 		txdesc->metadata_buf_addr.low_addr,
15302 		txdesc->exp_time));
15303 
15304 	return;
15305 }
15306 #endif /* DHD_HP2P */
15307 
15308 #ifdef DHD_MAP_LOGGING
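/*
 * Called on an SMMU/IOMMU fault: print the protocol debug state, dump the
 * DMA mapping history (and packet-ID logs when enabled), then trigger a
 * firmware memory dump tagged as DUMP_TYPE_SMMU_FAULT.
 */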
15309 void
15310 dhd_prot_smmu_fault_dump(dhd_pub_t *dhdp)
15311 {
15312 	dhd_prot_debug_info_print(dhdp);
15313 	OSL_DMA_MAP_DUMP(dhdp->osh);
15314 #ifdef DHD_MAP_PKTID_LOGGING
15315 	dhd_pktid_logging_dump(dhdp);
15316 #endif /* DHD_MAP_PKTID_LOGGING */
15317 #ifdef DHD_FW_COREDUMP
15318 	dhdp->memdump_type = DUMP_TYPE_SMMU_FAULT;
15319 #ifdef DNGL_AXI_ERROR_LOGGING
15320 	dhdp->memdump_enabled = DUMP_MEMFILE;
15321 	dhd_bus_get_mem_dump(dhdp);
15322 #else
15323 	dhdp->memdump_enabled = DUMP_MEMONLY;
15324 	dhd_bus_mem_dump(dhdp);
15325 #endif /* DNGL_AXI_ERROR_LOGGING */
15326 #endif /* DHD_FW_COREDUMP */
15327 }
15328 #endif /* DHD_MAP_LOGGING */
15329 
15330 #ifdef DHD_FLOW_RING_STATUS_TRACE
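/*
 * Print one captured flow-ring status trace (the per-ring DMA read/write
 * indices sampled from ISR or DPC context) into strbuf, one row per sample,
 * with optional columns for the info and EDL rings when they exist.
 */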
15331 void
15332 dhd_dump_bus_flow_ring_status_trace(
15333 	dhd_bus_t *bus, struct bcmstrbuf *strbuf, dhd_frs_trace_t *frs_trace, int dumpsz, char *str)
15334 {
15335 	int i;
15336 	dhd_prot_t *prot = bus->dhd->prot;
15337 	uint32 isr_cnt = bus->frs_isr_count % FRS_TRACE_SIZE;
15338 	uint32 dpc_cnt = bus->frs_dpc_count % FRS_TRACE_SIZE;
15339 
15340 	bcm_bprintf(strbuf, "---- %s ------ isr_cnt: %d dpc_cnt %d\n",
15341 		str, isr_cnt, dpc_cnt);
15342 	bcm_bprintf(strbuf, "%s\t%s\t%s\t%s\t%s\t%s\t",
15343 		"Timestamp ns", "H2DCtrlPost", "D2HCtrlCpl",
15344 		"H2DRxPost", "D2HRxCpl", "D2HTxCpl");
15345 	if (prot->h2dring_info_subn != NULL && prot->d2hring_info_cpln != NULL) {
15346 		bcm_bprintf(strbuf, "%s\t%s\t", "H2DRingInfoPost", "D2HRingInfoCpl");
15347 	}
15348 	if (prot->d2hring_edl != NULL) {
15349 		bcm_bprintf(strbuf, "%s", "D2HRingEDL");
15350 	}
15351 	bcm_bprintf(strbuf, "\n");
15352 	for (i = 0; i < dumpsz; i++) {
15353 		bcm_bprintf(strbuf, "%llu\t%6u-%u\t%6u-%u\t%6u-%u\t%6u-%u\t%6u-%u\t",
15354 				frs_trace[i].timestamp,
15355 				frs_trace[i].h2d_ctrl_post_drd,
15356 				frs_trace[i].h2d_ctrl_post_dwr,
15357 				frs_trace[i].d2h_ctrl_cpln_drd,
15358 				frs_trace[i].d2h_ctrl_cpln_dwr,
15359 				frs_trace[i].h2d_rx_post_drd,
15360 				frs_trace[i].h2d_rx_post_dwr,
15361 				frs_trace[i].d2h_rx_cpln_drd,
15362 				frs_trace[i].d2h_rx_cpln_dwr,
15363 				frs_trace[i].d2h_tx_cpln_drd,
15364 				frs_trace[i].d2h_tx_cpln_dwr);
15365 		if (prot->h2dring_info_subn != NULL && prot->d2hring_info_cpln != NULL) {
15366 			bcm_bprintf(strbuf, "%6u-%u\t%6u-%u\t",
15367 				frs_trace[i].h2d_info_post_drd,
15368 				frs_trace[i].h2d_info_post_dwr,
15369 				frs_trace[i].d2h_info_cpln_drd,
15370 				frs_trace[i].d2h_info_cpln_dwr);
15371 		}
15372 		if (prot->d2hring_edl != NULL) {
15373 			bcm_bprintf(strbuf, "%6u-%u",
15374 				frs_trace[i].d2h_ring_edl_drd,
15375 				frs_trace[i].d2h_ring_edl_dwr);
15377 		}
15378 		bcm_bprintf(strbuf, "\n");
15379 	}
15380 	bcm_bprintf(strbuf, "--------------------------\n");
15381 }
15382 
15383 void
15384 dhd_dump_bus_flow_ring_status_isr_trace(dhd_bus_t *bus, struct bcmstrbuf *strbuf)
15385 {
15386 	int dumpsz;
15387 
15388 	dumpsz = bus->frs_isr_count < FRS_TRACE_SIZE ?
15389 		bus->frs_isr_count : FRS_TRACE_SIZE;
15390 	if (dumpsz == 0) {
15391 		bcm_bprintf(strbuf, "\nEMPTY ISR FLOW RING TRACE\n");
15392 		return;
15393 	}
15394 	dhd_dump_bus_flow_ring_status_trace(bus, strbuf, bus->frs_isr_trace,
15395 		dumpsz, "ISR FLOW RING TRACE DRD-DWR");
15396 }
15397 
15398 void
15399 dhd_dump_bus_flow_ring_status_dpc_trace(dhd_bus_t *bus, struct bcmstrbuf *strbuf)
15400 {
15401 	int dumpsz;
15402 
15403 	dumpsz = bus->frs_dpc_count < FRS_TRACE_SIZE ?
15404 		bus->frs_dpc_count : FRS_TRACE_SIZE;
15405 	if (dumpsz == 0) {
15406 		bcm_bprintf(strbuf, "\nEMPTY DPC FLOW RING TRACE\n");
15407 		return;
15408 	}
15409 	dhd_dump_bus_flow_ring_status_trace(bus, strbuf, bus->frs_dpc_trace,
15410 		dumpsz, "DPC FLOW RING TRACE DRD-DWR");
15411 }
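
/*
 * Fill one trace record with the current DMA read/write indices of the
 * common rings, plus the info and EDL rings when they are in use.
 */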
15412 static void
15413 dhd_bus_flow_ring_status_trace(dhd_pub_t *dhd, dhd_frs_trace_t *frs_trace)
15414 {
15415 	dhd_prot_t *prot = dhd->prot;
15416 	msgbuf_ring_t *ring;
15417 
15418 	ring = &prot->h2dring_ctrl_subn;
15419 	frs_trace->h2d_ctrl_post_drd =
15420 		dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
15421 	frs_trace->h2d_ctrl_post_dwr =
15422 		dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, ring->idx);
15423 
15424 	ring = &prot->d2hring_ctrl_cpln;
15425 	frs_trace->d2h_ctrl_cpln_drd =
15426 		dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
15427 	frs_trace->d2h_ctrl_cpln_dwr =
15428 		dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
15429 
15430 	ring = &prot->h2dring_rxp_subn;
15431 	frs_trace->h2d_rx_post_drd =
15432 		dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
15433 	frs_trace->h2d_rx_post_dwr =
15434 		dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, ring->idx);
15435 
15436 	ring = &prot->d2hring_rx_cpln;
15437 	frs_trace->d2h_rx_cpln_drd =
15438 		dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
15439 	frs_trace->d2h_rx_cpln_dwr =
15440 		dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
15441 
15442 	ring = &prot->d2hring_tx_cpln;
15443 	frs_trace->d2h_tx_cpln_drd =
15444 		dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
15445 	frs_trace->d2h_tx_cpln_dwr =
15446 		dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
15447 
15448 	if (dhd->prot->h2dring_info_subn != NULL && dhd->prot->d2hring_info_cpln != NULL) {
15449 		ring = prot->h2dring_info_subn;
15450 		frs_trace->h2d_info_post_drd =
15451 			dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
15452 		frs_trace->h2d_info_post_dwr =
15453 			dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_WR_UPD, ring->idx);
15454 
15455 		ring = prot->d2hring_info_cpln;
15456 		frs_trace->d2h_info_cpln_drd =
15457 			dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
15458 		frs_trace->d2h_info_cpln_dwr =
15459 			dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
15460 	}
15461 	if (prot->d2hring_edl != NULL) {
15462 		ring = prot->d2hring_edl;
15463 		frs_trace->d2h_ring_edl_drd =
15464 			dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_RD_UPD, ring->idx);
15465 		frs_trace->d2h_ring_edl_dwr =
15466 			dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
15467 	}
15469 }
15470 
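/*
 * Record a flow-ring index snapshot from ISR context into the circular
 * frs_isr_trace buffer, skipping samples taken less than 250us apart.
 */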
15471 void
15472 dhd_bus_flow_ring_status_isr_trace(dhd_pub_t *dhd)
15473 {
15474 	uint32 cnt = dhd->bus->frs_isr_count % FRS_TRACE_SIZE;
15475 	dhd_frs_trace_t *frs_isr_trace = &dhd->bus->frs_isr_trace[cnt];
15476 	uint64 time_ns_prev = frs_isr_trace->timestamp; /* already indexed by cnt */
15477 	uint64 time_ns_now = OSL_LOCALTIME_NS();
15478 
15479 	if ((time_ns_now - time_ns_prev) < 250000) { /* delta less than 250us */
15480 		return;
15481 	}
15482 
15483 	dhd_bus_flow_ring_status_trace(dhd, frs_isr_trace);
15484 
15485 	frs_isr_trace->timestamp = OSL_LOCALTIME_NS();
15486 	dhd->bus->frs_isr_count++;
15487 }
15488 
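/*
 * Record a flow-ring index snapshot from DPC context into the circular
 * frs_dpc_trace buffer, skipping samples taken less than 250us apart.
 */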
15489 void
15490 dhd_bus_flow_ring_status_dpc_trace(dhd_pub_t *dhd)
15491 {
15492 	uint32 cnt = dhd->bus->frs_dpc_count % FRS_TRACE_SIZE;
15493 	dhd_frs_trace_t *frs_dpc_trace = &dhd->bus->frs_dpc_trace[cnt];
15494 	uint64 time_ns_prev = frs_dpc_trace->timestamp; /* already indexed by cnt */
15495 	uint64 time_ns_now = OSL_LOCALTIME_NS();
15496 
15497 	if ((time_ns_now - time_ns_prev) < 250000) { /* delta less than 250us */
15498 		return;
15499 	}
15500 
15501 	dhd_bus_flow_ring_status_trace(dhd, frs_dpc_trace);
15502 
15503 	frs_dpc_trace->timestamp = OSL_LOCALTIME_NS();
15504 	dhd->bus->frs_dpc_count++;
15505 }
15506 #endif /* DHD_FLOW_RING_STATUS_TRACE */
15507