1 /**
2  * @file definition of host message ring functionality
3  * Provides type definitions and function prototypes used to link the
4  * DHD OS, bus, and protocol modules.
5  *
6  * Copyright (C) 1999-2019, Broadcom.
7  *
8  *      Unless you and Broadcom execute a separate written software license
9  * agreement governing use of this software, this software is licensed to you
10  * under the terms of the GNU General Public License version 2 (the "GPL"),
11  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
12  * following added to such license:
13  *
14  *      As a special exception, the copyright holders of this software give you
15  * permission to link this software with independent modules, and to copy and
16  * distribute the resulting executable under terms of your choice, provided that
17  * you also meet, for each linked independent module, the terms and conditions
18  * of the license of that module.  An independent module is a module which is
19  * not derived from this software.  The special exception does not apply to any
20  * modifications of the software.
21  *
22  *      Notwithstanding the above, under no circumstances may you combine this
23  * software in any way with any other Broadcom software provided under a license
24  * other than the GPL, without Broadcom's express prior written consent.
25  *
26  *
27  * <<Broadcom-WL-IPTag/Open:>>
28  *
29  * $Id: dhd_msgbuf.c 825801 2019-06-17 10:51:10Z $
30  */
31 
32 #include <typedefs.h>
33 #include <osl.h>
34 
35 #include <bcmutils.h>
36 #include <bcmmsgbuf.h>
37 #include <bcmendian.h>
38 #include <bcmstdlib_s.h>
39 
40 #include <dngl_stats.h>
41 #include <dhd.h>
42 #include <dhd_proto.h>
43 
44 #include <dhd_bus.h>
45 
46 #include <dhd_dbg.h>
47 #include <siutils.h>
48 #include <dhd_debug.h>
49 
50 #include <dhd_flowring.h>
51 
52 #include <pcie_core.h>
53 #include <bcmpcie.h>
54 #include <dhd_pcie.h>
55 #include <dhd_config.h>
56 
57 #if defined(DHD_LB)
58 #include <linux/cpu.h>
59 #include <bcm_ring.h>
60 #define DHD_LB_WORKQ_SZ (8192)
61 #define DHD_LB_WORKQ_SYNC (16)
62 #define DHD_LB_WORK_SCHED (DHD_LB_WORKQ_SYNC * 2)
63 #endif /* DHD_LB */
64 
65 #include <etd.h>
66 #include <hnd_debug.h>
67 #include <bcmtlv.h>
68 #include <hnd_armtrap.h>
69 #include <dnglevent.h>
70 
71 #ifdef DHD_EWPR_VER2
72 #include <dhd_bitpack.h>
73 #endif /* DHD_EWPR_VER2 */
74 
75 extern char dhd_version[];
76 extern char fw_version[];
77 
78 /* Dependency Check */
79 #if defined(IOCTLRESP_USE_CONSTMEM) && defined(DHD_USE_STATIC_CTRLBUF)
80 #error "DHD_USE_STATIC_CTRLBUF does NOT work with IOCTLRESP_USE_CONSTMEM"
81 #endif /* IOCTLRESP_USE_CONSTMEM && DHD_USE_STATIC_CTRLBUF */
82 
83 #define RETRIES 2 /* # of retries to retrieve matching ioctl response */
84 
85 #define DEFAULT_RX_BUFFERS_TO_POST 256
86 #define RXBUFPOST_THRESHOLD 32
87 #define RX_BUF_BURST 32 /* Rx buffers for MSDU Data */
88 
89 #define DHD_STOP_QUEUE_THRESHOLD 200
90 #define DHD_START_QUEUE_THRESHOLD 100
91 
92 #define RX_DMA_OFFSET 8 /* Mem2mem DMA inserts an extra 8 bytes */
93 #define IOCT_RETBUF_SIZE (RX_DMA_OFFSET + WLC_IOCTL_MAXLEN)
94 
95 /* flags for ioctl pending status */
96 #define MSGBUF_IOCTL_ACK_PENDING (1 << 0)
97 #define MSGBUF_IOCTL_RESP_PENDING (1 << 1)
98 
99 #define DHD_IOCTL_REQ_PKTBUFSZ 2048
100 #define MSGBUF_IOCTL_MAX_RQSTLEN                                               \
101     (DHD_IOCTL_REQ_PKTBUFSZ - H2DRING_CTRL_SUB_ITEMSIZE)
102 
103 #define DMA_ALIGN_LEN 4
104 
105 #define DMA_D2H_SCRATCH_BUF_LEN 8
106 #define DMA_XFER_LEN_LIMIT 0x400000
107 
108 #ifdef BCM_HOST_BUF
109 #ifndef DMA_HOST_BUFFER_LEN
110 #define DMA_HOST_BUFFER_LEN 0x200000
111 #endif // endif
112 #endif /* BCM_HOST_BUF */
113 
114 #define DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ 8192
115 
116 #define DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D 1
117 #define DHD_FLOWRING_MAX_EVENTBUF_POST 32
118 #define DHD_FLOWRING_MAX_IOCTLRESPBUF_POST 8
119 #define DHD_H2D_INFORING_MAX_BUF_POST 32
120 #define DHD_MAX_TSBUF_POST 8
121 
122 #define DHD_PROT_FUNCS 43
123 
124 /* Length of buffer in host for bus throughput measurement */
125 #define DHD_BUS_TPUT_BUF_LEN 2048
126 
127 #define TXP_FLUSH_NITEMS
128 
129 /* optimization to write "n" tx items at a time to ring */
130 #define TXP_FLUSH_MAX_ITEMS_FLUSH_CNT 48
131 
132 #define RING_NAME_MAX_LENGTH 24
133 #define CTRLSUB_HOSTTS_MEESAGE_SIZE 1024
134 /* Giving room before ioctl_trans_id rolls over. */
135 #define BUFFER_BEFORE_ROLLOVER 300
136 
137 /* 512K memory + 32K registers */
138 #define SNAPSHOT_UPLOAD_BUF_SIZE ((512 + 32) * 1024)
139 
140 struct msgbuf_ring; /* ring context for common and flow rings */
141 
142 /**
143  * PCIE D2H DMA Complete Sync Modes
144  *
145  * Firmware may interrupt the host before the D2H Mem2Mem DMA into host
146  * system memory has completed. A WAR using one of 4 approaches is needed:
147  * 1. Dongle places a modulo-253 seqnum in the last word of each D2H message.
148  * 2. XOR Checksum, with an epoch# in each work item: the dongle builds an XOR
149  *    checksum and writes it in the last word of each work item. Each work item
150  *    carries a seqnum = sequence number % 253.
151  *
152  * 3. Read Barrier: Dongle does a host memory read access prior to posting an
153  *    interrupt, ensuring that D2H data transfer indeed completed.
154  * 4. Dongle DMA's all indices after producing items in the D2H ring, flushing
155  *    ring contents before the indices.
156  *
157  * Host does not sync for DMA to complete with option #3 or #4, and a noop sync
158  * callback (see dhd_prot_d2h_sync_none) may be bound.
159  *
160  * Dongle advertises the host side sync mechanism requirements.
161  */
162 
163 #define PCIE_D2H_SYNC_WAIT_TRIES (512U)
164 #define PCIE_D2H_SYNC_NUM_OF_STEPS (5U)
165 #define PCIE_D2H_SYNC_DELAY (100UL) /* in terms of usecs */
166 
167 #define HWA_DB_TYPE_RXPOST (0x0050)
168 #define HWA_DB_TYPE_TXCPLT (0x0060)
169 #define HWA_DB_TYPE_RXCPLT (0x0170)
170 #define HWA_DB_INDEX_VALUE(val) ((uint32)(val) << 16)
171 
172 #define HWA_ENAB_BITMAP_RXPOST (1U << 0) /* 1A */
173 #define HWA_ENAB_BITMAP_RXCPLT (1U << 1) /* 2B */
174 #define HWA_ENAB_BITMAP_TXCPLT (1U << 2) /* 4B */
175 
176 /**
177  * Custom callback attached based upon D2H DMA Sync mode advertised by dongle.
178  *
179  * On success: return cmn_msg_hdr_t::msg_type
180  * On failure: return 0 (invalid msg_type)
181  */
182 typedef uint8 (*d2h_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
183                                volatile cmn_msg_hdr_t *msg, int msglen);
184 
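/* Binding sketch (illustration only): once the dongle advertises its D2H sync
 * mode, dhd_prot_d2h_sync_init() (later in this file) binds a callback of this
 * type, e.g. the noop handler when no host-side sync is required:
 *
 *     dhd->prot->d2h_sync_cb = dhd_prot_d2h_sync_none;
 */
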
185 /**
186  * Custom callback attached based upon D2H DMA Sync mode advertised by dongle.
187  * For EDL messages.
188  *
189  * On success: return cmn_msg_hdr_t::msg_type
190  * On failure: return 0 (invalid msg_type)
191  */
192 #ifdef EWP_EDL
193 typedef int (*d2h_edl_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
194                                  volatile cmn_msg_hdr_t *msg);
195 #endif /* EWP_EDL */
196 
197 /*
198  * +----------------------------------------------------------------------------
199  *
200  * RingIds and FlowId are not equivalent as ringids include D2H rings whereas
201  * flowids do not.
202  *
203  * Dongle advertises the max H2D rings as max_sub_queues = 'N', which includes
204  * the H2D common rings as well as the (N-BCMPCIE_H2D_COMMON_MSGRINGS) flowrings
205  *
206  * Here is a sample mapping (based on PCIE Full Dongle Rev5), where
207  *  BCMPCIE_H2D_COMMON_MSGRINGS = 2, i.e. 2 H2D common rings,
208  *  BCMPCIE_COMMON_MSGRINGS     = 5, i.e. include 3 D2H common rings.
209  *
210  *  H2D Control  Submit   RingId = 0        FlowId = 0 reserved never allocated
211  *  H2D RxPost   Submit   RingId = 1        FlowId = 1 reserved never allocated
212  *
213  *  D2H Control  Complete RingId = 2
214  *  D2H Transmit Complete RingId = 3
215  *  D2H Receive  Complete RingId = 4
216  *
217  *  H2D TxPost   FLOWRING RingId = 5         FlowId = 2     (1st flowring)
218  *  H2D TxPost   FLOWRING RingId = 6         FlowId = 3     (2nd flowring)
219  *  H2D TxPost   FLOWRING RingId = 5 + (N-1) FlowId = (N-1) (Nth flowring)
220  *
221  * When TxPost FlowId(s) are allocated, the FlowIds [0..FLOWID_RESERVED) are
222  * unused, where FLOWID_RESERVED is BCMPCIE_H2D_COMMON_MSGRINGS.
223  *
224  * Example: when a system supports 4 bc/mc and 128 uc flowrings, with
225  * BCMPCIE_H2D_COMMON_MSGRINGS = 2 and BCMPCIE_COMMON_MSGRINGS = 5, the
226  * FlowId values would be in the range [2..133] and the corresponding
227  * RingId values would be in the range [5..136].
228  *
229  * The flowid allocator may choose to allocate flowids:
230  *   bc/mc (per virtual interface) in one consecutive range [2..(2+VIFS))
231  *   X# of uc flowids in consecutive ranges (per station Id), where X is the
232  *   packet's access category (e.g. 4 uc flowids per station).
233  *
234  * CAUTION:
235  * When the DMA indices array feature is used, RingId=5, corresponding to the
236  * 0th FLOWRING, will actually use the FlowId as the index into the H2D DMA
237  * indices array, since the FlowId truly represents that index.
238  *
239  * Likewise, in the D2H direction, the RingId - BCMPCIE_H2D_COMMON_MSGRINGS,
240  * will represent the index in the D2H DMA indices array.
241  *
242  * +----------------------------------------------------------------------------
243  */
244 
245 /* First TxPost Flowring Id */
246 #define DHD_FLOWRING_START_FLOWID BCMPCIE_H2D_COMMON_MSGRINGS
247 
248 /* Determine whether a ringid belongs to a TxPost flowring */
249 #define DHD_IS_FLOWRING(ringid, max_flow_rings)                                \
250     ((ringid) >= BCMPCIE_COMMON_MSGRINGS &&                                    \
251      (ringid) < ((max_flow_rings) + BCMPCIE_COMMON_MSGRINGS))
252 
253 /* Convert a H2D TxPost FlowId to a MsgBuf RingId */
254 #define DHD_FLOWID_TO_RINGID(flowid)                                           \
255     (BCMPCIE_COMMON_MSGRINGS + ((flowid)-BCMPCIE_H2D_COMMON_MSGRINGS))
256 
257 /* Convert a MsgBuf RingId to a H2D TxPost FlowId */
258 #define DHD_RINGID_TO_FLOWID(ringid)                                           \
259     (BCMPCIE_H2D_COMMON_MSGRINGS + ((ringid)-BCMPCIE_COMMON_MSGRINGS))
260 
261 /* Convert a H2D MsgBuf RingId to an offset index into the H2D DMA indices array
262  * This may be used for the H2D DMA WR index array or H2D DMA RD index array or
263  * any array of H2D rings.
264  */
265 #define DHD_H2D_RING_OFFSET(ringid)                                            \
266     (((ringid) >= BCMPCIE_COMMON_MSGRINGS) ? DHD_RINGID_TO_FLOWID(ringid)      \
267                                            : (ringid))
268 
269 /* Convert a H2D MsgBuf Flowring Id to an offset index into the H2D DMA indices
270  * array. This may be used for IFRM.
271  */
272 #define DHD_H2D_FRM_FLOW_RING_OFFSET(ringid) ((ringid)-BCMPCIE_COMMON_MSGRINGS)
273 
274 /* Convert a D2H MsgBuf RingId to an offset index into the D2H DMA indices array
275  * This may be used for the D2H DMA WR index array or D2H DMA RD index array or
276  * any array of D2H rings.
277  * The d2h debug ring is located at the end, i.e. after all the tx flow rings
278  * and the h2d debug ring. max_h2d_rings: total number of h2d rings.
279  */
280 #define DHD_D2H_RING_OFFSET(ringid, max_h2d_rings)                             \
281     ((ringid) > (max_h2d_rings) ? ((ringid)-max_h2d_rings)                     \
282                                 : ((ringid)-BCMPCIE_H2D_COMMON_MSGRINGS))
283 
284 /* Convert a D2H DMA Indices Offset to a RingId */
285 #define DHD_D2H_RINGID(offset) ((offset) + BCMPCIE_H2D_COMMON_MSGRINGS)
286 
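/* Worked example (illustration only, never compiled): with the sample Rev5
 * layout above (BCMPCIE_H2D_COMMON_MSGRINGS = 2, BCMPCIE_COMMON_MSGRINGS = 5),
 * the conversion macros evaluate as asserted below. The helper function name
 * is hypothetical.
 */
#if 0
static void dhd_msgbuf_ringid_example(void)
{
    ASSERT(DHD_FLOWID_TO_RINGID(2) == 5); /* 1st flowring: FlowId 2 -> RingId 5 */
    ASSERT(DHD_RINGID_TO_FLOWID(5) == 2); /* ... and back */
    ASSERT(DHD_H2D_RING_OFFSET(0) == 0);  /* H2D ctrl submit maps to itself */
    ASSERT(DHD_H2D_RING_OFFSET(5) == 2);  /* 1st flowring uses its FlowId */
    ASSERT(DHD_D2H_RINGID(0) == 2);       /* 0th D2H DMA index -> ctrl complete */
}
#endif
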
287 #define DHD_DMAH_NULL ((void *)NULL)
288 
289 /*
290  * Pad a DMA-able buffer by an additional cacheline. If the end of the DMA-able
291  * buffer does not occupy the entire cacheline, and another object is placed
292  * following the DMA-able buffer, data corruption may occur if the DMA-able
293  * buffer is used for DMAing into (e.g. the D2H direction) when HW cache
294  * coherency is not available.
295  */
296 #if defined(L1_CACHE_BYTES)
297 #define DHD_DMA_PAD (L1_CACHE_BYTES)
298 #else
299 #define DHD_DMA_PAD (128)
300 #endif // endif
301 
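/* Sizing sketch (illustration only, never compiled): DHD_DMA_PAD is added to a
 * DMA-able allocation so the buffer ends on its own cacheline. The helper and
 * its use of ROUNDUP are assumptions for illustration, not the driver's actual
 * allocation path.
 */
#if 0
static uint32 dhd_example_padded_ring_len(uint16 max_items, uint16 item_len)
{
    /* total ring bytes, aligned, then padded out by one cacheline */
    return ROUNDUP((uint32)max_items * item_len, DMA_ALIGN_LEN) + DHD_DMA_PAD;
}
#endif
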
302 /*
303  * +----------------------------------------------------------------------------
304  * Flowring Pool
305  *
306  * Unlike common rings, which are attached very early on (dhd_prot_attach),
307  * flowrings are dynamically instantiated. Moreover, flowrings may require a
308  * larger DMA-able buffer. To avoid issues with fragmented cache coherent
309  * DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once.
310  * The DMA-able buffers are attached to these pre-allocated msgbuf_ring.
311  *
312  * Each DMA-able buffer may be allocated independently, or may be carved out
313  * of a single large contiguous region that is registered with the protocol
314  * layer into flowrings_dma_buf. On a 64bit platform, this contiguous region
315  * must not cross 0x00000000FFFFFFFF (to avoid dongle-side 64bit ptr arithmetic).
316  *
317  * No flowring pool action is performed in dhd_prot_attach(), as the number
318  * of h2d rings is not yet known.
319  *
320  * In dhd_prot_init(), the dongle advertised number of h2d rings is used to
321  * determine the number of flowrings required, and a pool of msgbuf_rings are
322  * allocated and a DMA-able buffer (carved or allocated) is attached.
323  * See: dhd_prot_flowrings_pool_attach()
324  *
325  * A flowring msgbuf_ring object may be fetched from this pool during flowring
326  * creation, using the flowid. Likewise, flowrings may be freed back into the
327  * pool on flowring deletion.
328  * See: dhd_prot_flowrings_pool_fetch(), dhd_prot_flowrings_pool_release()
329  *
330  * In dhd_prot_detach(), the flowring pool is detached. The DMA-able buffers
331  * are detached (returned back to the carved region or freed), and the pool of
332  * msgbuf_ring and any objects allocated against it are freed.
333  * See: dhd_prot_flowrings_pool_detach()
334  *
335  * In dhd_prot_reset(), the flowring pool is simply reset to the state it had
336  * upon attach. All DMA-able buffers are retained.
337  * Following a dhd_prot_reset(), in a subsequent dhd_prot_init(), the flowring
338  * pool attach will notice that the pool persists and continue to use it. This
339  * will avoid the case of a fragmented DMA-able region.
340  *
341  * +----------------------------------------------------------------------------
342  */
343 
344 /* Conversion of a flowid to a flowring pool index */
345 #define DHD_FLOWRINGS_POOL_OFFSET(flowid) ((flowid)-BCMPCIE_H2D_COMMON_MSGRINGS)
346 
347 /* Fetch the msgbuf_ring_t from the flowring pool given a flowid */
348 #define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid)                               \
349     (msgbuf_ring_t *)((prot)->h2d_flowrings_pool) +                            \
350         DHD_FLOWRINGS_POOL_OFFSET(flowid)
351 
352 /* Traverse each flowring in the flowring pool, assigning ring and flowid */
353 #define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, total_flowrings)    \
354     for ((flowid) = DHD_FLOWRING_START_FLOWID,                                 \
355         (ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);                     \
356          (flowid) < ((total_flowrings) + DHD_FLOWRING_START_FLOWID);           \
357          (ring)++, (flowid)++)
358 
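/* Usage sketch (illustration only, never compiled): traversing the flowring
 * pool with the macro above. The per-ring action in the loop body is a
 * hypothetical stand-in for whatever a real caller performs.
 */
#if 0
static void dhd_example_visit_flowrings(dhd_prot_t *prot, uint16 total_flowrings)
{
    msgbuf_ring_t *ring;
    uint16 flowid;

    FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, total_flowrings) {
        ring->inited = FALSE; /* per-ring action goes here */
    }
}
#endif
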
359 /* Used in loopback tests */
360 typedef struct dhd_dmaxfer {
361     dhd_dma_buf_t srcmem;
362     dhd_dma_buf_t dstmem;
363     uint32 srcdelay;
364     uint32 destdelay;
365     uint32 len;
366     bool in_progress;
367     uint64 start_usec;
368     uint64 time_taken;
369     uint32 d11_lpbk;
370     int status;
371 } dhd_dmaxfer_t;
372 
373 /**
374  * msgbuf_ring : This object manages the host side ring that includes a DMA-able
375  * buffer, the WR and RD indices, ring parameters such as the max number of
376  * items and the length of each item, and other miscellaneous runtime state.
377  * A msgbuf_ring may be used to represent a H2D or D2H common ring or a
378  * H2D TxPost ring as specified in the PCIE FullDongle Spec.
379  * Ring parameters are conveyed to the dongle, which maintains its own peer end
380  * ring state. Depending on whether the DMA Indices feature is supported, the
381  * host will update the WR/RD index in the DMA indices array in host memory or
382  * directly in dongle memory.
383  */
384 typedef struct msgbuf_ring {
385     bool inited;
386     uint16 idx;            /* ring id */
387     uint16 rd;             /* read index */
388     uint16 curr_rd;        /* read index for debug */
389     uint16 wr;             /* write index */
390     uint16 max_items;      /* maximum number of items in ring */
391     uint16 item_len;       /* length of each item in the ring */
392     sh_addr_t base_addr;   /* LITTLE ENDIAN formatted: base address */
393     dhd_dma_buf_t dma_buf; /* DMA-able buffer: pa, va, len, dmah, secdma */
394     uint32 seqnum;         /* next expected item's sequence number */
395 #ifdef TXP_FLUSH_NITEMS
396     void *start_addr;
397     /* # of messages on ring not yet announced to dongle */
398     uint16 pend_items_count;
399 #endif /* TXP_FLUSH_NITEMS */
400 
401     uint8 ring_type;
402     uint16 hwa_db_type; /* hwa type non-zero for Data path rings */
403     uint8 n_completion_ids;
404     bool create_pending;
405     uint16 create_req_id;
406     uint8 current_phase;
407     uint16 compeltion_ring_ids[MAX_COMPLETION_RING_IDS_ASSOCIATED];
408     uchar name[RING_NAME_MAX_LENGTH];
409     uint32 ring_mem_allocated;
410     void *ring_lock;
411 } msgbuf_ring_t;
412 
413 #define DHD_RING_BGN_VA(ring) ((ring)->dma_buf.va)
414 #define DHD_RING_END_VA(ring)                                                  \
415     ((uint8 *)(DHD_RING_BGN_VA((ring))) +                                      \
416      (((ring)->max_items - 1) * (ring)->item_len))
417 
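/* Access sketch (illustration only, never compiled): host VA of the work item
 * at the current RD index, derived from the ring geometry fields above. It
 * mirrors what the ring accessors later in this file compute; the helper
 * itself is hypothetical.
 */
#if 0
static void *dhd_example_ring_item_va(msgbuf_ring_t *ring)
{
    return (uint8 *)DHD_RING_BGN_VA(ring) + ((uint32)ring->rd * ring->item_len);
}
#endif
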
418 /* This can be overwritten by module parameter defined in dhd_linux.c
419  * or by dhd iovar h2d_max_txpost.
420  */
421 int h2d_max_txpost = H2DRING_TXPOST_MAX_ITEM;
422 
423 /** DHD protocol handle. Is an opaque type to other DHD software layers. */
424 typedef struct dhd_prot {
425     osl_t *osh; /* OSL handle */
426     uint16 rxbufpost_sz;
427     uint16 rxbufpost;
428     uint16 max_rxbufpost;
429     uint16 max_eventbufpost;
430     uint16 max_ioctlrespbufpost;
431     uint16 max_tsbufpost;
432     uint16 max_infobufpost;
433     uint16 infobufpost;
434     uint16 cur_event_bufs_posted;
435     uint16 cur_ioctlresp_bufs_posted;
436     uint16 cur_ts_bufs_posted;
437 
438     /* Flow control mechanism based on active transmits pending */
439     osl_atomic_t active_tx_count; /* increments/decrements on every packet
440                                      tx/tx_status */
441     uint16 h2d_max_txpost;
442     uint16 txp_threshold; /* optimization to write "n" tx items at a time to
443                              ring */
444 
445     /* MsgBuf Ring info: has a dhd_dma_buf that is dynamically allocated */
446     msgbuf_ring_t h2dring_ctrl_subn;  /* H2D ctrl message submission ring */
447     msgbuf_ring_t h2dring_rxp_subn;   /* H2D RxBuf post ring */
448     msgbuf_ring_t d2hring_ctrl_cpln;  /* D2H ctrl completion ring */
449     msgbuf_ring_t d2hring_tx_cpln;    /* D2H Tx complete message ring */
450     msgbuf_ring_t d2hring_rx_cpln;    /* D2H Rx complete message ring */
451     msgbuf_ring_t *h2dring_info_subn; /* H2D info submission ring */
452     msgbuf_ring_t *d2hring_info_cpln; /* D2H info completion ring */
453     msgbuf_ring_t *d2hring_edl;       /* D2H Enhanced Debug Lane (EDL) ring */
454 
455     msgbuf_ring_t *h2d_flowrings_pool; /* Pool of preallocated flowrings */
456     dhd_dma_buf_t flowrings_dma_buf;   /* Contiguous DMA buffer for flowrings */
457     uint16 h2d_rings_total; /* total H2D (common rings + flowrings) */
458 
459     uint32 rx_dataoffset;
460 
461     dhd_mb_ring_t
462         mb_ring_fn; /* called when dongle needs to be notified of new msg */
463     dhd_mb_ring_2_t
464         mb_2_ring_fn; /* called when dongle needs to be notified of new msg */
465 
466     /* ioctl related resources */
467     uint8 ioctl_state;
468     int16 ioctl_status; /* status returned from dongle */
469     uint16 ioctl_resplen;
470     dhd_ioctl_recieved_status_t ioctl_received;
471     uint curr_ioctl_cmd;
472     dhd_dma_buf_t retbuf;  /* For holding ioctl response */
473     dhd_dma_buf_t ioctbuf; /* For holding ioctl request */
474 
475     dhd_dma_buf_t d2h_dma_scratch_buf; /* For holding d2h scratch */
476 
477     /* DMA-able arrays for holding WR and RD indices */
478     uint32 rw_index_sz;                 /* Size of a RD or WR index in dongle */
479     dhd_dma_buf_t h2d_dma_indx_wr_buf;  /* Array of H2D WR indices */
480     dhd_dma_buf_t h2d_dma_indx_rd_buf;  /* Array of H2D RD indices */
481     dhd_dma_buf_t d2h_dma_indx_wr_buf;  /* Array of D2H WR indices */
482     dhd_dma_buf_t d2h_dma_indx_rd_buf;  /* Array of D2H RD indices */
483     dhd_dma_buf_t h2d_ifrm_indx_wr_buf; /* Array of H2D WR indices for ifrm */
484 
485     dhd_dma_buf_t host_bus_throughput_buf; /* bus throughput measure buffer */
486 
487     dhd_dma_buf_t *flowring_buf; /* pool of flow ring buf */
488     uint32 flowring_num;
489 
490     d2h_sync_cb_t d2h_sync_cb; /* Sync on D2H DMA done: SEQNUM or XORCSUM */
491 #ifdef EWP_EDL
492     d2h_edl_sync_cb_t
493         d2h_edl_sync_cb;     /* Sync on EDL D2H DMA done: SEQNUM or XORCSUM */
494 #endif                       /* EWP_EDL */
495     ulong d2h_sync_wait_max; /* max number of wait loops to receive one msg */
496     ulong d2h_sync_wait_tot; /* total wait loops */
497 
498     dhd_dmaxfer_t dmaxfer; /* for test/DMA loopback */
499 
500     uint16 ioctl_seq_no;
501     uint16 data_seq_no;
502     uint16 ioctl_trans_id;
503     void *pktid_ctrl_map; /* a pktid maps to a packet and its metadata */
504     void *pktid_rx_map;   /* pktid map for rx path */
505     void *pktid_tx_map;   /* pktid map for tx path */
506     bool metadata_dbg;
507     void *pktid_map_handle_ioctl;
508 #ifdef DHD_MAP_PKTID_LOGGING
509     void *pktid_dma_map;       /* pktid map for DMA MAP */
510     void *pktid_dma_unmap;     /* pktid map for DMA UNMAP */
511 #endif                         /* DHD_MAP_PKTID_LOGGING */
512     uint32 pktid_depleted_cnt; /* pktid depleted count */
513     /* netif tx queue stop count */
514     uint8 pktid_txq_stop_cnt;
515     /* netif tx queue start count */
516     uint8 pktid_txq_start_cnt;
517     uint64 ioctl_fillup_time; /* timestamp for ioctl fillup */
518     uint64 ioctl_ack_time;    /* timestamp for ioctl ack */
519     uint64 ioctl_cmplt_time;  /* timestamp for ioctl completion */
520 
521     /* Applications/utilities can read tx and rx metadata using IOVARs */
522     uint16 rx_metadata_offset;
523     uint16 tx_metadata_offset;
524 
525 #if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
526     /* Host's soft doorbell configuration */
527     bcmpcie_soft_doorbell_t soft_doorbell[BCMPCIE_D2H_COMMON_MSGRINGS];
528 #endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
529 
530     /* Work Queues to be used by the producer and the consumer, and threshold
531      * when the WRITE index must be synced to consumer's workq
532      */
533 #if defined(DHD_LB_TXC)
534     uint32 tx_compl_prod_sync ____cacheline_aligned;
535     bcm_workq_t tx_compl_prod, tx_compl_cons;
536 #endif /* DHD_LB_TXC */
537 #if defined(DHD_LB_RXC)
538     uint32 rx_compl_prod_sync ____cacheline_aligned;
539     bcm_workq_t rx_compl_prod, rx_compl_cons;
540 #endif /* DHD_LB_RXC */
541 
542     dhd_dma_buf_t fw_trap_buf; /* firmware trap buffer */
543 
544     uint32 host_ipc_version;      /* Host supported IPC rev */
545     uint32 device_ipc_version;    /* FW supported IPC rev */
546     uint32 active_ipc_version;    /* Host advertised IPC rev */
547     dhd_dma_buf_t hostts_req_buf; /* For holding host timestamp request buf */
548     bool hostts_req_buf_inuse;
549     bool rx_ts_log_enabled;
550     bool tx_ts_log_enabled;
551     bool no_retry;
552     bool no_aggr;
553     bool fixed_rate;
554     dhd_dma_buf_t host_scb_buf; /* scb host offload buffer */
555 #ifdef DHD_HP2P
556     msgbuf_ring_t *d2hring_hp2p_txcpl; /* D2H HPP Tx completion ring */
557     msgbuf_ring_t *d2hring_hp2p_rxcpl; /* D2H HPP Rx completion ring */
558 #endif                                 /* DHD_HP2P */
559     bool no_tx_resource;
560 } dhd_prot_t;
561 
562 #ifdef DHD_EWPR_VER2
563 #define HANG_INFO_BASE64_BUFFER_SIZE 640
564 #endif // endif
565 
566 #ifdef DHD_DUMP_PCIE_RINGS
567 static int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file,
568                           const void *user_buf, unsigned long *file_posn);
569 #ifdef EWP_EDL
570 static int dhd_edl_ring_hdr_write(dhd_pub_t *dhd, msgbuf_ring_t *ring,
571                                   void *file, const void *user_buf,
572                                   unsigned long *file_posn);
573 #endif /* EWP_EDL */
574 #endif /* DHD_DUMP_PCIE_RINGS */
575 
576 extern bool dhd_timesync_delay_post_bufs(dhd_pub_t *dhdp);
577 extern void dhd_schedule_dmaxfer_free(dhd_pub_t *dhdp,
578                                       dmaxref_mem_map_t *dmmap);
579 /* Convert a dmaaddr_t to a base_addr with htol operations */
580 static INLINE void dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa);
581 
582 /* APIs for managing a DMA-able buffer */
583 static int dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
584 static void dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
585 
586 /* msgbuf ring management */
587 static int dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring,
588                                 const char *name, uint16 max_items,
589                                 uint16 len_item, uint16 ringid);
590 static void dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring);
591 static void dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring);
592 static void dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring);
593 static void dhd_prot_process_fw_timestamp(dhd_pub_t *dhd, void *buf);
594 
595 /* Pool of pre-allocated msgbuf_ring_t with DMA-able buffers for Flowrings */
596 static int dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd);
597 static void dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd);
598 static void dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd);
599 
600 /* Fetch and Release a flowring msgbuf_ring from flowring  pool */
601 static msgbuf_ring_t *dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd,
602                                                     uint16 flowid);
603 /* see also dhd_prot_flowrings_pool_release() in dhd_prot.h */
604 
605 /* Producer: Allocate space in a msgbuf ring */
606 static void *dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring,
607                                        uint16 nitems, uint16 *alloced,
608                                        bool exactly_nitems);
609 static void *dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems,
610                                      uint16 *alloced, bool exactly_nitems);
611 
612 /* Consumer: Determine the location where the next message may be consumed */
613 static uint8 *dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring,
614                                      uint32 *available_len);
615 
616 /* Producer (WR index update) or Consumer (RD index update) indication */
617 static void dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t *ring,
618                                          void *p, uint16 len);
619 static void dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t *ring);
620 
621 static INLINE int dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
622                                           dhd_dma_buf_t *dma_buf, uint32 bufsz);
623 
624 /* Set/Get a RD or WR index in the array of indices */
625 /* See also: dhd_prot_dma_indx_init() */
626 void dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type,
627                            uint16 ringid);
628 static uint16 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid);
629 
630 /* Locate a packet given a pktid */
631 static INLINE void *dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid,
632                                         uint8 pkttype, bool free_pktid);
633 /* Locate a packet given a PktId and free it. */
634 static INLINE void dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt,
635                                         uint8 pkttype, bool send);
636 
637 static int dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
638                                   void *buf, uint len, uint8 action);
639 static int dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf,
640                                 uint len, uint8 action);
641 static int dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf);
642 static int dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd,
643                                  void *buf, int ifidx);
644 
645 /* Post buffers for Rx, control ioctl response and events */
646 static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msgid,
647                                              uint32 max_to_post);
648 static void dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *pub);
649 static void dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *pub);
650 static void dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid);
651 static int dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count,
652                                bool use_rsv_pktid);
653 static int dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *pub);
654 
655 static void dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt);
656 
657 /* D2H Message handling */
658 static int dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring,
659                                     uint8 *buf, uint32 len);
660 
661 /* D2H Message handlers */
662 static void dhd_prot_noop(dhd_pub_t *dhd, void *msg);
663 static void dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg);
664 static void dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg);
665 static void dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg);
666 static void dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg);
667 static void dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg);
668 static void dhd_prot_event_process(dhd_pub_t *dhd, void *msg);
669 
670 /* Loopback test with dongle */
671 static void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dma);
672 static int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, uint srcdelay,
673                                    uint destdelay, dhd_dmaxfer_t *dma);
674 static void dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg);
675 
676 /* Flowring management communication with dongle */
677 static void dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd,
678                                                        void *msg);
679 static void dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd,
680                                                        void *msg);
681 static void dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd,
682                                                       void *msg);
683 static void dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd,
684                                                        void *msg);
685 static void dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd,
686                                                         void *msg);
687 
688 /* Monitor Mode */
689 #ifdef WL_MONITOR
690 extern bool dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx);
691 extern void dhd_rx_mon_pkt(dhd_pub_t *dhdp, host_rxbuf_cmpl_t *msg, void *pkt,
692                            int ifidx);
693 #endif /* WL_MONITOR */
694 
695 /* Configure a soft doorbell per D2H ring */
696 static void dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd);
697 static void dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd,
698                                                       void *msg);
699 static void dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd,
700                                                       void *buf);
701 static void dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd,
702                                                       void *buf);
703 static void dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void *buf);
704 static void dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void *buf);
705 static void dhd_prot_detach_info_rings(dhd_pub_t *dhd);
706 #ifdef DHD_HP2P
707 static void dhd_prot_detach_hp2p_rings(dhd_pub_t *dhd);
708 #endif /* DHD_HP2P */
709 #ifdef EWP_EDL
710 static void dhd_prot_detach_edl_rings(dhd_pub_t *dhd);
711 #endif // endif
712 static void dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void *buf);
713 static void dhd_prot_process_snapshot_complete(dhd_pub_t *dhd, void *buf);
714 
715 #ifdef DHD_HP2P
716 static void dhd_update_hp2p_rxstats(dhd_pub_t *dhd,
717                                     host_rxbuf_cmpl_t *rxstatus);
718 static void dhd_update_hp2p_txstats(dhd_pub_t *dhd,
719                                     host_txbuf_cmpl_t *txstatus);
720 static void dhd_calc_hp2p_burst(dhd_pub_t *dhd, msgbuf_ring_t *ring,
721                                 uint16 flowid);
722 static void dhd_update_hp2p_txdesc(dhd_pub_t *dhd, host_txbuf_post_t *txdesc);
723 #endif // endif
724 typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void *msg);
725 
726 /** callback functions for messages generated by the dongle */
727 #define MSG_TYPE_INVALID 0
728 
729 static dhd_msgbuf_func_t table_lookup[DHD_PROT_FUNCS] = {
730     dhd_prot_noop,               /* 0 is MSG_TYPE_INVALID */
731     dhd_prot_genstatus_process,  /* MSG_TYPE_GEN_STATUS */
732     dhd_prot_ringstatus_process, /* MSG_TYPE_RING_STATUS */
733     NULL,
734     dhd_prot_flow_ring_create_response_process, /* MSG_TYPE_FLOW_RING_CREATE_CMPLT
735                                                  */
736     NULL,
737     dhd_prot_flow_ring_delete_response_process, /* MSG_TYPE_FLOW_RING_DELETE_CMPLT
738                                                  */
739     NULL,
740     dhd_prot_flow_ring_flush_response_process, /* MSG_TYPE_FLOW_RING_FLUSH_CMPLT
741                                                 */
742     NULL,
743     dhd_prot_ioctack_process, /* MSG_TYPE_IOCTLPTR_REQ_ACK */
744     NULL,
745     dhd_prot_ioctcmplt_process, /* MSG_TYPE_IOCTL_CMPLT */
746     NULL,
747     dhd_prot_event_process, /* MSG_TYPE_WL_EVENT */
748     NULL,
749     dhd_prot_txstatus_process, /* MSG_TYPE_TX_STATUS */
750     NULL,
751     NULL, /* MSG_TYPE_RX_CMPLT use dedicated handler */
752     NULL,
753     dhd_msgbuf_dmaxfer_process, /* MSG_TYPE_LPBK_DMAXFER_CMPLT */
754     NULL,                       /* MSG_TYPE_FLOW_RING_RESUME */
755     dhd_prot_process_flow_ring_resume_response, /* MSG_TYPE_FLOW_RING_RESUME_CMPLT
756                                                  */
757     NULL,                                       /* MSG_TYPE_FLOW_RING_SUSPEND */
758     dhd_prot_process_flow_ring_suspend_response, /* MSG_TYPE_FLOW_RING_SUSPEND_CMPLT
759                                                   */
760     NULL,                                        /* MSG_TYPE_INFO_BUF_POST */
761     dhd_prot_process_infobuf_complete,           /* MSG_TYPE_INFO_BUF_CMPLT */
762     NULL,                                        /* MSG_TYPE_H2D_RING_CREATE */
763     NULL,                                        /* MSG_TYPE_D2H_RING_CREATE */
764     dhd_prot_process_h2d_ring_create_complete, /* MSG_TYPE_H2D_RING_CREATE_CMPLT
765                                                 */
766     dhd_prot_process_d2h_ring_create_complete, /* MSG_TYPE_D2H_RING_CREATE_CMPLT
767                                                 */
768     NULL,                                      /* MSG_TYPE_H2D_RING_CONFIG */
769     NULL,                                      /* MSG_TYPE_D2H_RING_CONFIG */
770     NULL, /* MSG_TYPE_H2D_RING_CONFIG_CMPLT */
771     dhd_prot_process_d2h_ring_config_complete, /* MSG_TYPE_D2H_RING_CONFIG_CMPLT
772                                                 */
773     NULL,                                      /* MSG_TYPE_H2D_MAILBOX_DATA */
774     dhd_prot_process_d2h_mb_data,              /* MSG_TYPE_D2H_MAILBOX_DATA */
775     NULL,                                      /* MSG_TYPE_TIMSTAMP_BUFPOST */
776     NULL,                                      /* MSG_TYPE_HOSTTIMSTAMP */
777     dhd_prot_process_d2h_host_ts_complete,     /* MSG_TYPE_HOSTTIMSTAMP_CMPLT */
778     dhd_prot_process_fw_timestamp,             /* MSG_TYPE_FIRMWARE_TIMESTAMP */
779     NULL,                                      /* MSG_TYPE_SNAPSHOT_UPLOAD */
780     dhd_prot_process_snapshot_complete,        /* MSG_TYPE_SNAPSHOT_CMPLT */
781 };
782 
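/* Dispatch sketch (illustration only, never compiled): a received work item's
 * msg_type indexes table_lookup above. Several slots are intentionally NULL,
 * so a bounds check and a NULL check are both required. This snippet is an
 * assumed caller, not a copy of the driver's dispatch loop.
 */
#if 0
static void dhd_example_dispatch(dhd_pub_t *dhd, cmn_msg_hdr_t *msg)
{
    if ((msg->msg_type < DHD_PROT_FUNCS) && (table_lookup[msg->msg_type] != NULL)) {
        table_lookup[msg->msg_type](dhd, (void *)msg);
    }
}
#endif
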
783 #ifdef DHD_RX_CHAINING
784 
785 #define PKT_CTF_CHAINABLE(dhd, ifidx, evh, prio, h_sa, h_da, h_prio)           \
786     (dhd_wet_chainable(dhd) && dhd_rx_pkt_chainable((dhd), (ifidx)) &&         \
787      !ETHER_ISNULLDEST(((struct ether_header *)(evh))->ether_dhost) &&         \
788      !ETHER_ISMULTI(((struct ether_header *)(evh))->ether_dhost) &&            \
789      !eacmp((h_da), ((struct ether_header *)(evh))->ether_dhost) &&            \
790      !eacmp((h_sa), ((struct ether_header *)(evh))->ether_shost) &&            \
791      ((h_prio) == (prio)) && (dhd_ctf_hotbrc_check((dhd), (evh), (ifidx))) &&  \
792      ((((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IP)) || \
793       (((struct ether_header *)(evh))->ether_type ==                           \
794        HTON16(ETHER_TYPE_IPV6))))
795 
796 static INLINE void BCMFASTPATH dhd_rxchain_reset(rxchain_info_t *rxchain);
797 static void BCMFASTPATH dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt,
798                                           uint ifidx);
799 static void BCMFASTPATH dhd_rxchain_commit(dhd_pub_t *dhd);
800 
801 #define DHD_PKT_CTF_MAX_CHAIN_LEN 64
802 
803 #endif /* DHD_RX_CHAINING */
804 
805 #define DHD_LPBKDTDUMP_ON() (dhd_msg_level & DHD_LPBKDTDUMP_VAL)
806 
807 static void dhd_prot_h2d_sync_init(dhd_pub_t *dhd);
808 
809 bool dhd_prot_is_cmpl_ring_empty(dhd_pub_t *dhd, void *prot_info)
810 {
811     msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)prot_info;
812     uint16 rd, wr;
813     bool ret;
814 
815     if (dhd->dma_d2h_ring_upd_support) {
816         wr = flow_ring->wr;
817     } else {
818         dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
819     }
820     if (dhd->dma_h2d_ring_upd_support) {
821         rd = flow_ring->rd;
822     } else {
823         dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
824     }
825     ret = (wr == rd) ? TRUE : FALSE;
826     return ret;
827 }
828 
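/* Usage sketch (illustration only, never compiled): polling the predicate
 * above while waiting for a completion ring to drain. The retry count and
 * delay are hypothetical values chosen for illustration.
 */
#if 0
static bool dhd_example_wait_ring_empty(dhd_pub_t *dhd, void *prot_info)
{
    int tries;

    for (tries = 0; tries < 100; tries++) {
        if (dhd_prot_is_cmpl_ring_empty(dhd, prot_info)) {
            return TRUE;
        }
        OSL_DELAY(10); /* wait 10 usec between polls */
    }
    return FALSE;
}
#endif
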
829 void dhd_prot_dump_ring_ptrs(void *prot_info)
830 {
831     msgbuf_ring_t *ring = (msgbuf_ring_t *)prot_info;
832     DHD_ERROR(("%s curr_rd: %d rd: %d wr: %d \n", __FUNCTION__, ring->curr_rd,
833                ring->rd, ring->wr));
834 }
835 
836 uint16 dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd)
837 {
838     return (uint16)h2d_max_txpost;
839 }
840 void dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost)
841 {
842     h2d_max_txpost = max_txpost;
843 }
844 /**
845  * D2H DMA to completion callback handlers. Based on the mode advertised by the
846  * dongle through the PCIE shared region, the appropriate callback will be
847  * registered in the proto layer to be invoked prior to processing any message
848  * from a D2H DMA ring. If the dongle uses a read barrier or another mode that
849  * does not require host participation, then a noop callback handler will be
850  * bound that simply returns the msg_type.
851  */
852 static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum,
853                                        msgbuf_ring_t *ring, uint32 tries,
854                                        volatile uchar *msg, int msglen);
855 static uint8 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
856                                       volatile cmn_msg_hdr_t *msg, int msglen);
857 static uint8 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
858                                        volatile cmn_msg_hdr_t *msg, int msglen);
859 static uint8 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
860                                     volatile cmn_msg_hdr_t *msg, int msglen);
861 static void dhd_prot_d2h_sync_init(dhd_pub_t *dhd);
862 static int dhd_send_d2h_ringcreate(dhd_pub_t *dhd,
863                                    msgbuf_ring_t *ring_to_create,
864                                    uint16 ring_type, uint32 id);
865 static int dhd_send_h2d_ringcreate(dhd_pub_t *dhd,
866                                    msgbuf_ring_t *ring_to_create, uint8 type,
867                                    uint32 id);
868 
869 /**
870  * dhd_prot_d2h_sync_livelock - when the host determines that a DMA transfer has
871  * not completed, a livelock condition occurs. The host averts this livelock by
872  * dropping the message and moving to the next. A dropped message can lead to a
873  * packet leak, or even something disastrous if the dropped message happens to
874  * be a control response.
875  * Here we will log this condition. One may choose to reboot the dongle.
876  *
877  */
878 static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum,
879                                        msgbuf_ring_t *ring, uint32 tries,
880                                        volatile uchar *msg, int msglen)
881 {
882     uint32 ring_seqnum = ring->seqnum;
883 
884     if (dhd_query_bus_erros(dhd)) {
885         return;
886     }
887 
888     DHD_ERROR(("LIVELOCK DHD<%p> ring<%s> msg_seqnum<%u> ring_seqnum<%u:%u> "
889                "tries<%u> max<%lu>"
890                " tot<%lu> dma_buf va<%p> msg<%p> curr_rd<%d> rd<%d> wr<%d>\n",
891                dhd, ring->name, msg_seqnum, ring_seqnum,
892                ring_seqnum % D2H_EPOCH_MODULO, tries,
893                dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot,
894                ring->dma_buf.va, msg, ring->curr_rd, ring->rd, ring->wr));
895 
896     dhd_prhex("D2H MsgBuf Failure", msg, msglen, DHD_ERROR_VAL);
897 
898
899
900     /* Skip if still suspended or a suspend is in progress */
901     if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd)) {
902         DHD_ERROR(
903             ("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
904              __FUNCTION__, dhd->busstate, dhd->dhd_bus_busy_state));
905         goto exit;
906     }
907 
908     dhd_bus_dump_console_buffer(dhd->bus);
909     dhd_prot_debug_info_print(dhd);
910 
911 #ifdef DHD_FW_COREDUMP
912     if (dhd->memdump_enabled) {
913         /* collect core dump */
914         dhd->memdump_type = DUMP_TYPE_BY_LIVELOCK;
915         dhd_bus_mem_dump(dhd);
916     }
917 #endif /* DHD_FW_COREDUMP */
918 
919 exit:
920     dhd_schedule_reset(dhd);
921 
922     dhd->livelock_occured = TRUE;
923 }
924 
925 /**
926  * dhd_prot_d2h_sync_seqnum - Sync on a D2H DMA completion using the SEQNUM
927  * mode. Sequence number is always in the last word of a message.
928  */
929 static uint8 BCMFASTPATH dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd,
930                                                   msgbuf_ring_t *ring,
931                                                   volatile cmn_msg_hdr_t *msg,
932                                                   int msglen)
933 {
934     uint32 tries;
935     uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
936     int num_words = msglen / sizeof(uint32); /* num of 32bit words */
937     volatile uint32 *marker =
938         (volatile uint32 *)msg + (num_words - 1); /* last word */
939     dhd_prot_t *prot = dhd->prot;
940     uint32 msg_seqnum;
941     uint32 step = 0;
942     uint32 delay = PCIE_D2H_SYNC_DELAY;
943     uint32 total_tries = 0;
944 
945     ASSERT(msglen == ring->item_len);
946 
947     BCM_REFERENCE(delay);
948     /*
949      * For retries we have to use some sort of stepper algorithm.
950      * We see that every time when the Dongle comes out of the D3
951      * Cold state, the first D2H mem2mem DMA takes more time to
952      * complete, leading to livelock issues.
953      *
954      * Case 1 - Apart from Host CPU some other bus master is
955      * accessing the DDR port, probably a page close to the ring,
956      * so PCIE does not get a chance to update the memory.
957      * Solution - Increase the number of tries.
958      *
959      * Case 2 - The 50usec delay given by the Host CPU is not
960      * sufficient for the PCIe RC to start its work.
961      * In this case the breathing time of 50usec given by
962      * the Host CPU is not sufficient.
963      * Solution: Increase the delay in a stepper fashion.
964      * This is done to ensure that there are no
965      * unwanted extra delays introduced in normal conditions.
966      */
967     for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
968         for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
969             msg_seqnum = *marker;
970             if (ltoh32(msg_seqnum) ==
971                 ring_seqnum) {  /* dma up to last word done */
972                 ring->seqnum++; /* next expected sequence number */
973                 /* Check for LIVELOCK induce flag, which is set by firing
974                  * dhd iovar to induce LIVELOCK error. If flag is set,
975                  * MSG_TYPE_INVALID is returned, which results in to LIVELOCK
976                  * error.
977                  */
978                 if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
979                     goto dma_completed;
980                 }
981             }
982 
983             total_tries =
984                 (uint32)(((step - 1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries);
985 
986             if (total_tries > prot->d2h_sync_wait_max) {
987                 prot->d2h_sync_wait_max = total_tries;
988             }
989 
990             OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
991             OSL_CPU_RELAX(); /* CPU relax for msg_seqnum  value to update */
992             OSL_DELAY(delay * step); /* Add stepper delay */
993 
994         } /* for PCIE_D2H_SYNC_WAIT_TRIES */
995     }     /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
996 
997     dhd_prot_d2h_sync_livelock(dhd, msg_seqnum, ring, total_tries,
998                                (volatile uchar *)msg, msglen);
999 
1000     ring->seqnum++;          /* skip this message ... leak of a pktid */
1001     return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
1002 
1003 dma_completed:
1004 
1005     prot->d2h_sync_wait_tot += tries;
1006     return msg->msg_type;
1007 }
1008 
1009 /**
1010  * dhd_prot_d2h_sync_xorcsum - Sync on a D2H DMA completion using the XORCSUM
1011  * mode. The xorcsum is placed in the last word of a message. Dongle will also
1012  * place a seqnum in the epoch field of the cmn_msg_hdr.
1013  */
1014 static uint8 BCMFASTPATH dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd,
1015                                                    msgbuf_ring_t *ring,
1016                                                    volatile cmn_msg_hdr_t *msg,
1017                                                    int msglen)
1018 {
1019     uint32 tries;
1020     uint32 prot_checksum = 0;                /* computed checksum */
1021     int num_words = msglen / sizeof(uint32); /* num of 32bit words */
1022     uint8 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
1023     dhd_prot_t *prot = dhd->prot;
1024     uint32 step = 0;
1025     uint32 delay = PCIE_D2H_SYNC_DELAY;
1026     uint32 total_tries = 0;
1027 
1028     ASSERT(msglen == ring->item_len);
1029 
1030     BCM_REFERENCE(delay);
1031     /*
1032      * For retries we have to use some sort of stepper algorithm.
1033      * We see that every time when the Dongle comes out of the D3
1034      * Cold state, the first D2H mem2mem DMA takes more time to
1035      * complete, leading to livelock issues.
1036      *
1037      * Case 1 - Apart from Host CPU some other bus master is
1038      * accessing the DDR port, probably a page close to the ring,
1039      * so PCIE does not get a chance to update the memory.
1040      * Solution - Increase the number of tries.
1041      *
1042      * Case 2 - The 50usec delay given by the Host CPU is not
1043      * sufficient for the PCIe RC to start its work.
1044      * In this case the breathing time of 50usec given by
1045      * the Host CPU is not sufficient.
1046      * Solution: Increase the delay in a stepper fashion.
1047      * This is done to ensure that there are no
1048      * unwanted extra delays introduced in normal conditions.
1049      */
1050     for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
1051         for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
1052             /* First verify that the seqnum has been updated;
1053              * only then check the xorcsum.
1054              * Once both seqnum and xorcsum are correct, the
1055              * complete message has arrived.
1056              */
1057             if (msg->epoch == ring_seqnum) {
1058                 prot_checksum =
1059                     bcm_compute_xor32((volatile uint32 *)msg, num_words);
1060                 if (prot_checksum == 0U) { /* checksum is OK */
1061                     ring->seqnum++;        /* next expected sequence number */
1062                     /* Check for LIVELOCK induce flag, which is set by firing
1063                      * dhd iovar to induce LIVELOCK error. If flag is set,
1064                      * MSG_TYPE_INVALID is returned, which results in to
1065                      * LIVELOCK error.
1066                      */
1067                     if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
1068                         goto dma_completed;
1069                     }
1070                 }
1071             }
1072 
1073             total_tries = ((step - 1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
1074 
1075             if (total_tries > prot->d2h_sync_wait_max) {
1076                 prot->d2h_sync_wait_max = total_tries;
1077             }
1078 
1079             OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
1080             OSL_CPU_RELAX(); /* CPU relax for msg_seqnum  value to update */
1081             OSL_DELAY(delay * step); /* Add stepper delay */
1082 
1083         } /* for PCIE_D2H_SYNC_WAIT_TRIES */
1084     }     /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
1085 
1086     DHD_ERROR(("%s: prot_checksum = 0x%x\n", __FUNCTION__, prot_checksum));
1087     dhd_prot_d2h_sync_livelock(dhd, msg->epoch, ring, total_tries,
1088                                (volatile uchar *)msg, msglen);
1089 
1090     ring->seqnum++;          /* skip this message ... leak of a pktid */
1091     return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
1092 
1093 dma_completed:
1094 
1095     prot->d2h_sync_wait_tot += tries;
1096     return msg->msg_type;
1097 }
1098 
1099 /**
1100  * dhd_prot_d2h_sync_none - Dongle ensures that the DMA will complete; the
1101  * host need not try to sync. This noop sync handler is bound when the dongle
1102  * advertises that neither the SEQNUM nor XORCSUM mode of DMA sync is required.
1103  */
1104 static uint8 BCMFASTPATH dhd_prot_d2h_sync_none(dhd_pub_t *dhd,
1105                                                 msgbuf_ring_t *ring,
1106                                                 volatile cmn_msg_hdr_t *msg,
1107                                                 int msglen)
1108 {
1109     /* Check for LIVELOCK induce flag, which is set by firing
1110      * dhd iovar to induce LIVELOCK error. If flag is set,
1111      * MSG_TYPE_INVALID is returned, which results in to LIVELOCK error.
1112      */
1113     if (dhd->dhd_induce_error == DHD_INDUCE_LIVELOCK) {
1114         DHD_ERROR(("%s: Inducing livelock\n", __FUNCTION__));
1115         return MSG_TYPE_INVALID;
1116     } else {
1117         return msg->msg_type;
1118     }
1119 }
1120 
1121 #ifdef EWP_EDL
1122 /**
1123  * dhd_prot_d2h_sync_edl - Sync on a D2H DMA completion by validating the
1124  * cmn_msg_hdr_t header values at both the beginning and end of the payload. The
1125  * cmn_msg_hdr_t is placed at the start and end of the payload in each work item
1126  * in the EDL ring. Dongle will place a seqnum inside the cmn_msg_hdr_t 'epoch'
1127  * field and the length of the payload in the 'request_id' field. Structure of
1128  * each work item in the EDL ring:
1129  *   | cmn_msg_hdr_t | payload (var len) | cmn_msg_hdr_t |
1130  * NOTE: calculating an xorcsum over the entire payload (max length of 1648
1131  * bytes) was felt to be too costly on the dongle side (too many ARM cycles),
1132  * hence the xorcsum sync method is not used for the EDL ring.
1133  */
1134 static int BCMFASTPATH(dhd_prot_d2h_sync_edl)(dhd_pub_t *dhd,
1135                                               msgbuf_ring_t *ring,
1136                                               volatile cmn_msg_hdr_t *msg)
1137 {
1138     uint32 tries;
1139     int msglen = 0, len = 0;
1140     uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
1141     dhd_prot_t *prot = dhd->prot;
1142     uint32 step = 0;
1143     uint32 delay = PCIE_D2H_SYNC_DELAY;
1144     uint32 total_tries = 0;
1145     volatile cmn_msg_hdr_t *trailer = NULL;
1146     volatile uint8 *buf = NULL;
1147     bool valid_msg = FALSE;
1148 
1149     BCM_REFERENCE(delay);
1150     /*
1151      * For retries we have to use some sort of stepper algorithm.
1152      * We see that every time when the Dongle comes out of the D3
1153      * Cold state, the first D2H mem2mem DMA takes more time to
1154      * complete, leading to livelock issues.
1155      *
1156      * Case 1 - Apart from Host CPU some other bus master is
1157      * accessing the DDR port, probably a page close to the ring,
1158      * so PCIE does not get a chance to update the memory.
1159      * Solution - Increase the number of tries.
1160      *
1161      * Case 2 - The 50usec delay given by the Host CPU is not
1162      * sufficient for the PCIe RC to start its work.
1163      * In this case the breathing time of 50usec given by
1164      * the Host CPU is not sufficient.
1165      * Solution: Increase the delay in a stepper fashion.
1166      * This is done to ensure that there are no
1167      * unwanted extra delays introduced in normal conditions.
1168      */
    for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
        for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
            /* First verify that the seqnum has been updated; if yes,
             * only then validate the header and trailer.
             * Once seqnum, header and trailer have been validated, it means
             * that the complete message has arrived.
             */
            valid_msg = FALSE;
            if (msg->epoch == ring_seqnum &&
                msg->msg_type == MSG_TYPE_INFO_PYLD && msg->request_id > 0 &&
                msg->request_id <= ring->item_len) {
                /* proceed to check trailer only if header is valid */
                buf = (volatile uint8 *)msg;
                msglen = sizeof(cmn_msg_hdr_t) + msg->request_id;
                buf += msglen;
                if (msglen + sizeof(cmn_msg_hdr_t) <= ring->item_len) {
                    trailer = (volatile cmn_msg_hdr_t *)buf;
                    valid_msg = (trailer->epoch == ring_seqnum) &&
                                (trailer->msg_type == msg->msg_type) &&
                                (trailer->request_id == msg->request_id);
                    if (!valid_msg) {
                        DHD_TRACE(
                            ("%s: invalid trailer! got seqnum=%u, reqid=%u;"
                             " expected seqnum=%u, reqid=%u. Retrying...\n",
                             __FUNCTION__, trailer->epoch, trailer->request_id,
                             msg->epoch, msg->request_id));
                    }
                } else {
                    DHD_TRACE(("%s: invalid payload length (%u)! Retrying...\n",
                               __FUNCTION__, msg->request_id));
                }

                if (valid_msg) {
                    /* data is OK */
                    ring->seqnum++; /* next expected sequence number */
                    if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
                        goto dma_completed;
                    }
                }
            } else {
                DHD_TRACE(("%s: wrong hdr, seqnum expected %u, got %u."
                           " msg_type=0x%x, request_id=%u."
                           " Retrying...\n",
                           __FUNCTION__, ring_seqnum, msg->epoch, msg->msg_type,
                           msg->request_id));
            }

            total_tries = ((step - 1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;

            if (total_tries > prot->d2h_sync_wait_max) {
                prot->d2h_sync_wait_max = total_tries;
            }

            OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
            OSL_CPU_RELAX(); /* CPU relax, allowing msg_seqnum to update */
            OSL_DELAY(delay * step); /* Add stepper delay */

        } /* for PCIE_D2H_SYNC_WAIT_TRIES */
    }     /* for PCIE_D2H_SYNC_NUM_OF_STEPS */

    DHD_ERROR(("%s: EDL header check failed!\n", __FUNCTION__));
    DHD_ERROR(("%s: header: seqnum=%u; expected-seqnum=%u"
               " msgtype=0x%x; expected-msgtype=0x%x"
               " length=%u; expected-max-length=%u",
               __FUNCTION__, msg->epoch, ring_seqnum, msg->msg_type,
               MSG_TYPE_INFO_PYLD, msg->request_id, ring->item_len));
    dhd_prhex("msg header bytes: ", (volatile uchar *)msg, sizeof(*msg),
              DHD_ERROR_VAL);
    if (trailer && msglen > 0 &&
        (msglen + sizeof(cmn_msg_hdr_t)) <= ring->item_len) {
        DHD_ERROR(("%s: trailer: seqnum=%u; expected-seqnum=%u"
                   " msgtype=0x%x; expected-msgtype=0x%x"
                   " length=%u; expected-length=%u",
                   __FUNCTION__, trailer->epoch, ring_seqnum, trailer->msg_type,
                   MSG_TYPE_INFO_PYLD, trailer->request_id, msg->request_id));
        dhd_prhex("msg trailer bytes: ", (volatile uchar *)trailer,
                  sizeof(*trailer), DHD_ERROR_VAL);
    }

    if ((msglen + sizeof(cmn_msg_hdr_t)) <= ring->item_len) {
        len = msglen + sizeof(cmn_msg_hdr_t);
    } else {
        len = ring->item_len;
    }

    dhd_prot_d2h_sync_livelock(dhd, msg->epoch, ring, total_tries,
                               (volatile uchar *)msg, len);

    ring->seqnum++;    /* skip this message */
    return BCME_ERROR; /* invalid msg_type 0 -> noop callback */

dma_completed:
    DHD_TRACE(("%s: EDL header check passed, seqnum=%u; reqid=%u\n",
               __FUNCTION__, msg->epoch, msg->request_id));

    prot->d2h_sync_wait_tot += tries;
    return BCME_OK;
}
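
#if 0
/* Illustrative sketch (not compiled): the logical layout of one EDL work
 * item as validated above. The payload length travels in the header's
 * 'request_id' field, and the trailer repeats the header fields so the
 * host can detect a partially DMA'd item. The 1648-byte maximum is taken
 * from the comment above; the real item size is ring->item_len.
 */
typedef struct edl_work_item_sketch {
    cmn_msg_hdr_t hdr;     /* epoch = seqnum, request_id = payload length */
    uint8 payload[1648];   /* variable-length payload (max per above) */
    cmn_msg_hdr_t trailer; /* must match hdr once DMA has completed */
} edl_work_item_sketch_t;
#endif /* 0 */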

/**
 * dhd_prot_d2h_sync_edl_none - The dongle ensures that the DMA will complete,
 * so the host does not need to sync. This noop sync handler will be bound
 * when the dongle advertises that neither the SEQNUM nor XORCSUM mode of
 * DMA sync is required.
 */
static int BCMFASTPATH dhd_prot_d2h_sync_edl_none(dhd_pub_t *dhd,
                                                  msgbuf_ring_t *ring,
                                                  volatile cmn_msg_hdr_t *msg)
{
    /* Check for the LIVELOCK induce flag, which is set by firing a
     * dhd iovar to induce a LIVELOCK error. If the flag is set,
     * MSG_TYPE_INVALID is returned, which results in a LIVELOCK error.
     */
    if (dhd->dhd_induce_error == DHD_INDUCE_LIVELOCK) {
        DHD_ERROR(("%s: Inducing livelock\n", __FUNCTION__));
        return BCME_ERROR;
    } else {
        if (msg->msg_type == MSG_TYPE_INFO_PYLD) {
            return BCME_OK;
        } else {
            return msg->msg_type;
        }
    }
}
#endif /* EWP_EDL */

INLINE void dhd_wakeup_ioctl_event(dhd_pub_t *dhd,
                                   dhd_ioctl_recieved_status_t reason)
{
    /* To synchronize with the previous memory operations, call wmb() */
    OSL_SMP_WMB();
    dhd->prot->ioctl_received = reason;
    /* Call another wmb() to make sure the ioctl_received update is
     * visible before waking up the waiter */
    OSL_SMP_WMB();
    dhd_os_ioctl_resp_wake(dhd);
}
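
/*
 * Note on the barrier pairing above (reasoning sketch, not a contract from
 * the dongle interface): the first OSL_SMP_WMB() orders earlier ring/buffer
 * writes before the ioctl_received store, and the second orders that store
 * before the wake-up, so a waiter that re-checks prot->ioctl_received after
 * dhd_os_ioctl_resp_wake() observes the updated reason value.
 */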

/**
 * dhd_prot_d2h_sync_init - Setup the host side DMA sync mode based on what
 * the dongle advertises.
 */
static void dhd_prot_d2h_sync_init(dhd_pub_t *dhd)
{
    dhd_prot_t *prot = dhd->prot;
    prot->d2h_sync_wait_max = 0UL;
    prot->d2h_sync_wait_tot = 0UL;

    prot->d2hring_ctrl_cpln.seqnum = D2H_EPOCH_INIT_VAL;
    prot->d2hring_ctrl_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;

    prot->d2hring_tx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
    prot->d2hring_tx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;

    prot->d2hring_rx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
    prot->d2hring_rx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;

    if (HWA_ACTIVE(dhd)) {
        prot->d2hring_tx_cpln.hwa_db_type =
            (dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_TXCPLT)
                ? HWA_DB_TYPE_TXCPLT
                : 0;
        prot->d2hring_rx_cpln.hwa_db_type =
            (dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_RXCPLT)
                ? HWA_DB_TYPE_RXCPLT
                : 0;
        DHD_ERROR(("%s: TXCPLT hwa_db_type:0x%x RXCPLT hwa_db_type:0x%x\n",
                   __FUNCTION__, prot->d2hring_tx_cpln.hwa_db_type,
                   prot->d2hring_rx_cpln.hwa_db_type));
    }

    if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) {
        prot->d2h_sync_cb = dhd_prot_d2h_sync_seqnum;
#ifdef EWP_EDL
        prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl;
#endif /* EWP_EDL */
        DHD_ERROR(("%s(): D2H sync mechanism is SEQNUM \r\n", __FUNCTION__));
    } else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) {
        prot->d2h_sync_cb = dhd_prot_d2h_sync_xorcsum;
#ifdef EWP_EDL
        prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl;
#endif /* EWP_EDL */
        DHD_ERROR(("%s(): D2H sync mechanism is XORCSUM \r\n", __FUNCTION__));
    } else {
        prot->d2h_sync_cb = dhd_prot_d2h_sync_none;
#ifdef EWP_EDL
        prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl_none;
#endif /* EWP_EDL */
        DHD_ERROR(("%s(): D2H sync mechanism is NONE \r\n", __FUNCTION__));
    }
}

/**
 * dhd_prot_h2d_sync_init - Per H2D common ring, setup the msgbuf ring seqnum
 */
static void dhd_prot_h2d_sync_init(dhd_pub_t *dhd)
{
    dhd_prot_t *prot = dhd->prot;
    prot->h2dring_rxp_subn.seqnum = H2D_EPOCH_INIT_VAL;

    if (HWA_ACTIVE(dhd)) {
        prot->h2dring_rxp_subn.hwa_db_type =
            (dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_RXPOST)
                ? HWA_DB_TYPE_RXPOST
                : 0;
        DHD_ERROR(("%s: RXPOST hwa_db_type:0x%x\n", __FUNCTION__,
                   prot->h2dring_rxp_subn.hwa_db_type));
    }

    prot->h2dring_rxp_subn.current_phase = 0;

    prot->h2dring_ctrl_subn.seqnum = H2D_EPOCH_INIT_VAL;
    prot->h2dring_ctrl_subn.current_phase = 0;
}

/* +-----------------  End of PCIE DHD H2D DMA SYNC ------------------------+ */

/*
 * +---------------------------------------------------------------------------+
 * PCIE DMA-able buffer. Sets up a dhd_dma_buf_t object, which includes the
 * virtual and physical address, the buffer length and the DMA handler.
 * A secdma handler is also included in the dhd_dma_buf object.
 * +---------------------------------------------------------------------------+
 */

static INLINE void dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa)
{
    base_addr->low_addr = htol32(PHYSADDRLO(pa));
    base_addr->high_addr = htol32(PHYSADDRHI(pa));
}

/**
 * dhd_dma_buf_audit - Any audits on a DHD DMA Buffer.
 */
static int dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
{
    uint32 pa_lowaddr, end; /* dongle uses 32bit ptr arithmetic */
    ASSERT(dma_buf);
    pa_lowaddr = PHYSADDRLO(dma_buf->pa);
    ASSERT(PHYSADDRLO(dma_buf->pa) || PHYSADDRHI(dma_buf->pa));
    ASSERT(ISALIGNED(pa_lowaddr, DMA_ALIGN_LEN));
    ASSERT(dma_buf->len != 0);

    /* test 32bit offset arithmetic over dma buffer for loss of carry-over */
    end = (pa_lowaddr + dma_buf->len); /* end address */

    if ((end & 0xFFFFFFFF) <
        (pa_lowaddr & 0xFFFFFFFF)) { /* exclude carryover */
        DHD_ERROR(("%s: dma_buf %x len %d spans dongle 32bit ptr arithmetic\n",
                   __FUNCTION__, pa_lowaddr, dma_buf->len));
        return BCME_ERROR;
    }

    return BCME_OK;
}
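
/*
 * Worked example of the wrap check above (illustrative values): with
 * pa_lowaddr = 0xFFFFF000 and len = 0x2000, the end address truncates to
 * 0x1000 in 32-bit arithmetic, which is less than pa_lowaddr, so the buffer
 * would span the dongle's 32-bit pointer range and the audit returns
 * BCME_ERROR.
 */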

/**
 * dhd_dma_buf_alloc - Allocate a cache coherent DMA-able buffer.
 * returns BCME_OK=0 on success
 * returns non-zero negative error value on failure.
 */
int dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len)
{
    uint32 dma_pad = 0;
    osl_t *osh = dhd->osh;
    uint16 dma_align = DMA_ALIGN_LEN;
    uint32 rem = 0;

    ASSERT(dma_buf != NULL);
    ASSERT(dma_buf->va == NULL);
    ASSERT(dma_buf->len == 0);

    /* Pad the buffer length to align to cacheline size. */
    rem = (buf_len % DHD_DMA_PAD);
    dma_pad = rem ? (DHD_DMA_PAD - rem) : 0;
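
    /*
     * Worked example (illustrative; DHD_DMA_PAD is defined elsewhere in the
     * driver): if DHD_DMA_PAD were 128 and buf_len were 1000, then
     * rem = 1000 % 128 = 104 and dma_pad = 128 - 104 = 24, so 1024 bytes are
     * allocated below while dma_buf->len still reports the requested 1000.
     */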

    dma_buf->va =
        DMA_ALLOC_CONSISTENT(osh, buf_len + dma_pad, dma_align,
                             &dma_buf->_alloced, &dma_buf->pa, &dma_buf->dmah);

    if (dma_buf->va == NULL) {
        DHD_ERROR(
            ("%s: buf_len %d, no memory available\n", __FUNCTION__, buf_len));
        return BCME_NOMEM;
    }

    dma_buf->len = buf_len; /* not including padded len */

    if (dhd_dma_buf_audit(dhd, dma_buf) != BCME_OK) { /* audit dma buf */
        dhd_dma_buf_free(dhd, dma_buf);
        return BCME_ERROR;
    }

    dhd_dma_buf_reset(dhd, dma_buf); /* zero out and cache flush */

    return BCME_OK;
}

/**
 * dhd_dma_buf_reset - Reset a cache coherent DMA-able buffer.
 */
static void dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
{
    if ((dma_buf == NULL) || (dma_buf->va == NULL)) {
        return;
    }

    (void)dhd_dma_buf_audit(dhd, dma_buf);

    /* Zero out the entire buffer and cache flush */
    memset((void *)dma_buf->va, 0, dma_buf->len);
    OSL_CACHE_FLUSH((void *)dma_buf->va, dma_buf->len);
}

/**
 * dhd_dma_buf_free - Free a DMA-able buffer that was previously allocated using
 * dhd_dma_buf_alloc().
 */
void dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
{
    osl_t *osh = dhd->osh;

    ASSERT(dma_buf);

    if (dma_buf->va == NULL) {
        return; /* Allow for free invocation, when alloc failed */
    }

    /* DEBUG: dhd_dma_buf_reset(dhd, dma_buf) */
    (void)dhd_dma_buf_audit(dhd, dma_buf);

    /* dma buffer may have been padded at allocation */
    DMA_FREE_CONSISTENT(osh, dma_buf->va, dma_buf->_alloced, dma_buf->pa,
                        dma_buf->dmah);

    memset(dma_buf, 0, sizeof(dhd_dma_buf_t));
}

/**
 * dhd_dma_buf_init - Initialize a dhd_dma_buf with specified values.
 * Do not use dhd_dma_buf_init to zero out a dhd_dma_buf_t object. Use memset 0.
 */
void dhd_dma_buf_init(dhd_pub_t *dhd, void *dhd_dma_buf, void *va, uint32 len,
                      dmaaddr_t pa, void *dmah, void *secdma)
{
    dhd_dma_buf_t *dma_buf;
    ASSERT(dhd_dma_buf);
    dma_buf = (dhd_dma_buf_t *)dhd_dma_buf;
    dma_buf->va = va;
    dma_buf->len = len;
    dma_buf->pa = pa;
    dma_buf->dmah = dmah;
    dma_buf->secdma = secdma;

    /* Audit user defined configuration */
    (void)dhd_dma_buf_audit(dhd, dma_buf);
}

/* +------------------  End of PCIE DHD DMA BUF ADT ------------------------+ */
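
#if 0
/* Illustrative lifecycle sketch (not compiled into the driver): allocate a
 * DMA-able buffer, convert its base address for the dongle, then free it.
 * The 'example_len' value is arbitrary.
 */
static int dhd_dma_buf_example(dhd_pub_t *dhd)
{
    dhd_dma_buf_t dma_buf;
    sh_addr_t base_addr;
    uint32 example_len = 2048;

    memset(&dma_buf, 0, sizeof(dma_buf)); /* alloc asserts va==NULL, len==0 */
    if (dhd_dma_buf_alloc(dhd, &dma_buf, example_len) != BCME_OK) {
        return BCME_NOMEM;
    }

    /* convert the physical address to the little-endian shared format */
    dhd_base_addr_htolpa(&base_addr, dma_buf.pa);

    /* ... the buffer would be used for host<->dongle DMA here ... */

    dhd_dma_buf_free(dhd, &dma_buf); /* also safe if alloc had failed */
    return BCME_OK;
}
#endif /* 0 */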

/*
 * +---------------------------------------------------------------------------+
 * DHD_MAP_PKTID_LOGGING
 * Logs PKTID and DMA map/unmap information, for debugging SMMU fault issues
 * on customer platforms.
 * +---------------------------------------------------------------------------+
 */

#ifdef DHD_MAP_PKTID_LOGGING
typedef struct dhd_pktid_log_item {
    dmaaddr_t pa;   /* DMA bus address */
    uint64 ts_nsec; /* Timestamp: nsec */
    uint32 size;    /* DMA map/unmap size */
    uint32 pktid;   /* Packet ID */
    uint8 pkttype;  /* Packet Type */
    uint8 rsvd[7];  /* Reserved for future use */
} dhd_pktid_log_item_t;

typedef struct dhd_pktid_log {
    uint32 items;                /* number of total items */
    uint32 index;                /* index of pktid_log_item */
    dhd_pktid_log_item_t map[0]; /* metadata storage */
} dhd_pktid_log_t;

typedef void *dhd_pktid_log_handle_t; /* opaque handle to pktid log */

#define MAX_PKTID_LOG (2048)
#define DHD_PKTID_LOG_ITEM_SZ (sizeof(dhd_pktid_log_item_t))
#define DHD_PKTID_LOG_SZ(items)                                                \
    (uint32)((sizeof(dhd_pktid_log_t)) + ((DHD_PKTID_LOG_ITEM_SZ) * (items)))

#define DHD_PKTID_LOG_INIT(dhd, hdl) dhd_pktid_logging_init((dhd), (hdl))
#define DHD_PKTID_LOG_FINI(dhd, hdl) dhd_pktid_logging_fini((dhd), (hdl))
#define DHD_PKTID_LOG(dhd, hdl, pa, pktid, len, pkttype)                       \
    dhd_pktid_logging((dhd), (hdl), (pa), (pktid), (len), (pkttype))
#define DHD_PKTID_LOG_DUMP(dhd) dhd_pktid_logging_dump((dhd))

static dhd_pktid_log_handle_t *dhd_pktid_logging_init(dhd_pub_t *dhd,
                                                      uint32 num_items)
{
    dhd_pktid_log_t *log;
    uint32 log_size;

    log_size = DHD_PKTID_LOG_SZ(num_items);
    log = (dhd_pktid_log_t *)MALLOCZ(dhd->osh, log_size);
    if (log == NULL) {
        DHD_ERROR(("%s: MALLOC failed for size %d\n", __FUNCTION__, log_size));
        return (dhd_pktid_log_handle_t *)NULL;
    }

    log->items = num_items;
    log->index = 0;

    return (dhd_pktid_log_handle_t *)log; /* opaque handle */
}

static void dhd_pktid_logging_fini(dhd_pub_t *dhd,
                                   dhd_pktid_log_handle_t *handle)
{
    dhd_pktid_log_t *log;
    uint32 log_size;

    if (handle == NULL) {
        DHD_ERROR(("%s: handle is NULL\n", __FUNCTION__));
        return;
    }

    log = (dhd_pktid_log_t *)handle;
    log_size = DHD_PKTID_LOG_SZ(log->items);
    MFREE(dhd->osh, handle, log_size);
}

static void dhd_pktid_logging(dhd_pub_t *dhd, dhd_pktid_log_handle_t *handle,
                              dmaaddr_t pa, uint32 pktid, uint32 len,
                              uint8 pkttype)
{
    dhd_pktid_log_t *log;
    uint32 idx;

    if (handle == NULL) {
        DHD_ERROR(("%s: handle is NULL\n", __FUNCTION__));
        return;
    }

    log = (dhd_pktid_log_t *)handle;
    idx = log->index;
    log->map[idx].ts_nsec = OSL_LOCALTIME_NS();
    log->map[idx].pa = pa;
    log->map[idx].pktid = pktid;
    log->map[idx].size = len;
    log->map[idx].pkttype = pkttype;
    log->index = (idx + 1) % (log->items); /* update index */
}

void dhd_pktid_logging_dump(dhd_pub_t *dhd)
{
    dhd_prot_t *prot = dhd->prot;
    dhd_pktid_log_t *map_log, *unmap_log;
    uint64 ts_sec, ts_usec;

    if (prot == NULL) {
        DHD_ERROR(("%s: prot is NULL\n", __FUNCTION__));
        return;
    }

    map_log = (dhd_pktid_log_t *)(prot->pktid_dma_map);
    unmap_log = (dhd_pktid_log_t *)(prot->pktid_dma_unmap);
    OSL_GET_LOCALTIME(&ts_sec, &ts_usec);
    if (map_log && unmap_log) {
        DHD_ERROR(("%s: map_idx=%d unmap_idx=%d "
                   "current time=[%5lu.%06lu]\n",
                   __FUNCTION__, map_log->index, unmap_log->index,
                   (unsigned long)ts_sec, (unsigned long)ts_usec));
        DHD_ERROR(("%s: pktid_map_log(pa)=0x%llx size=%d, "
                   "pktid_unmap_log(pa)=0x%llx size=%d\n",
                   __FUNCTION__, (uint64)__virt_to_phys((ulong)(map_log->map)),
                   (uint32)(DHD_PKTID_LOG_ITEM_SZ * map_log->items),
                   (uint64)__virt_to_phys((ulong)(unmap_log->map)),
                   (uint32)(DHD_PKTID_LOG_ITEM_SZ * unmap_log->items)));
    }
}
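
#if 0
/* Illustrative usage sketch (not compiled): how the logging hooks above fit
 * together. The DMA parameters are placeholders; real callers pass the
 * values used for the actual map/unmap operation, and the driver keeps one
 * long-lived log each for maps and unmaps rather than a transient one.
 */
static void dhd_pktid_logging_example(dhd_pub_t *dhd, dmaaddr_t pa,
                                      uint32 pktid, uint32 len)
{
    dhd_pktid_log_handle_t *log;

    log = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
    if (log == NULL) {
        return;
    }

    /* record one DMA map event; the log wraps after MAX_PKTID_LOG items */
    DHD_PKTID_LOG(dhd, log, pa, pktid, len, 0 /* pkttype placeholder */);

    DHD_PKTID_LOG_FINI(dhd, log);
}
#endif /* 0 */
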
#endif /* DHD_MAP_PKTID_LOGGING */

/* +-----------------  End of DHD_MAP_PKTID_LOGGING -----------------------+ */

/*
 * +---------------------------------------------------------------------------+
 * PktId Map: Provides a native packet pointer to unique 32bit PktId mapping.
 * Its main purpose is to save memory on the dongle; it has other purposes as
 * well. The packet id map also includes storage for some packet parameters
 * that may be saved. A native packet pointer along with the parameters may be
 * saved, and a unique 32bit pkt id will be returned. Later, the saved packet
 * pointer and the metadata may be retrieved using the previously allocated
 * packet id.
 * +---------------------------------------------------------------------------+
 */
#define DHD_PCIE_PKTID
#define MAX_CTRL_PKTID (1024) /* Maximum number of pktids supported */
#define MAX_RX_PKTID (1024)
#define MAX_TX_PKTID (3072 * 12)

/* On Router, the pktptr serves as a pktid. */

#if defined(PROP_TXSTATUS) && !defined(DHD_PCIE_PKTID)
#error "PKTIDMAP must be supported with PROP_TXSTATUS/WLFC"
#endif // endif

/* Enum for marking the buffer color based on usage */
typedef enum dhd_pkttype {
    PKTTYPE_DATA_TX = 0,
    PKTTYPE_DATA_RX,
    PKTTYPE_IOCTL_RX,
    PKTTYPE_EVENT_RX,
    PKTTYPE_INFO_RX,
    /* no check in dhd_prot_pkt_free; covers the case where a pktid is
     * reserved but no space is available
     */
    PKTTYPE_NO_CHECK,
    PKTTYPE_TSBUF_RX
} dhd_pkttype_t;

#define DHD_PKTID_MIN_AVAIL_COUNT 512U
#define DHD_PKTID_DEPLETED_MAX_COUNT (DHD_PKTID_MIN_AVAIL_COUNT * 2U)
#define DHD_PKTID_INVALID (0U)
#define DHD_IOCTL_REQ_PKTID (0xFFFE)
#define DHD_FAKE_PKTID (0xFACE)
#define DHD_H2D_DBGRING_REQ_PKTID 0xFFFD
#define DHD_D2H_DBGRING_REQ_PKTID 0xFFFC
#define DHD_H2D_HOSTTS_REQ_PKTID 0xFFFB
#define DHD_H2D_BTLOGRING_REQ_PKTID 0xFFFA
#define DHD_D2H_BTLOGRING_REQ_PKTID 0xFFF9
#define DHD_H2D_SNAPSHOT_UPLOAD_REQ_PKTID 0xFFF8
#ifdef DHD_HP2P
#define DHD_D2H_HPPRING_TXREQ_PKTID 0xFFF7
#define DHD_D2H_HPPRING_RXREQ_PKTID 0xFFF6
#endif /* DHD_HP2P */

#define IS_FLOWRING(ring)                                                      \
    ((strncmp(ring->name, "h2dflr", sizeof("h2dflr"))) == (0))

typedef void *dhd_pktid_map_handle_t; /* opaque handle to a pktid map */

/* Construct a packet id mapping table, returning an opaque map handle */
static dhd_pktid_map_handle_t *dhd_pktid_map_init(dhd_pub_t *dhd,
                                                  uint32 num_items);

/* Destroy a packet id mapping table, freeing all packets active in the table */
static void dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map);

#define DHD_NATIVE_TO_PKTID_INIT(dhd, items) dhd_pktid_map_init((dhd), (items))
#define DHD_NATIVE_TO_PKTID_RESET(dhd, map) dhd_pktid_map_reset((dhd), (map))
#define DHD_NATIVE_TO_PKTID_FINI(dhd, map) dhd_pktid_map_fini((dhd), (map))
#define DHD_NATIVE_TO_PKTID_FINI_IOCTL(osh, map)                               \
    dhd_pktid_map_fini_ioctl((osh), (map))

#ifdef MACOSX_DHD
#undef DHD_PCIE_PKTID
#define DHD_PCIE_PKTID 1
#endif /* MACOSX_DHD */

#if defined(DHD_PCIE_PKTID)
#if defined(MACOSX_DHD)
#define IOCTLRESP_USE_CONSTMEM
static void free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf);
static int alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf);
#endif // endif

/* Determine number of pktids that are available */
static INLINE uint32 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle);

/* Allocate a unique pktid against which a pkt and some metadata is saved */
static INLINE uint32 dhd_pktid_map_reserve(dhd_pub_t *dhd,
                                           dhd_pktid_map_handle_t *handle,
                                           void *pkt, dhd_pkttype_t pkttype);
static INLINE void dhd_pktid_map_save(dhd_pub_t *dhd,
                                      dhd_pktid_map_handle_t *handle, void *pkt,
                                      uint32 nkey, dmaaddr_t pa, uint32 len,
                                      uint8 dma, void *dmah, void *secdma,
                                      dhd_pkttype_t pkttype);
static uint32 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
                                  void *pkt, dmaaddr_t pa, uint32 len,
                                  uint8 dma, void *dmah, void *secdma,
                                  dhd_pkttype_t pkttype);
/* Return an allocated pktid, retrieving previously saved pkt and metadata */
static void *dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
                                uint32 id, dmaaddr_t *pa, uint32 *len,
                                void **dmah, void **secdma,
                                dhd_pkttype_t pkttype, bool rsv_locker);

/*
 * DHD_PKTID_AUDIT_ENABLED: Audit of PktIds in DHD for duplicate alloc and frees
 *
 * DHD_PKTID_AUDIT_MAP: Audit the LIFO or FIFO PktIdMap allocator
 * DHD_PKTID_AUDIT_RING: Audit the pktid during producer/consumer ring operation
 *
 * CAUTION: When DHD_PKTID_AUDIT_ENABLED is defined, only one of
 *    DHD_PKTID_AUDIT_MAP or DHD_PKTID_AUDIT_RING may be selected.
 */
#if defined(DHD_PKTID_AUDIT_ENABLED)
#define USE_DHD_PKTID_AUDIT_LOCK 1

/* Audit the pktid during production/consumption of workitems */
#define DHD_PKTID_AUDIT_RING

#if defined(DHD_PKTID_AUDIT_MAP) && defined(DHD_PKTID_AUDIT_RING)
#error "Only one of MAP or RING pktid audit may be enabled at a time."
#endif /* DHD_PKTID_AUDIT_MAP && DHD_PKTID_AUDIT_RING */

#define DHD_DUPLICATE_ALLOC 1
#define DHD_DUPLICATE_FREE 2
#define DHD_TEST_IS_ALLOC 3
#define DHD_TEST_IS_FREE 4

typedef enum dhd_pktid_map_type {
    DHD_PKTID_MAP_TYPE_CTRL = 1,
    DHD_PKTID_MAP_TYPE_TX,
    DHD_PKTID_MAP_TYPE_RX,
    DHD_PKTID_MAP_TYPE_UNKNOWN
} dhd_pktid_map_type_t;

#ifdef USE_DHD_PKTID_AUDIT_LOCK
#define DHD_PKTID_AUDIT_LOCK_INIT(osh) dhd_os_spin_lock_init(osh)
#define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock)                                 \
    dhd_os_spin_lock_deinit(osh, lock)
#define DHD_PKTID_AUDIT_LOCK(lock) dhd_os_spin_lock(lock)
#define DHD_PKTID_AUDIT_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags)
#else
#define DHD_PKTID_AUDIT_LOCK_INIT(osh) (void *)(1)
#define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock)                                 \
    do { /* noop */                                                            \
    } while (0)
#define DHD_PKTID_AUDIT_LOCK(lock) 0
#define DHD_PKTID_AUDIT_UNLOCK(lock, flags)                                    \
    do { /* noop */                                                            \
    } while (0)
#endif /* !USE_DHD_PKTID_AUDIT_LOCK */

#endif /* DHD_PKTID_AUDIT_ENABLED */

#define USE_DHD_PKTID_LOCK 1

#ifdef USE_DHD_PKTID_LOCK
#define DHD_PKTID_LOCK_INIT(osh) dhd_os_spin_lock_init(osh)
#define DHD_PKTID_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock)
#define DHD_PKTID_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock)
#define DHD_PKTID_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags)
#else
#define DHD_PKTID_LOCK_INIT(osh) (void *)(1)
#define DHD_PKTID_LOCK_DEINIT(osh, lock)                                       \
    do {                                                                       \
        BCM_REFERENCE(osh);                                                    \
        BCM_REFERENCE(lock);                                                   \
    } while (0)
/* match the two-argument form used at all call sites */
#define DHD_PKTID_LOCK(lock, flags)                                            \
    do {                                                                       \
        BCM_REFERENCE(lock);                                                   \
        (flags) = 0;                                                           \
    } while (0)
#define DHD_PKTID_UNLOCK(lock, flags)                                          \
    do {                                                                       \
        BCM_REFERENCE(lock);                                                   \
        BCM_REFERENCE(flags);                                                  \
    } while (0)
#endif /* !USE_DHD_PKTID_LOCK */

typedef enum dhd_locker_state {
    LOCKER_IS_FREE,
    LOCKER_IS_BUSY,
    LOCKER_IS_RSVD
} dhd_locker_state_t;

/* Packet metadata saved in packet id mapper */

typedef struct dhd_pktid_item {
    dhd_locker_state_t state; /* tag a locker to be free, busy or reserved */
    uint8 dir;             /* dma map direction (Tx=flush or Rx=invalidate) */
    dhd_pkttype_t pkttype; /* pktlists are maintained based on pkttype */
    uint16 len;            /* length of mapped packet's buffer */
    void *pkt;             /* opaque native pointer to a packet */
    dmaaddr_t pa;          /* physical address of mapped packet's buffer */
    void *dmah;            /* handle to OS specific DMA map */
    void *secdma;
} dhd_pktid_item_t;

typedef uint32 dhd_pktid_key_t;

typedef struct dhd_pktid_map {
    uint32 items; /* total items in map */
    uint32 avail; /* total available items */
    int failures; /* lockers unavailable count */
    /* Spinlock to protect dhd_pktid_map in process/tasklet context */
    void *pktid_lock; /* Used when USE_DHD_PKTID_LOCK is defined */

#if defined(DHD_PKTID_AUDIT_ENABLED)
    void *pktid_audit_lock;
    struct bcm_mwbmap *pktid_audit; /* multi word bitmap based audit */
#endif                              /* DHD_PKTID_AUDIT_ENABLED */
    dhd_pktid_key_t *keys;          /* map_items + 1 unique pkt ids */
    dhd_pktid_item_t lockers[0];    /* metadata storage */
} dhd_pktid_map_t;

/*
 * PktId (Locker) #0 is never allocated and is considered invalid.
 *
 * On request for a pktid, a value DHD_PKTID_INVALID must be treated as a
 * depleted pktid pool and must not be used by the caller.
 *
 * Likewise, a caller must never free a pktid of value DHD_PKTID_INVALID.
 */
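
#if 0
/* Illustrative caller-side check (not compiled): every reserve/alloc call
 * must treat DHD_PKTID_INVALID as pool depletion, per the note above. The
 * 'map' and 'pkt' arguments are placeholders.
 */
static int dhd_pktid_reserve_example(dhd_pub_t *dhd,
                                     dhd_pktid_map_handle_t *map, void *pkt)
{
    uint32 pktid = dhd_pktid_map_reserve(dhd, map, pkt, PKTTYPE_DATA_TX);
    if (pktid == DHD_PKTID_INVALID) {
        return BCME_NORESOURCE; /* depleted pool: caller must back off */
    }
    /* ... populate the locker with dhd_pktid_map_save() ... */
    return BCME_OK;
}
#endif /* 0 */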

#define DHD_PKTID_FREE_LOCKER (FALSE)
#define DHD_PKTID_RSV_LOCKER (TRUE)

#define DHD_PKTID_ITEM_SZ (sizeof(dhd_pktid_item_t))
#define DHD_PKIDMAP_ITEMS(items) (items)
#define DHD_PKTID_MAP_SZ(items)                                                \
    (sizeof(dhd_pktid_map_t) + (DHD_PKTID_ITEM_SZ * ((items) + 1)))
#define DHD_PKTIDMAP_KEYS_SZ(items) (sizeof(dhd_pktid_key_t) * ((items) + 1))
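
/*
 * Sizing note, following directly from the macros above: a map of N items
 * allocates N + 1 lockers and N + 1 keys, because locker #0 is reserved for
 * DHD_PKTID_INVALID. For example, MAX_RX_PKTID (1024) yields 1025 lockers of
 * DHD_PKTID_ITEM_SZ bytes each, plus 1025 dhd_pktid_key_t entries.
 */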

#define DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, map)                              \
    dhd_pktid_map_reset_ioctl((dhd), (map))

/* Convert a packet to a pktid, and save pkt pointer in busy locker */
#define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype)                        \
    dhd_pktid_map_reserve((dhd), (map), (pkt), (pkttype))
/* Reuse a previously reserved locker to save packet params */
#define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dir, dmah,      \
                                 secdma, pkttype)                              \
    dhd_pktid_map_save((dhd), (map), (void *)(pkt), (nkey), (pa),              \
                       (uint32)(len), (uint8)(dir), (void *)(dmah),            \
                       (void *)(secdma), (dhd_pkttype_t)(pkttype))
/* Convert a packet to a pktid, and save packet params in locker */
#define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dir, dmah, secdma,         \
                            pkttype)                                           \
    dhd_pktid_map_alloc((dhd), (map), (void *)(pkt), (pa), (uint32)(len),      \
                        (uint8)(dir), (void *)(dmah), (void *)(secdma),        \
                        (dhd_pkttype_t)(pkttype))

/* Convert pktid to a packet, and free the locker */
#define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype)   \
    dhd_pktid_map_free((dhd), (map), (uint32)(pktid), (dmaaddr_t *)&(pa),      \
                       (uint32 *)&(len), (void **)&(dmah), (void **)&(secdma), \
                       (dhd_pkttype_t)(pkttype), DHD_PKTID_FREE_LOCKER)

/* Convert the pktid to a packet, empty locker, but keep it reserved */
#define DHD_PKTID_TO_NATIVE_RSV(dhd, map, pktid, pa, len, dmah, secdma,        \
                                pkttype)                                       \
    dhd_pktid_map_free((dhd), (map), (uint32)(pktid), (dmaaddr_t *)&(pa),      \
                       (uint32 *)&(len), (void **)&(dmah), (void **)&(secdma), \
                       (dhd_pkttype_t)(pkttype), DHD_PKTID_RSV_LOCKER)

#define DHD_PKTID_AVAIL(map) dhd_pktid_map_avail_cnt(map)

#if defined(DHD_PKTID_AUDIT_ENABLED)

static int dhd_get_pktid_map_type(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map)
{
    dhd_prot_t *prot = dhd->prot;
    int pktid_map_type;

    if (pktid_map == prot->pktid_ctrl_map) {
        pktid_map_type = DHD_PKTID_MAP_TYPE_CTRL;
    } else if (pktid_map == prot->pktid_tx_map) {
        pktid_map_type = DHD_PKTID_MAP_TYPE_TX;
    } else if (pktid_map == prot->pktid_rx_map) {
        pktid_map_type = DHD_PKTID_MAP_TYPE_RX;
    } else {
        pktid_map_type = DHD_PKTID_MAP_TYPE_UNKNOWN;
    }

    return pktid_map_type;
}

/**
 * __dhd_pktid_audit - Use the mwbmap to audit validity of a pktid.
 */
static int __dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map,
                             uint32 pktid, const int test_for,
                             const char *errmsg)
{
#define DHD_PKT_AUDIT_STR "ERROR: %16s Host PktId Audit: "
    struct bcm_mwbmap *handle;
    uint32 flags;
    bool ignore_audit;
    int error = BCME_OK;

    if (pktid_map == (dhd_pktid_map_t *)NULL) {
        DHD_ERROR((DHD_PKT_AUDIT_STR "Pkt id map NULL\n", errmsg));
        return BCME_OK;
    }

    flags = DHD_PKTID_AUDIT_LOCK(pktid_map->pktid_audit_lock);

    handle = pktid_map->pktid_audit;
    if (handle == (struct bcm_mwbmap *)NULL) {
        DHD_ERROR((DHD_PKT_AUDIT_STR "Handle NULL\n", errmsg));
        goto out;
    }

    /* Exclude special pktids from audit */
    ignore_audit = (pktid == DHD_IOCTL_REQ_PKTID) || (pktid == DHD_FAKE_PKTID);
    if (ignore_audit) {
        goto out;
    }

    if ((pktid == DHD_PKTID_INVALID) || (pktid > pktid_map->items)) {
        DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> invalid\n", errmsg, pktid));
        error = BCME_ERROR;
        goto out;
    }

    /* Perform audit */
    switch (test_for) {
        case DHD_DUPLICATE_ALLOC:
            if (!bcm_mwbmap_isfree(handle, pktid)) {
                DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> alloc duplicate\n",
                           errmsg, pktid));
                error = BCME_ERROR;
            } else {
                bcm_mwbmap_force(handle, pktid);
            }
            break;

        case DHD_DUPLICATE_FREE:
            if (bcm_mwbmap_isfree(handle, pktid)) {
                DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> free duplicate\n",
                           errmsg, pktid));
                error = BCME_ERROR;
            } else {
                bcm_mwbmap_free(handle, pktid);
            }
            break;

        case DHD_TEST_IS_ALLOC:
            if (bcm_mwbmap_isfree(handle, pktid)) {
                DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not allocated\n",
                           errmsg, pktid));
                error = BCME_ERROR;
            }
            break;

        case DHD_TEST_IS_FREE:
            if (!bcm_mwbmap_isfree(handle, pktid)) {
                DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not free\n", errmsg,
                           pktid));
                error = BCME_ERROR;
            }
            break;

        default:
            DHD_ERROR(("%s: Invalid test case: %d\n", __FUNCTION__, test_for));
            error = BCME_ERROR;
            break;
    }

out:
    DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);

    if (error != BCME_OK) {
        dhd->pktid_audit_failed = TRUE;
    }

    return error;
}

static int dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map,
                           uint32 pktid, const int test_for, const char *errmsg)
{
    int ret = BCME_OK;
    ret = __dhd_pktid_audit(dhd, pktid_map, pktid, test_for, errmsg);
    if (ret == BCME_ERROR) {
        DHD_ERROR(
            ("%s: Got Pkt Id Audit failure: PKTID<%d> PKTID MAP TYPE<%d>\n",
             __FUNCTION__, pktid, dhd_get_pktid_map_type(dhd, pktid_map)));
        dhd_pktid_error_handler(dhd);
    }

    return ret;
}

#define DHD_PKTID_AUDIT(dhdp, map, pktid, test_for)                            \
    dhd_pktid_audit((dhdp), (dhd_pktid_map_t *)(map), (pktid), (test_for),     \
                    __FUNCTION__)

static int dhd_pktid_audit_ring_debug(dhd_pub_t *dhdp, dhd_pktid_map_t *map,
                                      uint32 pktid, const int test_for,
                                      void *msg, uint32 msg_len,
                                      const char *func)
{
    int ret = BCME_OK;

    if (dhd_query_bus_erros(dhdp)) {
        return BCME_ERROR;
    }

    ret = __dhd_pktid_audit(dhdp, map, pktid, test_for, func);
    if (ret == BCME_ERROR) {
        DHD_ERROR(
            ("%s: Got Pkt Id Audit failure: PKTID<%d> PKTID MAP TYPE<%d>\n",
             __FUNCTION__, pktid, dhd_get_pktid_map_type(dhdp, map)));
        prhex(func, (uchar *)msg, msg_len);
        dhd_pktid_error_handler(dhdp);
    }
    return ret;
}
#define DHD_PKTID_AUDIT_RING_DEBUG(dhdp, map, pktid, test_for, msg, msg_len)   \
    dhd_pktid_audit_ring_debug((dhdp), (dhd_pktid_map_t *)(map), (pktid),      \
                               (test_for), msg, msg_len, __FUNCTION__)

#endif /* DHD_PKTID_AUDIT_ENABLED */

/**
 * +---------------------------------------------------------------------------+
 * Packet to Packet Id mapper using a <numbered_key, locker> paradigm.
 *
 * dhd_pktid_map manages a set of unique Packet Ids range[1..MAX_xxx_PKTID].
 *
 * dhd_pktid_map_alloc() may be used to save some packet metadata, and a unique
 * packet id is returned. This unique packet id may be used to retrieve the
 * previously saved packet metadata, using dhd_pktid_map_free(). On invocation
 * of dhd_pktid_map_free(), the unique packet id is essentially freed. A
 * subsequent call to dhd_pktid_map_alloc() may reuse this packet id.
 *
 * Implementation Note:
 * Convert this into a <key, locker> abstraction and place into bcmutils!
 * The locker abstraction should treat contents as opaque storage, and a
 * callback should be registered to handle busy lockers on destructor.
 *
 * +---------------------------------------------------------------------------+
 */
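
#if 0
/* Illustrative alloc/free round trip (not compiled): maps a packet to a
 * pktid and later retrieves it, mirroring the lifecycle described above.
 * All DMA parameters are placeholders supplied by the real caller.
 */
static void dhd_pktid_roundtrip_example(dhd_pub_t *dhd,
                                        dhd_pktid_map_handle_t *map,
                                        void *pkt, dmaaddr_t pa, uint32 len,
                                        void *dmah, void *secdma)
{
    uint32 pktid;
    dmaaddr_t ret_pa;
    uint32 ret_len;
    void *ret_dmah, *ret_secdma, *ret_pkt;

    /* save <pkt, pa, len, ...> and obtain a unique id for the dongle */
    pktid = DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, DMA_TX, dmah,
                                secdma, PKTTYPE_DATA_TX);
    if (pktid == DHD_PKTID_INVALID) {
        return; /* depleted pool */
    }

    /* on completion, the id returned by the dongle recovers the packet */
    ret_pkt = DHD_PKTID_TO_NATIVE(dhd, map, pktid, ret_pa, ret_len,
                                  ret_dmah, ret_secdma, PKTTYPE_DATA_TX);
    BCM_REFERENCE(ret_pkt); /* the real caller unmaps and frees ret_pkt */
}
#endif /* 0 */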

/** Allocate and initialize a mapper of num_items <numbered_key, locker> */

static dhd_pktid_map_handle_t *dhd_pktid_map_init(dhd_pub_t *dhd,
                                                  uint32 num_items)
{
    void *osh;
    uint32 nkey;
    dhd_pktid_map_t *map;
    uint32 dhd_pktid_map_sz;
    uint32 map_items;
    uint32 map_keys_sz;
    osh = dhd->osh;

    dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(num_items);

    map = (dhd_pktid_map_t *)VMALLOC(osh, dhd_pktid_map_sz);
    if (map == NULL) {
        DHD_ERROR(("%s:%d: VMALLOC failed for size %d\n", __FUNCTION__,
                   __LINE__, dhd_pktid_map_sz));
        return (dhd_pktid_map_handle_t *)NULL;
    }

    /* VMALLOC does not zero; clear fields read on the error path below */
    map->pktid_lock = NULL;
    map->keys = NULL;
#if defined(DHD_PKTID_AUDIT_ENABLED)
    map->pktid_audit = (struct bcm_mwbmap *)NULL;
    map->pktid_audit_lock = NULL;
#endif /* DHD_PKTID_AUDIT_ENABLED */

    map->items = num_items;
    map->avail = num_items;

    map_items = DHD_PKIDMAP_ITEMS(map->items);

    map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);

    /* Initialize the lock that protects this structure */
    map->pktid_lock = DHD_PKTID_LOCK_INIT(osh);
    if (map->pktid_lock == NULL) {
        DHD_ERROR(("%s:%d: Lock init failed \r\n", __FUNCTION__, __LINE__));
        goto error;
    }

    map->keys = (dhd_pktid_key_t *)MALLOC(osh, map_keys_sz);
    if (map->keys == NULL) {
        DHD_ERROR(("%s:%d: MALLOC failed for map->keys size %d\n", __FUNCTION__,
                   __LINE__, map_keys_sz));
        goto error;
    }

#if defined(DHD_PKTID_AUDIT_ENABLED)
    /* Incarnate a hierarchical multiword bitmap for auditing pktid allocator */
    map->pktid_audit = bcm_mwbmap_init(osh, map_items + 1);
    if (map->pktid_audit == (struct bcm_mwbmap *)NULL) {
        DHD_ERROR(
            ("%s:%d: pktid_audit init failed\r\n", __FUNCTION__, __LINE__));
        goto error;
    } else {
        DHD_ERROR(("%s:%d: pktid_audit init succeeded %d\n", __FUNCTION__,
                   __LINE__, map_items + 1));
    }
    map->pktid_audit_lock = DHD_PKTID_AUDIT_LOCK_INIT(osh);
#endif /* DHD_PKTID_AUDIT_ENABLED */

    for (nkey = 1; nkey <= map_items; nkey++) { /* locker #0 is reserved */
        map->keys[nkey] = nkey;                 /* populate with unique keys */
        map->lockers[nkey].state = LOCKER_IS_FREE;
        map->lockers[nkey].pkt = NULL; /* bzero: redundant */
        map->lockers[nkey].len = 0;
    }

    /* Reserve pktid #0, i.e. DHD_PKTID_INVALID, to be inuse */
    map->lockers[DHD_PKTID_INVALID].state =
        LOCKER_IS_BUSY;                         /* tag locker #0 as inuse */
    map->lockers[DHD_PKTID_INVALID].pkt = NULL; /* bzero: redundant */
    map->lockers[DHD_PKTID_INVALID].len = 0;

#if defined(DHD_PKTID_AUDIT_ENABLED)
    /* do not use dhd_pktid_audit() here, use bcm_mwbmap_force directly */
    bcm_mwbmap_force(map->pktid_audit, DHD_PKTID_INVALID);
#endif /* DHD_PKTID_AUDIT_ENABLED */

    return (dhd_pktid_map_handle_t *)map; /* opaque handle */

error:
    if (map) {
#if defined(DHD_PKTID_AUDIT_ENABLED)
        if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
            bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */
            map->pktid_audit = (struct bcm_mwbmap *)NULL;
            if (map->pktid_audit_lock) {
                DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock);
            }
        }
#endif /* DHD_PKTID_AUDIT_ENABLED */

        if (map->keys) {
            MFREE(osh, map->keys, map_keys_sz);
        }

        if (map->pktid_lock) {
            DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock);
        }

        VMFREE(osh, map, dhd_pktid_map_sz);
    }
    return (dhd_pktid_map_handle_t *)NULL;
}

/**
 * Retrieve all allocated keys and free all <numbered_key, locker>.
 * Freeing implies: unmapping the buffers and freeing the native packets.
 * This could have been a callback registered with the pktid mapper.
 */
static void dhd_pktid_map_reset(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
{
    void *osh;
    uint32 nkey;
    dhd_pktid_map_t *map;
    dhd_pktid_item_t *locker;
    uint32 map_items;
    unsigned long flags;
    bool data_tx = FALSE;

    map = (dhd_pktid_map_t *)handle;
    DHD_PKTID_LOCK(map->pktid_lock, flags);
    osh = dhd->osh;

    map_items = DHD_PKIDMAP_ITEMS(map->items);
    /* skip reserved KEY #0, and start from 1 */

    for (nkey = 1; nkey <= map_items; nkey++) {
        if (map->lockers[nkey].state == LOCKER_IS_BUSY) {
            locker = &map->lockers[nkey];
            locker->state = LOCKER_IS_FREE;
            data_tx = (locker->pkttype == PKTTYPE_DATA_TX);
            if (data_tx) {
                OSL_ATOMIC_DEC(dhd->osh, &dhd->prot->active_tx_count);
            }

#ifdef DHD_PKTID_AUDIT_RING
            DHD_PKTID_AUDIT(dhd, map, nkey,
                            DHD_DUPLICATE_FREE); /* duplicate frees */
#endif                                           /* DHD_PKTID_AUDIT_RING */
#ifdef DHD_MAP_PKTID_LOGGING
            DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap, locker->pa, nkey,
                          locker->len, locker->pkttype);
#endif /* DHD_MAP_PKTID_LOGGING */

            {
                if (SECURE_DMA_ENAB(dhd->osh)) {
                    SECURE_DMA_UNMAP(osh, locker->pa, locker->len, locker->dir,
                                     0, locker->dmah, locker->secdma, 0);
                } else {
                    DMA_UNMAP(osh, locker->pa, locker->len, locker->dir, 0,
                              locker->dmah);
                }
            }
            dhd_prot_packet_free(dhd, (ulong *)locker->pkt, locker->pkttype,
                                 data_tx);
        } else {
#ifdef DHD_PKTID_AUDIT_RING
            DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
#endif /* DHD_PKTID_AUDIT_RING */
        }
        map->keys[nkey] = nkey; /* populate with unique keys */
    }

    map->avail = map_items;
    memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items);
    DHD_PKTID_UNLOCK(map->pktid_lock, flags);
}

#ifdef IOCTLRESP_USE_CONSTMEM
/** Called in detach scenario. Releasing IOCTL buffers. */
static void dhd_pktid_map_reset_ioctl(dhd_pub_t *dhd,
                                      dhd_pktid_map_handle_t *handle)
{
    uint32 nkey;
    dhd_pktid_map_t *map;
    dhd_pktid_item_t *locker;
    uint32 map_items;
    unsigned long flags;

    map = (dhd_pktid_map_t *)handle;
    DHD_PKTID_LOCK(map->pktid_lock, flags);

    map_items = DHD_PKIDMAP_ITEMS(map->items);
    /* skip reserved KEY #0, and start from 1 */
    for (nkey = 1; nkey <= map_items; nkey++) {
        if (map->lockers[nkey].state == LOCKER_IS_BUSY) {
            dhd_dma_buf_t retbuf;

#ifdef DHD_PKTID_AUDIT_RING
            DHD_PKTID_AUDIT(dhd, map, nkey,
                            DHD_DUPLICATE_FREE); /* duplicate frees */
#endif                                           /* DHD_PKTID_AUDIT_RING */

            locker = &map->lockers[nkey];
            retbuf.va = locker->pkt;
            retbuf.len = locker->len;
            retbuf.pa = locker->pa;
            retbuf.dmah = locker->dmah;
            retbuf.secdma = locker->secdma;

            free_ioctl_return_buffer(dhd, &retbuf);
        } else {
#ifdef DHD_PKTID_AUDIT_RING
            DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
#endif /* DHD_PKTID_AUDIT_RING */
        }
        map->keys[nkey] = nkey; /* populate with unique keys */
    }

    map->avail = map_items;
    memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items);
    DHD_PKTID_UNLOCK(map->pktid_lock, flags);
}
#endif /* IOCTLRESP_USE_CONSTMEM */

/**
 * Free the pktid map.
 */
static void dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
{
    dhd_pktid_map_t *map;
    uint32 dhd_pktid_map_sz;
    uint32 map_keys_sz;

    if (handle == NULL) {
        return;
    }

    /* Free any pending packets */
    dhd_pktid_map_reset(dhd, handle);

    map = (dhd_pktid_map_t *)handle;
    dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
    map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);

    DHD_PKTID_LOCK_DEINIT(dhd->osh, map->pktid_lock);

#if defined(DHD_PKTID_AUDIT_ENABLED)
    if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
        bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */
        map->pktid_audit = (struct bcm_mwbmap *)NULL;
        if (map->pktid_audit_lock) {
            DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock);
        }
    }
#endif /* DHD_PKTID_AUDIT_ENABLED */
    MFREE(dhd->osh, map->keys, map_keys_sz);
    VMFREE(dhd->osh, handle, dhd_pktid_map_sz);
}

#ifdef IOCTLRESP_USE_CONSTMEM
static void dhd_pktid_map_fini_ioctl(dhd_pub_t *dhd,
                                     dhd_pktid_map_handle_t *handle)
{
    dhd_pktid_map_t *map;
    uint32 dhd_pktid_map_sz;
    uint32 map_keys_sz;

    if (handle == NULL) {
        return;
    }

    /* Free any pending packets */
    dhd_pktid_map_reset_ioctl(dhd, handle);

    map = (dhd_pktid_map_t *)handle;
    dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
    map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);

    DHD_PKTID_LOCK_DEINIT(dhd->osh, map->pktid_lock);

#if defined(DHD_PKTID_AUDIT_ENABLED)
    if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
        bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */
        map->pktid_audit = (struct bcm_mwbmap *)NULL;
        if (map->pktid_audit_lock) {
            DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock);
        }
    }
#endif /* DHD_PKTID_AUDIT_ENABLED */

    MFREE(dhd->osh, map->keys, map_keys_sz);
    VMFREE(dhd->osh, handle, dhd_pktid_map_sz);
}
#endif /* IOCTLRESP_USE_CONSTMEM */

/** Get the pktid free count */
static INLINE uint32 BCMFASTPATH
dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle)
{
    dhd_pktid_map_t *map;
    uint32 avail;
    unsigned long flags;

    ASSERT(handle != NULL);
    map = (dhd_pktid_map_t *)handle;

    DHD_PKTID_LOCK(map->pktid_lock, flags);
    avail = map->avail;
    DHD_PKTID_UNLOCK(map->pktid_lock, flags);

    return avail;
}

/**
 * dhd_pktid_map_reserve - reserve a unique numbered key. A reserved locker is
 * not yet populated. Invoke the pktid save api to populate the packet
 * parameters into the locker. This function is not reentrant; serialization
 * is the caller's responsibility. The caller must treat a returned value of
 * DHD_PKTID_INVALID as a failure case, implying a depleted pool of pktids.
 */
static INLINE uint32 dhd_pktid_map_reserve(dhd_pub_t *dhd,
                                           dhd_pktid_map_handle_t *handle,
                                           void *pkt, dhd_pkttype_t pkttype)
{
    uint32 nkey;
    dhd_pktid_map_t *map;
    dhd_pktid_item_t *locker;
    unsigned long flags;

    ASSERT(handle != NULL);
    map = (dhd_pktid_map_t *)handle;

    DHD_PKTID_LOCK(map->pktid_lock, flags);

    if ((int)(map->avail) <= 0) { /* no more pktids to allocate */
        map->failures++;
        DHD_INFO(("%s:%d: failed, no free keys\n", __FUNCTION__, __LINE__));
        DHD_PKTID_UNLOCK(map->pktid_lock, flags);
        return DHD_PKTID_INVALID; /* failed alloc request */
    }

    ASSERT(map->avail <= map->items);
    nkey = map->keys[map->avail]; /* fetch a free locker, pop stack */

    if ((map->avail > map->items) || (nkey > map->items)) {
        map->failures++;
        DHD_ERROR(("%s:%d: failed to allocate a new pktid,"
                   " map->avail<%u>, nkey<%u>, pkttype<%u>\n",
                   __FUNCTION__, __LINE__, map->avail, nkey, pkttype));
        DHD_PKTID_UNLOCK(map->pktid_lock, flags);
        return DHD_PKTID_INVALID; /* failed alloc request */
    }

    locker = &map->lockers[nkey]; /* save packet metadata in locker */
    map->avail--;
    locker->pkt = pkt; /* pkt is saved, other params not yet saved. */
    locker->len = 0;
    locker->state = LOCKER_IS_BUSY; /* reserve this locker */

    DHD_PKTID_UNLOCK(map->pktid_lock, flags);

    ASSERT(nkey != DHD_PKTID_INVALID);

    return nkey; /* return locker's numbered key */
}
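
/*
 * Worked example of the key stack in dhd_pktid_map_reserve() (illustrative,
 * items = 4): after init, keys[1..4] = {1, 2, 3, 4} with avail = 4. A reserve
 * pops keys[4] (nkey 4) and decrements avail to 3; the next reserve pops
 * keys[3], and so on. Freeing a pktid later pushes its key back onto the
 * stack so it may be reused, as described in the paradigm comment above.
 * Locker #0 is never on the stack since pktid 0 is DHD_PKTID_INVALID.
 */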

/*
 * dhd_pktid_map_save - Save a packet's parameters into the locker
 * corresponding to a previously reserved unique numbered key.
 */
static INLINE void dhd_pktid_map_save(dhd_pub_t *dhd,
                                      dhd_pktid_map_handle_t *handle, void *pkt,
                                      uint32 nkey, dmaaddr_t pa, uint32 len,
                                      uint8 dir, void *dmah, void *secdma,
                                      dhd_pkttype_t pkttype)
{
    dhd_pktid_map_t *map;
    dhd_pktid_item_t *locker;
    unsigned long flags;

    ASSERT(handle != NULL);
    map = (dhd_pktid_map_t *)handle;

    DHD_PKTID_LOCK(map->pktid_lock, flags);

    if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
        DHD_ERROR(("%s:%d: Error! saving invalid pktid<%u> pkttype<%u>\n",
                   __FUNCTION__, __LINE__, nkey, pkttype));
        DHD_PKTID_UNLOCK(map->pktid_lock, flags);
#ifdef DHD_FW_COREDUMP
        if (dhd->memdump_enabled) {
            /* collect core dump */
            dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
            dhd_bus_mem_dump(dhd);
        }
#else
        ASSERT(0);
#endif /* DHD_FW_COREDUMP */
        return;
    }

    locker = &map->lockers[nkey];

    ASSERT(((locker->state == LOCKER_IS_BUSY) && (locker->pkt == pkt)) ||
           ((locker->state == LOCKER_IS_RSVD) && (locker->pkt == NULL)));

    /* store contents in locker */
    locker->dir = dir;
    locker->pa = pa;
    locker->len = (uint16)len; /* 16bit len */
    locker->dmah = dmah;       /* DMA map handle */
    locker->secdma = secdma;
    locker->pkttype = pkttype;
    locker->pkt = pkt;
    locker->state = LOCKER_IS_BUSY; /* make this locker busy */
#ifdef DHD_MAP_PKTID_LOGGING
    DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_map, pa, nkey, len, pkttype);
#endif /* DHD_MAP_PKTID_LOGGING */
    DHD_PKTID_UNLOCK(map->pktid_lock, flags);
}
2502 
2503 /**
2504  * dhd_pktid_map_alloc - Allocate a unique numbered key and save the packet
2505  * contents into the corresponding locker. Return the numbered key.
2506  */
dhd_pktid_map_alloc(dhd_pub_t * dhd,dhd_pktid_map_handle_t * handle,void * pkt,dmaaddr_t pa,uint32 len,uint8 dir,void * dmah,void * secdma,dhd_pkttype_t pkttype)2507 static uint32 BCMFASTPATH dhd_pktid_map_alloc(
2508     dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt, dmaaddr_t pa,
2509     uint32 len, uint8 dir, void *dmah, void *secdma, dhd_pkttype_t pkttype)
2510 {
2511     uint32 nkey;
2512 
2513     nkey = dhd_pktid_map_reserve(dhd, handle, pkt, pkttype);
2514     if (nkey != DHD_PKTID_INVALID) {
2515         dhd_pktid_map_save(dhd, handle, pkt, nkey, pa, len, dir, dmah, secdma,
2516                            pkttype);
2517     }
2518 
2519     return nkey;
2520 }
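/*
 * Illustrative sketch (not a verbatim call site): how the one-step alloc
 * interface above pairs with dhd_pktid_map_free() below. The flow shown is
 * an assumed typical usage, with error handling reduced to the essentials.
 *
 *   uint32 pktid = dhd_pktid_map_alloc(dhd, map, pkt, pa, len, DMA_TX,
 *                                      dmah, secdma, PKTTYPE_DATA_TX);
 *   if (pktid == DHD_PKTID_INVALID)
 *       return BCME_NORESOURCE;  // map exhausted; caller must back off
 *
 *   // ... pktid travels to the dongle in a workitem; on completion ...
 *   pkt = dhd_pktid_map_free(dhd, map, pktid, &pa, &len, &dmah, &secdma,
 *                            PKTTYPE_DATA_TX, DHD_PKTID_FREE_LOCKER);
 */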
2521 
2522 /**
2523  * dhd_pktid_map_free - Given a numbered key, return the locker contents.
2524  * dhd_pktid_map_free() is not reentrant; serialization is the caller's responsibility.
2525  * Caller may not free a pktid value DHD_PKTID_INVALID or an arbitrary pktid
2526  * value. Only a previously allocated pktid may be freed.
2527  */
2528 static void *BCMFASTPATH
2529 dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, uint32 nkey,
2530                    dmaaddr_t *pa, uint32 *len, void **dmah, void **secdma,
2531                    dhd_pkttype_t pkttype, bool rsv_locker)
2532 {
2533     dhd_pktid_map_t *map;
2534     dhd_pktid_item_t *locker;
2535     void *pkt;
2536     unsigned long long locker_addr;
2537     unsigned long flags;
2538 
2539     ASSERT(handle != NULL);
2540 
2541     map = (dhd_pktid_map_t *)handle;
2542 
2543     DHD_PKTID_LOCK(map->pktid_lock, flags);
2544 
2545     if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
2546         DHD_ERROR(("%s:%d: Error! Try to free invalid pktid<%u>, pkttype<%d>\n",
2547                    __FUNCTION__, __LINE__, nkey, pkttype));
2548         DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2549 #ifdef DHD_FW_COREDUMP
2550         if (dhd->memdump_enabled) {
2551             /* collect core dump */
2552             dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2553             dhd_bus_mem_dump(dhd);
2554         }
2555 #else
2556         ASSERT(0);
2557 #endif /* DHD_FW_COREDUMP */
2558         return NULL;
2559     }
2560 
2561     locker = &map->lockers[nkey];
2562 
2563 #if defined(DHD_PKTID_AUDIT_MAP)
2564     DHD_PKTID_AUDIT(dhd, map, nkey,
2565                     DHD_DUPLICATE_FREE); /* Audit duplicate FREE */
2566 #endif                                   /* DHD_PKTID_AUDIT_MAP */
2567 
2568     /* Debug check for cloned numbered key */
2569     if (locker->state == LOCKER_IS_FREE) {
2570         DHD_ERROR(("%s:%d: Error! freeing already freed invalid pktid<%u>\n",
2571                    __FUNCTION__, __LINE__, nkey));
2572         DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2573 #ifdef DHD_FW_COREDUMP
2574         if (dhd->memdump_enabled) {
2575             /* collect core dump */
2576             dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2577             dhd_bus_mem_dump(dhd);
2578         }
2579 #else
2580         ASSERT(0);
2581 #endif /* DHD_FW_COREDUMP */
2582         return NULL;
2583     }
2584 
2585     /* Check the colour of the buffer, i.e. a buffer posted for TX
2586      * should be freed on TX completion. Similarly, a buffer posted for
2587      * IOCTL should be freed on IOCTL completion, etc.
2588      */
2589     if ((pkttype != PKTTYPE_NO_CHECK) && (locker->pkttype != pkttype)) {
2590         DHD_ERROR(("%s:%d: Error! Invalid Buffer Free for pktid<%u> \n",
2591                    __FUNCTION__, __LINE__, nkey));
2592 #ifdef BCMDMA64OSL
2593         PHYSADDRTOULONG(locker->pa, locker_addr);
2594 #else
2595         locker_addr = PHYSADDRLO(locker->pa);
2596 #endif /* BCMDMA64OSL */
2597         DHD_ERROR(("%s:%d: locker->state <%d>, locker->pkttype <%d>,"
2598                    "pkttype <%d> locker->pa <0x%llx> \n",
2599                    __FUNCTION__, __LINE__, locker->state, locker->pkttype,
2600                    pkttype, locker_addr));
2601         DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2602 #ifdef DHD_FW_COREDUMP
2603         if (dhd->memdump_enabled) {
2604             /* collect core dump */
2605             dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2606             dhd_bus_mem_dump(dhd);
2607         }
2608 #else
2609         ASSERT(0);
2610 #endif /* DHD_FW_COREDUMP */
2611         return NULL;
2612     }
2613 
2614     if (rsv_locker == DHD_PKTID_FREE_LOCKER) {
2615         map->avail++;
2616         map->keys[map->avail] = nkey;   /* make this numbered key available */
2617         locker->state = LOCKER_IS_FREE; /* open and free Locker */
2618     } else {
2619         /* pktid will be reused, but the locker does not have a valid pkt */
2620         locker->state = LOCKER_IS_RSVD;
2621     }
2622 
2623 #if defined(DHD_PKTID_AUDIT_MAP)
2624     DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
2625 #endif /* DHD_PKTID_AUDIT_MAP */
2626 #ifdef DHD_MAP_PKTID_LOGGING
2627     DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap, locker->pa, nkey,
2628                   (uint32)locker->len, pkttype);
2629 #endif /* DHD_MAP_PKTID_LOGGING */
2630 
2631     *pa = locker->pa; /* return contents of locker */
2632     *len = (uint32)locker->len;
2633     *dmah = locker->dmah;
2634     *secdma = locker->secdma;
2635 
2636     pkt = locker->pkt;
2637     locker->pkt = NULL; /* Clear pkt */
2638     locker->len = 0;
2639 
2640     DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2641 
2642     return pkt;
2643 }
2644 
2645 #else /* ! DHD_PCIE_PKTID */
2646 
2647 typedef struct pktlist {
2648     PKT_LIST *tx_pkt_list;   /* list for tx packets */
2649     PKT_LIST *rx_pkt_list;   /* list for rx packets */
2650     PKT_LIST *ctrl_pkt_list; /* list for ioctl/event buf post */
2651 } pktlists_t;
2652 
2653 /*
2654  * Given that each workitem only uses a 32bit pktid, only 32bit hosts may avail
2655  * of a one-to-one mapping between a 32bit pktptr and a 32bit pktid.
2656  *
2657  * - When PKTIDMAP is not used, DHD_NATIVE_TO_PKTID variants will never fail.
2658  * - Neither DHD_NATIVE_TO_PKTID nor DHD_PKTID_TO_NATIVE need to be protected by
2659  *   a lock.
2660  * - Hence DHD_PKTID_INVALID is not defined when DHD_PCIE_PKTID is undefined.
2661  */
2662 #define DHD_PKTID32(pktptr32) ((uint32)(pktptr32))
2663 #define DHD_PKTPTR32(pktid32) ((void *)(pktid32))
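/*
 * Minimal sketch (illustrative only): on a 32bit host the pktid is just the
 * packet pointer reinterpreted, so the mapping is a pair of casts and the
 * round trip is the identity.
 *
 *   void *pkt = PKTGET(osh, len, TRUE);   // native packet pointer
 *   uint32 id = DHD_PKTID32(pkt);         // pointer -> 32bit pktid
 *   ASSERT(DHD_PKTPTR32(id) == pkt);      // pktid -> pointer round trip
 */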
2664 
2665 static INLINE uint32 dhd_native_to_pktid(dhd_pktid_map_handle_t *map,
2666                                          void *pktptr32, dmaaddr_t pa,
2667                                          uint32 dma_len, void *dmah,
2668                                          void *secdma, dhd_pkttype_t pkttype);
2669 static INLINE void *dhd_pktid_to_native(dhd_pktid_map_handle_t *map,
2670                                         uint32 pktid32, dmaaddr_t *pa,
2671                                         uint32 *dma_len, void **dmah,
2672                                         void **secdma, dhd_pkttype_t pkttype);
2673 
2674 static dhd_pktid_map_handle_t *dhd_pktid_map_init(dhd_pub_t *dhd,
2675                                                   uint32 num_items)
2676 {
2677     osl_t *osh = dhd->osh;
2678     pktlists_t *handle = NULL;
2679 
2680     if ((handle = (pktlists_t *)MALLOCZ(osh, sizeof(pktlists_t))) == NULL) {
2681         DHD_ERROR(("%s:%d: MALLOC failed for lists allocation, size=%d\n",
2682                    __FUNCTION__, __LINE__, sizeof(pktlists_t)));
2683         goto error_done;
2684     }
2685 
2686     if ((handle->tx_pkt_list = (PKT_LIST *)MALLOC(osh, sizeof(PKT_LIST))) ==
2687         NULL) {
2688         DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
2689                    __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
2690         goto error;
2691     }
2692 
2693     if ((handle->rx_pkt_list = (PKT_LIST *)MALLOC(osh, sizeof(PKT_LIST))) ==
2694         NULL) {
2695         DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
2696                    __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
2697         goto error;
2698     }
2699 
2700     if ((handle->ctrl_pkt_list = (PKT_LIST *)MALLOC(osh, sizeof(PKT_LIST))) ==
2701         NULL) {
2702         DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
2703                    __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
2704         goto error;
2705     }
2706 
2707     PKTLIST_INIT(handle->tx_pkt_list);
2708     PKTLIST_INIT(handle->rx_pkt_list);
2709     PKTLIST_INIT(handle->ctrl_pkt_list);
2710 
2711     return (dhd_pktid_map_handle_t *)handle;
2712 
2713 error:
2714     if (handle->ctrl_pkt_list) {
2715         MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
2716     }
2717 
2718     if (handle->rx_pkt_list) {
2719         MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
2720     }
2721 
2722     if (handle->tx_pkt_list) {
2723         MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
2724     }
2725 
2726     if (handle) {
2727         MFREE(osh, handle, sizeof(pktlists_t));
2728     }
2729 
2730 error_done:
2731     return (dhd_pktid_map_handle_t *)NULL;
2732 }
2733 
2734 static void dhd_pktid_map_reset(dhd_pub_t *dhd, pktlists_t *handle)
2735 {
2736     osl_t *osh = dhd->osh;
2737 
2738     if (handle->ctrl_pkt_list) {
2739         PKTLIST_FINI(handle->ctrl_pkt_list);
2740         MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
2741     }
2742 
2743     if (handle->rx_pkt_list) {
2744         PKTLIST_FINI(handle->rx_pkt_list);
2745         MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
2746     }
2747 
2748     if (handle->tx_pkt_list) {
2749         PKTLIST_FINI(handle->tx_pkt_list);
2750         MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
2751     }
2752 }
2753 
2754 static void dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map)
2755 {
2756     osl_t *osh = dhd->osh;
2757     pktlists_t *handle = (pktlists_t *)map;
2758 
2759     ASSERT(handle != NULL);
2760     if (handle == (pktlists_t *)NULL) {
2761         return;
2762     }
2763 
2764     dhd_pktid_map_reset(dhd, handle);
2765 
2766     if (handle) {
2767         MFREE(osh, handle, sizeof(pktlists_t));
2768     }
2769 }
2770 
2771 /** Save dma parameters into the packet's pkttag and convert a pktptr to pktid
2772  */
2773 static INLINE uint32 dhd_native_to_pktid(dhd_pktid_map_handle_t *map,
2774                                          void *pktptr32, dmaaddr_t pa,
2775                                          uint32 dma_len, void *dmah,
2776                                          void *secdma, dhd_pkttype_t pkttype)
2777 {
2778     pktlists_t *handle = (pktlists_t *)map;
2779     ASSERT(pktptr32 != NULL);
2780     DHD_PKT_SET_DMA_LEN(pktptr32, dma_len);
2781     DHD_PKT_SET_DMAH(pktptr32, dmah);
2782     DHD_PKT_SET_PA(pktptr32, pa);
2783     DHD_PKT_SET_SECDMA(pktptr32, secdma);
2784 
2785     if (pkttype == PKTTYPE_DATA_TX) {
2786         PKTLIST_ENQ(handle->tx_pkt_list, pktptr32);
2787     } else if (pkttype == PKTTYPE_DATA_RX) {
2788         PKTLIST_ENQ(handle->rx_pkt_list, pktptr32);
2789     } else {
2790         PKTLIST_ENQ(handle->ctrl_pkt_list, pktptr32);
2791     }
2792 
2793     return DHD_PKTID32(pktptr32);
2794 }
2795 
2796 /** Convert a pktid to pktptr and retrieve saved dma parameters from packet */
2797 static INLINE void *dhd_pktid_to_native(dhd_pktid_map_handle_t *map,
2798                                         uint32 pktid32, dmaaddr_t *pa,
2799                                         uint32 *dma_len, void **dmah,
2800                                         void **secdma, dhd_pkttype_t pkttype)
2801 {
2802     pktlists_t *handle = (pktlists_t *)map;
2803     void *pktptr32;
2804 
2805     ASSERT(pktid32 != 0U);
2806     pktptr32 = DHD_PKTPTR32(pktid32);
2807     *dma_len = DHD_PKT_GET_DMA_LEN(pktptr32);
2808     *dmah = DHD_PKT_GET_DMAH(pktptr32);
2809     *pa = DHD_PKT_GET_PA(pktptr32);
2810     *secdma = DHD_PKT_GET_SECDMA(pktptr32);
2811 
2812     if (pkttype == PKTTYPE_DATA_TX) {
2813         PKTLIST_UNLINK(handle->tx_pkt_list, pktptr32);
2814     } else if (pkttype == PKTTYPE_DATA_RX) {
2815         PKTLIST_UNLINK(handle->rx_pkt_list, pktptr32);
2816     } else {
2817         PKTLIST_UNLINK(handle->ctrl_pkt_list, pktptr32);
2818     }
2819 
2820     return pktptr32;
2821 }
2822 
2823 #define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype) DHD_PKTID32(pkt)
2824 
2825 #define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dma_dir, dmah,  \
2826                                  secdma, pkttype)                              \
2827     ( {                                                                        \
2828         BCM_REFERENCE(dhd);                                                    \
2829         BCM_REFERENCE(nkey);                                                   \
2830         BCM_REFERENCE(dma_dir);                                                \
2831         dhd_native_to_pktid((dhd_pktid_map_handle_t *)map, (pkt), (pa), (len), \
2832                             (dmah), (secdma), (dhd_pkttype_t)(pkttype));       \
2833     })
2834 
2835 #define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dma_dir, dmah, secdma,     \
2836                             pkttype)                                           \
2837     ( {                                                                        \
2838         BCM_REFERENCE(dhd);                                                    \
2839         BCM_REFERENCE(dma_dir);                                                \
2840         dhd_native_to_pktid((dhd_pktid_map_handle_t *)map, (pkt), (pa), (len), \
2841                             (dmah), (secdma), (dhd_pkttype_t)(pkttype));       \
2842     })
2843 
2844 #define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype)   \
2845     ( {                                                                        \
2846         BCM_REFERENCE(dhd);                                                    \
2847         BCM_REFERENCE(pkttype);                                                \
2848         dhd_pktid_to_native((dhd_pktid_map_handle_t *)map, (uint32)(pktid),    \
2849                             (dmaaddr_t *)&(pa), (uint32 *)&(len),              \
2850                             (void **)&(dmah), (void **)&secdma,                \
2851                             (dhd_pkttype_t)(pkttype));                         \
2852     })
2853 
2854 #define DHD_PKTID_AVAIL(map) (~0)
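/*
 * Usage sketch for the wrappers above (assumed shape; the real call sites
 * live elsewhere in this file): the post and completion paths pair up as
 *
 *   // posting: stash dma params in the pkttag, derive the 32bit pktid
 *   pktid = DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, DMA_RX,
 *                               dmah, secdma, PKTTYPE_DATA_RX);
 *
 *   // completion: recover the native packet and its dma params
 *   pkt = DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len,
 *                             dmah, secdma, PKTTYPE_DATA_RX);
 */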
2855 
2856 #endif /* ! DHD_PCIE_PKTID */
2857 
2858 /* +------------------ End of PCIE DHD PKTID MAPPER  -----------------------+ */
2859 
2860 /**
2861  * The PCIE FD protocol layer is constructed in two phases:
2862  *    Phase 1. dhd_prot_attach()
2863  *    Phase 2. dhd_prot_init()
2864  *
2865  * dhd_prot_attach() - Allocates a dhd_prot_t object and resets all its fields.
2866  * All Common rings are also attached (msgbuf_ring_t objects are allocated
2867  * with DMA-able buffers).
2868  * All dhd_dma_buf_t objects are also allocated here.
2869  *
2870  * As dhd_prot_attach is invoked before the pcie_shared object is read, any
2871  * initialization of objects that requires information advertised by the dongle
2872  * may not be performed here.
2873  * E.g. the number of TxPost flowrings is not known at this point, nor do
2874  * we know which form of D2H DMA sync mechanism is advertised by the dongle, or
2875  * whether the dongle supports DMA-ing of WR/RD indices for the H2D and/or D2H
2876  * rings (common + flow).
2877  *
2878  * dhd_prot_init() is invoked after the bus layer has fetched the information
2879  * advertised by the dongle in the pcie_shared_t.
2880  */
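/*
 * Sketch of the two-phase bring-up described above (ordering only; the
 * actual sequencing is driven by the bus layer):
 *
 *   dhd_prot_attach(dhd);   // phase 1: prot object, common rings, dma bufs
 *   // ... bus layer reads pcie_shared_t advertised by the dongle ...
 *   dhd_prot_init(dhd);     // phase 2: apply dongle-advertised parameters
 *   // after a dhd_prot_reset(dhd), dhd_prot_init(dhd) may run again
 */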
2881 int dhd_prot_attach(dhd_pub_t *dhd)
2882 {
2883     osl_t *osh = dhd->osh;
2884     dhd_prot_t *prot;
2885 
2886     /* FW is going to DMA extended trap data,
2887      * so allocate a buffer for the maximum extended trap data.
2888      */
2889     uint32 trap_buf_len = BCMPCIE_EXT_TRAP_DATA_MAXLEN;
2890 
2891     /* Allocate prot structure */
2892     if (!(prot = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT,
2893                                                sizeof(dhd_prot_t)))) {
2894         DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
2895         goto fail;
2896     }
2897     memset(prot, 0, sizeof(*prot));
2898 
2899     prot->osh = osh;
2900     dhd->prot = prot;
2901 
2902     /* Is DMA'ing of ring completions supported? FALSE by default */
2903     dhd->dma_d2h_ring_upd_support = FALSE;
2904     dhd->dma_h2d_ring_upd_support = FALSE;
2905     dhd->dma_ring_upd_overwrite = FALSE;
2906 
2907     dhd->hwa_inited = 0;
2908     dhd->idma_inited = 0;
2909     dhd->ifrm_inited = 0;
2910     dhd->dar_inited = 0;
2911 
2912     /* Common Ring Allocations */
2913 
2914     /* Ring  0: H2D Control Submission */
2915     if (dhd_prot_ring_attach(dhd, &prot->h2dring_ctrl_subn, "h2dctrl",
2916                              H2DRING_CTRL_SUB_MAX_ITEM,
2917                              H2DRING_CTRL_SUB_ITEMSIZE,
2918                              BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT) != BCME_OK) {
2919         DHD_ERROR(("%s: dhd_prot_ring_attach H2D Ctrl Submission failed\n",
2920                    __FUNCTION__));
2921         goto fail;
2922     }
2923 
2924     /* Ring  1: H2D Receive Buffer Post */
2925     if (dhd_prot_ring_attach(dhd, &prot->h2dring_rxp_subn, "h2drxp",
2926                              H2DRING_RXPOST_MAX_ITEM, H2DRING_RXPOST_ITEMSIZE,
2927                              BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT) != BCME_OK) {
2928         DHD_ERROR(
2929             ("%s: dhd_prot_ring_attach H2D RxPost failed\n", __FUNCTION__));
2930         goto fail;
2931     }
2932 
2933     /* Ring  2: D2H Control Completion */
2934     if (dhd_prot_ring_attach(dhd, &prot->d2hring_ctrl_cpln, "d2hctrl",
2935                              D2HRING_CTRL_CMPLT_MAX_ITEM,
2936                              D2HRING_CTRL_CMPLT_ITEMSIZE,
2937                              BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE) != BCME_OK) {
2938         DHD_ERROR(("%s: dhd_prot_ring_attach D2H Ctrl Completion failed\n",
2939                    __FUNCTION__));
2940         goto fail;
2941     }
2942 
2943     /* Ring  3: D2H Transmit Complete */
2944     if (dhd_prot_ring_attach(dhd, &prot->d2hring_tx_cpln, "d2htxcpl",
2945                              D2HRING_TXCMPLT_MAX_ITEM, D2HRING_TXCMPLT_ITEMSIZE,
2946                              BCMPCIE_D2H_MSGRING_TX_COMPLETE) != BCME_OK) {
2947         DHD_ERROR(("%s: dhd_prot_ring_attach D2H Tx Completion failed\n",
2948                    __FUNCTION__));
2949         goto fail;
2950     }
2951 
2952     /* Ring  4: D2H Receive Complete */
2953     if (dhd_prot_ring_attach(dhd, &prot->d2hring_rx_cpln, "d2hrxcpl",
2954                              D2HRING_RXCMPLT_MAX_ITEM, D2HRING_RXCMPLT_ITEMSIZE,
2955                              BCMPCIE_D2H_MSGRING_RX_COMPLETE) != BCME_OK) {
2956         DHD_ERROR(("%s: dhd_prot_ring_attach D2H Rx Completion failed\n",
2957                    __FUNCTION__));
2958         goto fail;
2959     }
2960 
2961     /*
2962      * Max number of flowrings is not yet known. msgbuf_ring_t with DMA-able
2963      * buffers for flowrings will be instantiated in dhd_prot_init().
2964      * See dhd_prot_flowrings_pool_attach()
2965      */
2966     /* ioctl response buffer */
2967     if (dhd_dma_buf_alloc(dhd, &prot->retbuf, IOCT_RETBUF_SIZE)) {
2968         goto fail;
2969     }
2970 
2971     /* IOCTL request buffer */
2972     if (dhd_dma_buf_alloc(dhd, &prot->ioctbuf, IOCT_RETBUF_SIZE)) {
2973         goto fail;
2974     }
2975 
2976     /* Host TS request buffer; one buffer for now */
2977     if (dhd_dma_buf_alloc(dhd, &prot->hostts_req_buf,
2978                           CTRLSUB_HOSTTS_MEESAGE_SIZE)) {
2979         goto fail;
2980     }
2981     prot->hostts_req_buf_inuse = FALSE;
2982 
2983     /* Scratch buffer for dma rx offset */
2984 #ifdef BCM_HOST_BUF
2985     if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf,
2986                           ROUNDUP(DMA_D2H_SCRATCH_BUF_LEN, 0x10) +
2987                           DMA_HOST_BUFFER_LEN))
2988 #else
2989     if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf,
2990                           DMA_D2H_SCRATCH_BUF_LEN))
2991 
2992 #endif /* BCM_HOST_BUF */
2993     {
2994         goto fail;
2995     }
2996 
2997     /* scratch buffer for bus throughput measurement */
2998     if (dhd_dma_buf_alloc(dhd, &prot->host_bus_throughput_buf,
2999                           DHD_BUS_TPUT_BUF_LEN)) {
3000         goto fail;
3001     }
3002 
3003 #ifdef DHD_RX_CHAINING
3004     dhd_rxchain_reset(&prot->rxchain);
3005 #endif // endif
3006 
3007     prot->pktid_ctrl_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_CTRL_PKTID);
3008     if (prot->pktid_ctrl_map == NULL) {
3009         goto fail;
3010     }
3011 
3012     prot->pktid_rx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_RX_PKTID);
3013     if (prot->pktid_rx_map == NULL) {
3014         goto fail;
3015     }
3016 
3017     prot->pktid_tx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_TX_PKTID);
3018     if (prot->pktid_tx_map == NULL) {
3019         goto fail;
3020     }
3021 
3022 #ifdef IOCTLRESP_USE_CONSTMEM
3023     prot->pktid_map_handle_ioctl =
3024         DHD_NATIVE_TO_PKTID_INIT(dhd, DHD_FLOWRING_MAX_IOCTLRESPBUF_POST);
3025     if (prot->pktid_map_handle_ioctl == NULL) {
3026         goto fail;
3027     }
3028 #endif /* IOCTLRESP_USE_CONSTMEM */
3029 
3030 #ifdef DHD_MAP_PKTID_LOGGING
3031     prot->pktid_dma_map = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
3032     if (prot->pktid_dma_map == NULL) {
3033         DHD_ERROR(("%s: failed to allocate pktid_dma_map\n", __FUNCTION__));
3034     }
3035 
3036     prot->pktid_dma_unmap = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
3037     if (prot->pktid_dma_unmap == NULL) {
3038         DHD_ERROR(("%s: failed to allocate pktid_dma_unmap\n", __FUNCTION__));
3039     }
3040 #endif /* DHD_MAP_PKTID_LOGGING */
3041 
3042     /* Initialize the work queues to be used by the Load Balancing logic */
3043 #if defined(DHD_LB_TXC)
3044     {
3045         void *buffer;
3046         buffer = MALLOC(dhd->osh, sizeof(void *) * DHD_LB_WORKQ_SZ);
3047         if (buffer == NULL) {
3048             DHD_ERROR(
3049                 ("%s: failed to allocate RXC work buffer\n", __FUNCTION__));
3050             goto fail;
3051         }
3052         bcm_workq_init(&prot->tx_compl_prod, &prot->tx_compl_cons, buffer,
3053                        DHD_LB_WORKQ_SZ);
3054         prot->tx_compl_prod_sync = 0;
3055         DHD_INFO(("%s: created tx_compl_workq <%p,%d>\n", __FUNCTION__, buffer,
3056                   DHD_LB_WORKQ_SZ));
3057     }
3058 #endif /* DHD_LB_TXC */
3059 
3060 #if defined(DHD_LB_RXC)
3061     {
3062         void *buffer;
3063         buffer = MALLOC(dhd->osh, sizeof(void *) * DHD_LB_WORKQ_SZ);
3064         if (buffer == NULL) {
3065             DHD_ERROR(
3066                 ("%s: failed to allocate RXC work buffer\n", __FUNCTION__));
3067             goto fail;
3068         }
3069         bcm_workq_init(&prot->rx_compl_prod, &prot->rx_compl_cons, buffer,
3070                        DHD_LB_WORKQ_SZ);
3071         prot->rx_compl_prod_sync = 0;
3072         DHD_INFO(("%s: created rx_compl_workq <%p,%d>\n", __FUNCTION__, buffer,
3073                   DHD_LB_WORKQ_SZ));
3074     }
3075 #endif /* DHD_LB_RXC */
3076 
3077     /* Initialize trap buffer */
3078     if (dhd_dma_buf_alloc(dhd, &dhd->prot->fw_trap_buf, trap_buf_len)) {
3079         DHD_ERROR(("%s: dhd_init_trap_buffer falied\n", __FUNCTION__));
3080         goto fail;
3081     }
3082 
3083     return BCME_OK;
3084 
3085 fail:
3086 
3087     if (prot) {
3088         /* Free up all allocated memories */
3089         dhd_prot_detach(dhd);
3090     }
3091 
3092     return BCME_NOMEM;
3093 } /* dhd_prot_attach */
3094 
3095 static int dhd_alloc_host_scbs(dhd_pub_t *dhd)
3096 {
3097     int ret = BCME_OK;
3098     sh_addr_t base_addr;
3099     dhd_prot_t *prot = dhd->prot;
3100     uint32 host_scb_size = 0;
3101 
3102     if (dhd->hscb_enable) {
3103         /* read number of bytes to allocate from F/W */
3104         dhd_bus_cmn_readshared(dhd->bus, &host_scb_size, HOST_SCB_ADDR, 0);
3105         if (host_scb_size) {
3106             dhd_dma_buf_free(dhd, &prot->host_scb_buf);
3107             /* alloc array of host scbs */
3108             ret = dhd_dma_buf_alloc(dhd, &prot->host_scb_buf, host_scb_size);
3109             /* write host scb address to F/W */
3110             if (ret == BCME_OK) {
3111                 dhd_base_addr_htolpa(&base_addr, prot->host_scb_buf.pa);
3112                 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
3113                                         HOST_SCB_ADDR, 0);
3114             } else {
3115                 DHD_TRACE(("dhd_alloc_host_scbs: dhd_dma_buf_alloc error\n"));
3116             }
3117         } else {
3118             DHD_TRACE(("dhd_alloc_host_scbs: host_scb_size is 0.\n"));
3119         }
3120     } else {
3121         DHD_TRACE(("dhd_alloc_host_scbs: Host scb not supported in F/W.\n"));
3122     }
3123 
3124     return ret;
3125 }
3126 
3127 void dhd_set_host_cap(dhd_pub_t *dhd)
3128 {
3129     uint32 data = 0;
3130     dhd_prot_t *prot = dhd->prot;
3131 
3132     if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
3133         if (dhd->h2d_phase_supported) {
3134             data |= HOSTCAP_H2D_VALID_PHASE;
3135             if (dhd->force_dongletrap_on_bad_h2d_phase) {
3136                 data |= HOSTCAP_H2D_ENABLE_TRAP_ON_BADPHASE;
3137             }
3138         }
3139         if (prot->host_ipc_version > prot->device_ipc_version) {
3140             prot->active_ipc_version = prot->device_ipc_version;
3141         } else {
3142             prot->active_ipc_version = prot->host_ipc_version;
3143         }
3144 
3145         data |= prot->active_ipc_version;
3146 
3147         if (dhdpcie_bus_get_pcie_hostready_supported(dhd->bus)) {
3148             DHD_INFO(("Advertise Hostready Capability\n"));
3149             data |= HOSTCAP_H2D_ENABLE_HOSTRDY;
3150         }
3151         {
3152             /* Disable DS altogether */
3153             data |= HOSTCAP_DS_NO_OOB_DW;
3154             dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_NONE);
3155         }
3156 
3157         /* Indicate support for extended trap data */
3158         data |= HOSTCAP_EXTENDED_TRAP_DATA;
3159 
3160         /* Indicate support for TX status metadata */
3161         if (dhd->pcie_txs_metadata_enable != 0) {
3162             data |= HOSTCAP_TXSTATUS_METADATA;
3163         }
3164 
3165         /* Enable fast delete ring in firmware if supported */
3166         if (dhd->fast_delete_ring_support) {
3167             data |= HOSTCAP_FAST_DELETE_RING;
3168         }
3169 
3170         if (dhdpcie_bus_get_pcie_hwa_supported(dhd->bus)) {
3171             DHD_ERROR(("HWA inited\n"));
3172             /* Is hostcap needed? */
3173             dhd->hwa_inited = TRUE;
3174         }
3175 
3176         if (dhdpcie_bus_get_pcie_idma_supported(dhd->bus)) {
3177             DHD_ERROR(("IDMA inited\n"));
3178             data |= HOSTCAP_H2D_IDMA;
3179             dhd->idma_inited = TRUE;
3180         }
3181 
3182         if (dhdpcie_bus_get_pcie_ifrm_supported(dhd->bus)) {
3183             DHD_ERROR(("IFRM Inited\n"));
3184             data |= HOSTCAP_H2D_IFRM;
3185             dhd->ifrm_inited = TRUE;
3186             dhd->dma_h2d_ring_upd_support = FALSE;
3187             dhd_prot_dma_indx_free(dhd);
3188         }
3189 
3190         if (dhdpcie_bus_get_pcie_dar_supported(dhd->bus)) {
3191             DHD_ERROR(("DAR doorbell Use\n"));
3192             data |= HOSTCAP_H2D_DAR;
3193             dhd->dar_inited = TRUE;
3194         }
3195 
3196         data |= HOSTCAP_UR_FW_NO_TRAP;
3197 
3198         if (dhd->hscb_enable) {
3199             data |= HOSTCAP_HSCB;
3200         }
3201 
3202 #ifdef EWP_EDL
3203         if (dhd->dongle_edl_support) {
3204             data |= HOSTCAP_EDL_RING;
3205             DHD_ERROR(("Enable EDL host cap\n"));
3206         } else {
3207             DHD_ERROR(("DO NOT SET EDL host cap\n"));
3208         }
3209 #endif /* EWP_EDL */
3210 
3211 #ifdef DHD_HP2P
3212         if (dhd->hp2p_capable) {
3213             data |= HOSTCAP_PKT_TIMESTAMP;
3214             data |= HOSTCAP_PKT_HP2P;
3215             DHD_ERROR(("Enable HP2P in host cap\n"));
3216         } else {
3217             DHD_ERROR(("HP2P not enabled in host cap\n"));
3218         }
3219 #endif // endif
3220 
3221 #ifdef DHD_DB0TS
3222         if (dhd->db0ts_capable) {
3223             data |= HOSTCAP_DB0_TIMESTAMP;
3224             DHD_ERROR(("Enable DB0 TS in host cap\n"));
3225         } else {
3226             DHD_ERROR(("DB0 TS not enabled in host cap\n"));
3227         }
3228 #endif /* DHD_DB0TS */
3229         if (dhd->extdtxs_in_txcpl) {
3230             DHD_ERROR(("Enable hostcap: EXTD TXS in txcpl\n"));
3231             data |= HOSTCAP_PKT_TXSTATUS;
3232         } else {
3233             DHD_ERROR(("Enable hostcap: EXTD TXS in txcpl\n"));
3234         }
3235 
3236         DHD_INFO(("%s:Active Ver:%d, Host Ver:%d, FW Ver:%d\n", __FUNCTION__,
3237                   prot->active_ipc_version, prot->host_ipc_version,
3238                   prot->device_ipc_version));
3239 
3240         dhd_bus_cmn_writeshared(dhd->bus, &data, sizeof(uint32),
3241                                 HOST_API_VERSION, 0);
3242         dhd_bus_cmn_writeshared(dhd->bus, &prot->fw_trap_buf.pa,
3243                                 sizeof(prot->fw_trap_buf.pa),
3244                                 DNGL_TO_HOST_TRAP_ADDR, 0);
3245     }
3246 }
3247 
3248 /**
3249  * dhd_prot_init - second stage of dhd_prot_attach. Now that the dongle has
3250  * completed its initialization of the pcie_shared structure, we may now fetch
3251  * the dongle advertised features and adjust the protocol layer accordingly.
3252  *
3253  * dhd_prot_init() may be invoked again after a dhd_prot_reset().
3254  */
3255 int dhd_prot_init(dhd_pub_t *dhd)
3256 {
3257     sh_addr_t base_addr;
3258     dhd_prot_t *prot = dhd->prot;
3259     int ret = 0;
3260     uint32 idmacontrol;
3261     uint32 waitcount = 0;
3262 
3263 #ifdef WL_MONITOR
3264     dhd->monitor_enable = FALSE;
3265 #endif /* WL_MONITOR */
3266 
3267     /**
3268      * A user defined value can be assigned to global variable h2d_max_txpost
3269      * via
3270      * 1. DHD IOVAR h2d_max_txpost, before firmware download
3271      * 2. module parameter h2d_max_txpost
3272      * prot->h2d_max_txpost defaults to H2DRING_TXPOST_MAX_ITEM
3273      * if the user has not set a value by one of the above methods.
3274      */
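    /*
     * Illustrative examples (values and exact command syntax hypothetical):
     *
     *   # 1. module parameter at load time
     *   insmod dhd.ko h2d_max_txpost=512
     *
     *   # 2. IOVAR before firmware download, via a dhd utility
     *   dhd -i wlan0 h2d_max_txpost 512
     */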
3275     prot->h2d_max_txpost = (uint16)h2d_max_txpost;
3276 
3277     DHD_ERROR(("%s:%d: h2d_max_txpost = %d\n", __FUNCTION__, __LINE__,
3278                prot->h2d_max_txpost));
3279 
3280     /* Read max rx packets supported by dongle */
3281     dhd_bus_cmn_readshared(dhd->bus, &prot->max_rxbufpost, MAX_HOST_RXBUFS, 0);
3282     if (prot->max_rxbufpost == 0) {
3283         /* This would happen if the dongle firmware is not */
3284         /* using the latest shared structure template */
3285         prot->max_rxbufpost = DEFAULT_RX_BUFFERS_TO_POST;
3286     }
3287     DHD_ERROR(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__,
3288                prot->max_rxbufpost));
3289 
3290     /* Initialize fields individually; a bzero() would blow away the dma pointers. */
3291     prot->max_eventbufpost = DHD_FLOWRING_MAX_EVENTBUF_POST;
3292     prot->max_ioctlrespbufpost = DHD_FLOWRING_MAX_IOCTLRESPBUF_POST;
3293     prot->max_infobufpost = DHD_H2D_INFORING_MAX_BUF_POST;
3294     prot->max_tsbufpost = DHD_MAX_TSBUF_POST;
3295 
3296     prot->cur_ioctlresp_bufs_posted = 0;
3297     OSL_ATOMIC_INIT(dhd->osh, &prot->active_tx_count);
3298     prot->data_seq_no = 0;
3299     prot->ioctl_seq_no = 0;
3300     prot->rxbufpost = 0;
3301     prot->cur_event_bufs_posted = 0;
3302     prot->ioctl_state = 0;
3303     prot->curr_ioctl_cmd = 0;
3304     prot->cur_ts_bufs_posted = 0;
3305     prot->infobufpost = 0;
3306 
3307     prot->dmaxfer.srcmem.va = NULL;
3308     prot->dmaxfer.dstmem.va = NULL;
3309     prot->dmaxfer.in_progress = FALSE;
3310 
3311     prot->metadata_dbg = FALSE;
3312     prot->rx_metadata_offset = 0;
3313     prot->tx_metadata_offset = 0;
3314     prot->txp_threshold = TXP_FLUSH_MAX_ITEMS_FLUSH_CNT;
3315 
3316     /* To catch any rollover issues fast, start with a high ioctl_trans_id */
3317     prot->ioctl_trans_id =
3318         MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER;
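    /* Worked example (assuming a 16-bit ioctl_trans_id): NBITS() yields 16
     * and MAXBITVAL(16) is 0xFFFF, so the counter starts only
     * BUFFER_BEFORE_ROLLOVER transactions short of the wrap; rollover
     * handling is thus exercised after a handful of ioctls instead of ~64K.
     */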
3319     prot->ioctl_state = 0;
3320     prot->ioctl_status = 0;
3321     prot->ioctl_resplen = 0;
3322     prot->ioctl_received = IOCTL_WAIT;
3323 
3324     /* Initialize Common MsgBuf Rings */
3325 
3326     prot->device_ipc_version = dhd->bus->api.fw_rev;
3327     prot->host_ipc_version = PCIE_SHARED_VERSION;
3328     prot->no_tx_resource = FALSE;
3329 
3330     /* Init the host API version */
3331     dhd_set_host_cap(dhd);
3332 
3333     /* alloc and configure scb host address for dongle */
3334     if ((ret = dhd_alloc_host_scbs(dhd))) {
3335         return ret;
3336     }
3337 
3338     /* Register the interrupt function upfront */
3339     /* remove corerev checks in data path */
3340     /* do this after host/fw negotiation for DAR */
3341     prot->mb_ring_fn = dhd_bus_get_mbintr_fn(dhd->bus);
3342     prot->mb_2_ring_fn = dhd_bus_get_mbintr_2_fn(dhd->bus);
3343 
3344     dhd->bus->_dar_war = (dhd->bus->sih->buscorerev < 0x40) ? TRUE : FALSE;
3345 
3346     dhd_prot_ring_init(dhd, &prot->h2dring_ctrl_subn);
3347     dhd_prot_ring_init(dhd, &prot->h2dring_rxp_subn);
3348     dhd_prot_ring_init(dhd, &prot->d2hring_ctrl_cpln);
3349 
3350     /* Make it compatible with pre-rev7 Firmware */
3351     if (prot->active_ipc_version < PCIE_SHARED_VERSION_7) {
3352         prot->d2hring_tx_cpln.item_len = D2HRING_TXCMPLT_ITEMSIZE_PREREV7;
3353         prot->d2hring_rx_cpln.item_len = D2HRING_RXCMPLT_ITEMSIZE_PREREV7;
3354     }
3355     dhd_prot_ring_init(dhd, &prot->d2hring_tx_cpln);
3356     dhd_prot_ring_init(dhd, &prot->d2hring_rx_cpln);
3357 
3358     dhd_prot_d2h_sync_init(dhd);
3359 
3360     dhd_prot_h2d_sync_init(dhd);
3361 
3362     /* init the scratch buffer */
3363     dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_scratch_buf.pa);
3364     dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
3365                             D2H_DMA_SCRATCH_BUF, 0);
3366     dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf.len,
3367                             sizeof(prot->d2h_dma_scratch_buf.len),
3368                             D2H_DMA_SCRATCH_BUF_LEN, 0);
3369 
3370     /* If supported by the host, indicate the memory block
3371      * for completion writes / submission reads to shared space
3372      */
3373     if (dhd->dma_d2h_ring_upd_support) {
3374         dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_wr_buf.pa);
3375         dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
3376                                 D2H_DMA_INDX_WR_BUF, 0);
3377         dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_rd_buf.pa);
3378         dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
3379                                 H2D_DMA_INDX_RD_BUF, 0);
3380     }
3381 
3382     if (dhd->dma_h2d_ring_upd_support || IDMA_ENAB(dhd)) {
3383         dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_wr_buf.pa);
3384         dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
3385                                 H2D_DMA_INDX_WR_BUF, 0);
3386         dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_rd_buf.pa);
3387         dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
3388                                 D2H_DMA_INDX_RD_BUF, 0);
3389     }
3390     /* Signal to the dongle that common ring init is complete */
3391     if (dhd->hostrdy_after_init) {
3392         dhd_bus_hostready(dhd->bus);
3393     }
3394 
3395     /*
3396      * If the DMA-able buffers for flowring needs to come from a specific
3397      * contiguous memory region, then setup prot->flowrings_dma_buf here.
3398      * dhd_prot_flowrings_pool_attach() will carve out DMA-able buffers from
3399      * this contiguous memory region, for each of the flowrings.
3400      */
3401 
3402     /* Pre-allocate pool of msgbuf_ring for flowrings */
3403     if (dhd_prot_flowrings_pool_attach(dhd) != BCME_OK) {
3404         return BCME_ERROR;
3405     }
3406 
3407     /* If IFRM is enabled, wait for FW to setup the DMA channel */
3408     if (IFRM_ENAB(dhd)) {
3409         dhd_base_addr_htolpa(&base_addr, prot->h2d_ifrm_indx_wr_buf.pa);
3410         dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
3411                                 H2D_IFRM_INDX_WR_BUF, 0);
3412     }
3413 
3414     /* If IDMA is enabled and inited, wait for FW to set up the IDMA descriptors.
3415      * Wait just before configuring the doorbell.
3416      */
3417 #define IDMA_ENABLE_WAIT 10
3418     if (IDMA_ACTIVE(dhd)) {
3419         /* wait for idma_en bit in IDMAcontrol register to be set */
3420         /* Loop until idma_en is set */
3421         uint buscorerev = dhd->bus->sih->buscorerev;
3422         idmacontrol = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
3423                                  IDMAControl(buscorerev), 0, 0);
3424         while (!(idmacontrol & PCIE_IDMA_MODE_EN(buscorerev)) &&
3425                (waitcount++ < IDMA_ENABLE_WAIT)) {
3426             DHD_ERROR(
3427                 ("iDMA not enabled yet,waiting 1 ms c=%d IDMAControl = %08x\n",
3428                  waitcount, idmacontrol));
3429             OSL_DELAY(0x3E8); /* 1 ms; acceptable as it's one-time only */
3430             idmacontrol = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
3431                                      IDMAControl(buscorerev), 0, 0);
3432         }
3433 
3434         if (waitcount < IDMA_ENABLE_WAIT) {
3435             DHD_ERROR(("iDMA enabled PCIEControl = %08x\n", idmacontrol));
3436         } else {
3437             DHD_ERROR(
3438                 ("Error: wait for iDMA timed out wait=%d IDMAControl = %08x\n",
3439                  waitcount, idmacontrol));
3440             return BCME_ERROR;
3441         }
3442         // add delay to fix bring up issue
3443         OSL_SLEEP(1);
3444     }
3445 
3446     /* Host should configure soft doorbells if needed ... here */
3447 
3448     /* Post to dongle host configured soft doorbells */
3449     dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd);
3450 
3451     dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
3452     dhd_msgbuf_rxbuf_post_event_bufs(dhd);
3453 
3454     prot->no_retry = FALSE;
3455     prot->no_aggr = FALSE;
3456     prot->fixed_rate = FALSE;
3457 
3458     /*
3459      * Note that any communication with the Dongle should be added
3460      * below this point. Any other host data structure initialization that
3461      * needs to be done before the DPC starts executing should be done
3462      * before this point.
3463      * Because once we start sending H2D requests to the Dongle, the Dongle
3464      * may respond immediately, so the DPC context that handles the
3465      * D2H response could preempt the context in which dhd_prot_init is running.
3466      * We want to ensure that all the host-side work in dhd_prot_init is
3467      * done before that.
3468      */
3469 
3470     /* See if info rings can be created; info rings should be created
3471      * only if the dongle does not support EDL
3472      */
3473 #ifdef EWP_EDL
3474     if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6 &&
3475         !dhd->dongle_edl_support)
3476 #else
3477     if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
3478 #endif /* EWP_EDL */
3479     {
3480         if ((ret = dhd_prot_init_info_rings(dhd)) != BCME_OK) {
3481             /* For now log and proceed; further clean-up action may be necessary
3482              * when we have more clarity.
3483              */
3484             DHD_ERROR(("%s Info rings couldn't be created: Err Code%d",
3485                        __FUNCTION__, ret));
3486         }
3487     }
3488 
3489 #ifdef EWP_EDL
3490     /* Create Enhanced Debug Lane rings (EDL) if dongle supports it */
3491     if (dhd->dongle_edl_support) {
3492         if ((ret = dhd_prot_init_edl_rings(dhd)) != BCME_OK) {
3493             DHD_ERROR(("%s EDL rings couldn't be created: Err Code%d",
3494                        __FUNCTION__, ret));
3495         }
3496     }
3497 #endif /* EWP_EDL */
3498 
3499 #ifdef DHD_HP2P
3500     /* create HPP txcmpl/rxcmpl rings */
3501     if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_7 && dhd->hp2p_capable) {
3502         if ((ret = dhd_prot_init_hp2p_rings(dhd)) != BCME_OK) {
3503             /* For now log and proceed; further clean-up action may be necessary
3504              * when we have more clarity.
3505              */
3506             DHD_ERROR(("%s HP2P rings couldn't be created: Err Code%d",
3507                        __FUNCTION__, ret));
3508         }
3509     }
3510 #endif /* DHD_HP2P */
3511 
3512     return BCME_OK;
3513 } /* dhd_prot_init */
3514 
3515 /**
3516  * dhd_prot_detach - PCIE FD protocol layer destructor.
3517  * Unlink, frees allocated protocol memory (including dhd_prot)
3518  */
3519 void dhd_prot_detach(dhd_pub_t *dhd)
3520 {
3521     dhd_prot_t *prot = dhd->prot;
3522 
3523     /* Stop the protocol module */
3524     if (prot) {
3525         /* free up all DMA-able buffers allocated during prot attach/init */
3526 
3527         dhd_dma_buf_free(dhd, &prot->d2h_dma_scratch_buf);
3528         dhd_dma_buf_free(dhd, &prot->retbuf);
3529         dhd_dma_buf_free(dhd, &prot->ioctbuf);
3530         dhd_dma_buf_free(dhd, &prot->host_bus_throughput_buf);
3531         dhd_dma_buf_free(dhd, &prot->hostts_req_buf);
3532         dhd_dma_buf_free(dhd, &prot->fw_trap_buf);
3533         dhd_dma_buf_free(dhd, &prot->host_scb_buf);
3534 
3535         /* DMA-able buffers for DMAing H2D/D2H WR/RD indices */
3536         dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
3537         dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_rd_buf);
3538         dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_wr_buf);
3539         dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
3540 
3541         dhd_dma_buf_free(dhd, &prot->h2d_ifrm_indx_wr_buf);
3542 
3543         /* Common MsgBuf Rings */
3544         dhd_prot_ring_detach(dhd, &prot->h2dring_ctrl_subn);
3545         dhd_prot_ring_detach(dhd, &prot->h2dring_rxp_subn);
3546         dhd_prot_ring_detach(dhd, &prot->d2hring_ctrl_cpln);
3547         dhd_prot_ring_detach(dhd, &prot->d2hring_tx_cpln);
3548         dhd_prot_ring_detach(dhd, &prot->d2hring_rx_cpln);
3549 
3550         /* Detach each DMA-able buffer and free the pool of msgbuf_ring_t */
3551         dhd_prot_flowrings_pool_detach(dhd);
3552 
3553         /* detach info rings */
3554         dhd_prot_detach_info_rings(dhd);
3555 
3556 #ifdef EWP_EDL
3557         dhd_prot_detach_edl_rings(dhd);
3558 #endif // endif
3559 #ifdef DHD_HP2P
3560         /* detach HPP rings */
3561         dhd_prot_detach_hp2p_rings(dhd);
3562 #endif /* DHD_HP2P */
3563 
3564         /* If IOCTLRESP_USE_CONSTMEM is defined, IOCTL PKTs use the
3565          * pktid_map_handle_ioctl handler and PKT memory is allocated using
3566          * alloc_ioctl_return_buffer(); otherwise they will be part of the
3567          * pktid_ctrl_map handler and PKT memory is allocated using
3568          * PKTGET_STATIC (if DHD_USE_STATIC_CTRLBUF is defined) OR PKTGET.
3569          * Similarly, for freeing PKT buffers DHD_NATIVE_TO_PKTID_FINI will be
3570          * used, which calls PKTFREE_STATIC (if DHD_USE_STATIC_CTRLBUF is
3571          * defined) OR PKTFREE. Else, if IOCTLRESP_USE_CONSTMEM is defined,
3572          * IOCTL PKTs will be freed using DHD_NATIVE_TO_PKTID_FINI_IOCTL,
3573          * which calls free_ioctl_return_buffer.
3574          */
3575         DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_ctrl_map);
3576         DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_rx_map);
3577         DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_tx_map);
3578 #ifdef IOCTLRESP_USE_CONSTMEM
3579         DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, prot->pktid_map_handle_ioctl);
3580 #endif // endif
3581 #ifdef DHD_MAP_PKTID_LOGGING
3582         DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_map);
3583         DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_unmap);
3584 #endif /* DHD_MAP_PKTID_LOGGING */
3585 
3586 #if defined(DHD_LB_TXC)
3587         if (prot->tx_compl_prod.buffer) {
3588             MFREE(dhd->osh, prot->tx_compl_prod.buffer,
3589                   sizeof(void *) * DHD_LB_WORKQ_SZ);
3590         }
3591 #endif /* DHD_LB_TXC */
3592 #if defined(DHD_LB_RXC)
3593         if (prot->rx_compl_prod.buffer) {
3594             MFREE(dhd->osh, prot->rx_compl_prod.buffer,
3595                   sizeof(void *) * DHD_LB_WORKQ_SZ);
3596         }
3597 #endif /* DHD_LB_RXC */
3598 
3599         DHD_OS_PREFREE(dhd, dhd->prot, sizeof(dhd_prot_t));
3600 
3601         dhd->prot = NULL;
3602     }
3603 } /* dhd_prot_detach */
3604 
3605 /**
3606  * dhd_prot_reset - Reset the protocol layer without freeing any objects.
3607  * This may be invoked to soft reboot the dongle, without having to
3608  * detach and attach the entire protocol layer.
3609  *
3610  * After dhd_prot_reset(), dhd_prot_init() may be invoked
3611  * without going through a dhd_prot_attach() phase.
3612  */
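/*
 * Sketch of the soft-reboot flow this enables (assumed typical usage; the
 * actual sequencing lives in the bus layer):
 *
 *   dhd_prot_reset(dhd);   // zero rings and buffers, keep all allocations
 *   // ... re-download firmware, re-read pcie_shared_t ...
 *   dhd_prot_init(dhd);    // re-apply dongle-advertised parameters
 */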
3613 void dhd_prot_reset(dhd_pub_t *dhd)
3614 {
3615     struct dhd_prot *prot = dhd->prot;
3616 
3617     DHD_TRACE(("%s\n", __FUNCTION__));
3618 
3619     if (prot == NULL) {
3620         return;
3621     }
3622 
3623     dhd_prot_flowrings_pool_reset(dhd);
3624 
3625     /* Reset Common MsgBuf Rings */
3626     dhd_prot_ring_reset(dhd, &prot->h2dring_ctrl_subn);
3627     dhd_prot_ring_reset(dhd, &prot->h2dring_rxp_subn);
3628     dhd_prot_ring_reset(dhd, &prot->d2hring_ctrl_cpln);
3629     dhd_prot_ring_reset(dhd, &prot->d2hring_tx_cpln);
3630     dhd_prot_ring_reset(dhd, &prot->d2hring_rx_cpln);
3631 
3632     /* Reset info rings */
3633     if (prot->h2dring_info_subn) {
3634         dhd_prot_ring_reset(dhd, prot->h2dring_info_subn);
3635     }
3636 
3637     if (prot->d2hring_info_cpln) {
3638         dhd_prot_ring_reset(dhd, prot->d2hring_info_cpln);
3639     }
3640 #ifdef EWP_EDL
3641     if (prot->d2hring_edl) {
3642         dhd_prot_ring_reset(dhd, prot->d2hring_edl);
3643     }
3644 #endif /* EWP_EDL */
3645 
3646     /* Reset all DMA-able buffers allocated during prot attach */
3647     dhd_dma_buf_reset(dhd, &prot->d2h_dma_scratch_buf);
3648     dhd_dma_buf_reset(dhd, &prot->retbuf);
3649     dhd_dma_buf_reset(dhd, &prot->ioctbuf);
3650     dhd_dma_buf_reset(dhd, &prot->host_bus_throughput_buf);
3651     dhd_dma_buf_reset(dhd, &prot->hostts_req_buf);
3652     dhd_dma_buf_reset(dhd, &prot->fw_trap_buf);
3653     dhd_dma_buf_reset(dhd, &prot->host_scb_buf);
3654 
3655     dhd_dma_buf_reset(dhd, &prot->h2d_ifrm_indx_wr_buf);
3656 
3657     /* Reset all DMA-able buffers for DMAing H2D/D2H WR/RD indices */
3658     dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_rd_buf);
3659     dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_wr_buf);
3660     dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_rd_buf);
3661     dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_wr_buf);
3662 
3663     prot->rx_metadata_offset = 0;
3664     prot->tx_metadata_offset = 0;
3665 
3666     prot->rxbufpost = 0;
3667     prot->cur_event_bufs_posted = 0;
3668     prot->cur_ioctlresp_bufs_posted = 0;
3669 
3670     OSL_ATOMIC_INIT(dhd->osh, &prot->active_tx_count);
3671     prot->data_seq_no = 0;
3672     prot->ioctl_seq_no = 0;
3673     prot->ioctl_state = 0;
3674     prot->curr_ioctl_cmd = 0;
3675     prot->ioctl_received = IOCTL_WAIT;
3676     /* To catch any rollover issues fast, start with a high ioctl_trans_id */
3677     prot->ioctl_trans_id =
3678         MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER;
3679 
3680     /* dhd_flow_rings_init is invoked in dhd_bus_start,
3681      * so flowrings shall be deleted when the bus is stopped
3682      */
3683     if (dhd->flow_rings_inited) {
3684         dhd_flow_rings_deinit(dhd);
3685     }
3686 
3687 #ifdef DHD_HP2P
3688     if (prot->d2hring_hp2p_txcpl) {
3689         dhd_prot_ring_reset(dhd, prot->d2hring_hp2p_txcpl);
3690     }
3691     if (prot->d2hring_hp2p_rxcpl) {
3692         dhd_prot_ring_reset(dhd, prot->d2hring_hp2p_rxcpl);
3693     }
3694 #endif /* DHD_HP2P */
3695 
3696     /* Reset PKTID map */
3697     DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_ctrl_map);
3698     DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_rx_map);
3699     DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_tx_map);
3700 #ifdef IOCTLRESP_USE_CONSTMEM
3701     DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, prot->pktid_map_handle_ioctl);
3702 #endif /* IOCTLRESP_USE_CONSTMEM */
3703 #ifdef DMAMAP_STATS
3704     dhd->dma_stats.txdata = dhd->dma_stats.txdata_sz = 0;
3705     dhd->dma_stats.rxdata = dhd->dma_stats.rxdata_sz = 0;
3706 #ifndef IOCTLRESP_USE_CONSTMEM
3707     dhd->dma_stats.ioctl_rx = dhd->dma_stats.ioctl_rx_sz = 0;
3708 #endif /* IOCTLRESP_USE_CONSTMEM */
3709     dhd->dma_stats.event_rx = dhd->dma_stats.event_rx_sz = 0;
3710     dhd->dma_stats.info_rx = dhd->dma_stats.info_rx_sz = 0;
3711     dhd->dma_stats.tsbuf_rx = dhd->dma_stats.tsbuf_rx_sz = 0;
3712 #endif /* DMAMAP_STATS */
3713 } /* dhd_prot_reset */
3714 
3715 #if defined(DHD_LB_RXP)
3716 #define DHD_LB_DISPATCH_RX_PROCESS(dhdp) dhd_lb_dispatch_rx_process(dhdp)
3717 #else /* !DHD_LB_RXP */
3718 #define DHD_LB_DISPATCH_RX_PROCESS(dhdp)                                       \
3719     do { /* noop */                                                            \
3720     } while (0)
3721 #endif /* !DHD_LB_RXP */
3722 
3723 #if defined(DHD_LB_RXC)
3724 #define DHD_LB_DISPATCH_RX_COMPL(dhdp) dhd_lb_dispatch_rx_compl(dhdp)
3725 #else /* !DHD_LB_RXC */
3726 #define DHD_LB_DISPATCH_RX_COMPL(dhdp)                                         \
3727     do { /* noop */                                                            \
3728     } while (0)
3729 #endif /* !DHD_LB_RXC */
3730 
3731 #if defined(DHD_LB_TXC)
3732 #define DHD_LB_DISPATCH_TX_COMPL(dhdp) dhd_lb_dispatch_tx_compl(dhdp)
3733 #else /* !DHD_LB_TXC */
3734 #define DHD_LB_DISPATCH_TX_COMPL(dhdp)                                         \
3735     do { /* noop */                                                            \
3736     } while (0)
3737 #endif /* !DHD_LB_TXC */
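/*
 * With this pattern a call site stays free of #ifdefs; e.g. (illustrative):
 *
 *   DHD_LB_DISPATCH_TX_COMPL(dhdp);
 *
 * expands to dhd_lb_dispatch_tx_compl(dhdp) when DHD_LB_TXC is defined and
 * to an empty do { } while (0) statement otherwise.
 */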
3738 
3739 #if defined(DHD_LB)
3740 /* DHD load balancing: deferral of work to another online CPU */
3741 /* DHD_LB_TXC DHD_LB_RXC DHD_LB_RXP dispatchers, in dhd_linux.c */
3742 extern void dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp);
3743 extern void dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp);
3744 extern void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp);
3745 extern void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx);
3746 
3747 #if defined(DHD_LB_RXP)
3748 /**
3749  * dhd_lb_dispatch_rx_process - load balance by dispatching Rx processing work
3750  * to other CPU cores
3751  */
3752 static INLINE void dhd_lb_dispatch_rx_process(dhd_pub_t *dhdp)
3753 {
3754     dhd_lb_rx_napi_dispatch(dhdp); /* dispatch rx_process_napi */
3755 }
3756 #endif /* DHD_LB_RXP */
3757 
3758 #if defined(DHD_LB_TXC)
3759 /**
3760  * dhd_lb_dispatch_tx_compl - load balance by dispatching Tx completion work
3761  * to other CPU cores
3762  */
3763 static INLINE void dhd_lb_dispatch_tx_compl(dhd_pub_t *dhdp, uint16 ring_idx)
3764 {
3765     bcm_workq_prod_sync(&dhdp->prot->tx_compl_prod); /* flush WR index */
3766     dhd_lb_tx_compl_dispatch(dhdp); /* dispatch tx_compl_tasklet */
3767 }
3768 
3769 /**
3770  * DHD load balanced tx completion tasklet handler that performs the
3771  * freeing of packets on the selected CPU. Packet pointers are delivered to
3772  * this tasklet via the tx complete workq.
3773  */
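/*
 * Producer-side sketch (assumed shape of the enqueue path; the actual
 * producer lives in the tx completion processing code):
 *
 *   int idx = bcm_ring_prod(WORKQ_RING(&prot->tx_compl_prod),
 *                           DHD_LB_WORKQ_SZ);
 *   if (idx != BCM_RING_FULL)
 *       *WORKQ_ELEMENT(void *, &prot->tx_compl_prod, idx) = pkt;
 *
 *   // bcm_workq_prod_sync() + dhd_lb_tx_compl_dispatch() then wake this
 *   // handler on the selected CPU
 */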
3774 void dhd_lb_tx_compl_handler(unsigned long data)
3775 {
3776     int elem_ix;
3777     void *pkt, **elem;
3778     dmaaddr_t pa;
3779     uint32 pa_len;
3780     dhd_pub_t *dhd = (dhd_pub_t *)data;
3781     dhd_prot_t *prot = dhd->prot;
3782     bcm_workq_t *workq = &prot->tx_compl_cons;
3783     uint32 count = 0;
3784 
3785     int curr_cpu;
3786     curr_cpu = get_cpu();
3787     put_cpu();
3788 
3789     DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhd);
3790 
3791     while (1) {
3792         elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
3793         if (elem_ix == BCM_RING_EMPTY) {
3794             break;
3795         }
3796 
3797         elem = WORKQ_ELEMENT(void *, workq, elem_ix);
3798         pkt = *elem;
3799 
3800         DHD_INFO(("%s: tx_compl_cons pkt<%p>\n", __FUNCTION__, pkt));
3801 
3802         OSL_PREFETCH(PKTTAG(pkt));
3803         OSL_PREFETCH(pkt);
3804 
3805         pa = DHD_PKTTAG_PA((dhd_pkttag_fr_t *)PKTTAG(pkt));
3806         pa_len = DHD_PKTTAG_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt));
3807 
3808         DMA_UNMAP(dhd->osh, pa, pa_len, DMA_RX, 0, 0);
3809 #if defined(BCMPCIE)
3810         dhd_txcomplete(dhd, pkt, true);
3811 #ifdef DHD_4WAYM4_FAIL_DISCONNECT
3812         dhd_eap_txcomplete(dhd, pkt, TRUE, txstatus->cmn_hdr.if_id);
3813 #endif /* DHD_4WAYM4_FAIL_DISCONNECT */
3814 #endif // endif
3815 
3816         PKTFREE(dhd->osh, pkt, TRUE);
3817         count++;
3818     }
3819 
3820     bcm_workq_cons_sync(workq);
3821     DHD_LB_STATS_UPDATE_TXC_HISTO(dhd, count);
3822 }
3823 #endif /* DHD_LB_TXC */
3824 
3825 #if defined(DHD_LB_RXC)
3826 
3827 /**
3828  * dhd_lb_dispatch_rx_compl - load balance by dispatching rx completion work
3829  * to other CPU cores
3830  */
3831 static INLINE void dhd_lb_dispatch_rx_compl(dhd_pub_t *dhdp)
3832 {
3833     dhd_prot_t *prot = dhdp->prot;
3834     /* Schedule the tasklet only if we have to */
3835     if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) {
3836         /* flush WR index */
3837         bcm_workq_prod_sync(&dhdp->prot->rx_compl_prod);
3838         dhd_lb_rx_compl_dispatch(dhdp); /* dispatch rx_compl_tasklet */
3839     }
3840 }
3841 
3842 void dhd_lb_rx_compl_handler(unsigned long data)
3843 {
3844     dhd_pub_t *dhd = (dhd_pub_t *)data;
3845     bcm_workq_t *workq = &dhd->prot->rx_compl_cons;
3846 
3847     DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhd);
3848 
3849     dhd_msgbuf_rxbuf_post(dhd, TRUE); /* re-use pktids */
3850     bcm_workq_cons_sync(workq);
3851 }
3852 #endif /* DHD_LB_RXC */
3853 #endif /* DHD_LB */
3854 
3855 void dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 rx_offset)
3856 {
3857     dhd_prot_t *prot = dhd->prot;
3858     prot->rx_dataoffset = rx_offset;
3859 }
3860 
3861 static int dhd_check_create_info_rings(dhd_pub_t *dhd)
3862 {
3863     dhd_prot_t *prot = dhd->prot;
3864     int ret = BCME_ERROR;
3865     uint16 ringid;
3866 
3867     {
3868         /* dongle may increase max_submission_rings so keep
3869          * ringid at end of dynamic rings
3870          */
3871         ringid = dhd->bus->max_tx_flowrings +
3872                  (dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) +
3873                  BCMPCIE_H2D_COMMON_MSGRINGS;
3874     }
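    /* Worked example (hypothetical counts): with max_tx_flowrings = 40,
     * max_submission_rings = 42 and BCMPCIE_H2D_COMMON_MSGRINGS = 2, the
     * expression above reduces to max_submission_rings +
     * BCMPCIE_H2D_COMMON_MSGRINGS = 44, i.e. the first ring id past all
     * common rings and all dynamic submission rings.
     */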
3875 
3876     if (prot->d2hring_info_cpln) {
3877         /* for d2hring re-entry case, clear inited flag */
3878         prot->d2hring_info_cpln->inited = FALSE;
3879     }
3880 
3881     if (prot->h2dring_info_subn && prot->d2hring_info_cpln) {
3882         return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
3883     }
3884 
3885     if (prot->h2dring_info_subn == NULL) {
3886         prot->h2dring_info_subn = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
3887 
3888         if (prot->h2dring_info_subn == NULL) {
3889             DHD_ERROR(("%s: couldn't alloc memory for h2dring_info_subn\n",
3890                        __FUNCTION__));
3891             return BCME_NOMEM;
3892         }
3893 
3894         DHD_INFO(("%s: about to create debug submit ring\n", __FUNCTION__));
3895         ret = dhd_prot_ring_attach(dhd, prot->h2dring_info_subn, "h2dinfo",
3896                                    H2DRING_DYNAMIC_INFO_MAX_ITEM,
3897                                    H2DRING_INFO_BUFPOST_ITEMSIZE, ringid);
3898         if (ret != BCME_OK) {
3899             DHD_ERROR(("%s: couldn't alloc resources for dbg submit ring\n",
3900                        __FUNCTION__));
3901             goto err;
3902         }
3903     }
3904 
3905     if (prot->d2hring_info_cpln == NULL) {
3906         prot->d2hring_info_cpln = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
3907 
3908         if (prot->d2hring_info_cpln == NULL) {
3909             DHD_ERROR(("%s: couldn't alloc memory for d2hring_info_cpln\n",
3910                        __FUNCTION__));
3911             return BCME_NOMEM;
3912         }
3913 
3914         /* create the debug info completion ring next to debug info submit ring
3915          * ringid = id next to debug info submit ring
3916          */
3917         ringid = ringid + 1;
3918 
3919         DHD_INFO(("%s: about to create debug cpl ring\n", __FUNCTION__));
3920         ret = dhd_prot_ring_attach(dhd, prot->d2hring_info_cpln, "d2hinfo",
3921                                    D2HRING_DYNAMIC_INFO_MAX_ITEM,
3922                                    D2HRING_INFO_BUFCMPLT_ITEMSIZE, ringid);
3923         if (ret != BCME_OK) {
3924             DHD_ERROR(("%s: couldn't alloc resources for dbg cpl ring\n",
3925                        __FUNCTION__));
3926             dhd_prot_ring_detach(dhd, prot->h2dring_info_subn);
3927             goto err;
3928         }
3929     }
3930 
3931     return ret;
3932 err:
3933     MFREE(prot->osh, prot->h2dring_info_subn, sizeof(msgbuf_ring_t));
3934     prot->h2dring_info_subn = NULL;
3935 
3936     if (prot->d2hring_info_cpln) {
3937         MFREE(prot->osh, prot->d2hring_info_cpln, sizeof(msgbuf_ring_t));
3938         prot->d2hring_info_cpln = NULL;
3939     }
3940     return ret;
3941 } /* dhd_check_create_info_rings */
3942 
3943 int dhd_prot_init_info_rings(dhd_pub_t *dhd)
3944 {
3945     dhd_prot_t *prot = dhd->prot;
3946     int ret = BCME_OK;
3947 
3948     if ((ret = dhd_check_create_info_rings(dhd)) != BCME_OK) {
3949         DHD_ERROR(("%s: info rings could not be created!\n", __FUNCTION__));
3950         return ret;
3951     }
3952 
3953     if ((prot->d2hring_info_cpln->inited) ||
3954         (prot->d2hring_info_cpln->create_pending)) {
3955         DHD_INFO(("Info completion ring already created\n"));
3956         return ret;
3957     }
3958 
3959     DHD_TRACE(("trying to send create d2h info ring: id %d\n",
3960                prot->d2hring_info_cpln->idx));
3961     ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_info_cpln,
3962                                   BCMPCIE_D2H_RING_TYPE_DBGBUF_CPL,
3963                                   DHD_D2H_DBGRING_REQ_PKTID);
3964     if (ret != BCME_OK) {
3965         return ret;
3966     }
3967 
3968     prot->h2dring_info_subn->seqnum = H2D_EPOCH_INIT_VAL;
3969     prot->h2dring_info_subn->current_phase = 0;
3970     prot->d2hring_info_cpln->seqnum = D2H_EPOCH_INIT_VAL;
3971     prot->d2hring_info_cpln->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
3972 
3973     DHD_TRACE(("trying to send create h2d info ring id %d\n",
3974                prot->h2dring_info_subn->idx));
3975     prot->h2dring_info_subn->n_completion_ids = 1;
3976     prot->h2dring_info_subn->compeltion_ring_ids[0] =
3977         prot->d2hring_info_cpln->idx;
3978 
3979     ret = dhd_send_h2d_ringcreate(dhd, prot->h2dring_info_subn,
3980                                   BCMPCIE_H2D_RING_TYPE_DBGBUF_SUBMIT,
3981                                   DHD_H2D_DBGRING_REQ_PKTID);
3982 
3983     /* Note that there is no way to delete a d2h or h2d ring once it has
3984      * been created, so we cannot clean up if one ring was created while the
3985      * other failed
3986      */
3987     return ret;
3988 } /* dhd_prot_init_info_rings */
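/*
 * Editorial sketch (assumption-based): the D2H completion ring is requested
 * first because the H2D submit ring's create message must carry the id of the
 * completion ring it reports into. The pairing step above reduces to:
 */
#if 0 /* illustrative only; field name (with its upstream typo) as used above */
static void info_rings_pair(msgbuf_ring_t *subn, msgbuf_ring_t *cpln)
{
    subn->n_completion_ids = 1;
    subn->compeltion_ring_ids[0] = cpln->idx;
}
#endif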
3989 
3990 static void dhd_prot_detach_info_rings(dhd_pub_t *dhd)
3991 {
3992     if (dhd->prot->h2dring_info_subn) {
3993         dhd_prot_ring_detach(dhd, dhd->prot->h2dring_info_subn);
3994         MFREE(dhd->prot->osh, dhd->prot->h2dring_info_subn,
3995               sizeof(msgbuf_ring_t));
3996         dhd->prot->h2dring_info_subn = NULL;
3997     }
3998     if (dhd->prot->d2hring_info_cpln) {
3999         dhd_prot_ring_detach(dhd, dhd->prot->d2hring_info_cpln);
4000         MFREE(dhd->prot->osh, dhd->prot->d2hring_info_cpln,
4001               sizeof(msgbuf_ring_t));
4002         dhd->prot->d2hring_info_cpln = NULL;
4003     }
4004 }
4005 
4006 #ifdef DHD_HP2P
4007 static int dhd_check_create_hp2p_rings(dhd_pub_t *dhd)
4008 {
4009     dhd_prot_t *prot = dhd->prot;
4010     int ret = BCME_ERROR;
4011     uint16 ringid;
4012 
4013     /* Last 2 dynamic ring indices are used by hp2p rings */
4014     ringid =
4015         dhd->bus->max_submission_rings + dhd->bus->max_completion_rings - 0x2;
4016 
4017     if (prot->d2hring_hp2p_txcpl == NULL) {
4018         prot->d2hring_hp2p_txcpl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
4019 
4020         if (prot->d2hring_hp2p_txcpl == NULL) {
4021             DHD_ERROR(("%s: couldn't alloc memory for d2hring_hp2p_txcpl\n",
4022                        __FUNCTION__));
4023             return BCME_NOMEM;
4024         }
4025 
4026         DHD_INFO(("%s: about to create hp2p txcpl ring\n", __FUNCTION__));
4027         ret =
4028             dhd_prot_ring_attach(dhd, prot->d2hring_hp2p_txcpl, "d2hhp2p_txcpl",
4029                                  dhd_bus_get_hp2p_ring_max_size(dhd->bus, TRUE),
4030                                  D2HRING_TXCMPLT_ITEMSIZE, ringid);
4031         if (ret != BCME_OK) {
4032             DHD_ERROR(("%s: couldn't alloc resources for hp2p txcpl ring\n",
4033                        __FUNCTION__));
4034             goto err2;
4035         }
4036     } else {
4037         /* for re-entry case, clear inited flag */
4038         prot->d2hring_hp2p_txcpl->inited = FALSE;
4039     }
4040     if (prot->d2hring_hp2p_rxcpl == NULL) {
4041         prot->d2hring_hp2p_rxcpl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
4042 
4043         if (prot->d2hring_hp2p_rxcpl == NULL) {
4044             DHD_ERROR(("%s: couldn't alloc memory for d2hring_hp2p_rxcpl\n",
4045                        __FUNCTION__));
4046             return BCME_NOMEM;
4047         }
4048 
4049         /* create the hp2p rx completion ring next to hp2p tx compl ring
4050          * ringid = id next to hp2p tx compl ring
4051          */
4052         ringid = ringid + 1;
4053 
4054         DHD_INFO(("%s: about to create hp2p rxcpl ring\n", __FUNCTION__));
4055         ret = dhd_prot_ring_attach(
4056             dhd, prot->d2hring_hp2p_rxcpl, "d2hhp2p_rxcpl",
4057             dhd_bus_get_hp2p_ring_max_size(dhd->bus, FALSE),
4058             D2HRING_RXCMPLT_ITEMSIZE, ringid);
4059         if (ret != BCME_OK) {
4060             DHD_ERROR(("%s: couldn't alloc resources for hp2p rxcpl ring\n",
4061                        __FUNCTION__));
4062             goto err1;
4063         }
4064     } else {
4065         /* for re-entry case, clear inited flag */
4066         prot->d2hring_hp2p_rxcpl->inited = FALSE;
4067     }
4068 
4069     return ret;
4070 err1:
4071     MFREE(prot->osh, prot->d2hring_hp2p_rxcpl, sizeof(msgbuf_ring_t));
4072     prot->d2hring_hp2p_rxcpl = NULL;
4073 
4074 err2:
4075     MFREE(prot->osh, prot->d2hring_hp2p_txcpl, sizeof(msgbuf_ring_t));
4076     prot->d2hring_hp2p_txcpl = NULL;
4077     return ret;
4078 } /* dhd_check_create_hp2p_rings */
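/*
 * Editorial note: dhd_check_create_hp2p_rings() pins its two rings to the
 * last two ids of the overall ring id space:
 *   txcpl id = max_submission_rings + max_completion_rings - 2
 *   rxcpl id = max_submission_rings + max_completion_rings - 1
 * so they never collide with the dynamically numbered info/EDL rings above.
 */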
4079 
4080 int dhd_prot_init_hp2p_rings(dhd_pub_t *dhd)
4081 {
4082     dhd_prot_t *prot = dhd->prot;
4083     int ret = BCME_OK;
4084 
4085     dhd->hp2p_ring_active = FALSE;
4086 
4087     if ((ret = dhd_check_create_hp2p_rings(dhd)) != BCME_OK) {
4088         DHD_ERROR(("%s: hp2p rings could not be created!\n", __FUNCTION__));
4089         return ret;
4090     }
4091 
4092     if ((prot->d2hring_hp2p_txcpl->inited) ||
4093         (prot->d2hring_hp2p_txcpl->create_pending)) {
4094         DHD_INFO(("hp2p tx completion ring already created\n"));
4095         return ret;
4096     }
4097 
4098     DHD_TRACE(("trying to send create d2h hp2p txcpl ring: id %d\n",
4099                prot->d2hring_hp2p_txcpl->idx));
4100     ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_hp2p_txcpl,
4101                                   BCMPCIE_D2H_RING_TYPE_HPP_TX_CPL,
4102                                   DHD_D2H_HPPRING_TXREQ_PKTID);
4103     if (ret != BCME_OK) {
4104         return ret;
4105     }
4106 
4107     prot->d2hring_hp2p_txcpl->seqnum = D2H_EPOCH_INIT_VAL;
4108     prot->d2hring_hp2p_txcpl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4109 
4110     if ((prot->d2hring_hp2p_rxcpl->inited) ||
4111         (prot->d2hring_hp2p_rxcpl->create_pending)) {
4112         DHD_INFO(("hp2p rx completion ring already created\n"));
4113         return ret;
4114     }
4115 
4116     DHD_TRACE(("trying to send create d2h hp2p rxcpl ring: id %d\n",
4117                prot->d2hring_hp2p_rxcpl->idx));
4118     ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_hp2p_rxcpl,
4119                                   BCMPCIE_D2H_RING_TYPE_HPP_RX_CPL,
4120                                   DHD_D2H_HPPRING_RXREQ_PKTID);
4121     if (ret != BCME_OK) {
4122         return ret;
4123     }
4124 
4125     prot->d2hring_hp2p_rxcpl->seqnum = D2H_EPOCH_INIT_VAL;
4126     prot->d2hring_hp2p_rxcpl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4127 
4128     /* Note that there is no way to delete a d2h or h2d ring once it has
4129      * been created, so we cannot clean up if one ring was created while the
4130      * other failed
4131      */
4132     return BCME_OK;
4133 } /* dhd_prot_init_hp2p_rings */
4134 
4135 static void dhd_prot_detach_hp2p_rings(dhd_pub_t *dhd)
4136 {
4137     if (dhd->prot->d2hring_hp2p_txcpl) {
4138         dhd_prot_ring_detach(dhd, dhd->prot->d2hring_hp2p_txcpl);
4139         MFREE(dhd->prot->osh, dhd->prot->d2hring_hp2p_txcpl,
4140               sizeof(msgbuf_ring_t));
4141         dhd->prot->d2hring_hp2p_txcpl = NULL;
4142     }
4143     if (dhd->prot->d2hring_hp2p_rxcpl) {
4144         dhd_prot_ring_detach(dhd, dhd->prot->d2hring_hp2p_rxcpl);
4145         MFREE(dhd->prot->osh, dhd->prot->d2hring_hp2p_rxcpl,
4146               sizeof(msgbuf_ring_t));
4147         dhd->prot->d2hring_hp2p_rxcpl = NULL;
4148     }
4149 }
4150 #endif /* DHD_HP2P */
4151 
4152 #ifdef EWP_EDL
4153 static int dhd_check_create_edl_rings(dhd_pub_t *dhd)
4154 {
4155     dhd_prot_t *prot = dhd->prot;
4156     int ret = BCME_ERROR;
4157     uint16 ringid;
4158 
4159     {
4160         /* dongle may increase max_submission_rings so keep
4161          * ringid at end of dynamic rings (re-use info ring cpl ring id)
4162          */
4163         ringid = dhd->bus->max_tx_flowrings +
4164                  (dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) +
4165                  BCMPCIE_H2D_COMMON_MSGRINGS + 1;
4166     }
4167 
4168     if (prot->d2hring_edl) {
4169         prot->d2hring_edl->inited = FALSE;
4170         return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
4171     }
4172 
4173     if (prot->d2hring_edl == NULL) {
4174         prot->d2hring_edl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
4175 
4176         if (prot->d2hring_edl == NULL) {
4177             DHD_ERROR(
4178                 ("%s: couldn't alloc memory for d2hring_edl\n", __FUNCTION__));
4179             return BCME_NOMEM;
4180         }
4181 
4182         DHD_ERROR(("%s: about to create EDL ring, ringid: %u \n", __FUNCTION__,
4183                    ringid));
4184         ret = dhd_prot_ring_attach(dhd, prot->d2hring_edl, "d2hring_edl",
4185                                    D2HRING_EDL_MAX_ITEM, D2HRING_EDL_ITEMSIZE,
4186                                    ringid);
4187         if (ret != BCME_OK) {
4188             DHD_ERROR(
4189                 ("%s: couldn't alloc resources for EDL ring\n", __FUNCTION__));
4190             goto err;
4191         }
4192     }
4193 
4194     return ret;
4195 err:
4196     MFREE(prot->osh, prot->d2hring_edl, sizeof(msgbuf_ring_t));
4197     prot->d2hring_edl = NULL;
4198 
4199     return ret;
4200 } /* dhd_check_create_edl_rings */
4201 
4202 int dhd_prot_init_edl_rings(dhd_pub_t *dhd)
4203 {
4204     dhd_prot_t *prot = dhd->prot;
4205     int ret = BCME_ERROR;
4206 
4207     if ((ret = dhd_check_create_edl_rings(dhd)) != BCME_OK) {
4208         DHD_ERROR(("%s: EDL rings could not be created!\n", __FUNCTION__));
4209         return ret;
4210     }
4211 
4212     if ((prot->d2hring_edl->inited) || (prot->d2hring_edl->create_pending)) {
4213         DHD_INFO(("EDL completion ring already created\n"));
4214         return ret;
4215     }
4216 
4217     DHD_ERROR(("trying to send create d2h edl ring: idx %d\n",
4218                prot->d2hring_edl->idx));
4219     ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_edl,
4220                                   BCMPCIE_D2H_RING_TYPE_EDL,
4221                                   DHD_D2H_DBGRING_REQ_PKTID);
4222     if (ret != BCME_OK) {
4223         return ret;
4224     }
4225 
4226     prot->d2hring_edl->seqnum = D2H_EPOCH_INIT_VAL;
4227     prot->d2hring_edl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4228 
4229     return BCME_OK;
4230 } /* dhd_prot_init_edl_rings */
4231 
4232 static void dhd_prot_detach_edl_rings(dhd_pub_t *dhd)
4233 {
4234     if (dhd->prot->d2hring_edl) {
4235         dhd_prot_ring_detach(dhd, dhd->prot->d2hring_edl);
4236         MFREE(dhd->prot->osh, dhd->prot->d2hring_edl, sizeof(msgbuf_ring_t));
4237         dhd->prot->d2hring_edl = NULL;
4238     }
4239 }
4240 #endif /* EWP_EDL */
4241 
4242 /**
4243  * Initialize protocol: sync w/dongle state.
4244  * Sets dongle media info (iswl, drv_version, mac address).
4245  */
4246 int dhd_sync_with_dongle(dhd_pub_t *dhd)
4247 {
4248     int ret = 0;
4249     wlc_rev_info_t revinfo;
4250     char buf[128];
4251     dhd_prot_t *prot = dhd->prot;
4252 
4253     DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4254 
4255     dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
4256 
4257     /* Post ts buffer after shim layer is attached */
4258     ret = dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
4259 
4260 #ifdef DHD_FW_COREDUMP
4261     /* Check the memdump capability */
4262     dhd_get_memdump_info(dhd);
4263 #endif /* DHD_FW_COREDUMP */
4264 #ifdef BCMASSERT_LOG
4265     dhd_get_assert_info(dhd);
4266 #endif /* BCMASSERT_LOG */
4267 
4268     /* Get the device rev info */
4269     memset(&revinfo, 0, sizeof(revinfo));
4270     ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo),
4271                            FALSE, 0);
4272     if (ret < 0) {
4273         DHD_ERROR(("%s: GET revinfo FAILED\n", __FUNCTION__));
4274         goto done;
4275     }
4276     DHD_ERROR(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n",
4277                __FUNCTION__, revinfo.deviceid, revinfo.vendorid,
4278                revinfo.chipnum));
4279 
4280     /* Get the RxBuf post size */
4281     memset(buf, 0, sizeof(buf));
4282     bcm_mkiovar("rxbufpost_sz", NULL, 0, buf, sizeof(buf));
4283     ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0);
4284     if (ret < 0) {
4285         DHD_ERROR(("%s: GET RxBuf post FAILED, default to %d\n", __FUNCTION__,
4286                    DHD_FLOWRING_RX_BUFPOST_PKTSZ));
4287         prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
4288     } else {
4289         memcpy_s(&(prot->rxbufpost_sz), sizeof(prot->rxbufpost_sz), buf,
4290                  sizeof(uint16));
4291         if (prot->rxbufpost_sz > DHD_FLOWRING_RX_BUFPOST_PKTSZ_MAX) {
4292             DHD_ERROR(("%s: Invalid RxBuf post size : %d, default to %d\n",
4293                        __FUNCTION__, prot->rxbufpost_sz,
4294                        DHD_FLOWRING_RX_BUFPOST_PKTSZ));
4295             prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
4296         } else {
4297             DHD_ERROR(
4298                 ("%s: RxBuf Post : %d\n", __FUNCTION__, prot->rxbufpost_sz));
4299         }
4300     }
4301 
4302     /* Post buffers for packet reception */
4303     dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
4304 
4305     DHD_SSSR_DUMP_INIT(dhd);
4306 
4307     dhd_process_cid_mac(dhd, TRUE);
4308     ret = dhd_preinit_ioctls(dhd);
4309     dhd_process_cid_mac(dhd, FALSE);
4310 
4311 #if defined(DHD_H2D_LOG_TIME_SYNC)
4312 #ifdef DHD_HP2P
4313     if (FW_SUPPORTED(dhd, h2dlogts) || dhd->hp2p_capable)
4314 #else
4315     if (FW_SUPPORTED(dhd, h2dlogts))
4316 #endif // endif
4317     {
4318 #ifdef DHD_HP2P
4319         if (dhd->hp2p_enable) {
4320             dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH / 0x28;
4321         } else {
4322             dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH;
4323         }
4324 #else
4325         dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH;
4326 #endif // endif
4327         dhd->bus->dhd_rte_time_sync_count = OSL_SYSUPTIME_US();
4328         /* This is during initialization. */
4329         dhd_h2d_log_time_sync(dhd);
4330     } else {
4331         dhd->dhd_rte_time_sync_ms = 0;
4332     }
4333 #endif /* DHD_H2D_LOG_TIME_SYNC */
4334     /* Always assumes wl for now */
4335     dhd->iswl = TRUE;
4336 done:
4337     return ret;
4338 } /* dhd_sync_with_dongle */
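/*
 * Editorial sketch (assumption-based): the rxbufpost_sz query above follows
 * the generic GET-iovar pattern used throughout this file -- pack the iovar
 * name with bcm_mkiovar(), issue WLC_GET_VAR, then copy the typed result out
 * of the same buffer. A hypothetical helper distilling that pattern:
 */
#if 0 /* illustrative only */
static int dhd_iovar_get_uint16(dhd_pub_t *dhd, const char *name, uint16 *out)
{
    char buf[64];
    int ret;

    memset(buf, 0, sizeof(buf));
    bcm_mkiovar(name, NULL, 0, buf, sizeof(buf));
    ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0);
    if (ret >= 0) {
        memcpy_s(out, sizeof(*out), buf, sizeof(uint16));
    }
    return ret;
}
#endif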
4339 
4340 #define DHD_DBG_SHOW_METADATA 0
4341 
4342 #if DHD_DBG_SHOW_METADATA
4343 static void BCMFASTPATH dhd_prot_print_metadata(dhd_pub_t *dhd, void *ptr,
4344                                                 int len)
4345 {
4346     uint8 tlv_t;
4347     uint8 tlv_l;
4348     uint8 *tlv_v = (uint8 *)ptr;
4349 
4350     if (len <= BCMPCIE_D2H_METADATA_HDRLEN) {
4351         return;
4352     }
4353 
4354     len -= BCMPCIE_D2H_METADATA_HDRLEN;
4355     tlv_v += BCMPCIE_D2H_METADATA_HDRLEN;
4356 
4357     while (len > TLV_HDR_LEN) {
4358         tlv_t = tlv_v[TLV_TAG_OFF];
4359         tlv_l = tlv_v[TLV_LEN_OFF];
4360 
4361         len -= TLV_HDR_LEN;
4362         tlv_v += TLV_HDR_LEN;
4363         if (len < tlv_l) {
4364             break;
4365         }
4366         if ((tlv_t == 0) || (tlv_t == WLFC_CTL_TYPE_FILLER)) {
4367             break;
4368         }
4369 
4370         switch (tlv_t) {
4371             case WLFC_CTL_TYPE_TXSTATUS: {
4372                 uint32 txs;
4373                 memcpy(&txs, tlv_v, sizeof(uint32));
4374                 if (tlv_l <
4375                     (sizeof(wl_txstatus_additional_info_t) + sizeof(uint32))) {
4376                     printf("METADATA TX_STATUS: %08x\n", txs);
4377                 } else {
4378                     wl_txstatus_additional_info_t tx_add_info;
4379                     memcpy(&tx_add_info, tlv_v + sizeof(uint32),
4380                            sizeof(wl_txstatus_additional_info_t));
4381                     printf("METADATA TX_STATUS: %08x WLFCTS[%04x | %08x - %08x "
4382                            "- %08x]"
4383                            " rate = %08x tries = %d - %d\n",
4384                            txs, tx_add_info.seq, tx_add_info.entry_ts,
4385                            tx_add_info.enq_ts, tx_add_info.last_ts,
4386                            tx_add_info.rspec, tx_add_info.rts_cnt,
4387                            tx_add_info.tx_cnt);
4388                 }
4389             } break;
4390 
4391             case WLFC_CTL_TYPE_RSSI: {
4392                 if (tlv_l == 1) {
4393                     printf("METADATA RX_RSSI: rssi = %d\n", *tlv_v);
4394                 } else {
4395                     printf("METADATA RX_RSSI[%04x]: rssi = %d snr = %d\n",
4396                            (*(tlv_v + 3) << 8) | *(tlv_v + 2), (int8)(*tlv_v),
4397                            *(tlv_v + 1));
4398                 }
4399             } break;
4400 
4401             case WLFC_CTL_TYPE_FIFO_CREDITBACK:
4402                 bcm_print_bytes("METADATA FIFO_CREDITBACK", tlv_v, tlv_l);
4403                 break;
4404 
4405             case WLFC_CTL_TYPE_TX_ENTRY_STAMP:
4406                 bcm_print_bytes("METADATA TX_ENTRY", tlv_v, tlv_l);
4407                 break;
4408 
4409             case WLFC_CTL_TYPE_RX_STAMP: {
4410                 struct {
4411                     uint32 rspec;
4412                     uint32 bus_time;
4413                     uint32 wlan_time;
4414                 } rx_tmstamp;
4415                 memcpy(&rx_tmstamp, tlv_v, sizeof(rx_tmstamp));
4416                 printf(
4417                     "METADATA RX TIMESTAMP: WLFCTS[%08x - %08x] rate = %08x\n",
4418                     rx_tmstamp.wlan_time, rx_tmstamp.bus_time,
4419                     rx_tmstamp.rspec);
4420             } break;
4421 
4422             case WLFC_CTL_TYPE_TRANS_ID:
4423                 bcm_print_bytes("METADATA TRANS_ID", tlv_v, tlv_l);
4424                 break;
4425 
4426             case WLFC_CTL_TYPE_COMP_TXSTATUS:
4427                 bcm_print_bytes("METADATA COMP_TXSTATUS", tlv_v, tlv_l);
4428                 break;
4429 
4430             default:
4431                 bcm_print_bytes("METADATA UNKNOWN", tlv_v, tlv_l);
4432                 break;
4433         }
4434 
4435         len -= tlv_l;
4436         tlv_v += tlv_l;
4437     }
4438 }
4439 #endif /* DHD_DBG_SHOW_METADATA */
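/*
 * Editorial sketch: dhd_prot_print_metadata() above walks a {tag, len, value}
 * stream; the loop invariant is that the remaining byte count always bounds
 * the next value, so a truncated TLV ends the walk. The bare traversal,
 * separated from the printing (hypothetical helper, same TLV_* offsets):
 */
#if 0 /* illustrative only */
static void dhd_tlv_walk(const uint8 *buf, int len,
                         void (*visit)(uint8 tag, uint8 vlen, const uint8 *val))
{
    while (len > TLV_HDR_LEN) {
        uint8 tag = buf[TLV_TAG_OFF];
        uint8 vlen = buf[TLV_LEN_OFF];

        buf += TLV_HDR_LEN;
        len -= TLV_HDR_LEN;
        if (len < vlen) {
            break; /* truncated value: stop, as the printer does */
        }
        visit(tag, vlen, buf);
        buf += vlen;
        len -= vlen;
    }
}
#endif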
4440 
4441 static INLINE void BCMFASTPATH dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt,
4442                                                     uint8 pkttype, bool send)
4443 {
4444     if (pkt) {
4445         if (pkttype == PKTTYPE_IOCTL_RX || pkttype == PKTTYPE_EVENT_RX ||
4446             pkttype == PKTTYPE_INFO_RX || pkttype == PKTTYPE_TSBUF_RX) {
4447 #ifdef DHD_USE_STATIC_CTRLBUF
4448             PKTFREE_STATIC(dhd->osh, pkt, send);
4449 #else
4450             PKTFREE(dhd->osh, pkt, send);
4451 #endif /* DHD_USE_STATIC_CTRLBUF */
4452         } else {
4453             PKTFREE(dhd->osh, pkt, send);
4454         }
4455     }
4456 }
4457 
4458 /**
4459  * dhd_prot_packet_get should be called only for items mapped through the
4460  * pktid_ctrl_map handle. The bottom-level functions such as
4461  * dhd_pktid_map_free take the separate DHD_PKTID_LOCK to ensure thread
4462  * safety, so no additional locking is needed in this function
4463  */
4464 static INLINE void *BCMFASTPATH dhd_prot_packet_get(dhd_pub_t *dhd,
4465                                                     uint32 pktid, uint8 pkttype,
4466                                                     bool free_pktid)
4467 {
4468     void *PKTBUF;
4469     dmaaddr_t pa;
4470     uint32 len;
4471     void *dmah;
4472     void *secdma;
4473 
4474 #ifdef DHD_PCIE_PKTID
4475     if (free_pktid) {
4476         PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map, pktid, pa,
4477                                      len, dmah, secdma, pkttype);
4478     } else {
4479         PKTBUF = DHD_PKTID_TO_NATIVE_RSV(dhd, dhd->prot->pktid_ctrl_map, pktid,
4480                                          pa, len, dmah, secdma, pkttype);
4481     }
4482 #else
4483     PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map, pktid, pa, len,
4484                                  dmah, secdma, pkttype);
4485 #endif /* DHD_PCIE_PKTID */
4486     if (PKTBUF) {
4487         {
4488             if (SECURE_DMA_ENAB(dhd->osh)) {
4489                 SECURE_DMA_UNMAP(dhd->osh, pa, (uint)len, DMA_RX, 0, dmah,
4490                                  secdma, 0);
4491             } else {
4492                 DMA_UNMAP(dhd->osh, pa, (uint)len, DMA_RX, 0, dmah);
4493             }
4494 #ifdef DMAMAP_STATS
4495             switch (pkttype) {
4496 #ifndef IOCTLRESP_USE_CONSTMEM
4497                 case PKTTYPE_IOCTL_RX:
4498                     dhd->dma_stats.ioctl_rx--;
4499                     dhd->dma_stats.ioctl_rx_sz -= len;
4500                     break;
4501 #endif /* IOCTLRESP_USE_CONSTMEM */
4502                 case PKTTYPE_EVENT_RX:
4503                     dhd->dma_stats.event_rx--;
4504                     dhd->dma_stats.event_rx_sz -= len;
4505                     break;
4506                 case PKTTYPE_INFO_RX:
4507                     dhd->dma_stats.info_rx--;
4508                     dhd->dma_stats.info_rx_sz -= len;
4509                     break;
4510                 case PKTTYPE_TSBUF_RX:
4511                     dhd->dma_stats.tsbuf_rx--;
4512                     dhd->dma_stats.tsbuf_rx_sz -= len;
4513                     break;
4514             }
4515 #endif /* DMAMAP_STATS */
4516         }
4517     }
4518 
4519     return PKTBUF;
4520 }
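/*
 * Editorial sketch (assumption-based): a pktid map behaves like a locker
 * table -- the 32-bit request_id carried in ring messages indexes a slot that
 * returns the host's native packet pointer together with its DMA mapping.
 * Conceptual shape only; the real map also validates ids and packet types:
 */
#if 0 /* illustrative only */
typedef struct pktid_locker {
    void *pkt;     /* native packet, NULL while the locker is free */
    dmaaddr_t pa;  /* bus address that was handed to the dongle */
    uint32 len;    /* mapped length */
} pktid_locker_t;

static void *pktid_to_native(pktid_locker_t *map, uint32 pktid,
                             dmaaddr_t *pa, uint32 *len)
{
    void *pkt = map[pktid].pkt;

    *pa = map[pktid].pa;
    *len = map[pktid].len;
    map[pktid].pkt = NULL; /* release the locker for reuse */
    return pkt;
}
#endif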
4521 
4522 #ifdef IOCTLRESP_USE_CONSTMEM
4523 static INLINE void BCMFASTPATH dhd_prot_ioctl_ret_buffer_get(
4524     dhd_pub_t *dhd, uint32 pktid, dhd_dma_buf_t *retbuf)
4525 {
4526     memset(retbuf, 0, sizeof(dhd_dma_buf_t));
4527     retbuf->va = DHD_PKTID_TO_NATIVE(
4528         dhd, dhd->prot->pktid_map_handle_ioctl, pktid, retbuf->pa, retbuf->len,
4529         retbuf->dmah, retbuf->secdma, PKTTYPE_IOCTL_RX);
4530 
4531     return;
4532 }
4533 #endif // endif
4534 
4535 static void BCMFASTPATH dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd,
4536                                               bool use_rsv_pktid)
4537 {
4538     dhd_prot_t *prot = dhd->prot;
4539     int16 fillbufs;
4540     uint16 cnt = 256;
4541     int retcount = 0;
4542 
4543     fillbufs = prot->max_rxbufpost - prot->rxbufpost;
4544     while (fillbufs >= RX_BUF_BURST) {
4545         cnt--;
4546         if (cnt == 0) {
4547             /* find a better way to reschedule rx buf post if space not
4548              * available */
4549             DHD_ERROR(
4550                 ("h2d rx post ring not available to post host buffers \n"));
4551             DHD_ERROR(("Current posted host buf count %d \n", prot->rxbufpost));
4552             break;
4553         }
4554 
4555         /* Post in a burst of 32 buffers at a time */
4556         fillbufs = MIN(fillbufs, RX_BUF_BURST);
4557 
4558         /* Post buffers */
4559         retcount = dhd_prot_rxbuf_post(dhd, fillbufs, use_rsv_pktid);
4560 
4561         if (retcount >= 0) {
4562             prot->rxbufpost += (uint16)retcount;
4563 #ifdef DHD_LB_RXC
4564             /* dhd_prot_rxbuf_post returns the number of buffers posted */
4565             DHD_LB_STATS_UPDATE_RXC_HISTO(dhd, retcount);
4566 #endif /* DHD_LB_RXC */
4567             /* how many more to post */
4568             fillbufs = prot->max_rxbufpost - prot->rxbufpost;
4569         } else {
4570             /* Make sure we don't run loop any further */
4571             fillbufs = 0;
4572         }
4573     }
4574 }
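/*
 * Editorial note: the refill loop above is double-guarded -- it exits when
 * fewer than RX_BUF_BURST buffers are owed, or after 256 iterations if the
 * submit ring never frees space. Worked example (illustrative numbers):
 * max_rxbufpost = 256, rxbufpost = 100 -> fillbufs = 156, so successful
 * iterations post 32, 32, 32, 32 buffers and the loop exits with 28 still
 * owed, which is below one burst.
 */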
4575 
4576 /** Post 'count' no of rx buffers to dongle */
4577 static int BCMFASTPATH dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count,
4578                                            bool use_rsv_pktid)
4579 {
4580     void *p, **pktbuf;
4581     uint8 *rxbuf_post_tmp;
4582     host_rxbuf_post_t *rxbuf_post;
4583     void *msg_start;
4584     dmaaddr_t pa, *pktbuf_pa;
4585     uint32 *pktlen;
4586     uint16 i = 0, alloced = 0;
4587     unsigned long flags;
4588     uint32 pktid;
4589     dhd_prot_t *prot = dhd->prot;
4590     msgbuf_ring_t *ring = &prot->h2dring_rxp_subn;
4591     void *lcl_buf;
4592     uint16 lcl_buf_size;
4593     uint16 pktsz = prot->rxbufpost_sz;
4594 
4595     /* allocate a local buffer to store pkt buffer va, pa and length */
4596     lcl_buf_size =
4597         (sizeof(void *) + sizeof(dmaaddr_t) + sizeof(uint32)) * RX_BUF_BURST;
4598     lcl_buf = MALLOC(dhd->osh, lcl_buf_size);
4599     if (!lcl_buf) {
4600         DHD_ERROR(
4601             ("%s: local scratch buffer allocation failed\n", __FUNCTION__));
4602         return 0;
4603     }
4604     pktbuf = lcl_buf;
4605     pktbuf_pa = (dmaaddr_t *)((uint8 *)pktbuf + sizeof(void *) * RX_BUF_BURST);
4606     pktlen = (uint32 *)((uint8 *)pktbuf_pa + sizeof(dmaaddr_t) * RX_BUF_BURST);
4607 
4608     for (i = 0; i < count; i++) {
4609         if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) {
4610             DHD_ERROR(
4611                 ("%s:%d: PKTGET for rxbuf failed\n", __FUNCTION__, __LINE__));
4612             dhd->rx_pktgetfail++;
4613             break;
4614         }
4615 
4616         pktlen[i] = PKTLEN(dhd->osh, p);
4617         if (SECURE_DMA_ENAB(dhd->osh)) {
4618             pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen[i],
4619                                 DMA_RX, p, 0, ring->dma_buf.secdma, 0);
4620         }
4621 #ifndef BCM_SECURE_DMA
4622         else
4623             pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen[i], DMA_RX, p,
4624                          0);
4625 #endif /* #ifndef BCM_SECURE_DMA */
4626 
4627         if (PHYSADDRISZERO(pa)) {
4628             PKTFREE(dhd->osh, p, FALSE);
4629             DHD_ERROR(("Invalid phyaddr 0\n"));
4630             ASSERT(0);
4631             break;
4632         }
4633 #ifdef DMAMAP_STATS
4634         dhd->dma_stats.rxdata++;
4635         dhd->dma_stats.rxdata_sz += pktlen[i];
4636 #endif /* DMAMAP_STATS */
4637 
4638         PKTPULL(dhd->osh, p, prot->rx_metadata_offset);
4639         pktlen[i] = PKTLEN(dhd->osh, p);
4640         pktbuf[i] = p;
4641         pktbuf_pa[i] = pa;
4642     }
4643 
4644     /* only post what we have */
4645     count = i;
4646 
4647     /* grab the ring lock to allocate pktid and post on ring */
4648     DHD_RING_LOCK(ring->ring_lock, flags);
4649 
4650     /* Claim space for exactly 'count' messages, for mitigation purposes */
4651     msg_start =
4652         (void *)dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, TRUE);
4653     if (msg_start == NULL) {
4654         DHD_INFO(("%s:%d: Rxbufpost Msgbuf Not available\n", __FUNCTION__,
4655                   __LINE__));
4656         DHD_RING_UNLOCK(ring->ring_lock, flags);
4657         goto cleanup;
4658     }
4659     /* if msg_start != NULL, we should have alloced space for at least 1 item */
4660     ASSERT(alloced > 0);
4661 
4662     rxbuf_post_tmp = (uint8 *)msg_start;
4663 
4664     for (i = 0; i < alloced; i++) {
4665         rxbuf_post = (host_rxbuf_post_t *)rxbuf_post_tmp;
4666         p = pktbuf[i];
4667         pa = pktbuf_pa[i];
4668 
4669 #if defined(DHD_LB_RXC)
4670         if (use_rsv_pktid == TRUE) {
4671             bcm_workq_t *workq = &prot->rx_compl_cons;
4672             int elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
4673 
4674             if (elem_ix == BCM_RING_EMPTY) {
4675                 DHD_INFO(("%s rx_compl_cons ring is empty\n", __FUNCTION__));
4676                 pktid = DHD_PKTID_INVALID;
4677                 goto alloc_pkt_id;
4678             } else {
4679                 uint32 *elem = WORKQ_ELEMENT(uint32, workq, elem_ix);
4680                 pktid = *elem;
4681             }
4682 
4683             rxbuf_post->cmn_hdr.request_id = htol32(pktid);
4684 
4685             /* Now populate the previous locker with valid information */
4686             if (pktid != DHD_PKTID_INVALID) {
4687                 DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_rx_map, p, pktid,
4688                                          pa, pktlen[i], DMA_RX, NULL, NULL,
4689                                          PKTTYPE_DATA_RX);
4690             }
4691         } else
4692 #endif /* ! DHD_LB_RXC */
4693         {
4694 #if defined(DHD_LB_RXC)
4695         alloc_pkt_id:
4696 #endif /* DHD_LB_RXC */
4697             pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_rx_map, p, pa,
4698                                         pktlen[i], DMA_RX, NULL,
4699                                         ring->dma_buf.secdma, PKTTYPE_DATA_RX);
4700 #if defined(DHD_PCIE_PKTID)
4701             if (pktid == DHD_PKTID_INVALID) {
4702                 break;
4703             }
4704 #endif /* DHD_PCIE_PKTID */
4705         }
4706 
4707         /* Common msg header */
4708         rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_RXBUF_POST;
4709         rxbuf_post->cmn_hdr.if_id = 0;
4710         rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
4711         rxbuf_post->cmn_hdr.flags = ring->current_phase;
4712         ring->seqnum++;
4713         rxbuf_post->data_buf_len = htol16((uint16)pktlen[i]);
4714         rxbuf_post->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
4715         rxbuf_post->data_buf_addr.low_addr =
4716             htol32(PHYSADDRLO(pa) + prot->rx_metadata_offset);
4717 
4718         if (prot->rx_metadata_offset) {
4719             rxbuf_post->metadata_buf_len = prot->rx_metadata_offset;
4720             rxbuf_post->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
4721             rxbuf_post->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
4722         } else {
4723             rxbuf_post->metadata_buf_len = 0;
4724             rxbuf_post->metadata_buf_addr.high_addr = 0;
4725             rxbuf_post->metadata_buf_addr.low_addr = 0;
4726         }
4727 
4728 #ifdef DHD_PKTID_AUDIT_RING
4729         DHD_PKTID_AUDIT(dhd, prot->pktid_rx_map, pktid, DHD_DUPLICATE_ALLOC);
4730 #endif /* DHD_PKTID_AUDIT_RING */
4731 
4732         rxbuf_post->cmn_hdr.request_id = htol32(pktid);
4733 
4734         /* Move rxbuf_post_tmp to next item */
4735         rxbuf_post_tmp = rxbuf_post_tmp + ring->item_len;
4736 
4737 #ifdef DHD_LBUF_AUDIT
4738         PKTAUDIT(dhd->osh, p);
4739 #endif // endif
4740     }
4741 
4742     if (i < alloced) {
4743         if (ring->wr < (alloced - i)) {
4744             ring->wr = ring->max_items - (alloced - i);
4745         } else {
4746             ring->wr -= (alloced - i);
4747         }
4748 
4749         if (ring->wr == 0) {
4750             DHD_INFO(("%s: flipping the phase now\n", ring->name));
4751             ring->current_phase =
4752                 ring->current_phase ? 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4753         }
4754 
4755         alloced = i;
4756     }
4757 
4758     /* update ring's WR index and ring doorbell to dongle */
4759     if (alloced > 0) {
4760         dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
4761     }
4762 
4763     DHD_RING_UNLOCK(ring->ring_lock, flags);
4764 
4765 cleanup:
4766     for (i = alloced; i < count; i++) {
4767         p = pktbuf[i];
4768         pa = pktbuf_pa[i];
4769 
4770         if (SECURE_DMA_ENAB(dhd->osh)) {
4771             SECURE_DMA_UNMAP(dhd->osh, pa, pktlen[i], DMA_RX, 0, DHD_DMAH_NULL,
4772                              ring->dma_buf.secdma, 0);
4773         } else {
4774             DMA_UNMAP(dhd->osh, pa, pktlen[i], DMA_RX, 0, DHD_DMAH_NULL);
4775         }
4776         PKTFREE(dhd->osh, p, FALSE);
4777     }
4778 
4779     MFREE(dhd->osh, lcl_buf, lcl_buf_size);
4780 
4781     return alloced;
4782 } /* dhd_prot_rxbuf_post */
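/*
 * Editorial sketch: when fewer items are populated than were claimed
 * (i < alloced above), the write index is rolled back and the phase bit is
 * flipped whenever wr lands back on 0, so the dongle can tell freshly
 * written descriptors from stale ones. The rollback, isolated into a
 * hypothetical helper that mirrors the code above:
 */
#if 0 /* illustrative only */
static void ring_rollback_wr(msgbuf_ring_t *ring, uint16 claimed, uint16 used)
{
    uint16 unused = claimed - used;

    if (ring->wr < unused) {
        ring->wr = ring->max_items - unused; /* the claim wrapped past 0 */
    } else {
        ring->wr -= unused;
    }
    if (ring->wr == 0) {
        ring->current_phase =
            ring->current_phase ? 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
    }
}
#endif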
4783 
4784 static int dhd_prot_infobufpost(dhd_pub_t *dhd, msgbuf_ring_t *ring)
4785 {
4786     unsigned long flags;
4787     uint32 pktid;
4788     dhd_prot_t *prot = dhd->prot;
4789     uint16 alloced = 0;
4790     uint16 pktsz = DHD_INFOBUF_RX_BUFPOST_PKTSZ;
4791     uint32 pktlen;
4792     info_buf_post_msg_t *infobuf_post;
4793     uint8 *infobuf_post_tmp;
4794     void *p;
4795     void *msg_start;
4796     uint8 i = 0;
4797     dmaaddr_t pa;
4798     int16 count = 0;
4799 
4800     if (ring == NULL) {
4801         return 0;
4802     }
4803 
4804     if (ring->inited != TRUE) {
4805         return 0;
4806     }
4807     if (ring == dhd->prot->h2dring_info_subn) {
4808         if (prot->max_infobufpost == 0) {
4809             return 0;
4810         }
4811 
4812         count = prot->max_infobufpost - prot->infobufpost;
4813     } else {
4814         DHD_ERROR(("Unknown ring\n"));
4815         return 0;
4816     }
4817 
4818     if (count <= 0) {
4819         DHD_INFO(("%s: Cannot post more than max info resp buffers\n",
4820                   __FUNCTION__));
4821         return 0;
4822     }
4823 
4824     /* grab the ring lock to allocate pktid and post on ring */
4825     DHD_RING_LOCK(ring->ring_lock, flags);
4826 
4827     /* Claim space for exactly 'count' messages, for mitigation purposes */
4828     msg_start =
4829         (void *)dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, FALSE);
4830     if (msg_start == NULL) {
4831         DHD_INFO(("%s:%d: infobufpost Msgbuf Not available\n", __FUNCTION__,
4832                   __LINE__));
4833         DHD_RING_UNLOCK(ring->ring_lock, flags);
4834         return -1;
4835     }
4836 
4837     /* if msg_start != NULL, we should have alloced space for at least 1 item */
4838     ASSERT(alloced > 0);
4839 
4840     infobuf_post_tmp = (uint8 *)msg_start;
4841 
4842     /* loop through each allocated message in the host ring */
4843     for (i = 0; i < alloced; i++) {
4844         infobuf_post = (info_buf_post_msg_t *)infobuf_post_tmp;
4845         /* Create a rx buffer */
4846 #ifdef DHD_USE_STATIC_CTRLBUF
4847         p = PKTGET_STATIC(dhd->osh, pktsz, FALSE);
4848 #else
4849         p = PKTGET(dhd->osh, pktsz, FALSE);
4850 #endif /* DHD_USE_STATIC_CTRLBUF */
4851         if (p == NULL) {
4852             DHD_ERROR(
4853                 ("%s:%d: PKTGET for infobuf failed\n", __FUNCTION__, __LINE__));
4854             dhd->rx_pktgetfail++;
4855             break;
4856         }
4857         pktlen = PKTLEN(dhd->osh, p);
4858         if (SECURE_DMA_ENAB(dhd->osh)) {
4859             pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX,
4860                                 p, 0, ring->dma_buf.secdma, 0);
4861         }
4862 #ifndef BCM_SECURE_DMA
4863         else
4864             pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
4865 #endif /* #ifndef BCM_SECURE_DMA */
4866         if (PHYSADDRISZERO(pa)) {
4867             if (SECURE_DMA_ENAB(dhd->osh)) {
4868                 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
4869                                  ring->dma_buf.secdma, 0);
4870             } else {
4871                 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
4872             }
4873 #ifdef DHD_USE_STATIC_CTRLBUF
4874             PKTFREE_STATIC(dhd->osh, p, FALSE);
4875 #else
4876             PKTFREE(dhd->osh, p, FALSE);
4877 #endif /* DHD_USE_STATIC_CTRLBUF */
4878             DHD_ERROR(("Invalid phyaddr 0\n"));
4879             ASSERT(0);
4880             break;
4881         }
4882 #ifdef DMAMAP_STATS
4883         dhd->dma_stats.info_rx++;
4884         dhd->dma_stats.info_rx_sz += pktlen;
4885 #endif /* DMAMAP_STATS */
4886         pktlen = PKTLEN(dhd->osh, p);
4887 
4888         /* Common msg header */
4889         infobuf_post->cmn_hdr.msg_type = MSG_TYPE_INFO_BUF_POST;
4890         infobuf_post->cmn_hdr.if_id = 0;
4891         infobuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
4892         infobuf_post->cmn_hdr.flags = ring->current_phase;
4893         ring->seqnum++;
4894 
4895         pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_ctrl_map, p, pa,
4896                                     pktlen, DMA_RX, NULL, ring->dma_buf.secdma,
4897                                     PKTTYPE_INFO_RX);
4898 #if defined(DHD_PCIE_PKTID)
4899         if (pktid == DHD_PKTID_INVALID) {
4900             if (SECURE_DMA_ENAB(dhd->osh)) {
4901                 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0,
4902                                  ring->dma_buf.secdma, 0);
4903             } else {
4904                 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0);
4905             }
4906 
4907 #ifdef DHD_USE_STATIC_CTRLBUF
4908             PKTFREE_STATIC(dhd->osh, p, FALSE);
4909 #else
4910             PKTFREE(dhd->osh, p, FALSE);
4911 #endif /* DHD_USE_STATIC_CTRLBUF */
4912             DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
4913             break;
4914         }
4915 #endif /* DHD_PCIE_PKTID */
4916 
4917         infobuf_post->host_buf_len = htol16((uint16)pktlen);
4918         infobuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
4919         infobuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
4920 
4921 #ifdef DHD_PKTID_AUDIT_RING
4922         DHD_PKTID_AUDIT(dhd, prot->pktid_ctrl_map, pktid, DHD_DUPLICATE_ALLOC);
4923 #endif /* DHD_PKTID_AUDIT_RING */
4924 
4925         infobuf_post->cmn_hdr.request_id = htol32(pktid);
4926 
4927         DHD_INFO(("ID %d, low_addr 0x%08x, high_addr 0x%08x\n",
4928                   infobuf_post->cmn_hdr.request_id,
4929                   infobuf_post->host_buf_addr.low_addr,
4930                   infobuf_post->host_buf_addr.high_addr));
4931         /* Move infobuf_post_tmp to next item */
4932         infobuf_post_tmp = infobuf_post_tmp + ring->item_len;
4933 #ifdef DHD_LBUF_AUDIT
4934         PKTAUDIT(dhd->osh, p);
4935 #endif // endif
4936     }
4937 
4938     if (i < alloced) {
4939         if (ring->wr < (alloced - i)) {
4940             ring->wr = ring->max_items - (alloced - i);
4941         } else {
4942             ring->wr -= (alloced - i);
4943         }
4944 
4945         alloced = i;
4946         if (alloced && ring->wr == 0) {
4947             DHD_INFO(("%s: flipping the phase now\n", ring->name));
4948             ring->current_phase =
4949                 ring->current_phase ? 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4950         }
4951     }
4952 
4953     /* Update the write pointer in TCM & ring bell */
4954     if (alloced > 0) {
4955         if (ring == dhd->prot->h2dring_info_subn) {
4956             prot->infobufpost += alloced;
4957         }
4958         dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
4959     }
4960 
4961     DHD_RING_UNLOCK(ring->ring_lock, flags);
4962 
4963     return alloced;
4964 } /* dhd_prot_infobufpost */
4965 
4966 #ifdef IOCTLRESP_USE_CONSTMEM
4967 static int alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf)
4968 {
4969     int err;
4970     memset(retbuf, 0, sizeof(dhd_dma_buf_t));
4971 
4972     if ((err = dhd_dma_buf_alloc(dhd, retbuf, IOCT_RETBUF_SIZE)) != BCME_OK) {
4973         DHD_ERROR(("%s: dhd_dma_buf_alloc err %d\n", __FUNCTION__, err));
4974         ASSERT(0);
4975         return BCME_NOMEM;
4976     }
4977 
4978     return BCME_OK;
4979 }
4980 
4981 static void free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf)
4982 {
4983     /* retbuf (declared on the caller's stack) is not fully populated; reconstruct len and _alloced before freeing */
4984     if (retbuf->va) {
4985         uint32 dma_pad;
4986         dma_pad = (IOCT_RETBUF_SIZE % DHD_DMA_PAD) ? DHD_DMA_PAD : 0;
4987         retbuf->len = IOCT_RETBUF_SIZE;
4988         retbuf->_alloced = retbuf->len + dma_pad;
4989     }
4990 
4991     dhd_dma_buf_free(dhd, retbuf);
4992     return;
4993 }
4994 #endif /* IOCTLRESP_USE_CONSTMEM */
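/*
 * Editorial note: free_ioctl_return_buffer() reconstructs the bookkeeping
 * that dhd_dma_buf_alloc() recorded, because the caller's retbuf is a stack
 * copy. The pad term mirrors the allocator: if IOCT_RETBUF_SIZE is not a
 * multiple of DHD_DMA_PAD, a full DHD_DMA_PAD of slack was allocated, so
 * _alloced = len + DHD_DMA_PAD; otherwise _alloced = len.
 */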
4995 
4996 static int dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, uint8 msg_type)
4997 {
4998     void *p;
4999     uint16 pktsz;
5000     ioctl_resp_evt_buf_post_msg_t *rxbuf_post;
5001     dmaaddr_t pa;
5002     uint32 pktlen;
5003     dhd_prot_t *prot = dhd->prot;
5004     uint16 alloced = 0;
5005     unsigned long flags;
5006     dhd_dma_buf_t retbuf;
5007     void *dmah = NULL;
5008     uint32 pktid;
5009     void *map_handle;
5010     msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
5011     bool non_ioctl_resp_buf = 0;
5012     dhd_pkttype_t buf_type;
5013 
5014     if (dhd->busstate == DHD_BUS_DOWN) {
5015         DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
5016         return -1;
5017     }
5018     memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
5019 
5020     if (msg_type == MSG_TYPE_IOCTLRESP_BUF_POST) {
5021         buf_type = PKTTYPE_IOCTL_RX;
5022     } else if (msg_type == MSG_TYPE_EVENT_BUF_POST) {
5023         buf_type = PKTTYPE_EVENT_RX;
5024     } else if (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST) {
5025         buf_type = PKTTYPE_TSBUF_RX;
5026     } else {
5027         DHD_ERROR(
5028             ("invalid message type to be posted to Ctrl ring %d\n", msg_type));
5029         return -1;
5030     }
5031 
5032     if ((msg_type == MSG_TYPE_EVENT_BUF_POST) ||
5033         (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST)) {
5034         non_ioctl_resp_buf = TRUE;
5035     } else {
5036         non_ioctl_resp_buf = FALSE;
5037     }
5038 
5039     if (non_ioctl_resp_buf) {
5040         /* Allocate packet for non-ioctl-response buffer post */
5041         pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
5042     } else {
5043         /* Allocate packet for ctrl/ioctl buffer post */
5044         pktsz = DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ;
5045     }
5046 
5047 #ifdef IOCTLRESP_USE_CONSTMEM
5048     if (!non_ioctl_resp_buf) {
5049         if (alloc_ioctl_return_buffer(dhd, &retbuf) != BCME_OK) {
5050             DHD_ERROR(("Could not allocate IOCTL response buffer\n"));
5051             return -1;
5052         }
5053         ASSERT(retbuf.len == IOCT_RETBUF_SIZE);
5054         p = retbuf.va;
5055         pktlen = retbuf.len;
5056         pa = retbuf.pa;
5057         dmah = retbuf.dmah;
5058     } else
5059 #endif /* IOCTLRESP_USE_CONSTMEM */
5060     {
5061 #ifdef DHD_USE_STATIC_CTRLBUF
5062         p = PKTGET_STATIC(dhd->osh, pktsz, FALSE);
5063 #else
5064         p = PKTGET(dhd->osh, pktsz, FALSE);
5065 #endif /* DHD_USE_STATIC_CTRLBUF */
5066         if (p == NULL) {
5067             DHD_ERROR(("%s:%d: PKTGET for %s buf failed\n", __FUNCTION__,
5068                        __LINE__, non_ioctl_resp_buf ? "EVENT" : "IOCTL RESP"));
5069             dhd->rx_pktgetfail++;
5070             return -1;
5071         }
5072 
5073         pktlen = PKTLEN(dhd->osh, p);
5074 
5075         if (SECURE_DMA_ENAB(dhd->osh)) {
5076             pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX,
5077                                 p, 0, ring->dma_buf.secdma, 0);
5078         }
5079 #ifndef BCM_SECURE_DMA
5080         else
5081             pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
5082 #endif /* #ifndef BCM_SECURE_DMA */
5083 
5084         if (PHYSADDRISZERO(pa)) {
5085             DHD_ERROR(("Invalid physaddr 0\n"));
5086             ASSERT(0);
5087             goto free_pkt_return;
5088         }
5089 
5090 #ifdef DMAMAP_STATS
5091         switch (buf_type) {
5092 #ifndef IOCTLRESP_USE_CONSTMEM
5093             case PKTTYPE_IOCTL_RX:
5094                 dhd->dma_stats.ioctl_rx++;
5095                 dhd->dma_stats.ioctl_rx_sz += pktlen;
5096                 break;
5097 #endif /* !IOCTLRESP_USE_CONSTMEM */
5098             case PKTTYPE_EVENT_RX:
5099                 dhd->dma_stats.event_rx++;
5100                 dhd->dma_stats.event_rx_sz += pktlen;
5101                 break;
5102             case PKTTYPE_TSBUF_RX:
5103                 dhd->dma_stats.tsbuf_rx++;
5104                 dhd->dma_stats.tsbuf_rx_sz += pktlen;
5105                 break;
5106             default:
5107                 break;
5108         }
5109 #endif /* DMAMAP_STATS */
5110     }
5111 
5112     /* grab the ring lock to allocate pktid and post on ring */
5113     DHD_RING_LOCK(ring->ring_lock, flags);
5114 
5115     rxbuf_post = (ioctl_resp_evt_buf_post_msg_t *)dhd_prot_alloc_ring_space(
5116         dhd, ring, 1, &alloced, FALSE);
5117     if (rxbuf_post == NULL) {
5118         DHD_RING_UNLOCK(ring->ring_lock, flags);
5119         DHD_ERROR(("%s:%d: Ctrl submit Msgbuf Not available to post buffer \n",
5120                    __FUNCTION__, __LINE__));
5121 
5122 #ifdef IOCTLRESP_USE_CONSTMEM
5123         if (non_ioctl_resp_buf)
5124 #endif /* IOCTLRESP_USE_CONSTMEM */
5125         {
5126             if (SECURE_DMA_ENAB(dhd->osh)) {
5127                 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
5128                                  ring->dma_buf.secdma, 0);
5129             } else {
5130                 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
5131             }
5132         }
5133         goto free_pkt_return;
5134     }
5135 
5136     /* CMN msg header */
5137     rxbuf_post->cmn_hdr.msg_type = msg_type;
5138 
5139 #ifdef IOCTLRESP_USE_CONSTMEM
5140     if (!non_ioctl_resp_buf) {
5141         map_handle = dhd->prot->pktid_map_handle_ioctl;
5142         pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, p, pa, pktlen, DMA_RX,
5143                                     dmah, ring->dma_buf.secdma, buf_type);
5144     } else
5145 #endif /* IOCTLRESP_USE_CONSTMEM */
5146     {
5147         map_handle = dhd->prot->pktid_ctrl_map;
5148         pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, p, pa, pktlen, DMA_RX,
5149                                     dmah, ring->dma_buf.secdma, buf_type);
5150     }
5151 
5152     if (pktid == DHD_PKTID_INVALID) {
5153         if (ring->wr == 0) {
5154             ring->wr = ring->max_items - 1;
5155         } else {
5156             ring->wr--;
5157             if (ring->wr == 0) {
5158                 ring->current_phase =
5159                     ring->current_phase ? 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
5160             }
5161         }
5162         DHD_RING_UNLOCK(ring->ring_lock, flags);
5163         DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
5164         DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
5165         goto free_pkt_return;
5166     }
5167 
5168 #ifdef DHD_PKTID_AUDIT_RING
5169     DHD_PKTID_AUDIT(dhd, map_handle, pktid, DHD_DUPLICATE_ALLOC);
5170 #endif /* DHD_PKTID_AUDIT_RING */
5171 
5172     rxbuf_post->cmn_hdr.request_id = htol32(pktid);
5173     rxbuf_post->cmn_hdr.if_id = 0;
5174     rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
5175     ring->seqnum++;
5176     rxbuf_post->cmn_hdr.flags = ring->current_phase;
5177 
5178 #if defined(DHD_PCIE_PKTID)
5179     if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) {
5180         if (ring->wr == 0) {
5181             ring->wr = ring->max_items - 1;
5182         } else {
            ring->wr--; /* roll back the claimed slot */
5183             if (ring->wr == 0) {
5184                 ring->current_phase =
5185                     ring->current_phase ? 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
5186             }
5187         }
5188         DHD_RING_UNLOCK(ring->ring_lock, flags);
5189 #ifdef IOCTLRESP_USE_CONSTMEM
5190         if (non_ioctl_resp_buf)
5191 #endif /* IOCTLRESP_USE_CONSTMEM */
5192         {
5193             if (SECURE_DMA_ENAB(dhd->osh)) {
5194                 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
5195                                  ring->dma_buf.secdma, 0);
5196             } else {
5197                 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
5198             }
5199         }
5200         goto free_pkt_return;
5201     }
5202 #endif /* DHD_PCIE_PKTID */
5203 
5204 #ifndef IOCTLRESP_USE_CONSTMEM
5205     rxbuf_post->host_buf_len = htol16((uint16)PKTLEN(dhd->osh, p));
5206 #else
5207     rxbuf_post->host_buf_len = htol16((uint16)pktlen);
5208 #endif /* IOCTLRESP_USE_CONSTMEM */
5209     rxbuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
5210     rxbuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
5211 
5212 #ifdef DHD_LBUF_AUDIT
5213     if (non_ioctl_resp_buf) {
5214         PKTAUDIT(dhd->osh, p);
5215     }
5216 #endif // endif
5217 
5218     /* update ring's WR index and ring doorbell to dongle */
5219     dhd_prot_ring_write_complete(dhd, ring, rxbuf_post, 1);
5220 
5221     DHD_RING_UNLOCK(ring->ring_lock, flags);
5222 
5223     return 1;
5224 
5225 free_pkt_return:
5226     if (!non_ioctl_resp_buf) {
5227 #ifdef IOCTLRESP_USE_CONSTMEM
5228         free_ioctl_return_buffer(dhd, &retbuf);
5229 #else
5230         dhd_prot_packet_free(dhd, p, buf_type, FALSE);
5231 #endif /* IOCTLRESP_USE_CONSTMEM */
5232     } else {
5233         dhd_prot_packet_free(dhd, p, buf_type, FALSE);
5234     }
5235 
5236     return -1;
5237 } /* dhd_prot_rxbufpost_ctrl */
5238 
5239 static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msg_type,
5240                                              uint32 max_to_post)
5241 {
5242     uint32 i = 0;
5243     int32 ret_val;
5244 
5245     DHD_INFO(("max to post %d, event %d \n", max_to_post, msg_type));
5246 
5247     if (dhd->busstate == DHD_BUS_DOWN) {
5248         DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
5249         return 0;
5250     }
5251 
5252     while (i < max_to_post) {
5253         ret_val = dhd_prot_rxbufpost_ctrl(dhd, msg_type);
5254         if (ret_val < 0) {
5255             break;
5256         }
5257         i++;
5258     }
5259     DHD_INFO(("posted %d buffers of type %d\n", i, msg_type));
5260     return (uint16)i;
5261 }
5262 
5263 static void dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *dhd)
5264 {
5265     dhd_prot_t *prot = dhd->prot;
5266     int max_to_post;
5267 
5268     DHD_INFO(("ioctl resp buf post\n"));
5269     max_to_post = prot->max_ioctlrespbufpost - prot->cur_ioctlresp_bufs_posted;
5270     if (max_to_post <= 0) {
5271         DHD_INFO(("%s: Cannot post more than max IOCTL resp buffers\n",
5272                   __FUNCTION__));
5273         return;
5274     }
5275     prot->cur_ioctlresp_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(
5276         dhd, MSG_TYPE_IOCTLRESP_BUF_POST, max_to_post);
5277 }
5278 
5279 static void dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *dhd)
5280 {
5281     dhd_prot_t *prot = dhd->prot;
5282     int max_to_post;
5283 
5284     max_to_post = prot->max_eventbufpost - prot->cur_event_bufs_posted;
5285     if (max_to_post <= 0) {
5286         DHD_ERROR(
5287             ("%s: Cannot post more than max event buffers\n", __FUNCTION__));
5288         return;
5289     }
5290     prot->cur_event_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(
5291         dhd, MSG_TYPE_EVENT_BUF_POST, max_to_post);
5292 }
5293 
5294 static int dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *dhd)
5295 {
5296     return 0;
5297 }
5298 
5299 bool BCMFASTPATH dhd_prot_process_msgbuf_infocpl(dhd_pub_t *dhd, uint bound)
5300 {
5301     dhd_prot_t *prot = dhd->prot;
5302     bool more = TRUE;
5303     uint n = 0;
5304     msgbuf_ring_t *ring = prot->d2hring_info_cpln;
5305     unsigned long flags;
5306 
5307     if (ring == NULL) {
5308         return FALSE;
5309     }
5310     if (ring->inited != TRUE) {
5311         return FALSE;
5312     }
5313 
5314     /* Process all the messages - DTOH direction */
5315     while (!dhd_is_device_removed(dhd)) {
5316         uint8 *msg_addr;
5317         uint32 msg_len;
5318 
5319         if (dhd_query_bus_erros(dhd)) {
5320             more = FALSE;
5321             break;
5322         }
5323 
5324         if (dhd->hang_was_sent) {
5325             more = FALSE;
5326             break;
5327         }
5328 
5329         if (dhd->smmu_fault_occurred) {
5330             more = FALSE;
5331             break;
5332         }
5333 
5334         DHD_RING_LOCK(ring->ring_lock, flags);
5335         /* Get the message from ring */
5336         msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
5337         DHD_RING_UNLOCK(ring->ring_lock, flags);
5338         if (msg_addr == NULL) {
5339             more = FALSE;
5340             break;
5341         }
5342 
5343         /* Prefetch data to populate the cache */
5344         OSL_PREFETCH(msg_addr);
5345 
5346         if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
5347             DHD_ERROR(("%s: Error processing info cpl msgbuf of len %d\n",
5348                        __FUNCTION__, msg_len));
5349         }
5350 
5351         /* Update read pointer */
5352         dhd_prot_upd_read_idx(dhd, ring);
5353 
5354         /* After batch processing, check RX bound */
5355         n += msg_len / ring->item_len;
5356         if (n >= bound) {
5357             break;
5358         }
5359     }
5360 
5361     return more;
5362 }

#ifdef EWP_EDL
bool dhd_prot_process_msgbuf_edl(dhd_pub_t *dhd)
{
    dhd_prot_t *prot = dhd->prot;
    msgbuf_ring_t *ring = prot->d2hring_edl;
    unsigned long flags = 0;
    uint32 items = 0;
    uint16 rd = 0;
    uint16 depth = 0;

    if (ring == NULL) {
        return FALSE;
    }
    if (ring->inited != TRUE) {
        return FALSE;
    }
    if (ring->item_len == 0) {
        DHD_ERROR(("%s: Bad ring! ringidx %d, item_len %d\n", __FUNCTION__,
                   ring->idx, ring->item_len));
        return FALSE;
    }

    if (dhd_query_bus_erros(dhd)) {
        return FALSE;
    }

    if (dhd->hang_was_sent) {
        return FALSE;
    }

    /* In this DPC context just check whether the wr index has moved,
     * and schedule a deferred context to actually process the
     * work items.
     */
    /* update the write index */
    DHD_RING_LOCK(ring->ring_lock, flags);
    if (dhd->dma_d2h_ring_upd_support) {
        /* DMAing write/read indices supported */
        ring->wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
    } else {
        dhd_bus_cmn_readshared(dhd->bus, &ring->wr, RING_WR_UPD, ring->idx);
    }
    rd = ring->rd;
    DHD_RING_UNLOCK(ring->ring_lock, flags);

    depth = ring->max_items;
    /* check for avail space, in number of ring items */
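    /* Illustration (per the wrap-around note in dhd_prot_process_edl_complete()
     * below): READ_AVAIL_SPACE(wr, rd, depth) counts contiguous ready items,
     * i.e. wr - rd when wr >= rd, else depth - rd. E.g. wr=5, rd=2, depth=16
     * yields 3; wr=2, rd=14, depth=16 yields 2 (items 14..15), and the items
     * at 0..1 are picked up on a later pass once rd wraps.
     */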
    items = READ_AVAIL_SPACE(ring->wr, rd, depth);
    if (items == 0) {
        /* no work items in edl ring */
        return FALSE;
    }
    if (items > ring->max_items) {
        DHD_ERROR(("\r\n======================= \r\n"));
        DHD_ERROR(
            ("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n",
             __FUNCTION__, ring, ring->name, ring->max_items, items));
        DHD_ERROR(
            ("wr: %d,  rd: %d,  depth: %d  \r\n", ring->wr, ring->rd, depth));
        DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n",
                   dhd->busstate, dhd->bus->wait_for_d3_ack));
        DHD_ERROR(("\r\n======================= \r\n"));
#ifdef DHD_FW_COREDUMP
        if (dhd->memdump_enabled) {
            /* collect core dump */
            dhd->memdump_type = DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR;
            dhd_bus_mem_dump(dhd);
        }
#endif /* DHD_FW_COREDUMP */
        dhd_schedule_reset(dhd);

        return FALSE;
    }

    if (items > D2HRING_EDL_WATERMARK) {
        DHD_ERROR_RLMT(("%s: WARNING! EDL watermark hit, num items=%u;"
                        " rd=%u; wr=%u; depth=%u;\n",
                        __FUNCTION__, items, ring->rd, ring->wr, depth));
    }

    dhd_schedule_logtrace(dhd->info);

    return FALSE;
}

/* This is called either from work queue context of 'event_log_dispatcher_work'
 * or from the kthread context of dhd_logtrace_thread
 */
int dhd_prot_process_edl_complete(dhd_pub_t *dhd, void *evt_decode_data)
{
    dhd_prot_t *prot = NULL;
    msgbuf_ring_t *ring = NULL;
    int err = 0;
    unsigned long flags = 0;
    cmn_msg_hdr_t *msg = NULL;
    uint8 *msg_addr = NULL;
    uint32 max_items_to_process = 0, n = 0;
    uint32 num_items = 0, new_items = 0;
    uint16 depth = 0;
    volatile uint16 wr = 0;

    if (!dhd || !dhd->prot) {
        return 0;
    }

    prot = dhd->prot;
    ring = prot->d2hring_edl;
    if (!ring || !evt_decode_data) {
        return 0;
    }

    if (dhd->hang_was_sent) {
        return FALSE;
    }

    DHD_RING_LOCK(ring->ring_lock, flags);
    ring->curr_rd = ring->rd;
    wr = ring->wr;
    depth = ring->max_items;
    /* check for avail space, in number of ring items.
     * Note that this will only give the # of items
     * from rd to wr if wr >= rd, or from rd to ring end
     * if wr < rd. So in the latter case, strictly speaking,
     * not all the items are read. But this is OK, because
     * these will be processed in the next doorbell as rd
     * would have wrapped around. Processing in the next
     * doorbell is acceptable since EDL only contains debug data.
     */
    num_items = READ_AVAIL_SPACE(wr, ring->rd, depth);
    if (num_items == 0) {
        /* no work items in edl ring */
        DHD_RING_UNLOCK(ring->ring_lock, flags);
        return 0;
    }

    DHD_INFO(("%s: EDL work items [%u] available\n", __FUNCTION__, num_items));

    /* if space is available, calculate address to be read */
    msg_addr = (uint8 *)ring->dma_buf.va + (ring->rd * ring->item_len);

    max_items_to_process = MIN(num_items, DHD_EVENT_LOGTRACE_BOUND);
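    /* Bound the work done in one pass: at most DHD_EVENT_LOGTRACE_BOUND items
     * are consumed here; any remainder is reported via the return value at the
     * bottom of this function so that the deferred context gets rescheduled.
     */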

    DHD_RING_UNLOCK(ring->ring_lock, flags);

    /* Prefetch data to populate the cache */
    OSL_PREFETCH(msg_addr);

    n = max_items_to_process;
    while (n > 0) {
        msg = (cmn_msg_hdr_t *)msg_addr;
        /* wait for DMA of work item to complete */
        if ((err = prot->d2h_edl_sync_cb(dhd, ring, msg)) != BCME_OK) {
            DHD_ERROR(("%s: Error waiting for DMA to cmpl in EDL "
                       "ring; err = %d\n",
                       __FUNCTION__, err));
        }

        /*
         * Update the curr_rd to the current index in the ring, from where
         * the work item is fetched. This way if the fetched work item
         * fails in LIVELOCK, we can print the exact read index in the ring
         * that shows up the corrupted work item.
         */
        if ((ring->curr_rd + 1) >= ring->max_items) {
            ring->curr_rd = 0;
        } else {
            ring->curr_rd += 1;
        }

        if (err != BCME_OK) {
            return 0;
        }

        /* process the edl work item, i.e., the event log */
        err = dhd_event_logtrace_process_edl(dhd, msg_addr, evt_decode_data);

        /* Dummy sleep so that the scheduler kicks in after processing any
         * logprints
         */
        OSL_SLEEP(0);

        /* Prefetch data to populate the cache */
        OSL_PREFETCH(msg_addr + ring->item_len);

        msg_addr += ring->item_len;
        --n;
    }

    DHD_RING_LOCK(ring->ring_lock, flags);
    /* update host ring read pointer */
    if ((ring->rd + max_items_to_process) >= ring->max_items) {
        ring->rd = 0;
    } else {
        ring->rd += max_items_to_process;
    }
    DHD_RING_UNLOCK(ring->ring_lock, flags);

    /* Now, after processing max_items_to_process items, update the dongle rd
     * index. The TCM rd index is updated only if the bus is not in D3.
     * Otherwise, the rd index is updated from the resume context, in
     * 'dhdpcie_bus_suspend'.
     */
    DHD_GENERAL_LOCK(dhd, flags);
    if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd)) {
        DHD_INFO(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
                  __FUNCTION__, dhd->busstate, dhd->dhd_bus_busy_state));
        DHD_GENERAL_UNLOCK(dhd, flags);
    } else {
        DHD_GENERAL_UNLOCK(dhd, flags);
        DHD_EDL_RING_TCM_RD_UPDATE(dhd);
    }

    /* If num_items > bound, then we will reschedule anyway and this function
     * runs again, so if the DPC has updated the wr index in between, the
     * updated wr is read then. But if num_items <= bound, and the DPC executes
     * and updates the wr index while the above while loop is running, the
     * updated 'wr' index needs to be re-read here. If we don't do so, the
     * event logs will not be processed until the next time this function is
     * scheduled.
     */
    if (num_items <= DHD_EVENT_LOGTRACE_BOUND) {
        /* read the updated wr index if reqd. and update num_items */
        DHD_RING_LOCK(ring->ring_lock, flags);
        if (wr != (volatile uint16)ring->wr) {
            wr = (volatile uint16)ring->wr;
            new_items = READ_AVAIL_SPACE(wr, ring->rd, depth);
            DHD_INFO(
                ("%s: new items [%u] avail in edl\n", __FUNCTION__, new_items));
            num_items += new_items;
        }
        DHD_RING_UNLOCK(ring->ring_lock, flags);
    }

    /* if # of items processed is less than num_items, need to re-schedule
     * the deferred ctx
     */
    if (max_items_to_process < num_items) {
        DHD_INFO(("%s: EDL bound hit / new items found, "
                  "items processed=%u; remaining=%u, "
                  "resched deferred ctx...\n",
                  __FUNCTION__, max_items_to_process,
                  num_items - max_items_to_process));
        return (num_items - max_items_to_process);
    }

    return 0;
}

void dhd_prot_edl_ring_tcm_rd_update(dhd_pub_t *dhd)
{
    dhd_prot_t *prot = NULL;
    unsigned long flags = 0;
    msgbuf_ring_t *ring = NULL;

    if (!dhd) {
        return;
    }

    prot = dhd->prot;
    if (!prot || !prot->d2hring_edl) {
        return;
    }

    ring = prot->d2hring_edl;
    DHD_RING_LOCK(ring->ring_lock, flags);
    dhd_prot_upd_read_idx(dhd, ring);
    DHD_RING_UNLOCK(ring->ring_lock, flags);
}
#endif /* EWP_EDL */

/* called when DHD needs to check for 'receive complete' messages from the
 * dongle */
bool BCMFASTPATH dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound,
                                               int ringtype)
{
    bool more = FALSE;
    uint n = 0;
    dhd_prot_t *prot = dhd->prot;
    msgbuf_ring_t *ring;
    uint16 item_len;
    host_rxbuf_cmpl_t *msg = NULL;
    uint8 *msg_addr;
    uint32 msg_len;
    uint16 pkt_cnt, pkt_cnt_newidx;
    unsigned long flags;
    dmaaddr_t pa;
    uint32 len;
    void *dmah;
    void *secdma;
    int ifidx = 0, if_newidx = 0;
    void *pkt, *pktqhead = NULL, *prevpkt = NULL, *pkt_newidx, *nextpkt;
    uint32 pktid;
    int i;
    uint8 sync;
    ts_timestamp_t *ts;

    BCM_REFERENCE(ts);
#ifdef DHD_HP2P
    if (ringtype == DHD_HP2P_RING && prot->d2hring_hp2p_rxcpl) {
        ring = prot->d2hring_hp2p_rxcpl;
    } else
#endif /* DHD_HP2P */
        ring = &prot->d2hring_rx_cpln;
    item_len = ring->item_len;
    while (1) {
        if (dhd_is_device_removed(dhd)) {
            break;
        }

        if (dhd_query_bus_erros(dhd)) {
            break;
        }

        if (dhd->hang_was_sent) {
            break;
        }

        if (dhd->smmu_fault_occurred) {
            break;
        }

        pkt_cnt = 0;
        pktqhead = pkt_newidx = NULL;
        pkt_cnt_newidx = 0;

        DHD_RING_LOCK(ring->ring_lock, flags);

        /* Get the address of the next message to be read from ring */
        msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
        if (msg_addr == NULL) {
            DHD_RING_UNLOCK(ring->ring_lock, flags);
            break;
        }

        while (msg_len > 0) {
            msg = (host_rxbuf_cmpl_t *)msg_addr;

            /* Wait until DMA completes, then fetch msg_type */
            sync = prot->d2h_sync_cb(dhd, ring, &msg->cmn_hdr, item_len);
            /*
             * Update the curr_rd to the current index in the ring, from where
             * the work item is fetched. This way if the fetched work item
             * fails in LIVELOCK, we can print the exact read index in the ring
             * that shows up the corrupted work item.
             */
            if ((ring->curr_rd + 1) >= ring->max_items) {
                ring->curr_rd = 0;
            } else {
                ring->curr_rd += 1;
            }

            if (!sync) {
                msg_len -= item_len;
                msg_addr += item_len;
                continue;
            }

            pktid = ltoh32(msg->cmn_hdr.request_id);

#ifdef DHD_PKTID_AUDIT_RING
            DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_rx_map, pktid,
                                       DHD_DUPLICATE_FREE, msg,
                                       D2HRING_RXCMPLT_ITEMSIZE);
#endif /* DHD_PKTID_AUDIT_RING */

            pkt = DHD_PKTID_TO_NATIVE(dhd, prot->pktid_rx_map, pktid, pa, len,
                                      dmah, secdma, PKTTYPE_DATA_RX);
            if (!pkt) {
                msg_len -= item_len;
                msg_addr += item_len;
                continue;
            }

            if (SECURE_DMA_ENAB(dhd->osh)) {
                SECURE_DMA_UNMAP(dhd->osh, pa, (uint)len, DMA_RX, 0, dmah,
                                 secdma, 0);
            } else {
                DMA_UNMAP(dhd->osh, pa, (uint)len, DMA_RX, 0, dmah);
            }

#ifdef DMAMAP_STATS
            dhd->dma_stats.rxdata--;
            dhd->dma_stats.rxdata_sz -= len;
#endif /* DMAMAP_STATS */
            DHD_INFO(("id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, "
                      "pktdata %p, metalen %d\n",
                      ltoh32(msg->cmn_hdr.request_id), ltoh16(msg->data_offset),
                      ltoh16(msg->data_len), msg->cmn_hdr.if_id,
                      msg->cmn_hdr.flags, PKTDATA(dhd->osh, pkt),
                      ltoh16(msg->metadata_len)));

            pkt_cnt++;
            msg_len -= item_len;
            msg_addr += item_len;

#if DHD_DBG_SHOW_METADATA
            if (prot->metadata_dbg && prot->rx_metadata_offset &&
                msg->metadata_len) {
                uchar *ptr;
                ptr = PKTDATA(dhd->osh, pkt) - (prot->rx_metadata_offset);
                /* header followed by data */
                bcm_print_bytes("rxmetadata", ptr, msg->metadata_len);
                dhd_prot_print_metadata(dhd, ptr, msg->metadata_len);
            }
#endif /* DHD_DBG_SHOW_METADATA */

            /* data_offset from buf start */
            if (ltoh16(msg->data_offset)) {
                /* data offset given from dongle after split rx */
                PKTPULL(dhd->osh, pkt, ltoh16(msg->data_offset));
            } else if (prot->rx_dataoffset) {
                /* DMA RX offset updated through shared area */
                PKTPULL(dhd->osh, pkt, prot->rx_dataoffset);
            }
            /* Actual length of the packet */
            PKTSETLEN(dhd->osh, pkt, ltoh16(msg->data_len));

#if defined(WL_MONITOR)
            if (dhd_monitor_enabled(dhd, ifidx)) {
                if (msg->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11) {
                    dhd_rx_mon_pkt(dhd, msg, pkt, ifidx);
                    continue;
                } else {
                    DHD_ERROR(("Received non-802.11 packet "
                               "while monitor mode is enabled\n"));
                }
            }
#endif /* WL_MONITOR */

            if (!pktqhead) {
                pktqhead = prevpkt = pkt;
                ifidx = msg->cmn_hdr.if_id;
            } else {
                if (ifidx != msg->cmn_hdr.if_id) {
                    pkt_newidx = pkt;
                    if_newidx = msg->cmn_hdr.if_id;
                    pkt_cnt--;
                    pkt_cnt_newidx = 1;
                    break;
                } else {
                    PKTSETNEXT(dhd->osh, prevpkt, pkt);
                    prevpkt = pkt;
                }
            }

#ifdef DHD_HP2P
            if (dhd->hp2p_capable && ring == prot->d2hring_hp2p_rxcpl) {
#ifdef DHD_HP2P_DEBUG
                bcm_print_bytes("Rxcpl", (uchar *)msg,
                                sizeof(host_rxbuf_cmpl_t));
#endif /* DHD_HP2P_DEBUG */
                dhd_update_hp2p_rxstats(dhd, msg);
            }
#endif /* DHD_HP2P */

#ifdef DHD_LBUF_AUDIT
            PKTAUDIT(dhd->osh, pkt);
#endif // endif
        }

        /* roll back read pointer for unprocessed message */
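        /* Illustration: msg_len / item_len whole items remain unprocessed and
         * rd has already advanced past them. Because the span returned by
         * dhd_prot_get_read_addr() is contiguous, rd can only be smaller than
         * the leftover count when it wrapped to 0 at the ring end; e.g. rd=0
         * with 3 leftover items and max_items=256 rolls back to 253.
         */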
        if (msg_len > 0) {
            if (ring->rd < msg_len / item_len) {
                ring->rd = ring->max_items - msg_len / item_len;
            } else {
                ring->rd -= msg_len / item_len;
            }
        }

        /* Update read pointer */
        dhd_prot_upd_read_idx(dhd, ring);

        DHD_RING_UNLOCK(ring->ring_lock, flags);

        pkt = pktqhead;
        for (i = 0; pkt && i < pkt_cnt; i++, pkt = nextpkt) {
            nextpkt = PKTNEXT(dhd->osh, pkt);
            PKTSETNEXT(dhd->osh, pkt, NULL);
#ifdef DHD_LB_RXP
            dhd_lb_rx_pkt_enqueue(dhd, pkt, ifidx);
#elif defined(DHD_RX_CHAINING)
            dhd_rxchain_frame(dhd, pkt, ifidx);
#else
            dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
#endif /* DHD_LB_RXP */
        }

        if (pkt_newidx) {
#ifdef DHD_LB_RXP
            dhd_lb_rx_pkt_enqueue(dhd, pkt_newidx, if_newidx);
#elif defined(DHD_RX_CHAINING)
            dhd_rxchain_frame(dhd, pkt_newidx, if_newidx);
#else
            dhd_bus_rx_frame(dhd->bus, pkt_newidx, if_newidx, 1);
#endif /* DHD_LB_RXP */
        }

        pkt_cnt += pkt_cnt_newidx;

        /* Post another set of rxbufs to the device */
        dhd_prot_return_rxbuf(dhd, 0, pkt_cnt);

#ifdef DHD_RX_CHAINING
        dhd_rxchain_commit(dhd);
#endif // endif

        /* After batch processing, check RX bound */
        n += pkt_cnt;
        if (n >= bound) {
            more = TRUE;
            break;
        }
    }

    /* Call lb_dispatch only if packets are queued */
    if (n &&
#ifdef WL_MONITOR
        !(dhd_monitor_enabled(dhd, ifidx)) &&
#endif /* WL_MONITOR */
        TRUE) {
        DHD_LB_DISPATCH_RX_COMPL(dhd);
        DHD_LB_DISPATCH_RX_PROCESS(dhd);
    }

    return more;
}

/**
 * Hands transmit packets (with a caller provided flow_id) over to dongle
 * territory (the flow ring)
 */
void dhd_prot_update_txflowring(dhd_pub_t *dhd, uint16 flowid, void *msgring)
{
    msgbuf_ring_t *ring = (msgbuf_ring_t *)msgring;

    if (ring == NULL) {
        DHD_ERROR(("%s: NULL txflowring. exiting...\n", __FUNCTION__));
        return;
    }
    /* Update read pointer */
    if (dhd->dma_d2h_ring_upd_support) {
        ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
    }

    DHD_TRACE(("ringid %d flowid %d write %d read %d \n\n", ring->idx, flowid,
               ring->wr, ring->rd));

    /* Need more logic here, but for now use it directly */
    dhd_bus_schedule_queue(dhd->bus, flowid, TRUE); /* from queue to flowring */
}

/** called when DHD needs to check for 'transmit complete' messages from the
 * dongle */
bool BCMFASTPATH dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound,
                                               int ringtype)
{
    bool more = TRUE;
    uint n = 0;
    msgbuf_ring_t *ring;
    unsigned long flags;

#ifdef DHD_HP2P
    if (ringtype == DHD_HP2P_RING && dhd->prot->d2hring_hp2p_txcpl) {
        ring = dhd->prot->d2hring_hp2p_txcpl;
    } else
#endif /* DHD_HP2P */
        ring = &dhd->prot->d2hring_tx_cpln;

    /* Process all the messages - DTOH direction */
    while (!dhd_is_device_removed(dhd)) {
        uint8 *msg_addr;
        uint32 msg_len;

        if (dhd_query_bus_erros(dhd)) {
            more = FALSE;
            break;
        }

        if (dhd->hang_was_sent) {
            more = FALSE;
            break;
        }

        if (dhd->smmu_fault_occurred) {
            more = FALSE;
            break;
        }

        DHD_RING_LOCK(ring->ring_lock, flags);
        /* Get the address of the next message to be read from ring */
        msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
        DHD_RING_UNLOCK(ring->ring_lock, flags);

        if (msg_addr == NULL) {
            more = FALSE;
            break;
        }

        /* Prefetch data to populate the cache */
        OSL_PREFETCH(msg_addr);

        if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
            DHD_ERROR(("%s: process %s msg addr %p len %d\n", __FUNCTION__,
                       ring->name, msg_addr, msg_len));
        }

        /* Write to dngl rd ptr */
        dhd_prot_upd_read_idx(dhd, ring);

        /* After batch processing, check bound */
        n += msg_len / ring->item_len;
        if (n >= bound) {
            break;
        }
    }

    DHD_LB_DISPATCH_TX_COMPL(dhd);

    return more;
}

int BCMFASTPATH dhd_prot_process_trapbuf(dhd_pub_t *dhd)
{
    uint32 data;
    dhd_dma_buf_t *trap_addr = &dhd->prot->fw_trap_buf;

    /* Interrupts can come in before this struct
     * has been initialized.
     */
    if (trap_addr->va == NULL) {
        DHD_ERROR(("%s: trap_addr->va is NULL\n", __FUNCTION__));
        return 0;
    }

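    /* The dongle DMAs the trap word into host memory, so invalidate the cache
     * line first to make sure we read fresh data rather than a stale cached
     * copy.
     */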
    OSL_CACHE_INV((void *)trap_addr->va, sizeof(uint32));
    data = *(uint32 *)(trap_addr->va);

    if (data & D2H_DEV_FWHALT) {
        DHD_ERROR(("Firmware trapped and trap_data is 0x%04x\n", data));

        if (data & D2H_DEV_EXT_TRAP_DATA) {
            if (dhd->extended_trap_data) {
                OSL_CACHE_INV((void *)trap_addr->va,
                              BCMPCIE_EXT_TRAP_DATA_MAXLEN);
                memcpy(dhd->extended_trap_data, (uint32 *)trap_addr->va,
                       BCMPCIE_EXT_TRAP_DATA_MAXLEN);
            }
            DHD_ERROR(("Extended trap data available\n"));
        }
        return data;
    }
    return 0;
}

/** called when DHD needs to check for 'ioctl complete' messages from the
 * dongle
 */
int BCMFASTPATH dhd_prot_process_ctrlbuf(dhd_pub_t *dhd)
{
    dhd_prot_t *prot = dhd->prot;
    msgbuf_ring_t *ring = &prot->d2hring_ctrl_cpln;
    unsigned long flags;

    /* Process all the messages - DTOH direction */
    while (!dhd_is_device_removed(dhd)) {
        uint8 *msg_addr;
        uint32 msg_len;

        if (dhd_query_bus_erros(dhd)) {
            break;
        }

        if (dhd->hang_was_sent) {
            break;
        }

        if (dhd->smmu_fault_occurred) {
            break;
        }

        DHD_RING_LOCK(ring->ring_lock, flags);
        /* Get the address of the next message to be read from ring */
        msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
        DHD_RING_UNLOCK(ring->ring_lock, flags);

        if (msg_addr == NULL) {
            break;
        }

        /* Prefetch data to populate the cache */
        OSL_PREFETCH(msg_addr);
        if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
            DHD_ERROR(("%s: process %s msg addr %p len %d\n", __FUNCTION__,
                       ring->name, msg_addr, msg_len));
        }

        /* Write to dngl rd ptr */
        dhd_prot_upd_read_idx(dhd, ring);
    }

    return 0;
}

/**
 * Consume messages out of the D2H ring. Ensure that the message's DMA to host
 * memory has completed, before invoking the message handler via a table lookup
 * of the cmn_msg_hdr::msg_type.
 */
static int BCMFASTPATH dhd_prot_process_msgtype(dhd_pub_t *dhd,
                                                msgbuf_ring_t *ring, uint8 *buf,
                                                uint32 len)
{
    uint32 buf_len = len;
    uint16 item_len;
    uint8 msg_type;
    cmn_msg_hdr_t *msg = NULL;
    int ret = BCME_OK;

    ASSERT(ring);
    item_len = ring->item_len;
    if (item_len == 0) {
        DHD_ERROR(("%s: ringidx %d, item_len %d buf_len %d \n", __FUNCTION__,
                   ring->idx, item_len, buf_len));
        return BCME_ERROR;
    }

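    /* Walk the batch one item_len-sized work item at a time. For each item,
     * d2h_sync_cb() waits until the dongle's DMA of that item is observed
     * complete before msg_type is trusted; the handler is then dispatched
     * through the table_lookup[] array indexed by msg_type.
     */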
    while (buf_len > 0) {
        if (dhd->hang_was_sent) {
            ret = BCME_ERROR;
            goto done;
        }

        if (dhd->smmu_fault_occurred) {
            ret = BCME_ERROR;
            goto done;
        }

        msg = (cmn_msg_hdr_t *)buf;

        /* Wait until DMA completes, then fetch msg_type */
        msg_type = dhd->prot->d2h_sync_cb(dhd, ring, msg, item_len);

        /*
         * Update the curr_rd to the current index in the ring, from where
         * the work item is fetched. This way if the fetched work item
         * fails in LIVELOCK, we can print the exact read index in the ring
         * that shows up the corrupted work item.
         */
        if ((ring->curr_rd + 1) >= ring->max_items) {
            ring->curr_rd = 0;
        } else {
            ring->curr_rd += 1;
        }

        /* Prefetch data to populate the cache */
        OSL_PREFETCH(buf + item_len);

        DHD_INFO(("msg_type %d item_len %d buf_len %d\n", msg_type, item_len,
                  buf_len));

        if (msg_type == MSG_TYPE_LOOPBACK) {
            bcm_print_bytes("LPBK RESP: ", (uint8 *)msg, item_len);
            DHD_ERROR((" MSG_TYPE_LOOPBACK, len %d\n", item_len));
        }

        ASSERT(msg_type < DHD_PROT_FUNCS);
        if (msg_type >= DHD_PROT_FUNCS) {
            DHD_ERROR(("%s: msg_type %d, item_len %d buf_len %d\n",
                       __FUNCTION__, msg_type, item_len, buf_len));
            ret = BCME_ERROR;
            goto done;
        }

        if (msg_type == MSG_TYPE_INFO_BUF_CMPLT) {
            if (ring == dhd->prot->d2hring_info_cpln) {
                if (!dhd->prot->infobufpost) {
                    DHD_ERROR(("infobuf posted count is zero, "
                               "but there is a completion\n"));
                    goto done;
                }
                dhd->prot->infobufpost--;
                dhd_prot_infobufpost(dhd, dhd->prot->h2dring_info_subn);
                dhd_prot_process_infobuf_complete(dhd, buf);
            }
        } else if (table_lookup[msg_type]) {
            table_lookup[msg_type](dhd, buf);
        }

        if (buf_len < item_len) {
            ret = BCME_ERROR;
            goto done;
        }
        buf_len = buf_len - item_len;
        buf = buf + item_len;
    }

done:

#ifdef DHD_RX_CHAINING
    dhd_rxchain_commit(dhd);
#endif // endif

    return ret;
} /* dhd_prot_process_msgtype */

static void dhd_prot_noop(dhd_pub_t *dhd, void *msg)
{
    return;
}

/** called on MSG_TYPE_RING_STATUS message received from dongle */
static void dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg)
{
    pcie_ring_status_t *ring_status = (pcie_ring_status_t *)msg;
    uint32 request_id = ltoh32(ring_status->cmn_hdr.request_id);
    uint16 status = ltoh16(ring_status->compl_hdr.status);
    uint16 ring_id = ltoh16(ring_status->compl_hdr.flow_ring_id);

    DHD_ERROR(("ring status: request_id %d, status 0x%04x, flow ring %d, "
               "write_idx %d\n",
               request_id, status, ring_id, ltoh16(ring_status->write_idx)));

    if (ltoh16(ring_status->compl_hdr.ring_id) !=
        BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT) {
        return;
    }
    if (status == BCMPCIE_BAD_PHASE) {
        /* bad phase reported by the dongle */
        DHD_ERROR(("Bad phase\n"));
    }
    if (status != BCMPCIE_BADOPTION) {
        return;
    }

    if (request_id == DHD_H2D_DBGRING_REQ_PKTID) {
        if (dhd->prot->h2dring_info_subn != NULL) {
            if (dhd->prot->h2dring_info_subn->create_pending == TRUE) {
                DHD_ERROR(("H2D ring create failed for info ring\n"));
                dhd->prot->h2dring_info_subn->create_pending = FALSE;
            } else {
                DHD_ERROR(("ring create ID for a ring, create not pending\n"));
            }
        } else {
            DHD_ERROR(("%s info submit ring doesn't exist\n", __FUNCTION__));
        }
    } else if (request_id == DHD_D2H_DBGRING_REQ_PKTID) {
        if (dhd->prot->d2hring_info_cpln != NULL) {
            if (dhd->prot->d2hring_info_cpln->create_pending == TRUE) {
                DHD_ERROR(("D2H ring create failed for info ring\n"));
                dhd->prot->d2hring_info_cpln->create_pending = FALSE;
            } else {
                DHD_ERROR(
                    ("ring create ID for info ring, create not pending\n"));
            }
        } else {
            DHD_ERROR(("%s info cpl ring doesn't exist\n", __FUNCTION__));
        }
    }
#ifdef DHD_HP2P
    else if (request_id == DHD_D2H_HPPRING_TXREQ_PKTID) {
        if (dhd->prot->d2hring_hp2p_txcpl != NULL) {
            if (dhd->prot->d2hring_hp2p_txcpl->create_pending == TRUE) {
                DHD_ERROR(("H2D ring create failed for hp2p ring\n"));
                dhd->prot->d2hring_hp2p_txcpl->create_pending = FALSE;
            } else {
                DHD_ERROR(("ring create ID for a ring, create not pending\n"));
            }
        } else {
            DHD_ERROR(("%s hp2p txcmpl ring doesn't exist\n", __FUNCTION__));
        }
    } else if (request_id == DHD_D2H_HPPRING_RXREQ_PKTID) {
        if (dhd->prot->d2hring_hp2p_rxcpl != NULL) {
            if (dhd->prot->d2hring_hp2p_rxcpl->create_pending == TRUE) {
                DHD_ERROR(("D2H ring create failed for hp2p rxcmpl ring\n"));
                dhd->prot->d2hring_hp2p_rxcpl->create_pending = FALSE;
            } else {
                DHD_ERROR(
                    ("ring create ID for hp2p rxcmpl ring, not pending\n"));
            }
        } else {
            DHD_ERROR(("%s hp2p rxcpl ring doesn't exist\n", __FUNCTION__));
        }
    }
#endif /* DHD_HP2P */
    else {
        DHD_ERROR(("don't know how to pair with original request\n"));
    }
    /* How do we track this to pair it with ??? */
    return;
}

/** called on MSG_TYPE_GEN_STATUS ('general status') message received from
 * dongle */
static void dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg)
{
    pcie_gen_status_t *gen_status = (pcie_gen_status_t *)msg;
    DHD_ERROR(
        ("ERROR: gen status: request_id %d, STATUS 0x%04x, flow ring %d \n",
         gen_status->cmn_hdr.request_id, gen_status->compl_hdr.status,
         gen_status->compl_hdr.flow_ring_id));

    /* How do we track this to pair it with ??? */
    return;
}

/**
 * Called on MSG_TYPE_IOCTLPTR_REQ_ACK ('ioctl ack') message received from
 * dongle, meaning that the dongle received the ioctl message in dongle memory.
 */
static void dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg)
{
    ioctl_req_ack_msg_t *ioct_ack = (ioctl_req_ack_msg_t *)msg;
    unsigned long flags;
#if defined(DHD_PKTID_AUDIT_RING)
    uint32 pktid = ltoh32(ioct_ack->cmn_hdr.request_id);
#endif // endif
#if defined(DHD_PKTID_AUDIT_RING)
    /* Skip audit for DHD_IOCTL_REQ_PKTID = 0xFFFE */
    if (pktid != DHD_IOCTL_REQ_PKTID) {
#ifndef IOCTLRESP_USE_CONSTMEM
        DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid,
                                   DHD_TEST_IS_ALLOC, msg,
                                   D2HRING_CTRL_CMPLT_ITEMSIZE);
#else
        DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_map_handle_ioctl,
                                   pktid, DHD_TEST_IS_ALLOC, msg,
                                   D2HRING_CTRL_CMPLT_ITEMSIZE);
#endif /* !IOCTLRESP_USE_CONSTMEM */
    }
#endif // endif

    dhd->prot->ioctl_ack_time = OSL_LOCALTIME_NS();

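    /* An ioctl submission sets both ACK_PENDING and RESP_PENDING; the ack from
     * the dongle is expected while both bits are still set and clears only
     * ACK_PENDING. Any other state indicates an unexpected or stale ack.
     */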
    DHD_GENERAL_LOCK(dhd, flags);
    if ((dhd->prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) &&
        (dhd->prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
        dhd->prot->ioctl_state &= ~MSGBUF_IOCTL_ACK_PENDING;
    } else {
        DHD_ERROR(("%s: received ioctl ACK with state %02x trans_id = %d\n",
                   __FUNCTION__, dhd->prot->ioctl_state,
                   dhd->prot->ioctl_trans_id));
        prhex("dhd_prot_ioctack_process:", (uchar *)msg,
              D2HRING_CTRL_CMPLT_ITEMSIZE);
    }
    DHD_GENERAL_UNLOCK(dhd, flags);

    DHD_CTL(("ioctl req ack: request_id %d, status 0x%04x, flow ring %d\n",
             ioct_ack->cmn_hdr.request_id, ioct_ack->compl_hdr.status,
             ioct_ack->compl_hdr.flow_ring_id));
    if (ioct_ack->compl_hdr.status != 0) {
        DHD_ERROR(("got an error status for the ioctl request... "
                   "need to handle that\n"));
    }
}

/** called on MSG_TYPE_IOCTL_CMPLT message received from dongle */
static void dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg)
{
    dhd_prot_t *prot = dhd->prot;
    uint32 pkt_id, xt_id;
    ioctl_comp_resp_msg_t *ioct_resp = (ioctl_comp_resp_msg_t *)msg;
    void *pkt;
    unsigned long flags;
    dhd_dma_buf_t retbuf;

    /* Check for the ioctl timeout induce flag, which is set by firing
     * a dhd iovar to induce an IOCTL timeout. If the flag is set,
     * return from here, which results in an IOCTL timeout.
     */
    if (dhd->dhd_induce_error == DHD_INDUCE_IOCTL_TIMEOUT) {
        DHD_ERROR(("%s: Inducing resumed on timeout\n", __FUNCTION__));
        return;
    }

    memset(&retbuf, 0, sizeof(dhd_dma_buf_t));

    pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id);

#if defined(DHD_PKTID_AUDIT_RING)
#ifndef IOCTLRESP_USE_CONSTMEM
    DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_ctrl_map, pkt_id,
                               DHD_DUPLICATE_FREE, msg,
                               D2HRING_CTRL_CMPLT_ITEMSIZE);
#else
    DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_map_handle_ioctl, pkt_id,
                               DHD_DUPLICATE_FREE, msg,
                               D2HRING_CTRL_CMPLT_ITEMSIZE);
#endif /* !IOCTLRESP_USE_CONSTMEM */
#endif // endif

    DHD_GENERAL_LOCK(dhd, flags);
    if ((prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) ||
        !(prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
        DHD_ERROR(
            ("%s: received ioctl response with state %02x trans_id = %d\n",
             __FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
        prhex("dhd_prot_ioctcmplt_process:", (uchar *)msg,
              D2HRING_CTRL_CMPLT_ITEMSIZE);
        DHD_GENERAL_UNLOCK(dhd, flags);
        return;
    }

    dhd->prot->ioctl_cmplt_time = OSL_LOCALTIME_NS();

    /* Clear Response pending bit */
    prot->ioctl_state &= ~MSGBUF_IOCTL_RESP_PENDING;
    DHD_GENERAL_UNLOCK(dhd, flags);

#ifndef IOCTLRESP_USE_CONSTMEM
    pkt = dhd_prot_packet_get(dhd, pkt_id, PKTTYPE_IOCTL_RX, TRUE);
#else
    dhd_prot_ioctl_ret_buffer_get(dhd, pkt_id, &retbuf);
    pkt = retbuf.va;
#endif /* !IOCTLRESP_USE_CONSTMEM */
    if (!pkt) {
        DHD_ERROR(
            ("%s: received ioctl response with NULL pkt\n", __FUNCTION__));
        prhex("dhd_prot_ioctcmplt_process:", (uchar *)msg,
              D2HRING_CTRL_CMPLT_ITEMSIZE);
        return;
    }

    prot->ioctl_resplen = ltoh16(ioct_resp->resp_len);
    prot->ioctl_status = ltoh16(ioct_resp->compl_hdr.status);
    xt_id = ltoh16(ioct_resp->trans_id);
    if (xt_id != prot->ioctl_trans_id ||
        prot->curr_ioctl_cmd != ioct_resp->cmd) {
        DHD_ERROR(("%s: transaction id(%d %d) or cmd(%d %d) mismatch\n",
                   __FUNCTION__, xt_id, prot->ioctl_trans_id,
                   prot->curr_ioctl_cmd, ioct_resp->cmd));
        dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_ERROR);
        dhd_prot_debug_info_print(dhd);
#ifdef DHD_FW_COREDUMP
        if (dhd->memdump_enabled) {
            /* collect core dump */
            dhd->memdump_type = DUMP_TYPE_TRANS_ID_MISMATCH;
            dhd_bus_mem_dump(dhd);
        }
#else
        ASSERT(0);
#endif /* DHD_FW_COREDUMP */
        dhd_schedule_reset(dhd);
        goto exit;
    }
    DHD_CTL(("IOCTL_COMPLETE: req_id %x transid %d status %x resplen %d\n",
             pkt_id, xt_id, prot->ioctl_status, prot->ioctl_resplen));

    if (prot->ioctl_resplen > 0) {
#ifndef IOCTLRESP_USE_CONSTMEM
        bcopy(PKTDATA(dhd->osh, pkt), prot->retbuf.va, prot->ioctl_resplen);
#else
        bcopy(pkt, prot->retbuf.va, prot->ioctl_resplen);
#endif /* !IOCTLRESP_USE_CONSTMEM */
    }

    /* wake up any dhd_os_ioctl_resp_wait() */
    dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_SUCCESS);

exit:
#ifndef IOCTLRESP_USE_CONSTMEM
    dhd_prot_packet_free(dhd, pkt, PKTTYPE_IOCTL_RX, FALSE);
#else
    free_ioctl_return_buffer(dhd, &retbuf);
#endif /* !IOCTLRESP_USE_CONSTMEM */

    /* Post another ioctl buf to the device */
    if (prot->cur_ioctlresp_bufs_posted > 0) {
        prot->cur_ioctlresp_bufs_posted--;
    }

    dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
}

int dhd_prot_check_tx_resource(dhd_pub_t *dhd)
{
    return dhd->prot->no_tx_resource;
}

void dhd_prot_update_pktid_txq_stop_cnt(dhd_pub_t *dhd)
{
    dhd->prot->pktid_txq_stop_cnt++;
}

void dhd_prot_update_pktid_txq_start_cnt(dhd_pub_t *dhd)
{
    dhd->prot->pktid_txq_start_cnt++;
}

/** called on MSG_TYPE_TX_STATUS message received from dongle */
static void BCMFASTPATH dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg)
{
    dhd_prot_t *prot = dhd->prot;
    host_txbuf_cmpl_t *txstatus;
    unsigned long flags;
    uint32 pktid;
    void *pkt;
    dmaaddr_t pa;
    uint32 len;
    void *dmah;
    void *secdma;
    bool pkt_fate;
    msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln;
#if defined(TX_STATUS_LATENCY_STATS) || defined(DHD_HP2P)
    flow_info_t *flow_info;
    uint64 tx_status_latency;
#endif /* TX_STATUS_LATENCY_STATS || DHD_HP2P */
#if defined(TX_STATUS_LATENCY_STATS)
    flow_ring_node_t *flow_ring_node;
    uint16 flowid;
#endif // endif
    ts_timestamp_t *ts;

    BCM_REFERENCE(ts);
    txstatus = (host_txbuf_cmpl_t *)msg;
#if defined(TX_STATUS_LATENCY_STATS)
    flowid = txstatus->compl_hdr.flow_ring_id;
    flow_ring_node = DHD_FLOW_RING(dhd, flowid);
#endif // endif

    /* locks required to protect circular buffer accesses */
    DHD_RING_LOCK(ring->ring_lock, flags);
    pktid = ltoh32(txstatus->cmn_hdr.request_id);
    pkt_fate = TRUE;

#if defined(DHD_PKTID_AUDIT_RING)
    DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_tx_map, pktid,
                               DHD_DUPLICATE_FREE, msg,
                               D2HRING_TXCMPLT_ITEMSIZE);
#endif // endif

    DHD_INFO(("txstatus for pktid 0x%04x\n", pktid));
    if (OSL_ATOMIC_DEC_RETURN(dhd->osh, &prot->active_tx_count) < 0) {
        DHD_ERROR(("Extra packets are freed\n"));
    }
    ASSERT(pktid != 0);

    pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid, pa, len,
                              dmah, secdma, PKTTYPE_DATA_TX);
    if (!pkt) {
        DHD_RING_UNLOCK(ring->ring_lock, flags);
        DHD_ERROR(("%s: received txstatus with NULL pkt\n", __FUNCTION__));
        prhex("dhd_prot_txstatus_process:", (uchar *)msg,
              D2HRING_TXCMPLT_ITEMSIZE);
#ifdef DHD_FW_COREDUMP
        if (dhd->memdump_enabled) {
            /* collect core dump */
            dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
            dhd_bus_mem_dump(dhd);
        }
#else
        ASSERT(0);
#endif /* DHD_FW_COREDUMP */
        return;
    }

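    /* Flow-control restart: dhd_prot_txdata() stops the tx queues once the
     * pktid pool is depleted; when availability climbs back to the low-water
     * mark (DHD_PKTID_MIN_AVAIL_COUNT), restart the queues.
     */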
    if (DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map) == DHD_PKTID_MIN_AVAIL_COUNT) {
        dhd->prot->no_tx_resource = FALSE;
        dhd_bus_start_queue(dhd->bus);
    }

    if (SECURE_DMA_ENAB(dhd->osh)) {
        int offset = 0;
        BCM_REFERENCE(offset);

        if (dhd->prot->tx_metadata_offset) {
            offset = dhd->prot->tx_metadata_offset + ETHER_HDR_LEN;
        }
        SECURE_DMA_UNMAP(dhd->osh, (uint)pa,
                         (uint)dhd->prot->tx_metadata_offset, DMA_RX, 0, dmah,
                         secdma, offset);
    } else {
        DMA_UNMAP(dhd->osh, pa, (uint)len, DMA_RX, 0, dmah);
    }

#ifdef TX_STATUS_LATENCY_STATS
    /* update the tx status latency for flowid */
    flow_info = &flow_ring_node->flow_info;
    tx_status_latency = OSL_SYSUPTIME_US() - DHD_PKT_GET_QTIME(pkt);
    flow_info->cum_tx_status_latency += tx_status_latency;
    flow_info->num_tx_status++;
#endif /* TX_STATUS_LATENCY_STATS */
#if defined(DHD_LB_TXC) && !defined(BCM_SECURE_DMA)
    {
        int elem_ix;
        void **elem;
        bcm_workq_t *workq;

        workq = &prot->tx_compl_prod;
        /*
         * Produce the packet into the tx_compl workq for the tx compl tasklet
         * to consume.
         */
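        /* bcm_ring_prod() returns the next free producer slot index, or
         * BCM_RING_FULL when the workq has no room, in which case we fall
         * through to the synchronous free path below.
         */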
        OSL_PREFETCH(PKTTAG(pkt));

        /* fetch next available slot in workq */
        elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);

        DHD_PKTTAG_SET_PA((dhd_pkttag_fr_t *)PKTTAG(pkt), pa);
        DHD_PKTTAG_SET_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt), len);

        if (elem_ix == BCM_RING_FULL) {
            DHD_ERROR(("tx_compl_prod BCM_RING_FULL\n"));
            goto workq_ring_full;
        }

        elem = WORKQ_ELEMENT(void *, &prot->tx_compl_prod, elem_ix);
        *elem = pkt;

        smp_wmb();

        /* Sync WR index to consumer if the SYNC threshold has been reached */
        if (++prot->tx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) {
            bcm_workq_prod_sync(workq);
            prot->tx_compl_prod_sync = 0;
        }

        DHD_INFO(("%s: tx_compl_prod pkt<%p> sync<%d>\n", __FUNCTION__, pkt,
                  prot->tx_compl_prod_sync));

        DHD_RING_UNLOCK(ring->ring_lock, flags);
        return;
    }

workq_ring_full:

#endif /* DHD_LB_TXC && !BCM_SECURE_DMA */

#ifdef DMAMAP_STATS
    dhd->dma_stats.txdata--;
    dhd->dma_stats.txdata_sz -= len;
#endif /* DMAMAP_STATS */
    pkt_fate = dhd_dbg_process_tx_status(dhd, pkt, pktid,
                                         ltoh16(txstatus->compl_hdr.status) &
                                             WLFC_CTL_PKTFLAG_MASK);

#if defined(BCMPCIE)
    dhd_txcomplete(dhd, pkt, pkt_fate);
#ifdef DHD_4WAYM4_FAIL_DISCONNECT
    dhd_eap_txcomplete(dhd, pkt, pkt_fate, txstatus->cmn_hdr.if_id);
#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
#endif // endif

#if DHD_DBG_SHOW_METADATA
    if (dhd->prot->metadata_dbg && dhd->prot->tx_metadata_offset &&
        txstatus->metadata_len) {
        uchar *ptr;
        /* The Ethernet header of the TX frame was copied and removed.
         * Here, move the data pointer forward by the Ethernet header size.
         */
        PKTPULL(dhd->osh, pkt, ETHER_HDR_LEN);
        ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->tx_metadata_offset);
        bcm_print_bytes("txmetadata", ptr, txstatus->metadata_len);
        dhd_prot_print_metadata(dhd, ptr, txstatus->metadata_len);
    }
#endif /* DHD_DBG_SHOW_METADATA */

#ifdef DHD_HP2P
    if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
#ifdef DHD_HP2P_DEBUG
        bcm_print_bytes("txcpl", (uint8 *)txstatus, sizeof(host_txbuf_cmpl_t));
#endif /* DHD_HP2P_DEBUG */
        dhd_update_hp2p_txstats(dhd, txstatus);
    }
#endif /* DHD_HP2P */

#ifdef DHD_LBUF_AUDIT
    PKTAUDIT(dhd->osh, pkt);
#endif // endif

    DHD_FLOWRING_TXSTATUS_CNT_UPDATE(dhd->bus, txstatus->compl_hdr.flow_ring_id,
                                     txstatus->tx_status);
    DHD_RING_UNLOCK(ring->ring_lock, flags);
    PKTFREE(dhd->osh, pkt, TRUE);
    return;
} /* dhd_prot_txstatus_process */

/** called on MSG_TYPE_WL_EVENT message received from dongle */
static void dhd_prot_event_process(dhd_pub_t *dhd, void *msg)
{
    wlevent_req_msg_t *evnt;
    uint32 bufid;
    uint16 buflen;
    int ifidx = 0;
    void *pkt;
    dhd_prot_t *prot = dhd->prot;

    /* Event complete header */
    evnt = (wlevent_req_msg_t *)msg;
    bufid = ltoh32(evnt->cmn_hdr.request_id);

#if defined(DHD_PKTID_AUDIT_RING)
    DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, bufid,
                               DHD_DUPLICATE_FREE, msg,
                               D2HRING_CTRL_CMPLT_ITEMSIZE);
#endif // endif

    buflen = ltoh16(evnt->event_data_len);

    ifidx = BCMMSGBUF_API_IFIDX(&evnt->cmn_hdr);

    /* Post another rxbuf to the device */
    if (prot->cur_event_bufs_posted) {
        prot->cur_event_bufs_posted--;
    }
    dhd_msgbuf_rxbuf_post_event_bufs(dhd);

    pkt = dhd_prot_packet_get(dhd, bufid, PKTTYPE_EVENT_RX, TRUE);
    if (!pkt) {
        DHD_ERROR(("%s: pkt is NULL for pktid %d\n", __FUNCTION__, bufid));
        return;
    }

    /* DMA RX offset updated through shared area */
    if (dhd->prot->rx_dataoffset) {
        PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
    }

    PKTSETLEN(dhd->osh, pkt, buflen);
#ifdef DHD_LBUF_AUDIT
    PKTAUDIT(dhd->osh, pkt);
#endif // endif
    dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
}

/** called on MSG_TYPE_INFO_BUF_CMPLT message received from dongle */
static void BCMFASTPATH dhd_prot_process_infobuf_complete(dhd_pub_t *dhd,
                                                          void *buf)
{
    info_buf_resp_t *resp;
    uint32 pktid;
    uint16 buflen;
    void *pkt;

    resp = (info_buf_resp_t *)buf;
    pktid = ltoh32(resp->cmn_hdr.request_id);
    buflen = ltoh16(resp->info_data_len);

#ifdef DHD_PKTID_AUDIT_RING
    DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid,
                               DHD_DUPLICATE_FREE, buf,
                               D2HRING_INFO_BUFCMPLT_ITEMSIZE);
#endif /* DHD_PKTID_AUDIT_RING */

    DHD_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d, rx_dataoffset %d\n",
              pktid, buflen, resp->cmn_hdr.flags, ltoh16(resp->seqnum),
              dhd->prot->rx_dataoffset));

    if (dhd->debug_buf_dest_support) {
        if (resp->dest < DEBUG_BUF_DEST_MAX) {
            dhd->debug_buf_dest_stat[resp->dest]++;
        }
    }

    pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_INFO_RX, TRUE);
    if (!pkt) {
        return;
    }

    /* DMA RX offset updated through shared area */
    if (dhd->prot->rx_dataoffset) {
        PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
    }

    PKTSETLEN(dhd->osh, pkt, buflen);

#ifdef DHD_LBUF_AUDIT
    PKTAUDIT(dhd->osh, pkt);
#endif // endif

    /* info ring "debug" data, which is not an 802.3 frame, is sent/hacked with
     * a special ifidx of -1. This is just internal to dhd to get the data to
     * dhd_linux.c:dhd_rx_frame() from here
     * (dhd_prot_process_infobuf_complete).
     */
    dhd_bus_rx_frame(dhd->bus, pkt, DHD_DUMMY_INFO_IF /* ifidx HACK */, 1);
}

/** called on MSG_TYPE_SNAPSHOT_CMPLT message received from dongle */
static void BCMFASTPATH dhd_prot_process_snapshot_complete(dhd_pub_t *dhd,
                                                           void *buf)
{
}

/** Stop protocol: sync w/dongle state. */
void dhd_prot_stop(dhd_pub_t *dhd)
{
    ASSERT(dhd);
    DHD_TRACE(("%s: Enter\n", __FUNCTION__));
}

/* Add any protocol-specific data header.
 * Caller must reserve prot_hdrlen prepend space.
 */
void BCMFASTPATH dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *PKTBUF)
{
    return;
}

uint dhd_prot_hdrlen(dhd_pub_t *dhd, void *PKTBUF)
{
    return 0;
}

#define MAX_MTU_SZ (1600u)

#define PKTBUF pktbuf

/**
 * Called when a tx ethernet packet has been dequeued from a flow queue, and has
 * to be inserted in the corresponding flow ring.
 */
int BCMFASTPATH dhd_prot_txdata(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx)
{
    unsigned long flags;
    dhd_prot_t *prot = dhd->prot;
    host_txbuf_post_t *txdesc = NULL;
    dmaaddr_t pa, meta_pa;
    uint8 *pktdata;
    uint32 pktlen;
    uint32 pktid;
    uint8 prio;
    uint16 flowid = 0;
    uint16 alloced = 0;
    uint16 headroom;
    msgbuf_ring_t *ring;
    flow_ring_table_t *flow_ring_table;
    flow_ring_node_t *flow_ring_node;

    if (dhd->flow_ring_table == NULL) {
        DHD_ERROR(("dhd flow_ring_table is NULL\n"));
        return BCME_NORESOURCE;
    }
#ifdef DHD_PCIE_PKTID
    if (!DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map)) {
        if (dhd->prot->pktid_depleted_cnt == DHD_PKTID_DEPLETED_MAX_COUNT) {
            dhd_bus_stop_queue(dhd->bus);
            dhd->prot->no_tx_resource = TRUE;
        }
        dhd->prot->pktid_depleted_cnt++;
        goto err_no_res;
    } else {
        dhd->prot->pktid_depleted_cnt = 0;
    }
#endif /* DHD_PCIE_PKTID */

    flowid = DHD_PKT_GET_FLOWID(PKTBUF);
    flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
    flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];

    ring = (msgbuf_ring_t *)flow_ring_node->prot_info;

    DHD_RING_LOCK(ring->ring_lock, flags);

    /* Create a unique 32-bit packet id */
    pktid = DHD_NATIVE_TO_PKTID_RSV(dhd, dhd->prot->pktid_tx_map, PKTBUF,
                                    PKTTYPE_DATA_TX);
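    /* Two-phase pktid mapping: the id is only reserved here; the packet's DMA
     * address, length, and secdma handle are attached later via
     * DHD_NATIVE_TO_PKTID_SAVE(), once the buffer has been mapped.
     */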
6818 #if defined(DHD_PCIE_PKTID)
6819     if (pktid == DHD_PKTID_INVALID) {
6820         DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
6821         /*
6822          * If we return error here, the caller would queue the packet
6823          * again. So we'll just free the skb allocated in DMA Zone.
6824          * Since we have not freed the original SKB yet the caller would
6825          * requeue the same.
6826          */
6827         goto err_no_res_pktfree;
6828     }
6829 #endif /* DHD_PCIE_PKTID */
6830 
6831     /* Reserve space in the circular buffer */
6832     txdesc = (host_txbuf_post_t *)dhd_prot_alloc_ring_space(dhd, ring, 1,
6833                                                             &alloced, FALSE);
6834     if (txdesc == NULL) {
6835         DHD_INFO(("%s:%d: HTOD Msgbuf Not available TxCount = %d\n",
6836                   __FUNCTION__, __LINE__,
6837                   OSL_ATOMIC_READ(dhd->osh, &prot->active_tx_count)));
6838         goto err_free_pktid;
6839     }
6840 
6841     /* Extract the data pointer and length information */
6842     pktdata = PKTDATA(dhd->osh, PKTBUF);
6843     pktlen = PKTLEN(dhd->osh, PKTBUF);
6844 
6845     DHD_DBG_PKT_MON_TX(dhd, PKTBUF, pktid);
6846 
6847     /* Ethernet header: Copy before we cache flush packet using DMA_MAP */
6848     bcopy(pktdata, txdesc->txhdr, ETHER_HDR_LEN);
6849 
6850     /* Extract the ethernet header and adjust the data pointer and length */
6851     pktdata = PKTPULL(dhd->osh, PKTBUF, ETHER_HDR_LEN);
6852     pktlen -= ETHER_HDR_LEN;
6853 
6854     /* Map the data pointer to a DMA-able address */
6855     if (SECURE_DMA_ENAB(dhd->osh)) {
6856         int offset = 0;
6857         BCM_REFERENCE(offset);
6858 
6859         if (prot->tx_metadata_offset) {
6860             offset = prot->tx_metadata_offset + ETHER_HDR_LEN;
6861         }
6862 
6863         pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, DMA_TX,
6864                             PKTBUF, 0, ring->dma_buf.secdma, offset);
6865     }
6866 #ifndef BCM_SECURE_DMA
6867     else
6868         pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, DMA_TX,
6869                      PKTBUF, 0);
6870 #endif /* #ifndef BCM_SECURE_DMA */
6871 
6872     if (PHYSADDRISZERO(pa)) {
6873         DHD_ERROR(("%s: Something really bad, unless 0 is "
6874                    "a valid phyaddr for pa\n",
6875                    __FUNCTION__));
6876         ASSERT(0);
6877         goto err_rollback_idx;
6878     }
6879 
6880 #ifdef DMAMAP_STATS
6881     dhd->dma_stats.txdata++;
6882     dhd->dma_stats.txdata_sz += pktlen;
6883 #endif /* DMAMAP_STATS */
6884     /* No need to lock. Save the rest of the packet's metadata */
6885     DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_tx_map, PKTBUF, pktid, pa,
6886                              pktlen, DMA_TX, NULL, ring->dma_buf.secdma,
6887                              PKTTYPE_DATA_TX);
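    /* Two-step pktid handshake: the id was only reserved (RSV) above; the
     * physical address, length and type are committed (SAVE) here, once the
     * DMA mapping is known. Error paths that bail out before this point can
     * therefore release the bare id (see err_free_pktid below).
     */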
6888 
6889 #ifdef TXP_FLUSH_NITEMS
6890     if (ring->pend_items_count == 0) {
6891         ring->start_addr = (void *)txdesc;
6892     }
6893     ring->pend_items_count++;
6894 #endif // endif
6895 
6896     /* Form the Tx descriptor message buffer */
6897 
6898     /* Common message hdr */
6899     txdesc->cmn_hdr.msg_type = MSG_TYPE_TX_POST;
6900     txdesc->cmn_hdr.if_id = ifidx;
6901     txdesc->cmn_hdr.flags = ring->current_phase;
6902 
6903     txdesc->flags = BCMPCIE_PKT_FLAGS_FRAME_802_3;
6904     prio = (uint8)PKTPRIO(PKTBUF);
6905 
6906     txdesc->flags |= (prio & 0x7) << BCMPCIE_PKT_FLAGS_PRIO_SHIFT;
6907     txdesc->seg_cnt = 1;
6908 
6909     txdesc->data_len = htol16((uint16)pktlen);
6910     txdesc->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
6911     txdesc->data_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
6912 
6913     /* Move data pointer to keep ether header in local PKTBUF for later
6914      * reference */
6915     PKTPUSH(dhd->osh, PKTBUF, ETHER_HDR_LEN);
6916 
6917     /* Handle Tx metadata */
6918     headroom = (uint16)PKTHEADROOM(dhd->osh, PKTBUF);
6919     if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset)) {
6920         DHD_ERROR(("No headroom for Metadata tx %d %d\n",
6921                    prot->tx_metadata_offset, headroom));
6922     }
6923 
6924     if (prot->tx_metadata_offset && (headroom >= prot->tx_metadata_offset)) {
6925         DHD_TRACE(("Metadata in tx %d\n", prot->tx_metadata_offset));
6926 
6927         /* Adjust the data pointer to account for meta data in DMA_MAP */
6928         PKTPUSH(dhd->osh, PKTBUF, prot->tx_metadata_offset);
6929 
6930         if (SECURE_DMA_ENAB(dhd->osh)) {
6931             meta_pa =
6932                 SECURE_DMA_MAP_TXMETA(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
6933                                       prot->tx_metadata_offset + ETHER_HDR_LEN,
6934                                       DMA_RX, PKTBUF, 0, ring->dma_buf.secdma);
6935         }
6936 #ifndef BCM_SECURE_DMA
6937         else
6938             meta_pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
6939                               prot->tx_metadata_offset, DMA_RX, PKTBUF, 0);
6940 #endif /* #ifndef BCM_SECURE_DMA */
6941 
6942         if (PHYSADDRISZERO(meta_pa)) {
6943             /* Unmap the data pointer to a DMA-able address */
6944             if (SECURE_DMA_ENAB(dhd->osh)) {
6945                 int offset = 0;
6946                 BCM_REFERENCE(offset);
6947 
6948                 if (prot->tx_metadata_offset) {
6949                     offset = prot->tx_metadata_offset + ETHER_HDR_LEN;
6950                 }
6951 
6952                 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_TX, 0, DHD_DMAH_NULL,
6953                                  ring->dma_buf.secdma, offset);
6954             }
6955 #ifndef BCM_SECURE_DMA
6956             else {
6957                 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_TX, 0, DHD_DMAH_NULL);
6958             }
6959 #endif /* #ifndef BCM_SECURE_DMA */
6960 #ifdef TXP_FLUSH_NITEMS
6961             /* update pend_items_count */
6962             ring->pend_items_count--;
6963 #endif /* TXP_FLUSH_NITEMS */
6964 
6965             DHD_ERROR(("%s: Something really bad, unless 0 is "
6966                        "a valid phyaddr for meta_pa\n",
6967                        __FUNCTION__));
6968             ASSERT(0);
6969             goto err_rollback_idx;
6970         }
6971 
6972         /* Adjust the data pointer back to original value */
6973         PKTPULL(dhd->osh, PKTBUF, prot->tx_metadata_offset);
6974 
6975         txdesc->metadata_buf_len = prot->tx_metadata_offset;
6976         txdesc->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(meta_pa));
6977         txdesc->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(meta_pa));
6978     } else {
6979 #ifdef DHD_HP2P
6980         if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
6981             dhd_update_hp2p_txdesc(dhd, txdesc);
6982         } else
6983 #endif /* DHD_HP2P */
6984             if (1) {
6985                 txdesc->metadata_buf_len = htol16(0);
6986                 txdesc->metadata_buf_addr.high_addr = 0;
6987                 txdesc->metadata_buf_addr.low_addr = 0;
6988             }
6989     }
6990 
6991 #ifdef DHD_PKTID_AUDIT_RING
6992     DHD_PKTID_AUDIT(dhd, prot->pktid_tx_map, pktid, DHD_DUPLICATE_ALLOC);
6993 #endif /* DHD_PKTID_AUDIT_RING */
6994 
6995     txdesc->cmn_hdr.request_id = htol32(pktid);
6996 
6997     DHD_TRACE(("txpost: data_len %d, pktid 0x%04x\n", txdesc->data_len,
6998                txdesc->cmn_hdr.request_id));
6999 
7000 #ifdef DHD_LBUF_AUDIT
7001     PKTAUDIT(dhd->osh, PKTBUF);
7002 #endif // endif
7003 
7004     if (pktlen > MAX_MTU_SZ) {
7005         DHD_ERROR(("%s: ######## pktlen(%d) > MAX_MTU_SZ(%d) #######\n",
7006                    __FUNCTION__, pktlen, MAX_MTU_SZ));
7007         dhd_prhex("txringitem", (volatile uchar *)txdesc,
7008                   sizeof(host_txbuf_post_t), DHD_ERROR_VAL);
7009     }
7010 
7011     /* Update the write pointer in TCM & ring bell */
7012 #if defined(DHD_HP2P) && defined(TXP_FLUSH_NITEMS)
7013     if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
7014         dhd_calc_hp2p_burst(dhd, ring, flowid);
7015     } else {
7016         if ((ring->pend_items_count == prot->txp_threshold) ||
7017             ((uint8 *)txdesc == (uint8 *)DHD_RING_END_VA(ring))) {
7018             dhd_prot_txdata_write_flush(dhd, flowid);
7019         }
7020     }
7021 #elif defined(TXP_FLUSH_NITEMS)
7022     /* Flush if we have either hit the txp_threshold or if this msg is */
7023     /* occupying the last slot in the flow_ring - before wrap around.  */
7024     if ((ring->pend_items_count == prot->txp_threshold) ||
7025         ((uint8 *)txdesc == (uint8 *)DHD_RING_END_VA(ring))) {
7026         dhd_prot_txdata_write_flush(dhd, flowid);
7027     }
7028 #else
7029     /* update ring's WR index and ring doorbell to dongle */
7030     dhd_prot_ring_write_complete(dhd, ring, txdesc, 1);
7031 #endif /* DHD_HP2P && TXP_FLUSH_NITEMS */
7032 
7033 #if defined(TX_STATUS_LATENCY_STATS)
7034     /* set the time when pkt is queued to flowring */
7035     DHD_PKT_SET_QTIME(PKTBUF, OSL_SYSUPTIME_US());
7036 #endif // endif
7037 
7038     OSL_ATOMIC_INC(dhd->osh, &prot->active_tx_count);
7039     /*
7040      * Take a wake lock; do not sleep if we have at least one packet
7041      * to finish.
7042      */
7043     DHD_TXFL_WAKE_LOCK_TIMEOUT(dhd, MAX_TX_TIMEOUT);
7044 
7045     DHD_RING_UNLOCK(ring->ring_lock, flags);
7046 
7047 #ifdef TX_STATUS_LATENCY_STATS
7048     flow_ring_node->flow_info.num_tx_pkts++;
7049 #endif /* TX_STATUS_LATENCY_STATS */
7050     return BCME_OK;
7051 
7052 err_rollback_idx:
7053     /* roll back write pointer for unprocessed message */
7054     if (ring->wr == 0) {
7055         ring->wr = ring->max_items - 1;
7056     } else {
7057         ring->wr--;
7058         if (ring->wr == 0) {
7059             DHD_INFO(("%s: flipping the phase now\n", ring->name));
7060             ring->current_phase =
7061                 ring->current_phase ? 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
7062         }
7063     }
7064 
7065 err_free_pktid:
7066 #if defined(DHD_PCIE_PKTID)
7067 {
7068     void *dmah;
7069     void *secdma;
7070     /* Free up the PKTID. physaddr and pktlen will be garbage. */
7071     DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid, pa, pktlen, dmah,
7072                         secdma, PKTTYPE_NO_CHECK);
7073 }
7074 
7075 err_no_res_pktfree:
7076 #endif /* DHD_PCIE_PKTID */
7077 
7078     DHD_RING_UNLOCK(ring->ring_lock, flags);
7079 err_no_res:
7080     return BCME_NORESOURCE;
7081 } /* dhd_prot_txdata */
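/*
 * Illustrative caller sketch (hypothetical, not part of this file): the bus
 * layer dequeues a packet from the per-flow queue and, on a resource error,
 * requeues it rather than dropping it:
 *
 *     if (dhd_prot_txdata(dhdp, pkt, ifidx) != BCME_OK) {
 *         dhd_flow_queue_reinsert(dhdp, queue, pkt); // hypothetical helper
 *     }
 */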
7082 
7083 /* called with a ring_lock */
7084 /** optimization to write "n" tx items at a time to ring */
7085 void BCMFASTPATH dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flowid)
7086 {
7087 #ifdef TXP_FLUSH_NITEMS
7088     flow_ring_table_t *flow_ring_table;
7089     flow_ring_node_t *flow_ring_node;
7090     msgbuf_ring_t *ring;
7091 
7092     if (dhd->flow_ring_table == NULL) {
7093         return;
7094     }
7095 
7096     flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
7097     flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
7098     ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
7099 
7100     if (ring->pend_items_count) {
7101         /* update ring's WR index and ring doorbell to dongle */
7102         dhd_prot_ring_write_complete(dhd, ring, ring->start_addr,
7103                                      ring->pend_items_count);
7104         ring->pend_items_count = 0;
7105         ring->start_addr = NULL;
7106     }
7107 #endif /* TXP_FLUSH_NITEMS */
7108 }
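/* Batching rationale: dhd_prot_txdata() only accumulates pend_items_count;
 * deferring the WR-index update and doorbell to this flush turns up to
 * txp_threshold per-packet TCM writes into a single one per burst.
 */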
7109 
7110 #undef PKTBUF /* Only defined in the above routine */
7111 
7112 int BCMFASTPATH dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pkt,
7113                                  uchar *buf, uint *len)
7114 {
7115     return 0;
7116 }
7117 
7118 /** post a set of receive buffers to the dongle */
7119 static void BCMFASTPATH dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid,
7120                                               uint32 rxcnt)
7121 {
7122     dhd_prot_t *prot = dhd->prot;
7123 #if defined(DHD_LB_RXC)
7124     int elem_ix;
7125     uint32 *elem;
7126     bcm_workq_t *workq;
7127 
7128     workq = &prot->rx_compl_prod;
7129 
7130     /* Produce the work item */
7131     elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
7132     if (elem_ix == BCM_RING_FULL) {
7133         DHD_ERROR(("%s LB RxCompl workQ is full\n", __FUNCTION__));
7134         ASSERT(0);
7135         return;
7136     }
7137 
7138     elem = WORKQ_ELEMENT(uint32, workq, elem_ix);
7139     *elem = pktid;
7140 
7141     smp_wmb();
7142 
7143     /* Sync WR index to consumer if the SYNC threshold has been reached */
7144     if (++prot->rx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) {
7145         bcm_workq_prod_sync(workq);
7146         prot->rx_compl_prod_sync = 0;
7147     }
7148 
7149     DHD_INFO(("%s: rx_compl_prod pktid<%u> sync<%d>\n", __FUNCTION__, pktid,
7150               prot->rx_compl_prod_sync));
7151 
7152 #endif /* DHD_LB_RXC */
7153 
7154     if (prot->rxbufpost >= rxcnt) {
7155         prot->rxbufpost -= (uint16)rxcnt;
7156     } else {
7157         prot->rxbufpost = 0;
7158     }
7159 
7160 #if !defined(DHD_LB_RXC)
7161     if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) {
7162         dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
7163     }
7164 #endif /* !DHD_LB_RXC */
7165     return;
7166 }
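/* Replenish example (assuming the default sizing defined at the top of this
 * file): with max_rxbufpost == 256 and RXBUFPOST_THRESHOLD == 32, fresh rx
 * buffers are posted once the outstanding count drops to 224 or fewer.
 */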
7167 
7168 /* called before an ioctl is sent to the dongle */
7169 static void dhd_prot_wlioctl_intercept(dhd_pub_t *dhd, wl_ioctl_t *ioc,
7170                                        void *buf)
7171 {
7172     dhd_prot_t *prot = dhd->prot;
7173     int slen = 0;
7174 
7175     if (ioc->cmd == WLC_SET_VAR && buf != NULL &&
7176         !strcmp(buf, "pcie_bus_tput")) {
7177         pcie_bus_tput_params_t *tput_params;
7178 
7179         slen = strlen("pcie_bus_tput") + 1;
7180         tput_params = (pcie_bus_tput_params_t *)((char *)buf + slen);
7181         bcopy(&prot->host_bus_throughput_buf.pa, &tput_params->host_buf_addr,
7182               sizeof(tput_params->host_buf_addr));
7183         tput_params->host_buf_len = DHD_BUS_TPUT_BUF_LEN;
7184     }
7185 }
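/* For reference, a WLC_SET_VAR buffer is the NUL-terminated iovar name
 * followed immediately by its parameter block:
 *
 *     "pcie_bus_tput\0" | pcie_bus_tput_params_t
 *
 * so the intercept above can patch host_buf_addr/host_buf_len in place
 * before the request is handed to the dongle.
 */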
7186 
7187 /* called after an ioctl returns from dongle */
7188 static void dhd_prot_wl_ioctl_ret_intercept(dhd_pub_t *dhd, wl_ioctl_t *ioc,
7189                                             void *buf, int ifidx, int ret,
7190                                             int len)
7191 {
7192     if (!ret && ioc->cmd == WLC_SET_VAR && buf != NULL) {
7193         /* Intercept the wme_dp ioctl here */
7194         if (!strcmp(buf, "wme_dp")) {
7195             int slen, val = 0;
7196             slen = strlen("wme_dp") + 1;
7197             if (len >= (int)(slen + sizeof(int))) {
7198                 bcopy(((char *)buf + slen), &val, sizeof(int));
7199             }
7200             dhd->wme_dp = (uint8)ltoh32(val);
7201         }
7202     }
7203 }
7204 
7205 #ifdef DHD_PM_CONTROL_FROM_FILE
7206 extern bool g_pm_control;
7207 #endif /* DHD_PM_CONTROL_FROM_FILE */
7208 
7209 /** Use protocol to issue ioctl to dongle. Only one ioctl may be in transit. */
7210 int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t *ioc, void *buf,
7211                    int len)
7212 {
7213     int ret = -1;
7214     uint8 action;
7215 
7216     if (dhd->bus->is_linkdown) {
7217         DHD_ERROR_RLMT(
7218             ("%s : PCIe link is down. we have nothing to do\n", __FUNCTION__));
7219         goto done;
7220     }
7221 
7222     if (dhd_query_bus_erros(dhd)) {
7223         DHD_ERROR_RLMT(
7224             ("%s : some BUS error. we have nothing to do\n", __FUNCTION__));
7225         goto done;
7226     }
7227 
7228     if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) {
7229         DHD_ERROR_RLMT(("%s : bus is down. we have nothing to do -"
7230                         " bus state: %d, sent hang: %d\n",
7231                         __FUNCTION__, dhd->busstate, dhd->hang_was_sent));
7232         goto done;
7233     }
7234 
7235     if (dhd->busstate == DHD_BUS_SUSPEND) {
7236         DHD_ERROR(("%s : bus is suspended\n", __FUNCTION__));
7237         goto done;
7238     }
7239 
7240     DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7241 
7242     if (ioc->cmd == WLC_SET_PM) {
7243 #ifdef DHD_PM_CONTROL_FROM_FILE
7244         if (g_pm_control == TRUE) {
7245             DHD_ERROR(("%s: SET PM ignored!(Requested:%d)\n", __FUNCTION__,
7246                        buf ? *(char *)buf : 0));
7247             goto done;
7248         }
7249 #endif /* DHD_PM_CONTROL_FROM_FILE */
7250         DHD_TRACE_HW4(
7251             ("%s: SET PM to %d\n", __FUNCTION__, buf ? *(char *)buf : 0));
7252     }
7253 
7254     ASSERT(len <= WLC_IOCTL_MAXLEN);
7255 
7256     if (len > WLC_IOCTL_MAXLEN) {
7257         goto done;
7258     }
7259 
7260     action = ioc->set;
7261 
7262     dhd_prot_wlioctl_intercept(dhd, ioc, buf);
7263 
7264     if (action & WL_IOCTL_ACTION_SET) {
7265         ret = dhd_msgbuf_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
7266     } else {
7267         ret = dhd_msgbuf_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
7268         if (ret > 0) {
7269             ioc->used = ret;
7270         }
7271     }
7272 
7273     /* Too many programs assume ioctl() returns 0 on success */
7274     if (ret >= 0) {
7275         ret = 0;
7276     } else {
7277         DHD_INFO(("%s: status ret value is %d \n", __FUNCTION__, ret));
7278         dhd->dongle_error = ret;
7279     }
7280 
7281     dhd_prot_wl_ioctl_ret_intercept(dhd, ioc, buf, ifidx, ret, len);
7282 
7283 done:
7284     return ret;
7285 } /* dhd_prot_ioctl */
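/*
 * Minimal usage sketch (illustrative only; 'dhdp' is assumed to be a valid
 * dhd_pub_t held by the caller):
 *
 *     wl_ioctl_t ioc;
 *     char buf[64] = "ver";       // iovar name, NUL-terminated
 *     memset(&ioc, 0, sizeof(ioc));
 *     ioc.cmd = WLC_GET_VAR;
 *     ioc.buf = buf;
 *     ioc.len = sizeof(buf);
 *     ioc.set = 0;                // query action
 *     if (dhd_prot_ioctl(dhdp, 0, &ioc, buf, sizeof(buf)) == 0) {
 *         // buf now holds the response string
 *     }
 */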
7286 
7287 /** test / loopback */
7288 
7289 int dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len)
7290 {
7291     unsigned long flags;
7292     dhd_prot_t *prot = dhd->prot;
7293     uint16 alloced = 0;
7294     ioct_reqst_hdr_t *ioct_rqst;
7295 
7296     uint16 hdrlen = sizeof(ioct_reqst_hdr_t);
7297     uint16 msglen = len + hdrlen;
7298     msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
7299 
7300     msglen = ALIGN_SIZE(msglen, DMA_ALIGN_LEN);
7301     msglen = LIMIT_TO_MAX(msglen, MSGBUF_MAX_MSG_SIZE);
7302 
7303     DHD_RING_LOCK(ring->ring_lock, flags);
7304 
7305     ioct_rqst = (ioct_reqst_hdr_t *)dhd_prot_alloc_ring_space(dhd, ring, 1,
7306                                                               &alloced, FALSE);
7307     if (ioct_rqst == NULL) {
7308         DHD_RING_UNLOCK(ring->ring_lock, flags);
7309         return 0;
7310     }
7311 
7312     {
7313         uint8 *ptr;
7314         uint16 i;
7315 
7316         ptr = (uint8 *)ioct_rqst;
7317         for (i = 0; i < msglen; i++) {
7318             ptr[i] = i % 0x100;
7319         }
7320     }
7321 
7322     /* Common msg buf hdr */
7323     ioct_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
7324     ring->seqnum++;
7325 
7326     ioct_rqst->msg.msg_type = MSG_TYPE_LOOPBACK;
7327     ioct_rqst->msg.if_id = 0;
7328     ioct_rqst->msg.flags = ring->current_phase;
7329 
7330     bcm_print_bytes("LPBK REQ: ", (uint8 *)ioct_rqst, msglen);
7331 
7332     /* update ring's WR index and ring doorbell to dongle */
7333     dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
7334 
7335     DHD_RING_UNLOCK(ring->ring_lock, flags);
7336 
7337     return 0;
7338 }
7339 
7340 /** test / loopback */
7341 void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dmaxfer)
7342 {
7343     if (dmaxfer == NULL) {
7344         return;
7345     }
7346 
7347     dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
7348     dhd_dma_buf_free(dhd, &dmaxfer->dstmem);
7349 }
7350 
7351 /** test / loopback */
7352 int dhd_prepare_schedule_dmaxfer_free(dhd_pub_t *dhdp)
7353 {
7354     dhd_prot_t *prot = dhdp->prot;
7355     dhd_dmaxfer_t *dmaxfer = &prot->dmaxfer;
7356     dmaxref_mem_map_t *dmap = NULL;
7357 
7358     dmap = MALLOCZ(dhdp->osh, sizeof(dmaxref_mem_map_t));
7359     if (!dmap) {
7360         DHD_ERROR(("%s: dmap alloc failed\n", __FUNCTION__));
7361         goto mem_alloc_fail;
7362     }
7363     dmap->srcmem = &(dmaxfer->srcmem);
7364     dmap->dstmem = &(dmaxfer->dstmem);
7365 
7366     DMAXFER_FREE(dhdp, dmap);
7367     return BCME_OK;
7368 
7369 mem_alloc_fail:
7370     if (dmap) {
7371         MFREE(dhdp->osh, dmap, sizeof(dmaxref_mem_map_t));
7372         dmap = NULL;
7373     }
7374     return BCME_NOMEM;
7375 } /* dhd_prepare_schedule_dmaxfer_free */
7376 
7377 /** test / loopback */
7378 void dmaxfer_free_prev_dmaaddr(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap)
7379 {
7380     dhd_dma_buf_free(dhdp, dmmap->srcmem);
7381     dhd_dma_buf_free(dhdp, dmmap->dstmem);
7382 
7383     MFREE(dhdp->osh, dmmap, sizeof(dmaxref_mem_map_t));
7384     dhdp->bus->dmaxfer_complete = TRUE;
7385     dhd_os_dmaxfer_wake(dhdp);
7386 
7387     dmmap = NULL;
7388 } /* dmaxfer_free_prev_dmaaddr */
7389 
7390 /** test / loopback */
7391 int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, uint srcdelay,
7392                             uint destdelay, dhd_dmaxfer_t *dmaxfer)
7393 {
7394     uint i = 0, j = 0;
7395     if (!dmaxfer) {
7396         return BCME_ERROR;
7397     }
7398 
7399     /* First free up existing buffers */
7400     dmaxfer_free_dmaaddr(dhd, dmaxfer);
7401 
7402     if (dhd_dma_buf_alloc(dhd, &dmaxfer->srcmem, len)) {
7403         return BCME_NOMEM;
7404     }
7405 
7406     if (dhd_dma_buf_alloc(dhd, &dmaxfer->dstmem, len + 0x8)) {
7407         dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
7408         return BCME_NOMEM;
7409     }
7410 
7411     dmaxfer->len = len;
7412 
7413     /* Populate source with a pattern like below
7414      * 0x00000000
7415      * 0x01010101
7416      * 0x02020202
7417      * 0x03030303
7418      * 0x04040404
7419      * 0x05050505
7420      * ...
7421      * 0xFFFFFFFF
7422      */
7423     while (i < dmaxfer->len) {
7424         ((uint8 *)dmaxfer->srcmem.va)[i] = j % 0x100;
7425         i++;
7426         if (i % 0x4 == 0) {
7427             j++;
7428         }
7429     }
7430 
7431     OSL_CACHE_FLUSH(dmaxfer->srcmem.va, dmaxfer->len);
7432 
7433     dmaxfer->srcdelay = srcdelay;
7434     dmaxfer->destdelay = destdelay;
7435 
7436     return BCME_OK;
7437 } /* dmaxfer_prepare_dmaaddr */
7438 
7439 static void dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg)
7440 {
7441     dhd_prot_t *prot = dhd->prot;
7442     uint64 end_usec;
7443     pcie_dmaxfer_cmplt_t *cmplt = (pcie_dmaxfer_cmplt_t *)msg;
7444     int buf_free_scheduled;
7445 
7446     BCM_REFERENCE(cmplt);
7447     end_usec = OSL_SYSUPTIME_US();
7448 
7449     DHD_ERROR(("DMA loopback status: %d\n", cmplt->compl_hdr.status));
7450     prot->dmaxfer.status = cmplt->compl_hdr.status;
7451     OSL_CACHE_INV(prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
7452     if (prot->dmaxfer.srcmem.va && prot->dmaxfer.dstmem.va) {
7453         if (memcmp(prot->dmaxfer.srcmem.va, prot->dmaxfer.dstmem.va,
7454                    prot->dmaxfer.len) ||
7455             cmplt->compl_hdr.status != BCME_OK) {
7456             DHD_ERROR(("DMA loopback failed\n"));
7457             /* It is observed that sometimes the completion
7458              * header status is set to OK but the memcmp fails;
7459              * hence always explicitly set the dmaxfer status
7460              * to error when this happens.
7461              */
7462             prot->dmaxfer.status = BCME_ERROR;
7463             prhex("XFER SRC: ", prot->dmaxfer.srcmem.va, prot->dmaxfer.len);
7464             prhex("XFER DST: ", prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
7465         } else {
7466             switch (prot->dmaxfer.d11_lpbk) {
7467                 case M2M_DMA_LPBK: {
7468                     DHD_ERROR(("DMA successful pcie m2m DMA loopback\n"));
7469                     break;
7470                 }
7471                 case D11_LPBK: {
7472                     DHD_ERROR(("DMA successful with d11 loopback\n"));
7473                     break;
7474                 }
7475                 case BMC_LPBK: {
7476                     DHD_ERROR(("DMA successful with bmc loopback\n"));
7477                     break;
7478                 }
7479                 case M2M_NON_DMA_LPBK: {
7480                     DHD_ERROR(("DMA successful pcie m2m NON DMA loopback\n"));
7481                     break;
7482                 }
7483                 case D11_HOST_MEM_LPBK: {
7484                     DHD_ERROR(("DMA successful d11 host mem loopback\n"));
7485                     break;
7486                 }
7487                 case BMC_HOST_MEM_LPBK: {
7488                     DHD_ERROR(("DMA successful bmc host mem loopback\n"));
7489                     break;
7490                 }
7491                 default: {
7492                     DHD_ERROR(("Invalid loopback option\n"));
7493                     break;
7494                 }
7495             }
7496 
7497             if (DHD_LPBKDTDUMP_ON()) {
7498                 /* debug info print of the Tx and Rx buffers */
7499                 dhd_prhex("XFER SRC: ", prot->dmaxfer.srcmem.va,
7500                           prot->dmaxfer.len, DHD_INFO_VAL);
7501                 dhd_prhex("XFER DST: ", prot->dmaxfer.dstmem.va,
7502                           prot->dmaxfer.len, DHD_INFO_VAL);
7503             }
7504         }
7505     }
7506 
7507     buf_free_scheduled = dhd_prepare_schedule_dmaxfer_free(dhd);
7508     end_usec -= prot->dmaxfer.start_usec;
7509     if (end_usec) {
7510         prot->dmaxfer.time_taken = end_usec;
7511         DHD_ERROR(
7512             ("DMA loopback %d bytes in %lu usec, %u kBps\n", prot->dmaxfer.len,
7513              (unsigned long)end_usec,
7514              (prot->dmaxfer.len * (0x3E8 * 0x3E8 / 0x400) / (uint32)end_usec)));
7515     }
7516     dhd->prot->dmaxfer.in_progress = FALSE;
7517 
7518     if (buf_free_scheduled != BCME_OK) {
7519         dhd->bus->dmaxfer_complete = TRUE;
7520         dhd_os_dmaxfer_wake(dhd);
7521     }
7522 }
7523 
7524 /** Test functionality.
7525  * Transfers bytes from host to dongle and to host again using DMA
7526  * This function is not reentrant, as prot->dmaxfer.in_progress is not protected
7527  * by a spinlock.
7528  */
7529 int dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay,
7530                           uint destdelay, uint d11_lpbk, uint core_num)
7531 {
7532     unsigned long flags;
7533     int ret = BCME_OK;
7534     dhd_prot_t *prot = dhd->prot;
7535     pcie_dma_xfer_params_t *dmap;
7536     uint32 xferlen = LIMIT_TO_MAX(len, DMA_XFER_LEN_LIMIT);
7537     uint16 alloced = 0;
7538     msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
7539 
7540     if (prot->dmaxfer.in_progress) {
7541         DHD_ERROR(("DMA is in progress...\n"));
7542         return BCME_ERROR;
7543     }
7544 
7545     if (d11_lpbk >= MAX_LPBK) {
7546         DHD_ERROR(("loopback mode should be one of"
7547                    " 0-PCIE_M2M_DMA, 1-D11, 2-BMC or 3-PCIE_M2M_NonDMA\n"));
7548         return BCME_ERROR;
7549     }
7550 
7551     DHD_RING_LOCK(ring->ring_lock, flags);
7552 
7553     prot->dmaxfer.in_progress = TRUE;
7554     if ((ret = dmaxfer_prepare_dmaaddr(dhd, xferlen, srcdelay, destdelay,
7555                                        &prot->dmaxfer)) != BCME_OK) {
7556         prot->dmaxfer.in_progress = FALSE;
7557         DHD_RING_UNLOCK(ring->ring_lock, flags);
7558         return ret;
7559     }
7560 
7561     dmap = (pcie_dma_xfer_params_t *)dhd_prot_alloc_ring_space(dhd, ring, 1,
7562                                                                &alloced, FALSE);
7563     if (dmap == NULL) {
7564         dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
7565         prot->dmaxfer.in_progress = FALSE;
7566         DHD_RING_UNLOCK(ring->ring_lock, flags);
7567         return BCME_NOMEM;
7568     }
7569 
7570     /* Common msg buf hdr */
7571     dmap->cmn_hdr.msg_type = MSG_TYPE_LPBK_DMAXFER;
7572     dmap->cmn_hdr.request_id = htol32(DHD_FAKE_PKTID);
7573     dmap->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
7574     dmap->cmn_hdr.flags = ring->current_phase;
7575     ring->seqnum++;
7576 
7577     dmap->host_input_buf_addr.high =
7578         htol32(PHYSADDRHI(prot->dmaxfer.srcmem.pa));
7579     dmap->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.srcmem.pa));
7580     dmap->host_ouput_buf_addr.high =
7581         htol32(PHYSADDRHI(prot->dmaxfer.dstmem.pa));
7582     dmap->host_ouput_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.dstmem.pa));
7583     dmap->xfer_len = htol32(prot->dmaxfer.len);
7584     dmap->srcdelay = htol32(prot->dmaxfer.srcdelay);
7585     dmap->destdelay = htol32(prot->dmaxfer.destdelay);
7586     prot->dmaxfer.d11_lpbk = d11_lpbk;
7587     dmap->flags = (((core_num & PCIE_DMA_XFER_FLG_CORE_NUMBER_MASK)
7588                     << PCIE_DMA_XFER_FLG_CORE_NUMBER_SHIFT) |
7589                    ((prot->dmaxfer.d11_lpbk & PCIE_DMA_XFER_FLG_D11_LPBK_MASK)
7590                     << PCIE_DMA_XFER_FLG_D11_LPBK_SHIFT));
7591     prot->dmaxfer.start_usec = OSL_SYSUPTIME_US();
7592 
7593     /* update ring's WR index and ring doorbell to dongle */
7594     dhd_prot_ring_write_complete(dhd, ring, dmap, 1);
7595 
7596     DHD_RING_UNLOCK(ring->ring_lock, flags);
7597 
7598     DHD_ERROR(("DMA loopback Started...\n"));
7599 
7600     return BCME_OK;
7601 } /* dhdmsgbuf_dmaxfer_req */
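/* Typical test flow (illustrative): issue the request, wait until the
 * completion path sets bus->dmaxfer_complete and calls dhd_os_dmaxfer_wake(),
 * then read back the verdict:
 *
 *     dhdmsgbuf_dmaxfer_req(dhd, len, srcdelay, destdelay, d11_lpbk, core);
 *     // ... block on the dmaxfer wait object ...
 *     dhdmsgbuf_dmaxfer_status(dhd, &result);
 */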
7602 
7603 int dhdmsgbuf_dmaxfer_status(dhd_pub_t *dhd, dma_xfer_info_t *result)
7604 {
7605     dhd_prot_t *prot = dhd->prot;
7606 
7607     if (prot->dmaxfer.in_progress) {
7608         result->status = DMA_XFER_IN_PROGRESS;
7609     } else if (prot->dmaxfer.status == 0) {
7610         result->status = DMA_XFER_SUCCESS;
7611     } else {
7612         result->status = DMA_XFER_FAILED;
7613     }
7614 
7615     result->type = prot->dmaxfer.d11_lpbk;
7616     result->error_code = prot->dmaxfer.status;
7617     result->num_bytes = prot->dmaxfer.len;
7618     result->time_taken = prot->dmaxfer.time_taken;
7619     if (prot->dmaxfer.time_taken) {
7620         /* throughput in kBps */
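        /* 0x3E8 * 0x3E8 / 0x400 == 1000 * 1000 / 1024, i.e. the factor that
         * converts bytes per microsecond into kilobytes (1024 B) per second.
         */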
7621         result->tput = (prot->dmaxfer.len * (0x3E8 * 0x3E8 / 0x400)) /
7622                        (uint32)prot->dmaxfer.time_taken;
7623     }
7624 
7625     return BCME_OK;
7626 }
7627 
7628 /** Called in the process of submitting an ioctl to the dongle */
7629 static int dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
7630                                   void *buf, uint len, uint8 action)
7631 {
7632     int ret = 0;
7633     uint copylen = 0;
7634 
7635     DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7636 
7637     if (dhd->bus->is_linkdown) {
7638         DHD_ERROR(
7639             ("%s : PCIe link is down. we have nothing to do\n", __FUNCTION__));
7640         return -EIO;
7641     }
7642 
7643     if (dhd->busstate == DHD_BUS_DOWN) {
7644         DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
7645         return -EIO;
7646     }
7647 
7648     /* don't talk to the dongle if fw is about to be reloaded */
7649     if (dhd->hang_was_sent) {
7650         DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
7651                    __FUNCTION__));
7652         return -EIO;
7653     }
7654 
7655     if (cmd == WLC_GET_VAR && buf) {
7656         if (!len || !*(uint8 *)buf) {
7657             DHD_ERROR(("%s(): Zero length bailing\n", __FUNCTION__));
7658             ret = BCME_BADARG;
7659             goto done;
7660         }
7661 
7662         /* Respond "bcmerror" and "bcmerrorstr" with local cache */
7663         copylen = MIN(len, BCME_STRLEN);
7664 
7665         if ((len >= strlen("bcmerrorstr")) &&
7666             (!strcmp((char *)buf, "bcmerrorstr"))) {
7667             strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), copylen);
7668             *(uint8 *)((uint8 *)buf + (copylen - 1)) = '\0';
7669             goto done;
7670         } else if ((len >= strlen("bcmerror")) &&
7671                    !strcmp((char *)buf, "bcmerror")) {
7672             *(uint32 *)buf = dhd->dongle_error;
7673             goto done;
7674         }
7675     }
7676 
7677     DHD_CTL(("query_ioctl: ACTION %d ifidx %d cmd %d len %d\n", action, ifidx,
7678              cmd, len));
7679 
7680     ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
7681     if (ret < 0) {
7682         DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
7683         goto done;
7684     }
7685     /* wait for IOCTL completion message from dongle and get first fragment */
7686     ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
7687 
7688 done:
7689     return ret;
7690 }
7691 
7692 void dhd_msgbuf_iovar_timeout_dump(dhd_pub_t *dhd)
7693 {
7694     uint32 intstatus;
7695     dhd_prot_t *prot = dhd->prot;
7696     dhd->rxcnt_timeout++;
7697     dhd->rx_ctlerrs++;
7698     dhd->iovar_timeout_occured = TRUE;
7699     DHD_ERROR(("%s: resumed on timeout rxcnt_timeout%s %d ioctl_cmd %d "
7700                "trans_id %d state %d busstate=%d ioctl_received=%d\n",
7701                __FUNCTION__,
7702                dhd->is_sched_error ? " due to scheduling problem" : "",
7703                dhd->rxcnt_timeout, prot->curr_ioctl_cmd, prot->ioctl_trans_id,
7704                prot->ioctl_state, dhd->busstate, prot->ioctl_received));
7705 #if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
7706     if (dhd->is_sched_error && dhd->memdump_enabled == DUMP_MEMFILE_BUGON) {
7707         /* change g_assert_type to trigger Kernel panic */
7708         g_assert_type = 0x2;
7709         /* use ASSERT() to trigger panic */
7710         ASSERT(0);
7711     }
7712 #endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */
7713 
7714     if (prot->curr_ioctl_cmd == WLC_SET_VAR ||
7715         prot->curr_ioctl_cmd == WLC_GET_VAR) {
7716         char iovbuf[32];
7717         int dump_size = 128;
7718         uint8 *ioctl_buf = (uint8 *)prot->ioctbuf.va;
7719         memset(iovbuf, 0, sizeof(iovbuf));
7720         strncpy(iovbuf, ioctl_buf, sizeof(iovbuf) - 1);
7721         iovbuf[sizeof(iovbuf) - 1] = '\0';
7722         DHD_ERROR(("Current IOVAR (%s): %s\n",
7723                    prot->curr_ioctl_cmd == WLC_SET_VAR ? "WLC_SET_VAR"
7724                                                        : "WLC_GET_VAR",
7725                    iovbuf));
7726         DHD_ERROR(("========== START IOCTL REQBUF DUMP ==========\n"));
7727         prhex("ioctl_buf", (const u8 *)ioctl_buf, dump_size);
7728         DHD_ERROR(("\n========== END IOCTL REQBUF DUMP ==========\n"));
7729     }
7730 
7731     /* Check the PCIe link status by reading intstatus register */
7732     intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
7733                            dhd->bus->pcie_mailbox_int, 0, 0);
7734     if (intstatus == (uint32)-1) {
7735         DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
7736         dhd->bus->is_linkdown = TRUE;
7737     }
7738 
7739     dhd_bus_dump_console_buffer(dhd->bus);
7740     dhd_prot_debug_info_print(dhd);
7741 }
7742 
7743 /**
7744  * Waits for the IOCTL completion message from the dongle and copies it into
7745  * the caller-provided parameter 'buf'.
7746  */
7747 static int dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf)
7748 {
7749     dhd_prot_t *prot = dhd->prot;
7750     int timeleft;
7751     unsigned long flags;
7752     int ret = 0;
7753     static uint cnt = 0;
7754 
7755     DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7756 
7757     if (dhd_query_bus_erros(dhd)) {
7758         ret = -EIO;
7759         goto out;
7760     }
7761 
7762     timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
7763 
7764 #ifdef DHD_RECOVER_TIMEOUT
7765     if (prot->ioctl_received == 0) {
7766         uint32 intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
7767                                       dhd->bus->pcie_mailbox_int, 0, 0);
7768         int host_irq_disabled = dhdpcie_irq_disabled(dhd->bus);
7769         if ((intstatus) && (intstatus != (uint32)-1) && (timeleft == 0) &&
7770             (!dhd_query_bus_erros(dhd))) {
7771             DHD_ERROR(("%s: iovar timeout trying again intstatus=%x"
7772                        " host_irq_disabled=%d\n",
7773                        __FUNCTION__, intstatus, host_irq_disabled));
7774             dhd_pcie_intr_count_dump(dhd);
7775             dhd_print_tasklet_status(dhd);
7776             dhd_prot_process_ctrlbuf(dhd);
7777             timeleft =
7778                 dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
7779             /* Clear Interrupts */
7780             dhdpcie_bus_clear_intstatus(dhd->bus);
7781         }
7782     }
7783 #endif /* DHD_RECOVER_TIMEOUT */
7784 
7785     if (dhd->conf->ctrl_resched > 0 && timeleft == 0 &&
7786         (!dhd_query_bus_erros(dhd))) {
7787         cnt++;
7788         if (cnt <= dhd->conf->ctrl_resched) {
7789             uint buscorerev = dhd->bus->sih->buscorerev;
7790             uint32 intstatus = 0, intmask = 0;
7791             intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
7792                                    PCIMailBoxInt(buscorerev), 0, 0);
7793             intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
7794                                  PCIMailBoxMask(buscorerev), 0, 0);
7795             if (intstatus) {
7796                 DHD_ERROR(("%s: reschedule dhd_dpc, cnt=%d, intstatus=0x%x, "
7797                            "intmask=0x%x\n",
7798                            __FUNCTION__, cnt, intstatus, intmask));
7799                 dhd->bus->intstatus = intstatus;
7800                 dhd->bus->ipend = TRUE;
7801                 dhd->bus->dpc_sched = TRUE;
7802                 dhd_sched_dpc(dhd);
7803                 timeleft = dhd_os_ioctl_resp_wait(dhd, &prot->ioctl_received);
7804             }
7805         }
7806     } else {
7807         cnt = 0;
7808     }
7809 
7810     if (timeleft == 0 && (!dhd_query_bus_erros(dhd))) {
7811         /* check if resumed on time out related to scheduling issue */
7812         dhd->is_sched_error = FALSE;
7813         if (dhd->bus->isr_entry_time > prot->ioctl_fillup_time) {
7814             dhd->is_sched_error = dhd_bus_query_dpc_sched_errors(dhd);
7815         }
7816 
7817         dhd_msgbuf_iovar_timeout_dump(dhd);
7818 
7819 #ifdef DHD_FW_COREDUMP
7820         /* Collect socram dump */
7821         if (dhd->memdump_enabled) {
7822             /* collect core dump */
7823             dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT;
7824             dhd_bus_mem_dump(dhd);
7825         }
7826 #endif /* DHD_FW_COREDUMP */
7827 
7828         ret = -ETIMEDOUT;
7829         goto out;
7830     } else {
7831         if (prot->ioctl_received != IOCTL_RETURN_ON_SUCCESS) {
7832             DHD_ERROR(("%s: IOCTL failure due to ioctl_received = %d\n",
7833                        __FUNCTION__, prot->ioctl_received));
7834             ret = -EINVAL;
7835             goto out;
7836         }
7837         dhd->rxcnt_timeout = 0;
7838         dhd->rx_ctlpkts++;
7839         DHD_CTL(("%s: ioctl resp resumed, got %d\n", __FUNCTION__,
7840                  prot->ioctl_resplen));
7841     }
7842 
7843     if (dhd->prot->ioctl_resplen > len) {
7844         dhd->prot->ioctl_resplen = (uint16)len;
7845     }
7846     if (buf) {
7847         bcopy(dhd->prot->retbuf.va, buf, dhd->prot->ioctl_resplen);
7848     }
7849 
7850     ret = (int)(dhd->prot->ioctl_status);
7851 
7852 out:
7853     DHD_GENERAL_LOCK(dhd, flags);
7854     dhd->prot->ioctl_state = 0;
7855     dhd->prot->ioctl_resplen = 0;
7856     dhd->prot->ioctl_received = IOCTL_WAIT;
7857     dhd->prot->curr_ioctl_cmd = 0;
7858     DHD_GENERAL_UNLOCK(dhd, flags);
7859 
7860     return ret;
7861 } /* dhd_msgbuf_wait_ioctl_cmplt */
7862 
7863 static int dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf,
7864                                 uint len, uint8 action)
7865 {
7866     int ret = 0;
7867 
7868     DHD_TRACE(("%s: Enter \n", __FUNCTION__));
7869 
7870     if (dhd->bus->is_linkdown) {
7871         DHD_ERROR(
7872             ("%s : PCIe link is down. we have nothing to do\n", __FUNCTION__));
7873         return -EIO;
7874     }
7875 
7876     if (dhd->busstate == DHD_BUS_DOWN) {
7877         DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
7878         return -EIO;
7879     }
7880 
7881     /* don't talk to the dongle if fw is about to be reloaded */
7882     if (dhd->hang_was_sent) {
7883         DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
7884                    __FUNCTION__));
7885         return -EIO;
7886     }
7887 
7888     DHD_CTL(("ACTION %d ifidx %d cmd %d len %d\n", action, ifidx, cmd, len));
7889 
7890     /* Fill up msgbuf for ioctl req */
7891     ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
7892     if (ret < 0) {
7893         DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
7894         goto done;
7895     }
7896     ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
7897 
7898 done:
7899     return ret;
7900 }
7901 
7902 /** Called by upper DHD layer. Handles a protocol control response
7903  * asynchronously. */
7904 int dhd_prot_ctl_complete(dhd_pub_t *dhd)
7905 {
7906     return 0;
7907 }
7908 
7909 /** Called by upper DHD layer. Check for and handle local prot-specific iovar
7910  * commands */
7911 int dhd_prot_iovar_op(dhd_pub_t *dhd, const char *name, void *params, int plen,
7912                       void *arg, int len, bool set)
7913 {
7914     return BCME_UNSUPPORTED;
7915 }
7916 
7917 #ifdef DHD_DUMP_PCIE_RINGS
7918 int dhd_d2h_h2d_ring_dump(dhd_pub_t *dhd, void *file, const void *user_buf,
7919                           unsigned long *file_posn, bool file_write)
7920 {
7921     dhd_prot_t *prot;
7922     msgbuf_ring_t *ring;
7923     int ret = 0;
7924     uint16 h2d_flowrings_total;
7925     uint16 flowid;
7926 
7927     if (!(dhd) || !(dhd->prot)) {
7928         goto exit;
7929     }
7930     prot = dhd->prot;
7931 
7932     /* The ring dump sequence below matches the one used by the parser. */
7933     ring = &prot->h2dring_ctrl_subn;
7934     if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0) {
7935         goto exit;
7936     }
7937 
7938     ring = &prot->h2dring_rxp_subn;
7939     if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0) {
7940         goto exit;
7941     }
7942 
7943     ring = &prot->d2hring_ctrl_cpln;
7944     if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0) {
7945         goto exit;
7946     }
7947 
7948     ring = &prot->d2hring_tx_cpln;
7949     if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0) {
7950         goto exit;
7951     }
7952 
7953     ring = &prot->d2hring_rx_cpln;
7954     if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0) {
7955         goto exit;
7956     }
7957 
7958     h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
7959     FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
7960         if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0) {
7961             goto exit;
7962         }
7963     }
7964 
7965 #ifdef EWP_EDL
7966     if (dhd->dongle_edl_support) {
7967         ring = prot->d2hring_edl;
7968         if ((ret = dhd_edl_ring_hdr_write(dhd, ring, file, user_buf,
7969                                           file_posn)) < 0) {
7970             goto exit;
7971         }
7972     } else if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6 &&
7973                !dhd->dongle_edl_support)
7974 #else
7975     if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
7976 #endif /* EWP_EDL */
7977     {
7978         ring = prot->h2dring_info_subn;
7979         if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0) {
7980             goto exit;
7981         }
7982 
7983         ring = prot->d2hring_info_cpln;
7984         if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0) {
7985             goto exit;
7986         }
7987     }
7988 
7989 exit:
7990     return ret;
7991 }
7992 
7993 /* Write to file */
7994 static int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file,
7995                           const void *user_buf, unsigned long *file_posn)
7996 {
7997     int ret = 0;
7998 
7999     if (ring == NULL) {
8000         DHD_ERROR(("%s: Ring not initialised, failed to dump ring contents\n",
8001                    __FUNCTION__));
8002         return BCME_ERROR;
8003     }
8004     if (file) {
8005         ret = dhd_os_write_file_posn(
8006             file, file_posn, (char *)(ring->dma_buf.va),
8007             ((unsigned long)(ring->max_items) * (ring->item_len)));
8008         if (ret < 0) {
8009             DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
8010             ret = BCME_ERROR;
8011         }
8012     } else if (user_buf) {
8013         ret = dhd_export_debug_data(
8014             (char *)(ring->dma_buf.va), NULL, user_buf,
8015             ((unsigned long)(ring->max_items) * (ring->item_len)),
8016             (int *)file_posn);
8017     }
8018     return ret;
8019 }
8020 #endif /* DHD_DUMP_PCIE_RINGS */
8021 
8022 #ifdef EWP_EDL
8023 /* Write to file */
8024 static int dhd_edl_ring_hdr_write(dhd_pub_t *dhd, msgbuf_ring_t *ring,
8025                                   void *file, const void *user_buf,
8026                                   unsigned long *file_posn)
8027 {
8028     int ret = 0, nitems = 0;
8029     char *buf = NULL, *ptr = NULL;
8030     uint8 *msg_addr = NULL;
8031     uint16 rd = 0;
8032 
8033     if (ring == NULL) {
8034         DHD_ERROR(("%s: Ring not initialised, failed to dump ring contents\n",
8035                    __FUNCTION__));
8036         ret = BCME_ERROR;
8037         goto done;
8038     }
8039 
8040     buf = MALLOCZ(dhd->osh, (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_HDR_SIZE));
8041     if (buf == NULL) {
8042         DHD_ERROR(("%s: buffer allocation failed\n", __FUNCTION__));
8043         ret = BCME_ERROR;
8044         goto done;
8045     }
8046     ptr = buf;
8047 
8048     for (; nitems < D2HRING_EDL_MAX_ITEM; nitems++, rd++) {
8049         msg_addr = (uint8 *)ring->dma_buf.va + (rd * ring->item_len);
8050         memcpy(ptr, (char *)msg_addr, D2HRING_EDL_HDR_SIZE);
8051         ptr += D2HRING_EDL_HDR_SIZE;
8052     }
8053     if (file) {
8054         ret = dhd_os_write_file_posn(
8055             file, file_posn, buf,
8056             (D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM));
8057         if (ret < 0) {
8058             DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
8059             goto done;
8060         }
8061     } else {
8062         ret = dhd_export_debug_data(
8063             buf, NULL, user_buf, (D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM),
8064             file_posn);
8065     }
8066 
8067 done:
8068     if (buf) {
8069         MFREE(dhd->osh, buf, (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_HDR_SIZE));
8070     }
8071     return ret;
8072 }
8073 #endif /* EWP_EDL */
8074 
8075 /** Add prot dump output to a buffer */
8076 void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
8077 {
8078     if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) {
8079         bcm_bprintf(b, "\nd2h_sync: SEQNUM:");
8080     } else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) {
8081         bcm_bprintf(b, "\nd2h_sync: XORCSUM:");
8082     } else {
8083         bcm_bprintf(b, "\nd2h_sync: NONE:");
8084     }
8085     bcm_bprintf(b, " d2h_sync_wait max<%lu> tot<%lu>\n",
8086                 dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot);
8087 
8088     bcm_bprintf(b, "\nDongle DMA Indices: h2d %d  d2h %d index size %d bytes\n",
8089                 dhd->dma_h2d_ring_upd_support, dhd->dma_d2h_ring_upd_support,
8090                 dhd->prot->rw_index_sz);
8091     bcm_bprintf(b, "h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
8092                 h2d_max_txpost, dhd->prot->h2d_max_txpost);
8093     bcm_bprintf(b, "pktid_txq_start_cnt: %d\n", dhd->prot->pktid_txq_start_cnt);
8094     bcm_bprintf(b, "pktid_txq_stop_cnt: %d\n", dhd->prot->pktid_txq_stop_cnt);
8095     bcm_bprintf(b, "pktid_depleted_cnt: %d\n", dhd->prot->pktid_depleted_cnt);
8096 }
8097 
8098 /* Update local copy of dongle statistics */
8099 void dhd_prot_dstats(dhd_pub_t *dhd)
8100 {
8101     return;
8102 }
8103 
8104 /** Called by upper DHD layer */
8105 int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf,
8106                                  uint reorder_info_len, void **pkt,
8107                                  uint32 *free_buf_count)
8108 {
8109     return 0;
8110 }
8111 
8112 /** Debug related, post a dummy message to interrupt dongle. Used to process
8113  * cons commands. */
8114 int dhd_post_dummy_msg(dhd_pub_t *dhd)
8115 {
8116     unsigned long flags;
8117     hostevent_hdr_t *hevent = NULL;
8118     uint16 alloced = 0;
8119 
8120     dhd_prot_t *prot = dhd->prot;
8121     msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
8122 
8123     DHD_RING_LOCK(ring->ring_lock, flags);
8124 
8125     hevent = (hostevent_hdr_t *)dhd_prot_alloc_ring_space(dhd, ring, 1,
8126                                                           &alloced, FALSE);
8127     if (hevent == NULL) {
8128         DHD_RING_UNLOCK(ring->ring_lock, flags);
8129         return -1;
8130     }
8131 
8132     /* CMN msg header */
8133     hevent->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
8134     ring->seqnum++;
8135     hevent->msg.msg_type = MSG_TYPE_HOST_EVNT;
8136     hevent->msg.if_id = 0;
8137     hevent->msg.flags = ring->current_phase;
8138 
8139     /* Event payload */
8140     hevent->evnt_pyld = htol32(HOST_EVENT_CONS_CMD);
8141 
8142     /* Since we are filling the data directly into the bufptr obtained
8143      * from the msgbuf, we can call write_complete directly.
8144      */
8145     dhd_prot_ring_write_complete(dhd, ring, hevent, 1);
8146 
8147     DHD_RING_UNLOCK(ring->ring_lock, flags);
8148 
8149     return 0;
8150 }
8151 
8152 /**
8153  * If exactly_nitems is true, this function will allocate space for nitems or
8154  * fail. If exactly_nitems is false, this function will allocate space for
8155  * nitems or less.
8156  */
8157 static void *BCMFASTPATH dhd_prot_alloc_ring_space(dhd_pub_t *dhd,
8158                                                    msgbuf_ring_t *ring,
8159                                                    uint16 nitems,
8160                                                    uint16 *alloced,
8161                                                    bool exactly_nitems)
8162 {
8163     void *ret_buf;
8164 
8165     /* Alloc space for nitems in the ring */
8166     ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
8167 
8168     if (ret_buf == NULL) {
8169         /* With HWA, the RD pointer must be fetched from a different array,
8170          * which HWA writes directly into host memory.
8171          */
8172         /* If the alloc failed, invalidate the cached read pointer. */
8173         if (dhd->dma_d2h_ring_upd_support) {
8174             ring->rd =
8175                 dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
8176         } else {
8177             dhd_bus_cmn_readshared(dhd->bus, &(ring->rd), RING_RD_UPD,
8178                                    ring->idx);
8179         }
8180 
8181         /* Try allocating once more */
8182         ret_buf =
8183             dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
8184 
8185         if (ret_buf == NULL) {
8186             DHD_INFO(("%s: Ring space not available  \n", ring->name));
8187             return NULL;
8188         }
8189     }
8190 
8191     if (ret_buf == HOST_RING_BASE(ring)) {
8192         DHD_INFO(("%s: setting the phase now\n", ring->name));
8193         ring->current_phase =
8194             ring->current_phase ? 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
8195     }
8196 
8197     /* Return alloced space */
8198     return ret_buf;
8199 }
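/* Phase-bit note: current_phase is toggled whenever allocation lands back at
 * HOST_RING_BASE (i.e. on each ring wrap), letting the dongle tell freshly
 * written items from stale ring contents; the err_rollback_idx path in
 * dhd_prot_txdata() undoes the toggle symmetrically when a write is rolled
 * back across the wrap point.
 */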
8200 
8201 /**
8202  * Non-inline ioctl request.
8203  * First forms an ioctl request, per the ioctptr_reqst_hdr_t header, in the
8204  * circular buffer. A separate request buffer is then formed, with a 4-byte
8205  * common header added at the front; the buf contents from the parent function
8206  * are copied into the remaining section of that buffer.
8207  */
8208 static int dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd,
8209                                  void *buf, int ifidx)
8210 {
8211     dhd_prot_t *prot = dhd->prot;
8212     ioctl_req_msg_t *ioct_rqst;
8213     void *ioct_buf; /* For ioctl payload */
8214     uint16 rqstlen, resplen;
8215     unsigned long flags;
8216     uint16 alloced = 0;
8217     msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
8218 
8219     if (dhd_query_bus_erros(dhd)) {
8220         return -EIO;
8221     }
8222 
8223     rqstlen = len;
8224     resplen = len;
8225 
8226     /* Limit the ioctl request to MSGBUF_MAX_MSG_SIZE bytes including hdrs */
8227     /* An 8K allocation of the dongle buffer fails. */
8228     /* dhd doesn't give separate input & output buffer lengths, */
8229     /* so assume the input length can never be more than 2K. */
8230     rqstlen = MIN(rqstlen, MSGBUF_IOCTL_MAX_RQSTLEN);
8231 
8232     DHD_RING_LOCK(ring->ring_lock, flags);
8233 
8234     if (prot->ioctl_state) {
8235         DHD_ERROR(
8236             ("%s: pending ioctl %02x\n", __FUNCTION__, prot->ioctl_state));
8237         DHD_RING_UNLOCK(ring->ring_lock, flags);
8238         return BCME_BUSY;
8239     } else {
8240         prot->ioctl_state =
8241             MSGBUF_IOCTL_ACK_PENDING | MSGBUF_IOCTL_RESP_PENDING;
8242     }
8243 
8244     /* Request for cbuf space */
8245     ioct_rqst = (ioctl_req_msg_t *)dhd_prot_alloc_ring_space(dhd, ring, 1,
8246                                                              &alloced, FALSE);
8247     if (ioct_rqst == NULL) {
8248         DHD_ERROR(
8249             ("couldn't allocate space on msgring to send ioctl request\n"));
8250         prot->ioctl_state = 0;
8251         prot->curr_ioctl_cmd = 0;
8252         prot->ioctl_received = IOCTL_WAIT;
8253         DHD_RING_UNLOCK(ring->ring_lock, flags);
8254         return -1;
8255     }
8256 
8257     /* Common msg buf hdr */
8258     ioct_rqst->cmn_hdr.msg_type = MSG_TYPE_IOCTLPTR_REQ;
8259     ioct_rqst->cmn_hdr.if_id = (uint8)ifidx;
8260     ioct_rqst->cmn_hdr.flags = ring->current_phase;
8261     ioct_rqst->cmn_hdr.request_id = htol32(DHD_IOCTL_REQ_PKTID);
8262     ioct_rqst->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
8263     ring->seqnum++;
8264 
8265     ioct_rqst->cmd = htol32(cmd);
8266     prot->curr_ioctl_cmd = cmd;
8267     ioct_rqst->output_buf_len = htol16(resplen);
8268     prot->ioctl_trans_id++;
8269     ioct_rqst->trans_id = prot->ioctl_trans_id;
8270 
8271     /* populate ioctl buffer info */
8272     ioct_rqst->input_buf_len = htol16(rqstlen);
8273     ioct_rqst->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->ioctbuf.pa));
8274     ioct_rqst->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->ioctbuf.pa));
8275     /* copy ioct payload */
8276     ioct_buf = (void *)prot->ioctbuf.va;
8277 
8278     prot->ioctl_fillup_time = OSL_LOCALTIME_NS();
8279 
8280     if (buf) {
8281         memcpy(ioct_buf, buf, len);
8282     }
8283 
8284     OSL_CACHE_FLUSH((void *)prot->ioctbuf.va, len);
8285 
8286     if (!ISALIGNED(ioct_buf, DMA_ALIGN_LEN)) {
8287         DHD_ERROR(("host ioct address unaligned !!!!! \n"));
8288     }
8289 
8290     DHD_CTL(("submitted IOCTL request request_id %d, cmd %d, output_buf_len "
8291              "%d, tx_id %d\n",
8292              ioct_rqst->cmn_hdr.request_id, cmd, ioct_rqst->output_buf_len,
8293              ioct_rqst->trans_id));
8294 
8295     /* update ring's WR index and ring doorbell to dongle */
8296     dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
8297 
8298     DHD_RING_UNLOCK(ring->ring_lock, flags);
8299 
8300     return 0;
8301 } /* dhd_fillup_ioct_reqst */
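
/*
 * Illustrative sketch (not driver code): the clamping rule applied above.
 * The helper name and parameters are hypothetical; only the input length
 * is capped to MSGBUF_IOCTL_MAX_RQSTLEN, while the advertised response
 * length keeps the caller's value.
 */
static unsigned short example_clamp_ioctl_input_len(unsigned short len,
                                                    unsigned short max_rqstlen)
{
    /* Same effect as rqstlen = MIN(rqstlen, MSGBUF_IOCTL_MAX_RQSTLEN). */
    return (len < max_rqstlen) ? len : max_rqstlen;
}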
8302 
8303 /**
8304  * dhd_prot_ring_attach - Initialize the msgbuf_ring object and attach a
8305  * DMA-able buffer to it. The ring is NOT tagged as inited until all the ring
8306  * information is posted to the dongle.
8307  *
8308  * Invoked in dhd_prot_attach for the common rings, and in dhd_prot_init for
8309  * each flowring in pool of flowrings.
8310  *
8311  * returns BCME_OK=0 on success
8312  * returns non-zero negative error value on failure.
8313  */
8314 static int dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring,
8315                                 const char *name, uint16 max_items,
8316                                 uint16 item_len, uint16 ringid)
8317 {
8318     int dma_buf_alloced = BCME_NOMEM;
8319     uint32 dma_buf_len = max_items * item_len;
8320     dhd_prot_t *prot = dhd->prot;
8321     uint16 max_flowrings = dhd->bus->max_tx_flowrings;
8322     dhd_dma_buf_t *dma_buf = NULL;
8323 
8324     ASSERT(ring);
8325     ASSERT(name);
8326     ASSERT((max_items < 0xFFFF) && (item_len < 0xFFFF) && (ringid < 0xFFFF));
8327 
8328     /* Init name */
8329     strncpy(ring->name, name, RING_NAME_MAX_LENGTH);
8330     ring->name[RING_NAME_MAX_LENGTH - 1] = '\0';
8331 
8332     ring->idx = ringid;
8333 
8334     ring->max_items = max_items;
8335     ring->item_len = item_len;
8336 
8337     /* A contiguous space may be reserved for all flowrings */
8338     if (DHD_IS_FLOWRING(ringid, max_flowrings) &&
8339         (prot->flowrings_dma_buf.va)) {
8340         /* Carve out from the contiguous DMA-able flowring buffer */
8341         uint16 flowid;
8342         uint32 base_offset;
8343 
8344         dhd_dma_buf_t *rsv_buf = &prot->flowrings_dma_buf;
8345         dma_buf = &ring->dma_buf;
8346 
8347         flowid = DHD_RINGID_TO_FLOWID(ringid);
8348         base_offset = (flowid - BCMPCIE_H2D_COMMON_MSGRINGS) * dma_buf_len;
8349 
8350         ASSERT(base_offset + dma_buf_len <= rsv_buf->len);
8351 
8352         dma_buf->len = dma_buf_len;
8353         dma_buf->va = (void *)((uintptr)rsv_buf->va + base_offset);
8354         PHYSADDRHISET(dma_buf->pa, PHYSADDRHI(rsv_buf->pa));
8355         PHYSADDRLOSET(dma_buf->pa, PHYSADDRLO(rsv_buf->pa) + base_offset);
8356 
8357         /* On 64-bit, the contiguous space must not straddle the 4GB boundary (0x00000000FFFFFFFF) */
8358         ASSERT(PHYSADDRLO(dma_buf->pa) >= PHYSADDRLO(rsv_buf->pa));
8359 
8360         dma_buf->dmah = rsv_buf->dmah;
8361         dma_buf->secdma = rsv_buf->secdma;
8362 
8363         (void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
8364     } else {
8365 #ifdef EWP_EDL
8366         if (ring == dhd->prot->d2hring_edl) {
8367             /* For the EDL ring, memory is allocated during attach,
8368              * so we only need to copy the dma_buf into the ring's dma_buf
8369              */
8370             memcpy(&ring->dma_buf, &dhd->edl_ring_mem, sizeof(ring->dma_buf));
8371             dma_buf = &ring->dma_buf;
8372             if (dma_buf->va == NULL) {
8373                 return BCME_NOMEM;
8374             }
8375         } else
8376 #endif /* EWP_EDL */
8377         {
8378             /* Allocate a dhd_dma_buf */
8379             dma_buf_alloced =
8380                 dhd_dma_buf_alloc(dhd, &ring->dma_buf, dma_buf_len);
8381             if (dma_buf_alloced != BCME_OK) {
8382                 return BCME_NOMEM;
8383             }
8384         }
8385     }
8386 
8387     /* CAUTION: Save ring::base_addr in little endian format! */
8388     dhd_base_addr_htolpa(&ring->base_addr, ring->dma_buf.pa);
8389 
8390 #ifdef BCM_SECURE_DMA
8391     if (SECURE_DMA_ENAB(prot->osh)) {
8392         ring->dma_buf.secdma = MALLOCZ(prot->osh, sizeof(sec_cma_info_t));
8393         if (ring->dma_buf.secdma == NULL) {
8394             goto free_dma_buf;
8395         }
8396     }
8397 #endif /* BCM_SECURE_DMA */
8398 
8399     ring->ring_lock = dhd_os_spin_lock_init(dhd->osh);
8400 
8401     DHD_INFO(("RING_ATTACH : %s Max item %d len item %d total size %d "
8402               "ring start %p buf phys addr  %x:%x \n",
8403               ring->name, ring->max_items, ring->item_len, dma_buf_len,
8404               ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
8405               ltoh32(ring->base_addr.low_addr)));
8406 
8407     return BCME_OK;
8408 
8409 #ifdef BCM_SECURE_DMA
8410 free_dma_buf:
8411     if (dma_buf_alloced == BCME_OK) {
8412         dhd_dma_buf_free(dhd, &ring->dma_buf);
8413     }
8414 #endif /* BCM_SECURE_DMA */
8415 
8416     return BCME_NOMEM;
8417 } /* dhd_prot_ring_attach */
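
/*
 * Minimal sketch of the carve-out arithmetic used above, assuming all
 * flowrings share one footprint (max_items * item_len) and flowring ids
 * start right after the H2D common rings. Helper name is hypothetical.
 */
static unsigned int example_flowring_base_offset(unsigned short flowid,
                                                 unsigned short num_common_rings,
                                                 unsigned int ring_bytes)
{
    /* Each flowring occupies one fixed-size slice of the reserved region. */
    return (unsigned int)(flowid - num_common_rings) * ring_bytes;
}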
8418 
8419 /**
8420  * dhd_prot_ring_init - Post the common ring information to dongle.
8421  *
8422  * Used only for common rings.
8423  *
8424  * The flowrings information is passed via the create flowring control message
8425  * (tx_flowring_create_request_t) sent over the H2D control submission common
8426  * ring.
8427  */
8428 static void dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring)
8429 {
8430     ring->wr = 0;
8431     ring->rd = 0;
8432     ring->curr_rd = 0;
8433     /* Reset hwa_db_type for all rings,
8434      * for data path rings, it will be assigned separately post init
8435      * from dhd_prot_d2h_sync_init and dhd_prot_h2d_sync_init
8436      */
8437     ring->hwa_db_type = 0;
8438 
8439     /* CAUTION: ring::base_addr already in Little Endian */
8440     dhd_bus_cmn_writeshared(dhd->bus, &ring->base_addr, sizeof(sh_addr_t),
8441                             RING_BUF_ADDR, ring->idx);
8442     dhd_bus_cmn_writeshared(dhd->bus, &ring->max_items, sizeof(uint16),
8443                             RING_MAX_ITEMS, ring->idx);
8444     dhd_bus_cmn_writeshared(dhd->bus, &ring->item_len, sizeof(uint16),
8445                             RING_ITEM_LEN, ring->idx);
8446 
8447     dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr), sizeof(uint16), RING_WR_UPD,
8448                             ring->idx);
8449     dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd), sizeof(uint16), RING_RD_UPD,
8450                             ring->idx);
8451 
8452     /* ring inited */
8453     ring->inited = TRUE;
8454 } /* dhd_prot_ring_init */
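
/*
 * Sketch of the per-ring state published to the dongle by the writes
 * above (illustrative grouping only, not the real shared-area layout):
 */
struct example_ring_init_info {
    unsigned int base_lo, base_hi; /* RING_BUF_ADDR, already little-endian */
    unsigned short max_items;      /* RING_MAX_ITEMS */
    unsigned short item_len;       /* RING_ITEM_LEN */
    unsigned short wr, rd;         /* RING_WR_UPD / RING_RD_UPD, both zero */
};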
8455 /**
8456  * dhd_prot_ring_reset - bzero a ring's DMA-able buffer and flush the cache.
8457  * Resets the WR and RD indices to 0.
8458  */
8459 static void dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring)
8460 {
8461     DHD_TRACE(("%s\n", __FUNCTION__));
8462 
8463     dhd_dma_buf_reset(dhd, &ring->dma_buf);
8464 
8465     ring->rd = ring->wr = 0;
8466     ring->curr_rd = 0;
8467     ring->inited = FALSE;
8468     ring->create_pending = FALSE;
8469 }
8470 
8471 /**
8472  * dhd_prot_ring_detach - Detach the DMA-able buffer and any other objects
8473  * hanging off the msgbuf_ring.
8474  */
8475 static void dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring)
8476 {
8477     dhd_prot_t *prot = dhd->prot;
8478     uint16 max_flowrings = dhd->bus->max_tx_flowrings;
8479     ASSERT(ring);
8480 
8481     ring->inited = FALSE;
8482     /* rd = ~0, wr = ring->rd - 1, max_items = 0, len_item = ~0 */
8483 
8484 #ifdef BCM_SECURE_DMA
8485     if (SECURE_DMA_ENAB(prot->osh)) {
8486         if (ring->dma_buf.secdma) {
8487             SECURE_DMA_UNMAP_ALL(prot->osh, ring->dma_buf.secdma);
8488             MFREE(prot->osh, ring->dma_buf.secdma, sizeof(sec_cma_info_t));
8489             ring->dma_buf.secdma = NULL;
8490         }
8491     }
8492 #endif /* BCM_SECURE_DMA */
8493 
8494     /* If the DMA-able buffer was carved out of a pre-reserved contiguous
8495      * memory, then simply stop using it.
8496      */
8497     if (DHD_IS_FLOWRING(ring->idx, max_flowrings) &&
8498         (prot->flowrings_dma_buf.va)) {
8499         (void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
8500         memset(&ring->dma_buf, 0, sizeof(dhd_dma_buf_t));
8501     } else {
8502         dhd_dma_buf_free(dhd, &ring->dma_buf);
8503     }
8504 
8505     dhd_os_spin_lock_deinit(dhd->osh, ring->ring_lock);
8506 } /* dhd_prot_ring_detach */
8507 
8508 /* Fetch number of H2D flowrings given the total number of h2d rings */
8509 uint16 dhd_get_max_flow_rings(dhd_pub_t *dhd)
8510 {
8511     if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
8512         return dhd->bus->max_tx_flowrings;
8513     } else {
8514         return (dhd->bus->max_tx_flowrings - BCMPCIE_H2D_COMMON_MSGRINGS);
8515     }
8516 }
8517 
8518 /**
8519  * dhd_prot_flowrings_pool_attach - Initialize a pool of flowring msgbuf_ring_t.
8520  *
8521  * Allocate a pool of msgbuf_ring along with DMA-able buffers for flowrings.
8522  * Dongle includes common rings when it advertises the number of H2D rings.
8523  * Allocates a pool of msgbuf_ring_t and invokes dhd_prot_ring_attach to
8524  * allocate the DMA-able buffer and initialize each msgbuf_ring_t object.
8525  *
8526  * dhd_prot_ring_attach is invoked to perform the actual initialization and
8527  * attaching the DMA-able buffer.
8528  *
8529  * Later dhd_prot_flowrings_pool_fetch() may be used to fetch a preallocated and
8530  * initialized msgbuf_ring_t object.
8531  *
8532  * returns BCME_OK=0 on success
8533  * returns non-zero negative error value on failure.
8534  */
8535 static int dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd)
8536 {
8537     uint16 flowid;
8538     msgbuf_ring_t *ring;
8539     uint16 h2d_flowrings_total; /* exclude H2D common rings */
8540     dhd_prot_t *prot = dhd->prot;
8541     char ring_name[RING_NAME_MAX_LENGTH];
8542 
8543     if (prot->h2d_flowrings_pool != NULL) {
8544         return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
8545     }
8546 
8547     ASSERT(prot->h2d_rings_total == 0);
8548 
8549     /* h2d_rings_total includes H2D common rings: ctrl and rxbuf subn */
8550     prot->h2d_rings_total = (uint16)dhd_bus_max_h2d_queues(dhd->bus);
8551 
8552     if (prot->h2d_rings_total < BCMPCIE_H2D_COMMON_MSGRINGS) {
8553         DHD_ERROR(("%s: h2d_rings_total advertized as %u\n", __FUNCTION__,
8554                    prot->h2d_rings_total));
8555         return BCME_ERROR;
8556     }
8557 
8558     /* Subtract number of H2D common rings, to determine number of flowrings */
8559     h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
8560 
8561     DHD_ERROR(("Attach flowrings pool for %d rings\n", h2d_flowrings_total));
8562 
8563     /* Allocate pool of msgbuf_ring_t objects for all flowrings */
8564     prot->h2d_flowrings_pool = (msgbuf_ring_t *)MALLOCZ(
8565         prot->osh, (h2d_flowrings_total * sizeof(msgbuf_ring_t)));
8566 
8567     if (prot->h2d_flowrings_pool == NULL) {
8568         DHD_ERROR(("%s: flowrings pool for %d flowrings, alloc failure\n",
8569                    __FUNCTION__, h2d_flowrings_total));
8570         goto fail;
8571     }
8572 
8573     /* Setup & Attach a DMA-able buffer to each flowring in the flowring pool */
8574     FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
8575         snprintf(ring_name, sizeof(ring_name), "h2dflr_%03u", flowid);
8576         if (dhd_prot_ring_attach(dhd, ring, ring_name, prot->h2d_max_txpost,
8577                                  H2DRING_TXPOST_ITEMSIZE,
8578                                  DHD_FLOWID_TO_RINGID(flowid)) != BCME_OK) {
8579             goto attach_fail;
8580         }
8581         /*
8582          * TODO - HWA is currently disabled for flowrings; it can be enabled
8583          * as below: (dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_TXPOSTS) ?
8584          * HWA_DB_TYPE_TXPOSTS : 0;
8585          */
8586         ring->hwa_db_type = 0;
8587     }
8588 
8589     return BCME_OK;
8590 
8591 attach_fail:
8592     dhd_prot_flowrings_pool_detach(dhd); /* Free entire pool of flowrings */
8593 
8594 fail:
8595     prot->h2d_rings_total = 0;
8596     return BCME_NOMEM;
8597 } /* dhd_prot_flowrings_pool_attach */
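
/*
 * Worked sizing example with hypothetical numbers: if the dongle
 * advertises h2d_rings_total = 42 and the H2D common-ring count is 2
 * (ctrl + rxpost submission), a pre-v6 firmware yields 40 flowrings, so
 * the pool above is a single zeroed allocation of
 * 40 * sizeof(msgbuf_ring_t).
 */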
8598 
8599 /**
8600  * dhd_prot_flowrings_pool_reset - Reset all msgbuf_ring_t objects in the pool.
8601  * Invokes dhd_prot_ring_reset to perform the actual reset.
8602  *
8603  * The DMA-able buffer is not freed during reset and neither is the flowring
8604  * pool freed.
8605  *
8606  * dhd_prot_flowrings_pool_reset will be invoked in dhd_prot_reset. Following
8607  * the dhd_prot_reset, dhd_prot_init will be re-invoked, and the flowring pool
8608  * from a previous flowring pool instantiation will be reused.
8609  *
8610  * This will avoid a fragmented DMA-able memory condition, if multiple
8611  * dhd_prot_reset were invoked to reboot the dongle without a full detach/attach
8612  * cycle.
8613  */
8614 static void dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd)
8615 {
8616     uint16 flowid, h2d_flowrings_total;
8617     msgbuf_ring_t *ring;
8618     dhd_prot_t *prot = dhd->prot;
8619 
8620     if (prot->h2d_flowrings_pool == NULL) {
8621         ASSERT(prot->h2d_rings_total == 0);
8622         return;
8623     }
8624     h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
8625     /* Reset each flowring in the flowring pool */
8626     FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
8627         dhd_prot_ring_reset(dhd, ring);
8628         ring->inited = FALSE;
8629     }
8630 
8631     /* Flowring pool state must now be as if dhd_prot_flowrings_pool_attach had just run */
8632 }
8633 
8634 /**
8635  * dhd_prot_flowrings_pool_detach - Free pool of msgbuf_ring along with
8636  * DMA-able buffers for flowrings.
8637  * dhd_prot_ring_detach is invoked to free the DMA-able buffer and perform any
8638  * de-initialization of each msgbuf_ring_t.
8639  */
8640 static void dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd)
8641 {
8642     int flowid;
8643     msgbuf_ring_t *ring;
8644     uint16 h2d_flowrings_total; /* exclude H2D common rings */
8645     dhd_prot_t *prot = dhd->prot;
8646 
8647     if (prot->h2d_flowrings_pool == NULL) {
8648         ASSERT(prot->h2d_rings_total == 0);
8649         return;
8650     }
8651 
8652     h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
8653     /* Detach the DMA-able buffer for each flowring in the flowring pool */
8654     FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
8655         dhd_prot_ring_detach(dhd, ring);
8656     }
8657 
8658     MFREE(prot->osh, prot->h2d_flowrings_pool,
8659           (h2d_flowrings_total * sizeof(msgbuf_ring_t)));
8660 
8661     prot->h2d_flowrings_pool = (msgbuf_ring_t *)NULL;
8662     prot->h2d_rings_total = 0;
8663 } /* dhd_prot_flowrings_pool_detach */
8664 
8665 /**
8666  * dhd_prot_flowrings_pool_fetch - Fetch a preallocated and initialized
8667  * msgbuf_ring from the flowring pool, and assign it.
8668  *
8669  * Unlike common rings, which use dhd_prot_ring_init() to pass the common
8670  * ring information to the dongle, a flowring's information is passed via a
8671  * flowring create control message.
8672  *
8673  * Only the ring state (WR, RD) indices are initialized.
8674  */
8675 static msgbuf_ring_t *dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd,
8676                                                     uint16 flowid)
8677 {
8678     msgbuf_ring_t *ring;
8679     dhd_prot_t *prot = dhd->prot;
8680 
8681     ASSERT(flowid >= DHD_FLOWRING_START_FLOWID);
8682     ASSERT(flowid < prot->h2d_rings_total);
8683     ASSERT(prot->h2d_flowrings_pool != NULL);
8684 
8685     ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
8686 
8687     /* ASSERT flow_ring->inited == FALSE */
8688 
8689     ring->wr = 0;
8690     ring->rd = 0;
8691     ring->curr_rd = 0;
8692     ring->inited = TRUE;
8693     /**
8694      * Every time a flowring starts dynamically, initialize current_phase with 0
8695      * then flip to BCMPCIE_CMNHDR_PHASE_BIT_INIT
8696      */
8697     ring->current_phase = 0;
8698     return ring;
8699 }
8700 
8701 /**
8702  * dhd_prot_flowrings_pool_release - release a previously fetched flowring's
8703  * msgbuf_ring back to the flow_ring pool.
8704  */
8705 void dhd_prot_flowrings_pool_release(dhd_pub_t *dhd, uint16 flowid,
8706                                      void *flow_ring)
8707 {
8708     msgbuf_ring_t *ring;
8709     dhd_prot_t *prot = dhd->prot;
8710 
8711     ASSERT(flowid >= DHD_FLOWRING_START_FLOWID);
8712     ASSERT(flowid < prot->h2d_rings_total);
8713     ASSERT(prot->h2d_flowrings_pool != NULL);
8714 
8715     ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
8716 
8717     ASSERT(ring == (msgbuf_ring_t *)flow_ring);
8718     /* ASSERT flow_ring->inited == TRUE */
8719 
8720     (void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
8721 
8722     ring->wr = 0;
8723     ring->rd = 0;
8724     ring->inited = FALSE;
8725 
8726     ring->curr_rd = 0;
8727 }
8728 
8729 /*
8730  * Assumes only one index is updated at a time.
8731  * If exactly_nitems is TRUE, this function allocates space for nitems or
8732  * fails, except when a wrap-around is encountered: to prevent a hang-up,
8733  * the last nitems of the ring buffer may then be returned.
8734  * If exactly_nitems is FALSE, it allocates space for nitems or less.
8735  */
8736 static void *BCMFASTPATH dhd_prot_get_ring_space(msgbuf_ring_t *ring,
8737                                                  uint16 nitems, uint16 *alloced,
8738                                                  bool exactly_nitems)
8739 {
8740     void *ret_ptr = NULL;
8741     uint16 ring_avail_cnt;
8742 
8743     ASSERT(nitems <= ring->max_items);
8744 
8745     ring_avail_cnt = CHECK_WRITE_SPACE(ring->rd, ring->wr, ring->max_items);
8746 
8747     if ((ring_avail_cnt == 0) || (exactly_nitems && (ring_avail_cnt < nitems) &&
8748                                   ((ring->max_items - ring->wr) >= nitems))) {
8749         DHD_INFO(("Space not available: ring %s items %d write %d read %d\n",
8750                   ring->name, nitems, ring->wr, ring->rd));
8751         return NULL;
8752     }
8753     *alloced = MIN(nitems, ring_avail_cnt);
8754 
8755     /* Return next available space */
8756     ret_ptr = (char *)DHD_RING_BGN_VA(ring) + (ring->wr * ring->item_len);
8757 
8758     /* Update write index */
8759     if ((ring->wr + *alloced) == ring->max_items) {
8760         ring->wr = 0;
8761     } else if ((ring->wr + *alloced) < ring->max_items) {
8762         ring->wr += *alloced;
8763     } else {
8764         /* Should never hit this */
8765         ASSERT(0);
8766         return NULL;
8767     }
8768 
8769     return ret_ptr;
8770 } /* dhd_prot_get_ring_space */
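
/*
 * Standalone sketch of contiguous write-space accounting in a ring like
 * the one above (classic one-slot-reserved semantics, assumed here to
 * mirror CHECK_WRITE_SPACE; helper name is hypothetical). Space is
 * counted only up to the wrap point, which is why a full wrap resets
 * ring->wr to 0 in the caller.
 */
static unsigned short example_write_space(unsigned short rd, unsigned short wr,
                                          unsigned short depth)
{
    if (rd > wr) {
        /* Writer has wrapped: free slots sit between wr and rd. */
        return (unsigned short)(rd - wr - 1);
    }
    /* Writer ahead of reader: free slots run from wr to the end of the
     * buffer, keeping one slot reserved when rd == 0 to distinguish a
     * full ring from an empty one. */
    return (unsigned short)(depth - wr - ((rd == 0) ? 1 : 0));
}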
8771 
8772 /**
8773  * dhd_prot_ring_write_complete - Host updates the new WR index on producing
8774  * new messages in a H2D ring. The messages are flushed from cache prior to
8775  * posting the new WR index. The new WR index will be updated in the DMA index
8776  * array or directly in the dongle's ring state memory.
8777  * A PCIE doorbell will be generated to wake up the dongle.
8778  * This is a non-atomic function, make sure the callers
8779  * always hold appropriate locks.
8780  */
8781 static void BCMFASTPATH __dhd_prot_ring_write_complete(dhd_pub_t *dhd,
8782                                                        msgbuf_ring_t *ring,
8783                                                        void *p, uint16 nitems)
8784 {
8785     dhd_prot_t *prot = dhd->prot;
8786     uint32 db_index;
8787     uint16 max_flowrings = dhd->bus->max_tx_flowrings;
8788     uint corerev;
8789 
8790     /* cache flush */
8791     OSL_CACHE_FLUSH(p, ring->item_len * nitems);
8792 
8793     /* For HWA, update db_index and ring mb2 DB and return */
8794     if (HWA_ACTIVE(dhd) && ring->hwa_db_type) {
8795         db_index = HWA_DB_INDEX_VALUE(ring->wr) | ring->hwa_db_type;
8796         DHD_TRACE(("%s: ring(%s) wr(%d) hwa_db_type(0x%x) db_index(0x%x)\n",
8797                    __FUNCTION__, ring->name, ring->wr, ring->hwa_db_type,
8798                    db_index));
8799         prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
8800         return;
8801     }
8802 
8803     if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
8804         dhd_prot_dma_indx_set(dhd, ring->wr, H2D_DMA_INDX_WR_UPD, ring->idx);
8805     } else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings)) {
8806         dhd_prot_dma_indx_set(dhd, ring->wr, H2D_IFRM_INDX_WR_UPD, ring->idx);
8807     } else {
8808         dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr), sizeof(uint16),
8809                                 RING_WR_UPD, ring->idx);
8810     }
8811 
8812     /* raise h2d interrupt */
8813     if (IDMA_ACTIVE(dhd) ||
8814         (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings))) {
8815         db_index = IDMA_IDX0;
8816         /* This API is also called in the wl down path, in which case sih
8817          * has already been freed. */
8818         if (dhd->bus->sih) {
8819             corerev = dhd->bus->sih->buscorerev;
8820             /* We need to explicitly configure the type of DMA for core
8821              * rev >= 24 */
8822             if (corerev >= 24) {
8823                 db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
8824             }
8825         }
8826         prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
8827     } else {
8828         prot->mb_ring_fn(dhd->bus, ring->wr);
8829     }
8830 }
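
/*
 * Summary of the WR-index publication paths implemented above:
 *   1. HWA ring:         encode WR into db_index and ring the mailbox-2
 *                        doorbell.
 *   2. IDMA / DMA-index: store WR in the host-resident DMA index array,
 *                        then ring the doorbell so the dongle DMAs it in.
 *   3. IFRM flowring:    same idea, but through the IFRM index array.
 *   4. Fallback:         write WR straight into dongle TCM and ring the
 *                        classic doorbell with the WR value.
 */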
8831 
8832 static void BCMFASTPATH dhd_prot_ring_write_complete(dhd_pub_t *dhd,
8833                                                      msgbuf_ring_t *ring,
8834                                                      void *p, uint16 nitems)
8835 {
8836     unsigned long flags_bus;
8837     DHD_BUS_LOCK(dhd->bus->bus_lock, flags_bus);
8838     __dhd_prot_ring_write_complete(dhd, ring, p, nitems);
8839     DHD_BUS_UNLOCK(dhd->bus->bus_lock, flags_bus);
8840 }
8841 
8842 /**
8843  * dhd_prot_ring_write_complete_mbdata - will be called from
8844  * dhd_prot_h2d_mbdata_send_ctrlmsg, which will hold DHD_BUS_LOCK to update WR
8845  * pointer, Ring DB and also update bus_low_power_state to indicate D3_INFORM
8846  * sent in the same BUS_LOCK.
8847  */
8848 static void BCMFASTPATH dhd_prot_ring_write_complete_mbdata(
8849     dhd_pub_t *dhd, msgbuf_ring_t *ring, void *p, uint16 nitems, uint32 mb_data)
8850 {
8851     unsigned long flags_bus;
8852 
8853     DHD_BUS_LOCK(dhd->bus->bus_lock, flags_bus);
8854 
8855     __dhd_prot_ring_write_complete(dhd, ring, p, nitems);
8856 
8857     /* Mark D3_INFORM in the same context to skip ringing H2D DB after D3_INFORM
8858      */
8859     if (mb_data == H2D_HOST_D3_INFORM) {
8860         dhd->bus->bus_low_power_state = DHD_BUS_D3_INFORM_SENT;
8861     }
8862 
8863     DHD_BUS_UNLOCK(dhd->bus->bus_lock, flags_bus);
8864 }
8865 
8866 /**
8867  * dhd_prot_upd_read_idx - Host updates the new RD index on consuming messages
8868  * from a D2H ring. The new RD index will be updated in the DMA Index array or
8869  * directly in dongle's ring state memory.
8870  */
8871 static void dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t *ring)
8872 {
8873     dhd_prot_t *prot = dhd->prot;
8874     uint32 db_index;
8875     uint corerev;
8876 
8877     /* For HWA, update db_index and ring mb2 DB and return */
8878     if (HWA_ACTIVE(dhd) && ring->hwa_db_type) {
8879         db_index = HWA_DB_INDEX_VALUE(ring->rd) | ring->hwa_db_type;
8880         DHD_TRACE(("%s: ring(%s) rd(0x%x) hwa_db_type(0x%x) db_index(0x%x)\n",
8881                    __FUNCTION__, ring->name, ring->rd, ring->hwa_db_type,
8882                    db_index));
8883         prot->mb_2_ring_fn(dhd->bus, db_index, FALSE);
8884         return;
8885     }
8886 
8887     /* Update the read index:
8888      * if DMA'ing of H2D indices is supported,
8889      * update the RD index in host memory,
8890      * otherwise update it in TCM.
8891      */
8892     if (IDMA_ACTIVE(dhd)) {
8893         dhd_prot_dma_indx_set(dhd, ring->rd, D2H_DMA_INDX_RD_UPD, ring->idx);
8894         db_index = IDMA_IDX1;
8895         if (dhd->bus->sih) {
8896             corerev = dhd->bus->sih->buscorerev;
8897             /* We need to explicitly configure the type of DMA for core
8898              * rev >= 24 */
8899             if (corerev >= 24) {
8900                 db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
8901             }
8902         }
8903         prot->mb_2_ring_fn(dhd->bus, db_index, FALSE);
8904     } else if (dhd->dma_h2d_ring_upd_support) {
8905         dhd_prot_dma_indx_set(dhd, ring->rd, D2H_DMA_INDX_RD_UPD, ring->idx);
8906     } else {
8907         dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd), sizeof(uint16),
8908                                 RING_RD_UPD, ring->idx);
8909     }
8910 }
8911 
8912 static int dhd_send_d2h_ringcreate(dhd_pub_t *dhd,
8913                                    msgbuf_ring_t *ring_to_create,
8914                                    uint16 ring_type, uint32 req_id)
8915 {
8916     unsigned long flags;
8917     d2h_ring_create_req_t *d2h_ring;
8918     uint16 alloced = 0;
8919     int ret = BCME_OK;
8920     uint16 max_h2d_rings = dhd->bus->max_submission_rings;
8921     msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
8922 
8923     DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
8924 
8925     DHD_TRACE(("%s trying to send D2H ring create Req\n", __FUNCTION__));
8926 
8927     if (ring_to_create == NULL) {
8928         DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__));
8929         ret = BCME_ERROR;
8930         goto err;
8931     }
8932 
8933     /* Request for ring buffer space */
8934     d2h_ring = (d2h_ring_create_req_t *)dhd_prot_alloc_ring_space(
8935         dhd, ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced,
8936         FALSE);
8937     if (d2h_ring == NULL) {
8938         DHD_ERROR(
8939             ("%s: FATAL: No space in control ring to send D2H ring create\n",
8940              __FUNCTION__));
8941         ret = BCME_NOMEM;
8942         goto err;
8943     }
8944     ring_to_create->create_req_id = (uint16)req_id;
8945     ring_to_create->create_pending = TRUE;
8946 
8947     /* Common msg buf hdr */
8948     d2h_ring->msg.msg_type = MSG_TYPE_D2H_RING_CREATE;
8949     d2h_ring->msg.if_id = 0;
8950     d2h_ring->msg.flags = ctrl_ring->current_phase;
8951     d2h_ring->msg.request_id = htol32(ring_to_create->create_req_id);
8952     d2h_ring->ring_id =
8953         htol16(DHD_D2H_RING_OFFSET(ring_to_create->idx, max_h2d_rings));
8954     DHD_ERROR(("%s ringid: %d idx: %d max_h2d: %d\n", __FUNCTION__,
8955                d2h_ring->ring_id, ring_to_create->idx, max_h2d_rings));
8956 
8957     d2h_ring->ring_type = ring_type;
8958     d2h_ring->max_items = htol16(ring_to_create->max_items);
8959     d2h_ring->len_item = htol16(ring_to_create->item_len);
8960     d2h_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr;
8961     d2h_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr;
8962 
8963     d2h_ring->flags = 0;
8964     d2h_ring->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
8965     ctrl_ring->seqnum++;
8966 #ifdef EWP_EDL
8967     if (ring_type == BCMPCIE_D2H_RING_TYPE_EDL) {
8968         DHD_ERROR(("%s: sending d2h EDL ring create: "
8969                    "\n max items=%u; len_item=%u; ring_id=%u; low_addr=0x%x; "
8970                    "high_addr=0x%x\n",
8971                    __FUNCTION__, ltoh16(d2h_ring->max_items),
8972                    ltoh16(d2h_ring->len_item), ltoh16(d2h_ring->ring_id),
8973                    d2h_ring->ring_ptr.low_addr, d2h_ring->ring_ptr.high_addr));
8974     }
8975 #endif /* EWP_EDL */
8976 
8977     /* Update the flow_ring's WRITE index */
8978     dhd_prot_ring_write_complete(dhd, ctrl_ring, d2h_ring,
8979                                  DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
8980 
8981     DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
8982 
8983     return ret;
8984 err:
8985     DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
8986 
8987     return ret;
8988 }
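
/*
 * Sketch of the epoch tagging applied to every control work item above
 * (hypothetical helper): the producer stamps the item with
 * seqnum % H2D_EPOCH_MODULO and then advances seqnum, which lets the
 * consumer detect stale ring contents.
 */
static unsigned char example_next_epoch(unsigned int *seqnum,
                                        unsigned int epoch_modulo)
{
    unsigned char epoch = (unsigned char)(*seqnum % epoch_modulo);
    (*seqnum)++; /* one epoch per produced work item */
    return epoch;
}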
8989 
8990 static int dhd_send_h2d_ringcreate(dhd_pub_t *dhd,
8991                                    msgbuf_ring_t *ring_to_create,
8992                                    uint8 ring_type, uint32 id)
8993 {
8994     unsigned long flags;
8995     h2d_ring_create_req_t *h2d_ring;
8996     uint16 alloced = 0;
8997     uint8 i = 0;
8998     int ret = BCME_OK;
8999     msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
9000 
9001     DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
9002 
9003     DHD_TRACE(("%s trying to send H2D ring create Req\n", __FUNCTION__));
9004 
9005     if (ring_to_create == NULL) {
9006         DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__));
9007         ret = BCME_ERROR;
9008         goto err;
9009     }
9010 
9011     /* Request for ring buffer space */
9012     h2d_ring = (h2d_ring_create_req_t *)dhd_prot_alloc_ring_space(
9013         dhd, ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced,
9014         FALSE);
9015     if (h2d_ring == NULL) {
9016         DHD_ERROR(
9017             ("%s: FATAL: No space in control ring to send H2D ring create\n",
9018              __FUNCTION__));
9019         ret = BCME_NOMEM;
9020         goto err;
9021     }
9022     ring_to_create->create_req_id = (uint16)id;
9023     ring_to_create->create_pending = TRUE;
9024 
9025     /* Common msg buf hdr */
9026     h2d_ring->msg.msg_type = MSG_TYPE_H2D_RING_CREATE;
9027     h2d_ring->msg.if_id = 0;
9028     h2d_ring->msg.request_id = htol32(ring_to_create->create_req_id);
9029     h2d_ring->msg.flags = ctrl_ring->current_phase;
9030     h2d_ring->ring_id = htol16(DHD_H2D_RING_OFFSET(ring_to_create->idx));
9031     h2d_ring->ring_type = ring_type;
9032     h2d_ring->max_items = htol16(H2DRING_DYNAMIC_INFO_MAX_ITEM);
9033     h2d_ring->n_completion_ids = ring_to_create->n_completion_ids;
9034     h2d_ring->len_item = htol16(H2DRING_INFO_BUFPOST_ITEMSIZE);
9035     h2d_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr;
9036     h2d_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr;
9037 
9038     for (i = 0; i < ring_to_create->n_completion_ids; i++) {
9039         h2d_ring->completion_ring_ids[i] =
9040             htol16(ring_to_create->compeltion_ring_ids[i]);
9041     }
9042 
9043     h2d_ring->flags = 0;
9044     h2d_ring->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
9045     ctrl_ring->seqnum++;
9046 
9047     /* Update the flow_ring's WRITE index */
9048     dhd_prot_ring_write_complete(dhd, ctrl_ring, h2d_ring,
9049                                  DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
9050 
9051     DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
9052 
9053     return ret;
9054 err:
9055     DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
9056 
9057     return ret;
9058 }
9059 
9060 /**
9061  * dhd_prot_dma_indx_set - set a new WR or RD index in the DMA index array.
9062  * Dongle will DMA the entire array (if DMA_INDX feature is enabled).
9063  * See dhd_prot_dma_indx_init()
9064  */
9065 void dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type,
9066                            uint16 ringid)
9067 {
9068     uint8 *ptr;
9069     uint16 offset;
9070     dhd_prot_t *prot = dhd->prot;
9071     uint16 max_h2d_rings = dhd->bus->max_submission_rings;
9072 
9073     switch (type) {
9074         case H2D_DMA_INDX_WR_UPD:
9075             ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
9076             offset = DHD_H2D_RING_OFFSET(ringid);
9077             break;
9078 
9079         case D2H_DMA_INDX_RD_UPD:
9080             ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
9081             offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
9082             break;
9083 
9084         case H2D_IFRM_INDX_WR_UPD:
9085             ptr = (uint8 *)(prot->h2d_ifrm_indx_wr_buf.va);
9086             offset = DHD_H2D_FRM_FLOW_RING_OFFSET(ringid);
9087             break;
9088 
9089         default:
9090             DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
9091                        __FUNCTION__));
9092             return;
9093     }
9094 
9095     ASSERT(prot->rw_index_sz != 0);
9096     ptr += offset * prot->rw_index_sz;
9097 
9098     *(uint16 *)ptr = htol16(new_index);
9099 
9100     OSL_CACHE_FLUSH((void *)ptr, prot->rw_index_sz);
9101 
9102     DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n",
9103                __FUNCTION__, new_index, type, ringid, ptr, offset));
9104 } /* dhd_prot_dma_indx_set */
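
/*
 * Sketch of the index-array slot addressing used above: every ring owns
 * one rw_index_sz-wide slot at (array base + offset * rw_index_sz).
 * Hypothetical helper; stores little-endian because the dongle reads the
 * raw host memory (upper slot bytes, if any, are assumed pre-zeroed).
 */
static void example_indx_slot_set(unsigned char *array_va, unsigned int offset,
                                  unsigned int rw_index_sz, unsigned short idx)
{
    unsigned char *slot = array_va + (offset * rw_index_sz);
    slot[0] = (unsigned char)(idx & 0xFF); /* low byte first */
    slot[1] = (unsigned char)(idx >> 8);
}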
9105 
9106 /**
9107  * dhd_prot_dma_indx_get - Fetch a WR or RD index from the dongle DMA-ed index
9108  * array.
9109  * Dongle DMAes an entire array to host memory (if the feature is enabled).
9110  * See dhd_prot_dma_indx_init()
9111  */
9112 static uint16 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid)
9113 {
9114     uint8 *ptr;
9115     uint16 data;
9116     uint16 offset;
9117     dhd_prot_t *prot = dhd->prot;
9118     uint16 max_h2d_rings = dhd->bus->max_submission_rings;
9119 
9120     switch (type) {
9121         case H2D_DMA_INDX_WR_UPD:
9122             ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
9123             offset = DHD_H2D_RING_OFFSET(ringid);
9124             break;
9125 
9126         case H2D_DMA_INDX_RD_UPD:
9127             ptr = (uint8 *)(prot->h2d_dma_indx_rd_buf.va);
9128             offset = DHD_H2D_RING_OFFSET(ringid);
9129             break;
9130 
9131         case D2H_DMA_INDX_WR_UPD:
9132             ptr = (uint8 *)(prot->d2h_dma_indx_wr_buf.va);
9133             offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
9134             break;
9135 
9136         case D2H_DMA_INDX_RD_UPD:
9137             ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
9138             offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
9139             break;
9140 
9141         default:
9142             DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
9143                        __FUNCTION__));
9144             return 0;
9145     }
9146 
9147     ASSERT(prot->rw_index_sz != 0);
9148     ptr += offset * prot->rw_index_sz;
9149 
9150     OSL_CACHE_INV((void *)ptr, prot->rw_index_sz);
9151 
9152     data = LTOH16(*((uint16 *)ptr));
9153 
9154     DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n",
9155                __FUNCTION__, data, type, ringid, ptr, offset));
9156 
9157     return (data);
9158 } /* dhd_prot_dma_indx_get */
9159 
9160 /**
9161  * An array of DMA read/write indices, containing information about host rings,
9162  * can be maintained either in host memory or in device memory, depending on
9163  * preprocessor options, and this function is called accordingly during
9164  * driver initialization. It reserves and initializes blocks of DMA'able
9165  * host memory containing an array of DMA read or DMA write indices. The
9166  * physical addresses of these host memory blocks are communicated to the dongle
9167  * later on. By reading this host memory, the dongle learns about the state of
9168  * the host rings.
9169  */
9170 
9171 static INLINE int dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
9172                                           dhd_dma_buf_t *dma_buf, uint32 bufsz)
9173 {
9174     int rc;
9175 
9176     if ((dma_buf->len == bufsz) || (dma_buf->va != NULL)) {
9177         return BCME_OK;
9178     }
9179 
9180     rc = dhd_dma_buf_alloc(dhd, dma_buf, bufsz);
9181 
9182     return rc;
9183 }
9184 
9185 int dhd_prot_dma_indx_init(dhd_pub_t *dhd, uint32 rw_index_sz, uint8 type,
9186                            uint32 length)
9187 {
9188     uint32 bufsz;
9189     dhd_prot_t *prot = dhd->prot;
9190     dhd_dma_buf_t *dma_buf;
9191 
9192     if (prot == NULL) {
9193         DHD_ERROR(("prot is not inited\n"));
9194         return BCME_ERROR;
9195     }
9196 
9197     /* Dongle advertises 2B or 4B RW index size */
9198     ASSERT(rw_index_sz != 0);
9199     prot->rw_index_sz = rw_index_sz;
9200 
9201     bufsz = rw_index_sz * length;
9202 
9203     switch (type) {
9204         case H2D_DMA_INDX_WR_BUF:
9205             dma_buf = &prot->h2d_dma_indx_wr_buf;
9206             if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) {
9207                 goto ret_no_mem;
9208             }
9209             DHD_ERROR(("H2D DMA WR INDX : array size %d = %d * %d\n",
9210                        dma_buf->len, rw_index_sz, length));
9211             break;
9212 
9213         case H2D_DMA_INDX_RD_BUF:
9214             dma_buf = &prot->h2d_dma_indx_rd_buf;
9215             if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) {
9216                 goto ret_no_mem;
9217             }
9218             DHD_ERROR(("H2D DMA RD INDX : array size %d = %d * %d\n",
9219                        dma_buf->len, rw_index_sz, length));
9220             break;
9221 
9222         case D2H_DMA_INDX_WR_BUF:
9223             dma_buf = &prot->d2h_dma_indx_wr_buf;
9224             if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) {
9225                 goto ret_no_mem;
9226             }
9227             DHD_ERROR(("D2H DMA WR INDX : array size %d = %d * %d\n",
9228                        dma_buf->len, rw_index_sz, length));
9229             break;
9230 
9231         case D2H_DMA_INDX_RD_BUF:
9232             dma_buf = &prot->d2h_dma_indx_rd_buf;
9233             if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) {
9234                 goto ret_no_mem;
9235             }
9236             DHD_ERROR(("D2H DMA RD INDX : array size %d = %d * %d\n",
9237                        dma_buf->len, rw_index_sz, length));
9238             break;
9239 
9240         case H2D_IFRM_INDX_WR_BUF:
9241             dma_buf = &prot->h2d_ifrm_indx_wr_buf;
9242             if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) {
9243                 goto ret_no_mem;
9244             }
9245             DHD_ERROR(("H2D IFRM WR INDX : array size %d = %d * %d\n",
9246                        dma_buf->len, rw_index_sz, length));
9247             break;
9248 
9249         default:
9250             DHD_ERROR(("%s: Unexpected option\n", __FUNCTION__));
9251             return BCME_BADOPTION;
9252     }
9253 
9254     return BCME_OK;
9255 
9256 ret_no_mem:
9257     DHD_ERROR(("%s: dhd_prot_dma_indx_alloc type %d buf_sz %d failure\n",
9258                __FUNCTION__, type, bufsz));
9259     return BCME_NOMEM;
9260 } /* dhd_prot_dma_indx_init */
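
/*
 * Worked sizing example with hypothetical numbers: a dongle advertising
 * rw_index_sz = 4 with length = 40 rings makes each array above a single
 * 160-byte DMA-able allocation; one array exists per direction and per
 * index type (H2D/D2H RD and WR, plus the optional IFRM WR array).
 */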
9261 
9262 /**
9263  * Called on checking for 'completion' messages from the dongle. Returns next
9264  * host buffer to read from, or NULL if there are no more messages to read.
9265  */
9266 static uint8 *dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring,
9267                                      uint32 *available_len)
9268 {
9269     uint16 wr;
9270     uint16 rd;
9271     uint16 depth;
9272     uint16 items;
9273     void *read_addr = NULL; /* address of next msg to be read in ring */
9274     uint16 d2h_wr = 0;
9275 
9276     DHD_TRACE(("%s: d2h_dma_indx_rd_buf %p, d2h_dma_indx_wr_buf %p\n",
9277                __FUNCTION__, (uint32 *)(dhd->prot->d2h_dma_indx_rd_buf.va),
9278                (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va)));
9279 
9280     /* Remember the read index in a variable, because ring->rd gets
9281      * updated at the end of this function; otherwise it would not be
9282      * possible to print the exact read index from which the message
9283      * was read.
9284      */
9285     ring->curr_rd = ring->rd;
9286 
9287     /* update write pointer */
9288     if (dhd->dma_d2h_ring_upd_support) {
9289         /* DMAing write/read indices supported */
9290         d2h_wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
9291         ring->wr = d2h_wr;
9292     } else {
9293         dhd_bus_cmn_readshared(dhd->bus, &(ring->wr), RING_WR_UPD, ring->idx);
9294     }
9295 
9296     wr = ring->wr;
9297     rd = ring->rd;
9298     depth = ring->max_items;
9299 
9300     /* check for avail space, in number of ring items */
9301     items = READ_AVAIL_SPACE(wr, rd, depth);
9302     if (items == 0) {
9303         return NULL;
9304     }
9305 
9306     /*
9307      * Note that there are builds where ASSERT translates to just a printk,
9308      * so even if we hit this condition we would never halt, and
9309      * dhd_prot_process_msgtype can then get into a big loop if this
9310      * happens.
9311      */
9312     if (items > ring->max_items) {
9313         DHD_ERROR(("\r\n======================= \r\n"));
9314         DHD_ERROR(
9315             ("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n",
9316              __FUNCTION__, ring, ring->name, ring->max_items, items));
9317         DHD_ERROR(("wr: %d,  rd: %d,  depth: %d  \r\n", wr, rd, depth));
9318         DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n",
9319                    dhd->busstate, dhd->bus->wait_for_d3_ack));
9320         DHD_ERROR(("\r\n======================= \r\n"));
9321 #ifdef DHD_FW_COREDUMP
9322         if (dhd->memdump_enabled) {
9323             /* collect core dump */
9324             dhd->memdump_type = DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR;
9325             dhd_bus_mem_dump(dhd);
9326         }
9327 #endif /* DHD_FW_COREDUMP */
9328 
9329         *available_len = 0;
9330         dhd_schedule_reset(dhd);
9331 
9332         return NULL;
9333     }
9334 
9335     /* if space is available, calculate address to be read */
9336     read_addr = (char *)ring->dma_buf.va + (rd * ring->item_len);
9337 
9338     /* update read pointer */
9339     if ((ring->rd + items) >= ring->max_items) {
9340         ring->rd = 0;
9341     } else {
9342         ring->rd += items;
9343     }
9344 
9345     ASSERT(ring->rd < ring->max_items);
9346 
9347     /* convert items to bytes : available_len must be 32bits */
9348     *available_len = (uint32)(items * ring->item_len);
9349 
9350     OSL_CACHE_INV(read_addr, *available_len);
9351 
9352     /* return read address */
9353     return read_addr;
9354 } /* dhd_prot_get_read_addr */
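
/*
 * Sketch of the read-availability computation (standard ring semantics,
 * assumed here to mirror READ_AVAIL_SPACE): items between rd and wr are
 * counted only up to the wrap point, so the returned span is always
 * contiguous and a second pass may be needed after rd wraps to 0.
 */
static unsigned short example_read_avail(unsigned short wr, unsigned short rd,
                                         unsigned short depth)
{
    return (unsigned short)((wr >= rd) ? (wr - rd) : (depth - rd));
}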
9355 
9356 /**
9357  * dhd_prot_h2d_mbdata_send_ctrlmsg is a non-atomic function,
9358  * make sure the callers always hold appropriate locks.
9359  */
9360 int dhd_prot_h2d_mbdata_send_ctrlmsg(dhd_pub_t *dhd, uint32 mb_data)
9361 {
9362     h2d_mailbox_data_t *h2d_mb_data;
9363     uint16 alloced = 0;
9364     msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
9365     unsigned long flags;
9366     int num_post = 1;
9367     int i;
9368 
9369     DHD_INFO(
9370         ("%s Sending H2D MB data Req data 0x%04x\n", __FUNCTION__, mb_data));
9371     if (!ctrl_ring->inited) {
9372         DHD_ERROR(("%s: Ctrl Submit Ring: not inited\n", __FUNCTION__));
9373         return BCME_ERROR;
9374     }
9375 
9376     for (i = 0; i < num_post; i++) {
9377         DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
9378         /* Request for ring buffer space */
9379         h2d_mb_data = (h2d_mailbox_data_t *)dhd_prot_alloc_ring_space(
9380             dhd, ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced,
9381             FALSE);
9382         if (h2d_mb_data == NULL) {
9383             DHD_ERROR(
9384                 ("%s: FATAL: No space in control ring to send H2D Mb data\n",
9385                  __FUNCTION__));
9386             DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
9387             return BCME_NOMEM;
9388         }
9389         memset(h2d_mb_data, 0, sizeof(h2d_mailbox_data_t));
9390         /* Common msg buf hdr */
9391         h2d_mb_data->msg.msg_type = MSG_TYPE_H2D_MAILBOX_DATA;
9392         h2d_mb_data->msg.flags = ctrl_ring->current_phase;
9393 
9394         h2d_mb_data->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
9395         ctrl_ring->seqnum++;
9396 
9397         /* Update the mailbox data */
9398         h2d_mb_data->mail_box_data = htol32(mb_data);
9402 
9403         DHD_INFO(
9404             ("%s Send H2D MB data Req data 0x%04x\n", __FUNCTION__, mb_data));
9405 
9406         /* upd wrt ptr and raise interrupt */
9407         dhd_prot_ring_write_complete_mbdata(
9408             dhd, ctrl_ring, h2d_mb_data, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
9409             mb_data);
9410 
9411         DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
9412     }
9413     return 0;
9414 }
9415 
9416 /** Creates a flow ring and informs dongle of this event */
9417 int dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
9418 {
9419     tx_flowring_create_request_t *flow_create_rqst;
9420     msgbuf_ring_t *flow_ring;
9421     dhd_prot_t *prot = dhd->prot;
9422     unsigned long flags;
9423     uint16 alloced = 0;
9424     msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
9425     uint16 max_flowrings = dhd->bus->max_tx_flowrings;
9426 
9427     /* Fetch a pre-initialized msgbuf_ring from the flowring pool */
9428     flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
9429     if (flow_ring == NULL) {
9430         DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n",
9431                    __FUNCTION__, flow_ring_node->flowid));
9432         return BCME_NOMEM;
9433     }
9434 
9435     DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
9436 
9437     /* Request for ctrl_ring buffer space */
9438     flow_create_rqst =
9439         (tx_flowring_create_request_t *)dhd_prot_alloc_ring_space(
9440             dhd, ctrl_ring, 1, &alloced, FALSE);
9441     if (flow_create_rqst == NULL) {
9442         dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
9443         DHD_ERROR(("%s: Flow Create Req flowid %d - failure ring space\n",
9444                    __FUNCTION__, flow_ring_node->flowid));
9445         DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
9446         return BCME_NOMEM;
9447     }
9448 
9449     flow_ring_node->prot_info = (void *)flow_ring;
9450 
9451     /* Common msg buf hdr */
9452     flow_create_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_CREATE;
9453     flow_create_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
9454     flow_create_rqst->msg.request_id = htol32(0);
9455     flow_create_rqst->msg.flags = ctrl_ring->current_phase;
9456 
9457     flow_create_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
9458     ctrl_ring->seqnum++;
9459 
9460     /* Update flow create message */
9461     flow_create_rqst->tid = flow_ring_node->flow_info.tid;
9462     flow_create_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
9463     memcpy(flow_create_rqst->sa, flow_ring_node->flow_info.sa,
9464            sizeof(flow_create_rqst->sa));
9465     memcpy(flow_create_rqst->da, flow_ring_node->flow_info.da,
9466            sizeof(flow_create_rqst->da));
9467     /* CAUTION: ring::base_addr already in Little Endian */
9468     flow_create_rqst->flow_ring_ptr.low_addr = flow_ring->base_addr.low_addr;
9469     flow_create_rqst->flow_ring_ptr.high_addr = flow_ring->base_addr.high_addr;
9470     flow_create_rqst->max_items = htol16(prot->h2d_max_txpost);
9471     flow_create_rqst->len_item = htol16(H2DRING_TXPOST_ITEMSIZE);
9472     flow_create_rqst->if_flags = 0;
9473 
9474 #ifdef DHD_HP2P
9475     /* Create an HP2P flow ring if HP2P is enabled, TID == 7, the
9476      * interface is AWDL, and the traffic is not multicast.
9477      * An infra interface is allowed only if the user enabled
9478      * hp2p_infra_enable through an iovar.
9479      * Only one HP2P flow may be active at a time. */
9480     if (dhd->hp2p_capable && !dhd->hp2p_ring_active &&
9481         flow_ring_node->flow_info.tid == HP2P_PRIO &&
9482         (dhd->hp2p_infra_enable || flow_create_rqst->msg.if_id) &&
9483         !ETHER_ISMULTI(flow_create_rqst->da)) {
9484         flow_create_rqst->if_flags |= BCMPCIE_FLOW_RING_INTF_HP2P;
9485         flow_ring_node->hp2p_ring = TRUE;
9486         dhd->hp2p_ring_active = TRUE;
9487 
9488         DHD_ERROR(("%s: flow ring for HP2P tid = %d flowid = %d\n",
9489                    __FUNCTION__, flow_ring_node->flow_info.tid,
9490                    flow_ring_node->flowid));
9491     }
9492 #endif /* DHD_HP2P */
9493 
9494     /* priority_ifrmmask definition: bit0 = d11ac core, bit1 = d11ad core.
9495      * It is currently not used for priority, so it serves solely as the
9496      * IFRM mask. */
9497     if (IFRM_ACTIVE(dhd)) {
9498         flow_create_rqst->priority_ifrmmask = (1 << IFRM_DEV_0);
9499     }
9500 
9501     DHD_ERROR(("%s: Send Flow Create Req flow ID %d for peer " MACDBG
9502                " prio %d ifindex %d\n",
9503                __FUNCTION__, flow_ring_node->flowid,
9504                MAC2STRDBG(flow_ring_node->flow_info.da),
9505                flow_ring_node->flow_info.tid,
9506                flow_ring_node->flow_info.ifindex));
9507 
9508     /* Update the flow_ring's WRITE index */
9509     if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
9510         dhd_prot_dma_indx_set(dhd, flow_ring->wr, H2D_DMA_INDX_WR_UPD,
9511                               flow_ring->idx);
9512     } else if (IFRM_ACTIVE(dhd) &&
9513                DHD_IS_FLOWRING(flow_ring->idx, max_flowrings)) {
9514         dhd_prot_dma_indx_set(dhd, flow_ring->wr, H2D_IFRM_INDX_WR_UPD,
9515                               flow_ring->idx);
9516     } else {
9517         dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr), sizeof(uint16),
9518                                 RING_WR_UPD, flow_ring->idx);
9519     }
9520 
9521     /* update control subn ring's WR index and ring doorbell to dongle */
9522     dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_create_rqst, 1);
9523 
9524     DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
9525 
9526     return BCME_OK;
9527 } /* dhd_prot_flow_ring_create */
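
/*
 * Flow-ring creation handshake as implemented above and below: the host
 * fetches a pooled msgbuf_ring, publishes its geometry and little-endian
 * base address in a MSG_TYPE_FLOW_RING_CREATE work item, primes the
 * flowring's own WR index, and the dongle answers with
 * MSG_TYPE_FLOW_RING_CREATE_CMPLT, handled in
 * dhd_prot_flow_ring_create_response_process().
 */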
9528 
9529 /** called on receiving MSG_TYPE_FLOW_RING_CREATE_CMPLT message from dongle */
9530 static void dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd,
9531                                                        void *msg)
9532 {
9533     tx_flowring_create_response_t *flow_create_resp =
9534         (tx_flowring_create_response_t *)msg;
9535 
9536     DHD_ERROR(("%s: Flow Create Response status = %d Flow %d\n", __FUNCTION__,
9537                ltoh16(flow_create_resp->cmplt.status),
9538                ltoh16(flow_create_resp->cmplt.flow_ring_id)));
9539 
9540     dhd_bus_flow_ring_create_response(
9541         dhd->bus, ltoh16(flow_create_resp->cmplt.flow_ring_id),
9542         ltoh16(flow_create_resp->cmplt.status));
9543 }
9544 
9545 static void dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf)
9546 {
9547     h2d_ring_create_response_t *resp = (h2d_ring_create_response_t *)buf;
9548     DHD_INFO(("%s ring create Response status = %d ring %d, id 0x%04x\n",
9549               __FUNCTION__, ltoh16(resp->cmplt.status),
9550               ltoh16(resp->cmplt.ring_id), ltoh32(resp->cmn_hdr.request_id)));
9551     if ((ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_DBGRING_REQ_PKTID) &&
9552         (ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_BTLOGRING_REQ_PKTID)) {
9553         DHD_ERROR(("invalid request ID with h2d ring create complete\n"));
9554         return;
9555     }
9556     if (dhd->prot->h2dring_info_subn->create_req_id ==
9557             ltoh32(resp->cmn_hdr.request_id) &&
9558         !dhd->prot->h2dring_info_subn->create_pending) {
9559         DHD_ERROR(("info ring create status for not pending submit ring\n"));
9560     }
9561 
9562     if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
9563         DHD_ERROR(("info/btlog ring create failed with status %d\n",
9564                    ltoh16(resp->cmplt.status)));
9565         return;
9566     }
9567     if (dhd->prot->h2dring_info_subn->create_req_id ==
9568         ltoh32(resp->cmn_hdr.request_id)) {
9569         dhd->prot->h2dring_info_subn->create_pending = FALSE;
9570         dhd->prot->h2dring_info_subn->inited = TRUE;
9571         DHD_ERROR(("info buffer post after ring create\n"));
9572         dhd_prot_infobufpost(dhd, dhd->prot->h2dring_info_subn);
9573     }
9574 }
9575 
9576 static void dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf)
9577 {
9578     d2h_ring_create_response_t *resp = (d2h_ring_create_response_t *)buf;
9579     DHD_INFO(("%s ring create Response status = %d ring %d, id 0x%04x\n",
9580               __FUNCTION__, ltoh16(resp->cmplt.status),
9581               ltoh16(resp->cmplt.ring_id), ltoh32(resp->cmn_hdr.request_id)));
9582     if ((ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_DBGRING_REQ_PKTID) &&
9583         (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_BTLOGRING_REQ_PKTID) &&
9584 #ifdef DHD_HP2P
9585         (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_HPPRING_TXREQ_PKTID) &&
9586         (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_HPPRING_RXREQ_PKTID) &&
9587 #endif /* DHD_HP2P */
9588         TRUE) {
9589         DHD_ERROR(("invalid request ID with d2h ring create complete\n"));
9590         return;
9591     }
9592     if (ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_DBGRING_REQ_PKTID) {
9593 #ifdef EWP_EDL
9594         if (!dhd->dongle_edl_support)
9595 #endif // endif
9596         {
9597             if (!dhd->prot->d2hring_info_cpln->create_pending) {
9598                 DHD_ERROR(
9599                     ("info ring create status for not pending cpl ring\n"));
9600                 return;
9601             }
9602 
9603             if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
9604                 DHD_ERROR(("info cpl ring create failed with status %d\n",
9605                            ltoh16(resp->cmplt.status)));
9606                 return;
9607             }
9608             dhd->prot->d2hring_info_cpln->create_pending = FALSE;
9609             dhd->prot->d2hring_info_cpln->inited = TRUE;
9610         }
9611 #ifdef EWP_EDL
9612         else {
9613             if (!dhd->prot->d2hring_edl->create_pending) {
9614                 DHD_ERROR(
9615                     ("edl ring create status for not pending cpl ring\n"));
9616                 return;
9617             }
9618 
9619             if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
9620                 DHD_ERROR(("edl cpl ring create failed with status %d\n",
9621                            ltoh16(resp->cmplt.status)));
9622                 return;
9623             }
9624             dhd->prot->d2hring_edl->create_pending = FALSE;
9625             dhd->prot->d2hring_edl->inited = TRUE;
9626         }
9627 #endif /* EWP_EDL */
9628     }
9629 
9630 #ifdef DHD_HP2P
9631     if (dhd->prot->d2hring_hp2p_txcpl &&
9632         ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_HPPRING_TXREQ_PKTID) {
9633         if (!dhd->prot->d2hring_hp2p_txcpl->create_pending) {
9634             DHD_ERROR(("HPP tx ring create status for not pending cpl ring\n"));
9635             return;
9636         }
9637 
9638         if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
9639             DHD_ERROR(("HPP tx cpl ring create failed with status %d\n",
9640                        ltoh16(resp->cmplt.status)));
9641             return;
9642         }
9643         dhd->prot->d2hring_hp2p_txcpl->create_pending = FALSE;
9644         dhd->prot->d2hring_hp2p_txcpl->inited = TRUE;
9645     }
9646     if (dhd->prot->d2hring_hp2p_rxcpl &&
9647         ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_HPPRING_RXREQ_PKTID) {
9648         if (!dhd->prot->d2hring_hp2p_rxcpl->create_pending) {
9649             DHD_ERROR(("HPP rx cpl ring create status received, but no create is pending\n"));
9650             return;
9651         }
9652 
9653         if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
9654             DHD_ERROR(("HPP rx cpl ring create failed with status %d\n",
9655                        ltoh16(resp->cmplt.status)));
9656             return;
9657         }
9658         dhd->prot->d2hring_hp2p_rxcpl->create_pending = FALSE;
9659         dhd->prot->d2hring_hp2p_rxcpl->inited = TRUE;
9660     }
9661 #endif /* DHD_HP2P */
9662 }
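/* Editor's note: the request-id validation above chains optional,
 * conditionally compiled clauses with a trailing TRUE, so every clause
 * can end in "&&" no matter which features are built in. A minimal,
 * self-contained sketch of the same idiom (illustrative only; REQ_A,
 * REQ_B and EXAMPLE_FEATURE are hypothetical names, not driver ones):
 */
#if 0 /* illustrative sketch, not driver code */
#define REQ_A 1u
#define REQ_B 2u
static bool is_unknown_request(uint32 req_id)
{
    return ((req_id != REQ_A) &&
#ifdef EXAMPLE_FEATURE
            (req_id != REQ_B) &&
#endif /* EXAMPLE_FEATURE */
            TRUE);
}
#endif /* illustrative sketch */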
9663 
9664 static void dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void *buf)
9665 {
9666     d2h_mailbox_data_t *d2h_data;
9667 
9668     d2h_data = (d2h_mailbox_data_t *)buf;
9669     DHD_INFO(("%s: D2H mailbox data 0x%04x\n", __FUNCTION__,
9670               d2h_data->d2h_mailbox_data));
9671     dhd_bus_handle_mb_data(dhd->bus, d2h_data->d2h_mailbox_data);
9672 }
9673 
9674 static void dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void *buf)
9675 {
9676     DHD_ERROR(("Timesync feature not compiled in, but got HOST_TS_COMPLETE\n"));
9677 }
9678 
9679 /** called on e.g. flow ring delete */
9680 void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info)
9681 {
9682     msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
9683     dhd_prot_ring_detach(dhd, flow_ring);
9684     DHD_INFO(("%s: Cleaning up flow ring\n", __FUNCTION__));
9685 }
9686 
9687 void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info,
9688                               struct bcmstrbuf *strbuf, const char *fmt)
9689 {
9690     const char *default_fmt =
9691         "RD %d WR %d BASE(VA) %p BASE(PA) %x:%x SIZE %d "
9692         "WORK_ITEM_SIZE %d MAX_WORK_ITEMS %d TOTAL_SIZE %d\n";
9693     msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
9694     uint16 rd, wr;
9695     uint32 dma_buf_len = flow_ring->max_items * flow_ring->item_len;
9696 
9697     if (fmt == NULL) {
9698         fmt = default_fmt;
9699     }
9700 
9701     if (dhd->bus->is_linkdown) {
9702         DHD_ERROR(
9703             ("%s: Skip dumping flowring due to Link down\n", __FUNCTION__));
9704         return;
9705     }
9706 
9707     dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
9708     dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
9709     bcm_bprintf(strbuf, fmt, rd, wr, flow_ring->dma_buf.va,
9710                 ltoh32(flow_ring->base_addr.high_addr),
9711                 ltoh32(flow_ring->base_addr.low_addr), flow_ring->item_len,
9712                 flow_ring->max_items, dma_buf_len);
9713 }
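/* Editor's note: the dump above derives a ring's total byte size as
 * max_items * item_len and prints the RD/WR indices read back from
 * shared memory. A minimal sketch of how many work items a circular
 * ring holds for a given RD/WR pair, assuming WR == RD means empty as
 * it does for the msgbuf rings here (illustrative only):
 */
#if 0 /* illustrative sketch, not driver code */
static uint16 ring_occupancy(uint16 rd, uint16 wr, uint16 max_items)
{
    return (wr >= rd) ? (uint16)(wr - rd) : (uint16)(max_items - rd + wr);
}
#endif /* illustrative sketch */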
9714 
9715 void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
9716 {
9717     dhd_prot_t *prot = dhd->prot;
9718     bcm_bprintf(strbuf, "IPCrevs: Dev %d, \t Host %d, \tactive %d\n",
9719                 dhd->prot->device_ipc_version, dhd->prot->host_ipc_version,
9720                 dhd->prot->active_ipc_version);
9721 
9722     bcm_bprintf(strbuf, "max Host TS bufs to post: %d, \t posted %d \n",
9723                 dhd->prot->max_tsbufpost, dhd->prot->cur_ts_bufs_posted);
9724     bcm_bprintf(strbuf, "max INFO bufs to post: %d, \t posted %d \n",
9725                 dhd->prot->max_infobufpost, dhd->prot->infobufpost);
9726     bcm_bprintf(strbuf, "max event bufs to post: %d, \t posted %d \n",
9727                 dhd->prot->max_eventbufpost, dhd->prot->cur_event_bufs_posted);
9728     bcm_bprintf(strbuf, "max ioctlresp bufs to post: %d, \t posted %d \n",
9729                 dhd->prot->max_ioctlrespbufpost,
9730                 dhd->prot->cur_ioctlresp_bufs_posted);
9731     bcm_bprintf(strbuf, "max RX bufs to post: %d, \t posted %d \n",
9732                 dhd->prot->max_rxbufpost, dhd->prot->rxbufpost);
9733 
9734     bcm_bprintf(strbuf, "%14s %5s %5s %17s %17s %14s %14s %10s\n", "Type", "RD",
9735                 "WR", "BASE(VA)", "BASE(PA)", "WORK_ITEM_SIZE",
9736                 "MAX_WORK_ITEMS", "TOTAL_SIZE");
9737     bcm_bprintf(strbuf, "%14s", "H2DCtrlPost");
9738     dhd_prot_print_flow_ring(dhd, &prot->h2dring_ctrl_subn, strbuf,
9739                              " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9740     bcm_bprintf(strbuf, "%14s", "D2HCtrlCpl");
9741     dhd_prot_print_flow_ring(dhd, &prot->d2hring_ctrl_cpln, strbuf,
9742                              " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9743     bcm_bprintf(strbuf, "%14s", "H2DRxPost");
9744     dhd_prot_print_flow_ring(dhd, &prot->h2dring_rxp_subn, strbuf,
9745                              " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9746     bcm_bprintf(strbuf, "%14s", "D2HRxCpl");
9747     dhd_prot_print_flow_ring(dhd, &prot->d2hring_rx_cpln, strbuf,
9748                              " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9749     bcm_bprintf(strbuf, "%14s", "D2HTxCpl");
9750     dhd_prot_print_flow_ring(dhd, &prot->d2hring_tx_cpln, strbuf,
9751                              " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9752     if (dhd->prot->h2dring_info_subn != NULL &&
9753         dhd->prot->d2hring_info_cpln != NULL) {
9754         bcm_bprintf(strbuf, "%14s", "H2DRingInfoSub");
9755         dhd_prot_print_flow_ring(dhd, prot->h2dring_info_subn, strbuf,
9756                                  " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9757         bcm_bprintf(strbuf, "%14s", "D2HRingInfoCpl");
9758         dhd_prot_print_flow_ring(dhd, prot->d2hring_info_cpln, strbuf,
9759                                  " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9760     }
9761     if (dhd->prot->d2hring_edl != NULL) {
9762         bcm_bprintf(strbuf, "%14s", "D2HRingEDL");
9763         dhd_prot_print_flow_ring(dhd, prot->d2hring_edl, strbuf,
9764                                  " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
9765     }
9766 
9767     bcm_bprintf(strbuf,
9768                 "active_tx_count %d	 pktidmap_avail(ctrl/rx/tx) %d %d %d\n",
9769                 OSL_ATOMIC_READ(dhd->osh, &dhd->prot->active_tx_count),
9770                 DHD_PKTID_AVAIL(dhd->prot->pktid_ctrl_map),
9771                 DHD_PKTID_AVAIL(dhd->prot->pktid_rx_map),
9772                 DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map));
9773 }
9774 
9775 int dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
9776 {
9777     tx_flowring_delete_request_t *flow_delete_rqst;
9778     dhd_prot_t *prot = dhd->prot;
9779     unsigned long flags;
9780     uint16 alloced = 0;
9781     msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
9782 
9783     DHD_RING_LOCK(ring->ring_lock, flags);
9784 
9785     /* Request for ring buffer space */
9786     flow_delete_rqst =
9787         (tx_flowring_delete_request_t *)dhd_prot_alloc_ring_space(
9788             dhd, ring, 1, &alloced, FALSE);
9789     if (flow_delete_rqst == NULL) {
9790         DHD_RING_UNLOCK(ring->ring_lock, flags);
9791         DHD_ERROR(("%s: Flow Delete Req - failure ring space\n", __FUNCTION__));
9792         return BCME_NOMEM;
9793     }
9794 
9795     /* Common msg buf hdr */
9796     flow_delete_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_DELETE;
9797     flow_delete_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
9798     flow_delete_rqst->msg.request_id = htol32(0);
9799     flow_delete_rqst->msg.flags = ring->current_phase;
9800 
9801     flow_delete_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
9802     ring->seqnum++;
9803 
9804     /* Update Delete info */
9805     flow_delete_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
9806     flow_delete_rqst->reason = htol16(BCME_OK);
9807 
9808     DHD_ERROR(("%s: Send Flow Delete Req RING ID %d for peer %pM"
9809                " prio %d ifindex %d\n",
9810                __FUNCTION__, flow_ring_node->flowid,
9811                flow_ring_node->flow_info.da, flow_ring_node->flow_info.tid,
9812                flow_ring_node->flow_info.ifindex));
9813 
9814     /* update ring's WR index and ring doorbell to dongle */
9815     dhd_prot_ring_write_complete(dhd, ring, flow_delete_rqst, 1);
9816 
9817     DHD_RING_UNLOCK(ring->ring_lock, flags);
9818 
9819     return BCME_OK;
9820 }
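/* Editor's note: every H2D control message above is stamped with
 * msg.epoch = seqnum % H2D_EPOCH_MODULO, and the ring's seqnum is then
 * advanced; the epoch lets the dongle recognize stale or not-yet-written
 * work items. A minimal sketch of that producer-side stamping
 * (illustrative only; EXAMPLE_EPOCH_MODULO stands in for
 * H2D_EPOCH_MODULO, whose value is defined elsewhere):
 */
#if 0 /* illustrative sketch, not driver code */
#define EXAMPLE_EPOCH_MODULO 253
static uint8 next_epoch(uint32 *seqnum)
{
    uint8 epoch = (uint8)(*seqnum % EXAMPLE_EPOCH_MODULO);
    (*seqnum)++; /* one step per work item, as the rings above do */
    return epoch;
}
#endif /* illustrative sketch */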
9821 
9822 static void BCMFASTPATH dhd_prot_flow_ring_fastdelete(dhd_pub_t *dhd,
9823                                                       uint16 flowid,
9824                                                       uint16 rd_idx)
9825 {
9826     flow_ring_node_t *flow_ring_node = DHD_FLOW_RING(dhd, flowid);
9827     msgbuf_ring_t *ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
9828     host_txbuf_cmpl_t txstatus;
9829     host_txbuf_post_t *txdesc;
9830     uint16 wr_idx;
9831 
9832     DHD_INFO(("%s: FAST delete ring, flowid=%d, rd_idx=%d, wr_idx=%d\n",
9833               __FUNCTION__, flowid, rd_idx, ring->wr));
9834 
9835     memset(&txstatus, 0, sizeof(txstatus));
9836     txstatus.compl_hdr.flow_ring_id = flowid;
9837     txstatus.cmn_hdr.if_id = flow_ring_node->flow_info.ifindex;
9838     wr_idx = ring->wr;
9839 
9840     while (wr_idx != rd_idx) {
9841         if (wr_idx) {
9842             wr_idx--;
9843         } else {
9844             wr_idx = ring->max_items - 1;
9845         }
9846         txdesc = (host_txbuf_post_t *)((char *)DHD_RING_BGN_VA(ring) +
9847                                        (wr_idx * ring->item_len));
9848         txstatus.cmn_hdr.request_id = txdesc->cmn_hdr.request_id;
9849         dhd_prot_txstatus_process(dhd, &txstatus);
9850     }
9851 }
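/* Editor's note: fastdelete above walks the TX post ring backwards from
 * the host WR index to the dongle-reported RD index, faking a completion
 * for every descriptor still posted. A minimal sketch of that
 * decrement-with-wrap traversal (illustrative only):
 */
#if 0 /* illustrative sketch, not driver code */
static void walk_ring_backwards(uint16 rd, uint16 wr, uint16 max_items)
{
    uint16 idx = wr;
    while (idx != rd) {
        /* step back one slot, wrapping to the last slot at zero */
        idx = (idx == 0) ? (uint16)(max_items - 1) : (uint16)(idx - 1);
        /* ... reclaim the work item at 'idx' here ... */
    }
}
#endif /* illustrative sketch */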
9852 
9853 static void dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd,
9854                                                        void *msg)
9855 {
9856     tx_flowring_delete_response_t *flow_delete_resp =
9857         (tx_flowring_delete_response_t *)msg;
9858 
9859     DHD_ERROR(("%s: Flow Delete Response status = %d Flow %d\n", __FUNCTION__,
9860                flow_delete_resp->cmplt.status,
9861                flow_delete_resp->cmplt.flow_ring_id));
9862 
9863     if (dhd->fast_delete_ring_support) {
9864         dhd_prot_flow_ring_fastdelete(dhd, flow_delete_resp->cmplt.flow_ring_id,
9865                                       flow_delete_resp->read_idx);
9866     }
9867     dhd_bus_flow_ring_delete_response(dhd->bus,
9868                                       flow_delete_resp->cmplt.flow_ring_id,
9869                                       flow_delete_resp->cmplt.status);
9870 }
9871 
9872 static void dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd,
9873                                                        void *msg)
9874 {
9875 #ifdef IDLE_TX_FLOW_MGMT
9876     tx_idle_flowring_resume_response_t *flow_resume_resp =
9877         (tx_idle_flowring_resume_response_t *)msg;
9878 
9879     DHD_ERROR(("%s Flow resume Response status = %d Flow %d\n", __FUNCTION__,
9880                flow_resume_resp->cmplt.status,
9881                flow_resume_resp->cmplt.flow_ring_id));
9882 
9883     dhd_bus_flow_ring_resume_response(dhd->bus,
9884                                       flow_resume_resp->cmplt.flow_ring_id,
9885                                       flow_resume_resp->cmplt.status);
9886 #endif /* IDLE_TX_FLOW_MGMT */
9887 }
9888 
9889 static void dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd,
9890                                                         void *msg)
9891 {
9892 #ifdef IDLE_TX_FLOW_MGMT
9893     int16 status;
9894     tx_idle_flowring_suspend_response_t *flow_suspend_resp =
9895         (tx_idle_flowring_suspend_response_t *)msg;
9896     status = flow_suspend_resp->cmplt.status;
9897 
9898     DHD_ERROR(("%s Flow id %d suspend Response status = %d\n", __FUNCTION__,
9899                flow_suspend_resp->cmplt.flow_ring_id, status));
9900     if (status != BCME_OK) {
9901         DHD_ERROR(("%s Error in suspending flow rings!! "
9902                    "Dongle will still be polling idle rings!! Status = %d\n",
9903                    __FUNCTION__, status));
9904     }
9905 #endif /* IDLE_TX_FLOW_MGMT */
9906 }
9907 
9908 int dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
9909 {
9910     tx_flowring_flush_request_t *flow_flush_rqst;
9911     dhd_prot_t *prot = dhd->prot;
9912     unsigned long flags;
9913     uint16 alloced = 0;
9914     msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
9915 
9916     DHD_RING_LOCK(ring->ring_lock, flags);
9917 
9918     /* Request for ring buffer space */
9919     flow_flush_rqst = (tx_flowring_flush_request_t *)dhd_prot_alloc_ring_space(
9920         dhd, ring, 1, &alloced, FALSE);
9921     if (flow_flush_rqst == NULL) {
9922         DHD_RING_UNLOCK(ring->ring_lock, flags);
9923         DHD_ERROR(("%s: Flow Flush Req - failure ring space\n", __FUNCTION__));
9924         return BCME_NOMEM;
9925     }
9926 
9927     /* Common msg buf hdr */
9928     flow_flush_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_FLUSH;
9929     flow_flush_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
9930     flow_flush_rqst->msg.request_id = htol32(0);
9931     flow_flush_rqst->msg.flags = ring->current_phase;
9932     flow_flush_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
9933     ring->seqnum++;
9934 
9935     flow_flush_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
9936     flow_flush_rqst->reason = htol16(BCME_OK);
9937 
9938     DHD_INFO(("%s: Send Flow Flush Req\n", __FUNCTION__));
9939 
9940     /* update ring's WR index and ring doorbell to dongle */
9941     dhd_prot_ring_write_complete(dhd, ring, flow_flush_rqst, 1);
9942 
9943     DHD_RING_UNLOCK(ring->ring_lock, flags);
9944 
9945     return BCME_OK;
9946 } /* dhd_prot_flow_ring_flush */
9947 
9948 static void dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg)
9949 {
9950     tx_flowring_flush_response_t *flow_flush_resp =
9951         (tx_flowring_flush_response_t *)msg;
9952 
9953     DHD_INFO(("%s: Flow Flush Response status = %d\n", __FUNCTION__,
9954               flow_flush_resp->cmplt.status));
9955 
9956     dhd_bus_flow_ring_flush_response(dhd->bus,
9957                                      flow_flush_resp->cmplt.flow_ring_id,
9958                                      flow_flush_resp->cmplt.status);
9959 }
9960 
9961 /**
9962  * Request dongle to configure soft doorbells for D2H rings. Host populated soft
9963  * doorbell information is transferred to dongle via the d2h ring config control
9964  * message.
9965  */
9966 void dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd)
9967 {
9968 #if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
9969     uint16 ring_idx;
9970     uint8 *msg_next;
9971     void *msg_start;
9972     uint16 alloced = 0;
9973     unsigned long flags;
9974     dhd_prot_t *prot = dhd->prot;
9975     ring_config_req_t *ring_config_req;
9976     bcmpcie_soft_doorbell_t *soft_doorbell;
9977     msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
9978     const uint16 d2h_rings = BCMPCIE_D2H_COMMON_MSGRINGS;
9979 
9980     /* Claim space for d2h_ring number of d2h_ring_config_req_t messages */
9981     DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
9982     msg_start =
9983         dhd_prot_alloc_ring_space(dhd, ctrl_ring, d2h_rings, &alloced, TRUE);
9984     if (msg_start == NULL) {
9985         DHD_ERROR(("%s Msgbuf no space for %d D2H ring config soft doorbells\n",
9986                    __FUNCTION__, d2h_rings));
9987         DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
9988         return;
9989     }
9990 
9991     msg_next = (uint8 *)msg_start;
9992 
9993     for (ring_idx = 0; ring_idx < d2h_rings; ring_idx++) {
9994         /* position the ring_config_req into the ctrl subm ring */
9995         ring_config_req = (ring_config_req_t *)msg_next;
9996         /* Common msg header */
9997         ring_config_req->msg.msg_type = MSG_TYPE_D2H_RING_CONFIG;
9998         ring_config_req->msg.if_id = 0;
9999         ring_config_req->msg.flags = 0;
10000 
10001         ring_config_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
10002         ctrl_ring->seqnum++;
10003 
10004         ring_config_req->msg.request_id = htol32(DHD_FAKE_PKTID); /* unused */
10005 
10006         /* Ring Config subtype and d2h ring_id */
10007         ring_config_req->subtype =
10008             htol16(D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL);
10009         ring_config_req->ring_id = htol16(DHD_D2H_RINGID(ring_idx));
10010 
10011         /* Host soft doorbell configuration */
10012         soft_doorbell = &prot->soft_doorbell[ring_idx];
10013 
10014         ring_config_req->soft_doorbell.value = htol32(soft_doorbell->value);
10015         ring_config_req->soft_doorbell.haddr.high =
10016             htol32(soft_doorbell->haddr.high);
10017         ring_config_req->soft_doorbell.haddr.low =
10018             htol32(soft_doorbell->haddr.low);
10019         ring_config_req->soft_doorbell.items = htol16(soft_doorbell->items);
10020         ring_config_req->soft_doorbell.msecs = htol16(soft_doorbell->msecs);
10021 
10022         DHD_INFO(("%s: Soft doorbell haddr 0x%08x 0x%08x value 0x%08x\n",
10023                   __FUNCTION__, ring_config_req->soft_doorbell.haddr.high,
10024                   ring_config_req->soft_doorbell.haddr.low,
10025                   ring_config_req->soft_doorbell.value));
10026 
10027         msg_next = msg_next + ctrl_ring->item_len;
10028     }
10029 
10030     /* update control subn ring's WR index and ring doorbell to dongle */
10031     dhd_prot_ring_write_complete(dhd, ctrl_ring, msg_start, d2h_rings);
10032 
10033     DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
10034 
10035 #endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
10036 }
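/* Editor's note: the function above only transfers host-populated soft
 * doorbell descriptors to the dongle; something else must have filled
 * prot->soft_doorbell[] beforehand. A hedged sketch of populating one
 * descriptor, using only the fields the function reads (the address and
 * value below are hypothetical, not a real mapping):
 */
#if 0 /* illustrative sketch, not driver code */
static void example_fill_soft_doorbell(bcmpcie_soft_doorbell_t *db)
{
    db->value = 0x1;          /* value the dongle will write */
    db->haddr.high = 0x0;     /* hypothetical 64-bit host address */
    db->haddr.low = 0x10000;
    db->items = 0;            /* no work-item coalescing */
    db->msecs = 0;            /* no timer-based coalescing */
}
#endif /* illustrative sketch */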
10037 
10038 static void dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg)
10039 {
10040     DHD_INFO(("%s: Ring Config Response - status %d ringid %d\n", __FUNCTION__,
10041               ltoh16(((ring_config_resp_t *)msg)->compl_hdr.status),
10042               ltoh16(((ring_config_resp_t *)msg)->compl_hdr.flow_ring_id)));
10043 }
10044 
10045 int dhd_prot_debug_info_print(dhd_pub_t *dhd)
10046 {
10047     dhd_prot_t *prot = dhd->prot;
10048     msgbuf_ring_t *ring;
10049     uint16 rd, wr;
10050     uint32 dma_buf_len;
10051     uint64 current_time;
10052     ulong ring_tcm_rd_addr; /* dongle address */
10053     ulong ring_tcm_wr_addr; /* dongle address */
10054 
10055     DHD_ERROR(("\n ------- DUMPING VERSION INFORMATION ------- \r\n"));
10056     DHD_ERROR(("DHD: %s\n", dhd_version));
10057     DHD_ERROR(("Firmware: %s\n", fw_version));
10058 
10059 #ifdef DHD_FW_COREDUMP
10060     DHD_ERROR(("\n ------- DUMPING CONFIGURATION INFORMATION ------ \r\n"));
10061     DHD_ERROR(("memdump mode: %d\n", dhd->memdump_enabled));
10062 #endif /* DHD_FW_COREDUMP */
10063 
10064     DHD_ERROR(("\n ------- DUMPING PROTOCOL INFORMATION ------- \r\n"));
10065     DHD_ERROR(("IPCrevs: Dev %d, Host %d, active %d\n",
10066                prot->device_ipc_version, prot->host_ipc_version,
10067                prot->active_ipc_version));
10068     DHD_ERROR(("d2h_intr_method -> %s\n",
10069                dhd->bus->d2h_intr_method ? "PCIE_MSI" : "PCIE_INTX"));
10070     DHD_ERROR(("max Host TS bufs to post: %d, posted %d\n", prot->max_tsbufpost,
10071                prot->cur_ts_bufs_posted));
10072     DHD_ERROR(("max INFO bufs to post: %d, posted %d\n", prot->max_infobufpost,
10073                prot->infobufpost));
10074     DHD_ERROR(("max event bufs to post: %d, posted %d\n",
10075                prot->max_eventbufpost, prot->cur_event_bufs_posted));
10076     DHD_ERROR(("max ioctlresp bufs to post: %d, posted %d\n",
10077                prot->max_ioctlrespbufpost, prot->cur_ioctlresp_bufs_posted));
10078     DHD_ERROR(("max RX bufs to post: %d, posted %d\n", prot->max_rxbufpost,
10079                prot->rxbufpost));
10080     DHD_ERROR(("h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n", h2d_max_txpost,
10081                prot->h2d_max_txpost));
10082 
10083     current_time = OSL_LOCALTIME_NS();
10084     DHD_ERROR(("current_time=" SEC_USEC_FMT "\n", GET_SEC_USEC(current_time)));
10085     DHD_ERROR(("ioctl_fillup_time=" SEC_USEC_FMT " ioctl_ack_time=" SEC_USEC_FMT
10086                " ioctl_cmplt_time=" SEC_USEC_FMT "\n",
10087                GET_SEC_USEC(prot->ioctl_fillup_time),
10088                GET_SEC_USEC(prot->ioctl_ack_time),
10089                GET_SEC_USEC(prot->ioctl_cmplt_time)));
10090 
10091     /* Check PCIe INT registers */
10092     if (!dhd_pcie_dump_int_regs(dhd)) {
10093         DHD_ERROR(("%s: PCIe link might be down\n", __FUNCTION__));
10094         dhd->bus->is_linkdown = TRUE;
10095     }
10096 
10097     DHD_ERROR(("\n ------- DUMPING IOCTL RING RD WR Pointers ------- \r\n"));
10098 
10099     ring = &prot->h2dring_ctrl_subn;
10100     dma_buf_len = ring->max_items * ring->item_len;
10101     ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
10102     ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
10103     DHD_ERROR(
10104         ("CtrlPost: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
10105          "SIZE %d \r\n",
10106          ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
10107          ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
10108          dma_buf_len));
10109     DHD_ERROR(
10110         ("CtrlPost: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
10111     if (dhd->bus->is_linkdown) {
10112         DHD_ERROR(("CtrlPost: From Shared Mem: RD and WR are invalid"
10113                    " due to PCIe link down\r\n"));
10114     } else {
10115         dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
10116         dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
10117         DHD_ERROR(("CtrlPost: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
10118     }
10119     DHD_ERROR(("CtrlPost: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
10120 
10121     ring = &prot->d2hring_ctrl_cpln;
10122     dma_buf_len = ring->max_items * ring->item_len;
10123     ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
10124     ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
10125     DHD_ERROR(
10126         ("CtrlCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
10127          "SIZE %d \r\n",
10128          ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
10129          ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
10130          dma_buf_len));
10131     DHD_ERROR(
10132         ("CtrlCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
10133     if (dhd->bus->is_linkdown) {
10134         DHD_ERROR(("CtrlCpl: From Shared Mem: RD and WR are invalid"
10135                    " due to PCIe link down\r\n"));
10136     } else {
10137         dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
10138         dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
10139         DHD_ERROR(("CtrlCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
10140     }
10141     DHD_ERROR(("CtrlCpl: Expected seq num: %d \r\n",
10142                ring->seqnum % D2H_EPOCH_MODULO));
10143 
10144     ring = prot->h2dring_info_subn;
10145     if (ring) {
10146         dma_buf_len = ring->max_items * ring->item_len;
10147         ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
10148         ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
10149         DHD_ERROR(("InfoSub: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr "
10150                    "0x%lx:0x%lx "
10151                    "SIZE %d \r\n",
10152                    ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
10153                    ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr,
10154                    ring_tcm_wr_addr, dma_buf_len));
10155         DHD_ERROR(
10156             ("InfoSub: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
10157         if (dhd->bus->is_linkdown) {
10158             DHD_ERROR(("InfoSub: From Shared Mem: RD and WR are invalid"
10159                        " due to PCIe link down\r\n"));
10160         } else {
10161             dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
10162             dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
10163             DHD_ERROR(("InfoSub: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
10164         }
10165         DHD_ERROR(
10166             ("InfoSub: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
10167     }
10168     ring = prot->d2hring_info_cpln;
10169     if (ring) {
10170         dma_buf_len = ring->max_items * ring->item_len;
10171         ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
10172         ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
10173         DHD_ERROR(("InfoCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr "
10174                    "0x%lx:0x%lx "
10175                    "SIZE %d \r\n",
10176                    ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
10177                    ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr,
10178                    ring_tcm_wr_addr, dma_buf_len));
10179         DHD_ERROR(
10180             ("InfoCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
10181         if (dhd->bus->is_linkdown) {
10182             DHD_ERROR(("InfoCpl: From Shared Mem: RD and WR are invalid"
10183                        " due to PCIe link down\r\n"));
10184         } else {
10185             dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
10186             dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
10187             DHD_ERROR(("InfoCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
10188         }
10189         DHD_ERROR(("InfoCpl: Expected seq num: %d \r\n",
10190                    ring->seqnum % D2H_EPOCH_MODULO));
10191     }
10192 
10193     ring = &prot->d2hring_tx_cpln;
10194     if (ring) {
10195         ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
10196         ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
10197         dma_buf_len = ring->max_items * ring->item_len;
10198         DHD_ERROR((
10199             "TxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
10200             "SIZE %d \r\n",
10201             ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
10202             ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr,
10203             ring_tcm_wr_addr, dma_buf_len));
10204         DHD_ERROR(
10205             ("TxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
10206         if (dhd->bus->is_linkdown) {
10207             DHD_ERROR(("TxCpl: From Shared Mem: RD and WR are invalid"
10208                        " due to PCIe link down\r\n"));
10209         } else {
10210             dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
10211             dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
10212             DHD_ERROR(("TxCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
10213         }
10214         DHD_ERROR(("TxCpl: Expected seq num: %d \r\n",
10215                    ring->seqnum % D2H_EPOCH_MODULO));
10216     }
10217 
10218     ring = &prot->d2hring_rx_cpln;
10219     if (ring) {
10220         ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
10221         ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
10222         dma_buf_len = ring->max_items * ring->item_len;
10223         DHD_ERROR((
10224             "RxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
10225             "SIZE %d \r\n",
10226             ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
10227             ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr,
10228             ring_tcm_wr_addr, dma_buf_len));
10229         DHD_ERROR(
10230             ("RxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
10231         if (dhd->bus->is_linkdown) {
10232             DHD_ERROR(("RxCpl: From Shared Mem: RD and WR are invalid"
10233                        " due to PCIe link down\r\n"));
10234         } else {
10235             dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
10236             dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
10237             DHD_ERROR(("RxCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
10238         }
10239         DHD_ERROR(("RxCpl: Expected seq num: %d \r\n",
10240                    ring->seqnum % D2H_EPOCH_MODULO));
10241     }
10242 #ifdef EWP_EDL
10243     ring = prot->d2hring_edl;
10244     if (ring) {
10245         ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
10246         ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
10247         dma_buf_len = ring->max_items * ring->item_len;
10248         DHD_ERROR(("EdlRing: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr "
10249                    "0x%lx:0x%lx "
10250                    "SIZE %d \r\n",
10251                    ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
10252                    ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr,
10253                    ring_tcm_wr_addr, dma_buf_len));
10254         DHD_ERROR(
10255             ("EdlRing: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
10256         if (dhd->bus->is_linkdown) {
10257             DHD_ERROR(("EdlRing: From Shared Mem: RD and WR are invalid"
10258                        " due to PCIe link down\r\n"));
10259         } else {
10260             dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
10261             dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
10262             DHD_ERROR(("EdlRing: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
10263         }
10264         DHD_ERROR(("EdlRing: Expected seq num: %d \r\n",
10265                    ring->seqnum % D2H_EPOCH_MODULO));
10266     }
10267 #endif /* EWP_EDL */
10268 
10269     DHD_ERROR(("%s: cur_ioctlresp_bufs_posted %d cur_event_bufs_posted %d\n",
10270                __FUNCTION__, prot->cur_ioctlresp_bufs_posted,
10271                prot->cur_event_bufs_posted));
10272 #ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
10273     DHD_ERROR(
10274         ("%s: multi_client_flow_rings:%d max_multi_client_flow_rings:%d\n",
10275          __FUNCTION__, dhd->multi_client_flow_rings,
10276          dhd->max_multi_client_flow_rings));
10277 #endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */
10278 
10279     DHD_ERROR(("pktid_txq_start_cnt: %d\n", prot->pktid_txq_start_cnt));
10280     DHD_ERROR(("pktid_txq_stop_cnt: %d\n", prot->pktid_txq_stop_cnt));
10281     DHD_ERROR(("pktid_depleted_cnt: %d\n", prot->pktid_depleted_cnt));
10282 
10283     dhd_pcie_debug_info_dump(dhd);
10284 
10285     return 0;
10286 }
10287 
10288 int dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
10289 {
10290     uint32 *ptr;
10291     uint32 value;
10292 
10293     if (dhd->prot->d2h_dma_indx_wr_buf.va) {
10294         uint32 i;
10295         uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus);
10296 
10297         OSL_CACHE_INV((void *)dhd->prot->d2h_dma_indx_wr_buf.va,
10298                       dhd->prot->d2h_dma_indx_wr_buf.len);
10299 
10300         ptr = (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va);
10301 
10302         bcm_bprintf(b, "\n max_h2d_queues %d\n", max_h2d_queues);
10303 
10304         bcm_bprintf(b, "\nRPTR block H2D common rings, %p\n", (void *)ptr);
10305         value = ltoh32(*ptr);
10306         bcm_bprintf(b, "\tH2D CTRL: value 0x%04x\n", value);
10307         ptr++;
10308         value = ltoh32(*ptr);
10309         bcm_bprintf(b, "\tH2D RXPOST: value 0x%04x\n", value);
10310 
10311         ptr++;
10312         bcm_bprintf(b, "RPTR block flow rings, %p\n", (void *)ptr);
10313         for (i = BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) {
10314             value = ltoh32(*ptr);
10315             bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value);
10316             ptr++;
10317         }
10318     }
10319 
10320     if (dhd->prot->h2d_dma_indx_rd_buf.va) {
10321         OSL_CACHE_INV((void *)dhd->prot->h2d_dma_indx_rd_buf.va,
10322                       dhd->prot->h2d_dma_indx_rd_buf.len);
10323 
10324         ptr = (uint32 *)(dhd->prot->h2d_dma_indx_rd_buf.va);
10325 
10326         bcm_bprintf(b, "\nWPTR block D2H common rings, %p\n", (void *)ptr);
10327         value = ltoh32(*ptr);
10328         bcm_bprintf(b, "\tD2H CTRLCPLT: value 0x%04x\n", value);
10329         ptr++;
10330         value = ltoh32(*ptr);
10331         bcm_bprintf(b, "\tD2H TXCPLT: value 0x%04x\n", value);
10332         ptr++;
10333         value = ltoh32(*ptr);
10334         bcm_bprintf(b, "\tD2H RXCPLT: value 0x%04x\n", value);
10335     }
10336 
10337     return 0;
10338 }
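/* Editor's note: the dump above reads two DMA'd index blocks from host
 * memory: an RPTR block of H2D ring read pointers (slot 0 for the
 * control ring, slot 1 for rxpost, then one slot per TX flow ring) and
 * a WPTR block of D2H ring write pointers (ctrl/tx/rx completions). A
 * minimal sketch of indexing such a block (illustrative only):
 */
#if 0 /* illustrative sketch, not driver code */
static uint32 example_index_entry(const uint32 *block, uint32 ring_idx)
{
    /* each slot is one little-endian uint32 per ring */
    return ltoh32(block[ring_idx]);
}
#endif /* illustrative sketch */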
10339 
10340 uint32 dhd_prot_metadata_dbg_set(dhd_pub_t *dhd, bool val)
10341 {
10342     dhd_prot_t *prot = dhd->prot;
10343 #if DHD_DBG_SHOW_METADATA
10344     prot->metadata_dbg = val;
10345 #endif // endif
10346     return (uint32)prot->metadata_dbg;
10347 }
10348 
10349 uint32 dhd_prot_metadata_dbg_get(dhd_pub_t *dhd)
10350 {
10351     dhd_prot_t *prot = dhd->prot;
10352     return (uint32)prot->metadata_dbg;
10353 }
10354 
10355 uint32 dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx)
10356 {
10357     dhd_prot_t *prot = dhd->prot;
10358     if (rx) {
10359         prot->rx_metadata_offset = (uint16)val;
10360     } else {
10361         prot->tx_metadata_offset = (uint16)val;
10362     }
10363     return dhd_prot_metadatalen_get(dhd, rx);
10364 }
10365 
10366 uint32 dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx)
10367 {
10368     dhd_prot_t *prot = dhd->prot;
10369     if (rx) {
10370         return prot->rx_metadata_offset;
10371     } else {
10372         return prot->tx_metadata_offset;
10373     }
10374 }
10375 
10376 /** optimization to write "n" tx items at a time to ring */
10377 uint32 dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val)
10378 {
10379     dhd_prot_t *prot = dhd->prot;
10380     if (set) {
10381         prot->txp_threshold = (uint16)val;
10382     }
10383     val = prot->txp_threshold;
10384     return val;
10385 }
10386 
10387 #ifdef DHD_RX_CHAINING
10388 
10389 static INLINE void BCMFASTPATH dhd_rxchain_reset(rxchain_info_t *rxchain)
10390 {
10391     rxchain->pkt_count = 0;
10392 }
10393 
10394 static void BCMFASTPATH dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx)
10395 {
10396     uint8 *eh;
10397     uint8 prio;
10398     dhd_prot_t *prot = dhd->prot;
10399     rxchain_info_t *rxchain = &prot->rxchain;
10400 
10401     ASSERT(!PKTISCHAINED(pkt));
10402     ASSERT(PKTCLINK(pkt) == NULL);
10403     ASSERT(PKTCGETATTR(pkt) == 0);
10404 
10405     eh = PKTDATA(dhd->osh, pkt);
10406     prio = IP_TOS46(eh + ETHER_HDR_LEN) >> IPV4_TOS_PREC_SHIFT;
10407 
10408     if (rxchain->pkt_count &&
10409         !(PKT_CTF_CHAINABLE(dhd, ifidx, eh, prio, rxchain->h_sa, rxchain->h_da,
10410                             rxchain->h_prio))) {
10411         /* Different flow - First release the existing chain */
10412         dhd_rxchain_commit(dhd);
10413     }
10414 
10415     /* For routers, with HNDCTF, link the packets using PKTSETCLINK, */
10416     /* so that the chain can be handed off to CTF bridge as is. */
10417     if (rxchain->pkt_count == 0) {
10418         /* First packet in chain */
10419         rxchain->pkthead = rxchain->pkttail = pkt;
10420 
10421         /* Keep a copy of ptr to ether_da, ether_sa and prio */
10422         rxchain->h_da = ((struct ether_header *)eh)->ether_dhost;
10423         rxchain->h_sa = ((struct ether_header *)eh)->ether_shost;
10424         rxchain->h_prio = prio;
10425         rxchain->ifidx = ifidx;
10426         rxchain->pkt_count++;
10427     } else {
10428         /* Same flow - keep chaining */
10429         PKTSETCLINK(rxchain->pkttail, pkt);
10430         rxchain->pkttail = pkt;
10431         rxchain->pkt_count++;
10432     }
10433 
10434     if ((dhd_rx_pkt_chainable(dhd, ifidx)) && (!ETHER_ISMULTI(rxchain->h_da)) &&
10435         ((((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IP)) ||
10436          (((struct ether_header *)eh)->ether_type ==
10437           HTON16(ETHER_TYPE_IPV6)))) {
10438         PKTSETCHAINED(dhd->osh, pkt);
10439         PKTCINCRCNT(rxchain->pkthead);
10440         PKTCADDLEN(rxchain->pkthead, PKTLEN(dhd->osh, pkt));
10441     } else {
10442         dhd_rxchain_commit(dhd);
10443         return;
10444     }
10445 
10446     /* If we have hit the max chain length, dispatch the chain and reset */
10447     if (rxchain->pkt_count >= DHD_PKT_CTF_MAX_CHAIN_LEN) {
10448         dhd_rxchain_commit(dhd);
10449     }
10450 }
10451 
10452 static void BCMFASTPATH dhd_rxchain_commit(dhd_pub_t *dhd)
10453 {
10454     dhd_prot_t *prot = dhd->prot;
10455     rxchain_info_t *rxchain = &prot->rxchain;
10456 
10457     if (rxchain->pkt_count == 0) {
10458         return;
10459     }
10460 
10461     /* Release the packets to dhd_linux */
10462     dhd_bus_rx_frame(dhd->bus, rxchain->pkthead, rxchain->ifidx,
10463                      rxchain->pkt_count);
10464 
10465     /* Reset the chain */
10466     dhd_rxchain_reset(rxchain);
10467 }
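/* Editor's note: dhd_rxchain_frame() above accumulates packets of the
 * same flow (SA/DA/priority/interface) into one chain and flushes it
 * when the flow changes, the packet is not chainable (multicast or
 * non-IP), or the chain reaches DHD_PKT_CTF_MAX_CHAIN_LEN. A minimal
 * sketch of that commit policy (illustrative only; EXAMPLE_MAX_CHAIN
 * stands in for DHD_PKT_CTF_MAX_CHAIN_LEN):
 */
#if 0 /* illustrative sketch, not driver code */
#define EXAMPLE_MAX_CHAIN 64
static bool must_commit_chain(bool same_flow, bool chainable, uint32 len)
{
    return (!same_flow) || (!chainable) || (len >= EXAMPLE_MAX_CHAIN);
}
#endif /* illustrative sketch */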
10468 
10469 #endif /* DHD_RX_CHAINING */
10470 
10471 #ifdef IDLE_TX_FLOW_MGMT
10472 int dhd_prot_flow_ring_resume(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
10473 {
10474     tx_idle_flowring_resume_request_t *flow_resume_rqst;
10475     msgbuf_ring_t *flow_ring;
10476     dhd_prot_t *prot = dhd->prot;
10477     unsigned long flags;
10478     uint16 alloced = 0;
10479     msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
10480 
10481     /* Fetch a pre-initialized msgbuf_ring from the flowring pool */
10482     flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
10483     if (flow_ring == NULL) {
10484         DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n",
10485                    __FUNCTION__, flow_ring_node->flowid));
10486         return BCME_NOMEM;
10487     }
10488 
10489     DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
10490 
10491     /* Request for ctrl_ring buffer space */
10492     flow_resume_rqst =
10493         (tx_idle_flowring_resume_request_t *)dhd_prot_alloc_ring_space(
10494             dhd, ctrl_ring, 1, &alloced, FALSE);
10495     if (flow_resume_rqst == NULL) {
10496         dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
10497         DHD_ERROR(("%s: Flow resume Req flowid %d - failure ring space\n",
10498                    __FUNCTION__, flow_ring_node->flowid));
10499         DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
10500         return BCME_NOMEM;
10501     }
10502 
10503     flow_ring_node->prot_info = (void *)flow_ring;
10504 
10505     /* Common msg buf hdr */
10506     flow_resume_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_RESUME;
10507     flow_resume_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
10508     flow_resume_rqst->msg.request_id = htol32(0);
10509 
10510     flow_resume_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
10511     ctrl_ring->seqnum++;
10512 
10513     flow_resume_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
10514     DHD_ERROR(("%s Send Flow resume Req flow ID %d\n", __FUNCTION__,
10515                flow_ring_node->flowid));
10516 
10517     /* Update the flow_ring's WRITE index */
10518     if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
10519         dhd_prot_dma_indx_set(dhd, flow_ring->wr, H2D_DMA_INDX_WR_UPD,
10520                               flow_ring->idx);
10521     } else if (IFRM_ACTIVE(dhd) &&
10522                (flow_ring->idx >= BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START)) {
10523         dhd_prot_dma_indx_set(
10524             dhd, flow_ring->wr, H2D_IFRM_INDX_WR_UPD,
10525             (flow_ring->idx - BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START));
10526     } else {
10527         dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr), sizeof(uint16),
10528                                 RING_WR_UPD, flow_ring->idx);
10529     }
10530 
10531     /* update control subn ring's WR index and ring doorbell to dongle */
10532     dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_resume_rqst, 1);
10533 
10534     DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
10535 
10536     return BCME_OK;
10537 } /* dhd_prot_flow_ring_resume */
10538 
10539 int dhd_prot_flow_ring_batch_suspend_request(dhd_pub_t *dhd, uint16 *ringid,
10540                                              uint16 count)
10541 {
10542     tx_idle_flowring_suspend_request_t *flow_suspend_rqst;
10543     dhd_prot_t *prot = dhd->prot;
10544     unsigned long flags;
10545     uint16 index;
10546     uint16 alloced = 0;
10547     msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
10548 
10549     DHD_RING_LOCK(ring->ring_lock, flags);
10550 
10551     /* Request for ring buffer space */
10552     flow_suspend_rqst =
10553         (tx_idle_flowring_suspend_request_t *)dhd_prot_alloc_ring_space(
10554             dhd, ring, 1, &alloced, FALSE);
10555     if (flow_suspend_rqst == NULL) {
10556         DHD_RING_UNLOCK(ring->ring_lock, flags);
10557         DHD_ERROR(
10558             ("%s: Flow suspend Req - failure ring space\n", __FUNCTION__));
10559         return BCME_NOMEM;
10560     }
10561 
10562     /* Common msg buf hdr */
10563     flow_suspend_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_SUSPEND;
10564     flow_suspend_rqst->msg.request_id = htol32(0);
10565 
10566     flow_suspend_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
10567     ring->seqnum++;
10568 
10569     /* Update flow id  info */
10570     for (index = 0; index < count; index++) {
10571         flow_suspend_rqst->ring_id[index] = ringid[index];
10572     }
10573     flow_suspend_rqst->num = count;
10574 
10575     DHD_ERROR(
10576         ("%s sending batch suspend!! count is %d\n", __FUNCTION__, count));
10577 
10578     /* update ring's WR index and ring doorbell to dongle */
10579     dhd_prot_ring_write_complete(dhd, ring, flow_suspend_rqst, 1);
10580 
10581     DHD_RING_UNLOCK(ring->ring_lock, flags);
10582 
10583     return BCME_OK;
10584 }
10585 #endif /* IDLE_TX_FLOW_MGMT */
10586 
10587 static const char *etd_trap_name(hnd_ext_tag_trap_t tag)
10588 {
10589     switch (tag) {
10590         case TAG_TRAP_SIGNATURE:
10591             return "TAG_TRAP_SIGNATURE";
10592         case TAG_TRAP_STACK:
10593             return "TAG_TRAP_STACK";
10594         case TAG_TRAP_MEMORY:
10595             return "TAG_TRAP_MEMORY";
10596         case TAG_TRAP_DEEPSLEEP:
10597             return "TAG_TRAP_DEEPSLEEP";
10598         case TAG_TRAP_PSM_WD:
10599             return "TAG_TRAP_PSM_WD";
10600         case TAG_TRAP_PHY:
10601             return "TAG_TRAP_PHY";
10602         case TAG_TRAP_BUS:
10603             return "TAG_TRAP_BUS";
10604         case TAG_TRAP_MAC_SUSP:
10605             return "TAG_TRAP_MAC_SUSP";
10606         case TAG_TRAP_BACKPLANE:
10607             return "TAG_TRAP_BACKPLANE";
10608         case TAG_TRAP_PCIE_Q:
10609             return "TAG_TRAP_PCIE_Q";
10610         case TAG_TRAP_WLC_STATE:
10611             return "TAG_TRAP_WLC_STATE";
10612         case TAG_TRAP_MAC_WAKE:
10613             return "TAG_TRAP_MAC_WAKE";
10614         case TAG_TRAP_HMAP:
10615             return "TAG_TRAP_HMAP";
10616         case TAG_TRAP_PHYTXERR_THRESH:
10617             return "TAG_TRAP_PHYTXERR_THRESH";
10618         case TAG_TRAP_HC_DATA:
10619             return "TAG_TRAP_HC_DATA";
10620         case TAG_TRAP_LOG_DATA:
10621             return "TAG_TRAP_LOG_DATA";
10622         case TAG_TRAP_CODE:
10623             return "TAG_TRAP_CODE";
10624         case TAG_TRAP_LAST:
10625         default:
10626             return "Unknown";
10627     }
10628     return "Unknown";
10629 }
10630 
10631 int dhd_prot_dump_extended_trap(dhd_pub_t *dhdp, struct bcmstrbuf *b, bool raw)
10632 {
10633     uint32 i;
10634     uint32 *ext_data;
10635     hnd_ext_trap_hdr_t *hdr;
10636     const bcm_tlv_t *tlv;
10637     const trap_t *tr;
10638     const uint32 *stack;
10639     const hnd_ext_trap_bp_err_t *bpe;
10640     uint32 raw_len;
10641 
10642     ext_data = dhdp->extended_trap_data;
10643 
10644     /* return if there is no extended trap data */
10645     if (!ext_data || !(dhdp->dongle_trap_data & D2H_DEV_EXT_TRAP_DATA)) {
10646         bcm_bprintf(b, "%d (0x%x)", dhdp->dongle_trap_data,
10647                     dhdp->dongle_trap_data);
10648         return BCME_OK;
10649     }
10650 
10651     bcm_bprintf(b, "Extended trap data\n");
10652 
10653     /* First word is original trap_data */
10654     bcm_bprintf(b, "trap_data = 0x%08x\n", *ext_data);
10655     ext_data++;
10656 
10657     /* Followed by the extended trap data header */
10658     hdr = (hnd_ext_trap_hdr_t *)ext_data;
10659     bcm_bprintf(b, "version: %d, len: %d\n", hdr->version, hdr->len);
10660 
10661     /* Dump a list of all tags found  before parsing data */
10662     bcm_bprintf(b, "\nTags Found:\n");
10663     for (i = 0; i < TAG_TRAP_LAST; i++) {
10664         tlv = bcm_parse_tlvs(hdr->data, hdr->len, i);
10665         if (tlv) {
10666             bcm_bprintf(b, "Tag: %d (%s), Length: %d\n", i, etd_trap_name(i),
10667                         tlv->len);
10668         }
10669     }
10670 
10671     if (raw) {
10672         raw_len = sizeof(hnd_ext_trap_hdr_t) + (hdr->len / 0x4) +
10673                   (hdr->len % 0x4 ? 1 : 0);
10674         for (i = 0; i < raw_len; i++) {
10675             bcm_bprintf(b, "0x%08x ", ext_data[i]);
10676             if (i % 0x4 == 0x3) {
10677                 bcm_bprintf(b, "\n");
10678             }
10679         }
10680         return BCME_OK;
10681     }
10682 
10683     /* Extract the various supported TLVs from the extended trap data */
10684     tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_CODE);
10685     if (tlv) {
10686         bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_CODE),
10687                     tlv->len);
10688         bcm_bprintf(b, "ETD TYPE: %d\n", tlv->data[0]);
10689     }
10690 
10691     tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
10692     if (tlv) {
10693         bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_SIGNATURE),
10694                     tlv->len);
10695         tr = (const trap_t *)tlv->data;
10696 
10697         bcm_bprintf(b, "TRAP %x: pc %x, lr %x, sp %x, cpsr %x, spsr %x\n",
10698                     tr->type, tr->pc, tr->r14, tr->r13, tr->cpsr, tr->spsr);
10699         bcm_bprintf(b, "  r0 %x, r1 %x, r2 %x, r3 %x, r4 %x, r5 %x, r6 %x\n",
10700                     tr->r0, tr->r1, tr->r2, tr->r3, tr->r4, tr->r5, tr->r6);
10701         bcm_bprintf(b, "  r7 %x, r8 %x, r9 %x, r10 %x, r11 %x, r12 %x\n",
10702                     tr->r7, tr->r8, tr->r9, tr->r10, tr->r11, tr->r12);
10703     }
10704 
10705     tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
10706     if (tlv) {
10707         bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_STACK),
10708                     tlv->len);
10709         stack = (const uint32 *)tlv->data;
10710         for (i = 0; i < (uint32)(tlv->len / 0x4); i++) {
10711             bcm_bprintf(b, "  0x%08x\n", *stack);
10712             stack++;
10713         }
10714     }
10715 
10716     tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BACKPLANE);
10717     if (tlv) {
10718         bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_BACKPLANE),
10719                     tlv->len);
10720         bpe = (const hnd_ext_trap_bp_err_t *)tlv->data;
10721         bcm_bprintf(b, " error: %x\n", bpe->error);
10722         bcm_bprintf(b, " coreid: %x\n", bpe->coreid);
10723         bcm_bprintf(b, " baseaddr: %x\n", bpe->baseaddr);
10724         bcm_bprintf(b, " ioctrl: %x\n", bpe->ioctrl);
10725         bcm_bprintf(b, " iostatus: %x\n", bpe->iostatus);
10726         bcm_bprintf(b, " resetctrl: %x\n", bpe->resetctrl);
10727         bcm_bprintf(b, " resetstatus: %x\n", bpe->resetstatus);
10728         bcm_bprintf(b, " errlogctrl: %x\n", bpe->errlogctrl);
10729         bcm_bprintf(b, " errlogdone: %x\n", bpe->errlogdone);
10730         bcm_bprintf(b, " errlogstatus: %x\n", bpe->errlogstatus);
10731         bcm_bprintf(b, " errlogaddrlo: %x\n", bpe->errlogaddrlo);
10732         bcm_bprintf(b, " errlogaddrhi: %x\n", bpe->errlogaddrhi);
10733         bcm_bprintf(b, " errlogid: %x\n", bpe->errlogid);
10734         bcm_bprintf(b, " errloguser: %x\n", bpe->errloguser);
10735         bcm_bprintf(b, " errlogflags: %x\n", bpe->errlogflags);
10736     }
10737 
10738     tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MEMORY);
10739     if (tlv) {
10740         const hnd_ext_trap_heap_err_t *hme;
10741 
10742         bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MEMORY),
10743                     tlv->len);
10744         hme = (const hnd_ext_trap_heap_err_t *)tlv->data;
10745         bcm_bprintf(b, " arena total: %d\n", hme->arena_total);
10746         bcm_bprintf(b, " heap free: %d\n", hme->heap_free);
10747         bcm_bprintf(b, " heap in use: %d\n", hme->heap_inuse);
10748         bcm_bprintf(b, " mf count: %d\n", hme->mf_count);
10749         bcm_bprintf(b, " stack LWM: %x\n", hme->stack_lwm);
10750 
10751         bcm_bprintf(b, " Histogram:\n");
10752         for (i = 0; i < (HEAP_HISTOGRAM_DUMP_LEN * 0x2); i += 0x2) {
10753             if (hme->heap_histogm[i] == 0xfffe) {
10754                 bcm_bprintf(b, " Others\t%d\t?\n", hme->heap_histogm[i + 1]);
10755             } else if (hme->heap_histogm[i] == 0xffff) {
10756                 bcm_bprintf(b, " >= 256K\t%d\t?\n", hme->heap_histogm[i + 1]);
10757             } else {
10758                 bcm_bprintf(b, " %d\t%d\t%d\n", hme->heap_histogm[i] << 0x2,
10759                             hme->heap_histogm[i + 1],
10760                             (hme->heap_histogm[i] << 0x2) *
10761                                 hme->heap_histogm[i + 1]);
10762             }
10763         }
10764 
10765         bcm_bprintf(b, " Max free block: %d\n", hme->max_sz_free_blk[0] << 0x2);
10766         for (i = 1; i < HEAP_MAX_SZ_BLKS_LEN; i++) {
10767             bcm_bprintf(b, " Next largest free block: %d\n",
10768                         hme->max_sz_free_blk[i] << 0x2);
10769         }
10770     }
10771 
10772     tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PCIE_Q);
10773     if (tlv) {
10774         const hnd_ext_trap_pcie_mem_err_t *pqme;
10775 
10776         bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PCIE_Q),
10777                     tlv->len);
10778         pqme = (const hnd_ext_trap_pcie_mem_err_t *)tlv->data;
10779         bcm_bprintf(b, " d2h queue len: %x\n", pqme->d2h_queue_len);
10780         bcm_bprintf(b, " d2h req queue len: %x\n", pqme->d2h_req_queue_len);
10781     }
10782 
10783     tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_WLC_STATE);
10784     if (tlv) {
10785         const hnd_ext_trap_wlc_mem_err_t *wsme;
10786 
10787         bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_WLC_STATE),
10788                     tlv->len);
10789         wsme = (const hnd_ext_trap_wlc_mem_err_t *)tlv->data;
10790         bcm_bprintf(b, " instance: %d\n", wsme->instance);
10791         bcm_bprintf(b, " associated: %d\n", wsme->associated);
10792         bcm_bprintf(b, " peer count: %d\n", wsme->peer_cnt);
10793         bcm_bprintf(b, " client count: %d\n", wsme->soft_ap_client_cnt);
10794         bcm_bprintf(b, " TX_AC_BK_FIFO: %d\n", wsme->txqueue_len[0]);
10795         bcm_bprintf(b, " TX_AC_BE_FIFO: %d\n", wsme->txqueue_len[1]);
10796         bcm_bprintf(b, " TX_AC_VI_FIFO: %d\n", wsme->txqueue_len[0x2]);
10797         bcm_bprintf(b, " TX_AC_VO_FIFO: %d\n", wsme->txqueue_len[0x3]);
10798 
10799         if (tlv->len >= (sizeof(*wsme) * 0x2)) {
10800             wsme++;
10801             bcm_bprintf(b, "\n instance: %d\n", wsme->instance);
10802             bcm_bprintf(b, " associated: %d\n", wsme->associated);
10803             bcm_bprintf(b, " peer count: %d\n", wsme->peer_cnt);
10804             bcm_bprintf(b, " client count: %d\n", wsme->soft_ap_client_cnt);
10805             bcm_bprintf(b, " TX_AC_BK_FIFO: %d\n", wsme->txqueue_len[0]);
10806             bcm_bprintf(b, " TX_AC_BE_FIFO: %d\n", wsme->txqueue_len[1]);
10807             bcm_bprintf(b, " TX_AC_VI_FIFO: %d\n", wsme->txqueue_len[0x2]);
10808             bcm_bprintf(b, " TX_AC_VO_FIFO: %d\n", wsme->txqueue_len[0x3]);
10809         }
10810     }
10811 
10812     tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PHY);
10813     if (tlv) {
10814         const hnd_ext_trap_phydbg_t *phydbg;
10815         bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PHY), tlv->len);
10816         phydbg = (const hnd_ext_trap_phydbg_t *)tlv->data;
10817         bcm_bprintf(b, " err: 0x%x\n", phydbg->err);
10818         bcm_bprintf(b, " RxFeStatus: 0x%x\n", phydbg->RxFeStatus);
10819         bcm_bprintf(b, " TxFIFOStatus0: 0x%x\n", phydbg->TxFIFOStatus0);
10820         bcm_bprintf(b, " TxFIFOStatus1: 0x%x\n", phydbg->TxFIFOStatus1);
10821         bcm_bprintf(b, " RfseqMode: 0x%x\n", phydbg->RfseqMode);
10822         bcm_bprintf(b, " RfseqStatus0: 0x%x\n", phydbg->RfseqStatus0);
10823         bcm_bprintf(b, " RfseqStatus1: 0x%x\n", phydbg->RfseqStatus1);
10824         bcm_bprintf(b, " RfseqStatus_Ocl: 0x%x\n", phydbg->RfseqStatus_Ocl);
10825         bcm_bprintf(b, " RfseqStatus_Ocl1: 0x%x\n", phydbg->RfseqStatus_Ocl1);
10826         bcm_bprintf(b, " OCLControl1: 0x%x\n", phydbg->OCLControl1);
10827         bcm_bprintf(b, " TxError: 0x%x\n", phydbg->TxError);
10828         bcm_bprintf(b, " bphyTxError: 0x%x\n", phydbg->bphyTxError);
10829         bcm_bprintf(b, " TxCCKError: 0x%x\n", phydbg->TxCCKError);
10830         bcm_bprintf(b, " TxCtrlWrd0: 0x%x\n", phydbg->TxCtrlWrd0);
10831         bcm_bprintf(b, " TxCtrlWrd1: 0x%x\n", phydbg->TxCtrlWrd1);
10832         bcm_bprintf(b, " TxCtrlWrd2: 0x%x\n", phydbg->TxCtrlWrd2);
10833         bcm_bprintf(b, " TxLsig0: 0x%x\n", phydbg->TxLsig0);
10834         bcm_bprintf(b, " TxLsig1: 0x%x\n", phydbg->TxLsig1);
10835         bcm_bprintf(b, " TxVhtSigA10: 0x%x\n", phydbg->TxVhtSigA10);
10836         bcm_bprintf(b, " TxVhtSigA11: 0x%x\n", phydbg->TxVhtSigA11);
10837         bcm_bprintf(b, " TxVhtSigA20: 0x%x\n", phydbg->TxVhtSigA20);
10838         bcm_bprintf(b, " TxVhtSigA21: 0x%x\n", phydbg->TxVhtSigA21);
10839         bcm_bprintf(b, " txPktLength: 0x%x\n", phydbg->txPktLength);
10840         bcm_bprintf(b, " txPsdulengthCtr: 0x%x\n", phydbg->txPsdulengthCtr);
10841         bcm_bprintf(b, " gpioClkControl: 0x%x\n", phydbg->gpioClkControl);
10842         bcm_bprintf(b, " gpioSel: 0x%x\n", phydbg->gpioSel);
10843         bcm_bprintf(b, " pktprocdebug: 0x%x\n", phydbg->pktprocdebug);
10844         for (i = 0; i < 0x3; i++) {
10845             bcm_bprintf(b, " gpioOut[%d]: 0x%x\n", i, phydbg->gpioOut[i]);
10846         }
10847     }
10848 
10849     tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PSM_WD);
10850     if (tlv) {
10851         const hnd_ext_trap_psmwd_t *psmwd;
10852         bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PSM_WD),
10853                     tlv->len);
10854         psmwd = (const hnd_ext_trap_psmwd_t *)tlv;
10855         bcm_bprintf(b, " version: 0x%x\n", psmwd->version);
10856         bcm_bprintf(b, " maccontrol: 0x%x\n", psmwd->i32_maccontrol);
10857         bcm_bprintf(b, " maccommand: 0x%x\n", psmwd->i32_maccommand);
10858         bcm_bprintf(b, " macintstatus: 0x%x\n", psmwd->i32_macintstatus);
10859         bcm_bprintf(b, " phydebug: 0x%x\n", psmwd->i32_phydebug);
10860         bcm_bprintf(b, " clk_ctl_st: 0x%x\n", psmwd->i32_clk_ctl_st);
10861         for (i = 0; i < 3; i++) {
10862             bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, psmwd->i32_psmdebug[i]);
10863         }
10864         bcm_bprintf(b, " gated clock en: 0x%x\n", psmwd->i16_0x1a8);
10865         bcm_bprintf(b, " Rcv Fifo Ctrl: 0x%x\n", psmwd->i16_0x406);
10866         bcm_bprintf(b, " Rx ctrl 1: 0x%x\n", psmwd->i16_0x408);
10867         bcm_bprintf(b, " Rxe Status 1: 0x%x\n", psmwd->i16_0x41a);
10868         bcm_bprintf(b, " Rxe Status 2: 0x%x\n", psmwd->i16_0x41c);
10869         bcm_bprintf(b, " rcv wrd count 0: 0x%x\n", psmwd->i16_0x424);
10870         bcm_bprintf(b, " rcv wrd count 1: 0x%x\n", psmwd->i16_0x426);
10871         bcm_bprintf(b, " RCV_LFIFO_STS: 0x%x\n", psmwd->i16_0x456);
10872         bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", psmwd->i16_0x480);
10873         bcm_bprintf(b, " PSM BRC: 0x%x\n", psmwd->i16_0x490);
10874         bcm_bprintf(b, " TXE CTRL: 0x%x\n", psmwd->i16_0x500);
10875         bcm_bprintf(b, " TXE Status: 0x%x\n", psmwd->i16_0x50e);
10876         bcm_bprintf(b, " TXE_xmtdmabusy: 0x%x\n", psmwd->i16_0x55e);
10877         bcm_bprintf(b, " TXE_XMTfifosuspflush: 0x%x\n", psmwd->i16_0x566);
10878         bcm_bprintf(b, " IFS Stat: 0x%x\n", psmwd->i16_0x690);
10879         bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", psmwd->i16_0x692);
10880         bcm_bprintf(b, " IFS_TX_DUR: 0x%x\n", psmwd->i16_0x694);
10881         bcm_bprintf(b, " Slow_CTL: 0x%x\n", psmwd->i16_0x6a0);
10882         bcm_bprintf(b, " TXE_AQM fifo Ready: 0x%x\n", psmwd->i16_0x838);
10883         bcm_bprintf(b, " Dagg ctrl: 0x%x\n", psmwd->i16_0x8c0);
10884         bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", psmwd->shm_prewds_cnt);
10885         bcm_bprintf(b, " shm_txtplufl_cnt: 0x%x\n", psmwd->shm_txtplufl_cnt);
10886         bcm_bprintf(b, " shm_txphyerr_cnt: 0x%x\n", psmwd->shm_txphyerr_cnt);
10887     }
10888 
10889     tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MAC_SUSP);
10890     if (tlv) {
10891         const hnd_ext_trap_macsusp_t *macsusp;
10892         bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MAC_SUSP),
10893                     tlv->len);
10894         macsusp = (const hnd_ext_trap_macsusp_t *)tlv;
10895         bcm_bprintf(b, " version: %d\n", macsusp->version);
10896         bcm_bprintf(b, " trap_reason: %d\n", macsusp->trap_reason);
10897         bcm_bprintf(b, " maccontrol: 0x%x\n", macsusp->i32_maccontrol);
10898         bcm_bprintf(b, " maccommand: 0x%x\n", macsusp->i32_maccommand);
10899         bcm_bprintf(b, " macintstatus: 0x%x\n", macsusp->i32_macintstatus);
10900         for (i = 0; i < 4; i++) {
10901             bcm_bprintf(b, " phydebug[%d]: 0x%x\n", i,
10902                         macsusp->i32_phydebug[i]);
10903         }
10904         for (i = 0; i < 8; i++) {
10905             bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i,
10906                         macsusp->i32_psmdebug[i]);
10907         }
10908         bcm_bprintf(b, " Rxe Status_1: 0x%x\n", macsusp->i16_0x41a);
10909         bcm_bprintf(b, " Rxe Status_2: 0x%x\n", macsusp->i16_0x41c);
10910         bcm_bprintf(b, " PSM BRC: 0x%x\n", macsusp->i16_0x490);
10911         bcm_bprintf(b, " TXE Status: 0x%x\n", macsusp->i16_0x50e);
10912         bcm_bprintf(b, " TXE xmtdmabusy: 0x%x\n", macsusp->i16_0x55e);
10913         bcm_bprintf(b, " TXE XMTfifosuspflush: 0x%x\n", macsusp->i16_0x566);
10914         bcm_bprintf(b, " IFS Stat: 0x%x\n", macsusp->i16_0x690);
10915         bcm_bprintf(b, " IFS MEDBUSY CTR: 0x%x\n", macsusp->i16_0x692);
10916         bcm_bprintf(b, " IFS TX DUR: 0x%x\n", macsusp->i16_0x694);
10917         bcm_bprintf(b, " WEP CTL: 0x%x\n", macsusp->i16_0x7c0);
10918         bcm_bprintf(b, " TXE AQM fifo Ready: 0x%x\n", macsusp->i16_0x838);
10919         bcm_bprintf(b, " MHP status: 0x%x\n", macsusp->i16_0x880);
10920         bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", macsusp->shm_prewds_cnt);
10921         bcm_bprintf(b, " shm_ucode_dbgst: 0x%x\n", macsusp->shm_ucode_dbgst);
10922     }
10923 
10924     tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MAC_WAKE);
10925     if (tlv) {
10926         const hnd_ext_trap_macenab_t *macwake;
10927         bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MAC_WAKE),
10928                     tlv->len);
10929         macwake = (const hnd_ext_trap_macenab_t *)tlv;
10930         bcm_bprintf(b, " version: 0x%x\n", macwake->version);
10931         bcm_bprintf(b, " trap_reason: 0x%x\n", macwake->trap_reason);
10932         bcm_bprintf(b, " maccontrol: 0x%x\n", macwake->i32_maccontrol);
10933         bcm_bprintf(b, " maccommand: 0x%x\n", macwake->i32_maccommand);
10934         bcm_bprintf(b, " macintstatus: 0x%x\n", macwake->i32_macintstatus);
10935         for (i = 0; i < 8; i++) {
10936             bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i,
10937                         macwake->i32_psmdebug[i]);
10938         }
10939         bcm_bprintf(b, " clk_ctl_st: 0x%x\n", macwake->i32_clk_ctl_st);
10940         bcm_bprintf(b, " powerctl: 0x%x\n", macwake->i32_powerctl);
10941         bcm_bprintf(b, " gated clock en: 0x%x\n", macwake->i16_0x1a8);
10942         bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", macwake->i16_0x480);
10943         bcm_bprintf(b, " PSM BRC: 0x%x\n", macwake->i16_0x490);
10944         bcm_bprintf(b, " TSF CTL: 0x%x\n", macwake->i16_0x600);
10945         bcm_bprintf(b, " IFS Stat: 0x%x\n", macwake->i16_0x690);
10946         bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", macwake->i16_0x692);
10947         bcm_bprintf(b, " Slow_CTL: 0x%x\n", macwake->i16_0x6a0);
10948         bcm_bprintf(b, " Slow_FRAC: 0x%x\n", macwake->i16_0x6a6);
10949         bcm_bprintf(b, " fast power up delay: 0x%x\n", macwake->i16_0x6a8);
10950         bcm_bprintf(b, " Slow_PER: 0x%x\n", macwake->i16_0x6aa);
10951         bcm_bprintf(b, " shm_ucode_dbgst: 0x%x\n", macwake->shm_ucode_dbgst);
10952     }
10953 
10954     tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BUS);
10955     if (tlv) {
10956         const bcm_dngl_pcie_hc_t *hc;
10957         bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_BUS), tlv->len);
10958         hc = (const bcm_dngl_pcie_hc_t *)tlv->data;
10959         bcm_bprintf(b, " version: 0x%x\n", hc->version);
10960         bcm_bprintf(b, " reserved: 0x%x\n", hc->reserved);
10961         bcm_bprintf(b, " pcie_err_ind_type: 0x%x\n", hc->pcie_err_ind_type);
10962         bcm_bprintf(b, " pcie_flag: 0x%x\n", hc->pcie_flag);
10963         bcm_bprintf(b, " pcie_control_reg: 0x%x\n", hc->pcie_control_reg);
10964         for (i = 0; i < HC_PCIEDEV_CONFIG_REGLIST_MAX; i++) {
10965             bcm_bprintf(b, " pcie_config_regs[%d]: 0x%x\n", i,
10966                         hc->pcie_config_regs[i]);
10967         }
10968     }
10969 
10970     tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_HMAP);
10971     if (tlv) {
10972         const pcie_hmapviolation_t *hmap;
10973         hmap = (const pcie_hmapviolation_t *)tlv->data;
10974         bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_HMAP),
10975                     tlv->len);
10976         bcm_bprintf(b, " HMAP Vio Addr Low: 0x%x\n",
10977                     hmap->hmap_violationaddr_lo);
10978         bcm_bprintf(b, " HMAP Vio Addr Hi: 0x%x\n",
10979                     hmap->hmap_violationaddr_hi);
10980         bcm_bprintf(b, " HMAP Vio Info: 0x%x\n", hmap->hmap_violation_info);
10981     }
10982 
10983     return BCME_OK;
10984 }
10985 
10986 #ifdef BCMPCIE
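/*
 * Send a host timestamp request to the dongle over the H2D control
 * submission ring. The caller's TLV payload is copied into the
 * preallocated hostts_req_buf DMA buffer; only one request may be
 * outstanding at a time (tracked by hostts_req_buf_inuse).
 */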
10987 int dhd_prot_send_host_timestamp(dhd_pub_t *dhdp, uchar *tlvs, uint16 tlv_len,
10988                                  uint16 seqnum, uint16 xt_id)
10989 {
10990     dhd_prot_t *prot = dhdp->prot;
10991     host_timestamp_msg_t *ts_req;
10992     unsigned long flags;
10993     uint16 alloced = 0;
10994     uchar *ts_tlv_buf;
10995     msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
10996 
10997     if ((tlvs == NULL) || (tlv_len == 0)) {
10998         DHD_ERROR(("%s: argument error tlv: %p, tlv_len %d\n", __FUNCTION__,
10999                    tlvs, tlv_len));
11000         return -1;
11001     }
11002 
11003     DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
11004 
11005     /* if a host TS request is already pending at the device, bail out */
11006     if (prot->hostts_req_buf_inuse == TRUE) {
11007         DHD_ERROR(("one host TS request already pending at device\n"));
11008         DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
11009         return -1;
11010     }
11011 
11012     /* Request a message slot on the control submission ring */
11013     ts_req = (host_timestamp_msg_t *)dhd_prot_alloc_ring_space(
11014         dhdp, ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced,
11015         FALSE);
11016     if (ts_req == NULL) {
11017         DHD_ERROR(
11018             ("couldn't allocate space on msgring to send host TS request\n"));
11019         DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
11020         return -1;
11021     }
11022 
11023     /* Common msg buf hdr */
11024     ts_req->msg.msg_type = MSG_TYPE_HOSTTIMSTAMP;
11025     ts_req->msg.if_id = 0;
11026     ts_req->msg.flags = ctrl_ring->current_phase;
11027     ts_req->msg.request_id = DHD_H2D_HOSTTS_REQ_PKTID;
11028 
11029     ts_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
11030     ctrl_ring->seqnum++;
11031 
11032     ts_req->xt_id = xt_id;
11033     ts_req->seqnum = seqnum;
11034     /* populate TS req buffer info */
11035     ts_req->input_data_len = htol16(tlv_len);
11036     ts_req->host_buf_addr.high = htol32(PHYSADDRHI(prot->hostts_req_buf.pa));
11037     ts_req->host_buf_addr.low = htol32(PHYSADDRLO(prot->hostts_req_buf.pa));
11038     /* copy the timestamp TLV payload into the host DMA buffer */
11039     ts_tlv_buf = (void *)prot->hostts_req_buf.va;
11040     prot->hostts_req_buf_inuse = TRUE;
11041     memcpy(ts_tlv_buf, tlvs, tlv_len);
11042 
11043     OSL_CACHE_FLUSH((void *)prot->hostts_req_buf.va, tlv_len);
11044 
11045     if (ISALIGNED(ts_tlv_buf, DMA_ALIGN_LEN) == FALSE) {
11046         DHD_ERROR(("host TS req buffer address unaligned!\n"));
11047     }
11048 
11049     DHD_CTL(("submitted host TS request: request_id %d, data_len %d, xt_id %d, "
11050              "seq %d\n",
11051              ts_req->msg.request_id, ts_req->input_data_len, ts_req->xt_id,
11052              ts_req->seqnum));
11053 
11054     /* update the ring write pointer and raise the doorbell interrupt */
11055     dhd_prot_ring_write_complete(dhdp, ctrl_ring, ts_req,
11056                                  DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
11057 
11058     DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
11059 
11060     return 0;
11061 } /* dhd_prot_send_host_timestamp */
11062 
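/*
 * Combined getter/setters: when 'set' is TRUE the flag is updated from
 * 'enable' first; in either case the current value is returned.
 */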
11063 bool dhd_prot_data_path_tx_timestamp_logging(dhd_pub_t *dhd, bool enable,
11064                                              bool set)
11065 {
11066     if (set) {
11067         dhd->prot->tx_ts_log_enabled = enable;
11068     }
11069 
11070     return dhd->prot->tx_ts_log_enabled;
11071 }
11072 
11073 bool dhd_prot_data_path_rx_timestamp_logging(dhd_pub_t *dhd, bool enable,
11074                                              bool set)
11075 {
11076     if (set) {
11077         dhd->prot->rx_ts_log_enabled = enable;
11078     }
11079 
11080     return dhd->prot->rx_ts_log_enabled;
11081 }
11082 
11083 bool dhd_prot_pkt_noretry(dhd_pub_t *dhd, bool enable, bool set)
11084 {
11085     if (set) {
11086         dhd->prot->no_retry = enable;
11087     }
11088 
11089     return dhd->prot->no_retry;
11090 }
11091 
11092 bool dhd_prot_pkt_noaggr(dhd_pub_t *dhd, bool enable, bool set)
11093 {
11094     if (set) {
11095         dhd->prot->no_aggr = enable;
11096     }
11097 
11098     return dhd->prot->no_aggr;
11099 }
11100 
11101 bool dhd_prot_pkt_fixed_rate(dhd_pub_t *dhd, bool enable, bool set)
11102 {
11103     if (set) {
11104         dhd->prot->fixed_rate = enable;
11105     }
11106 
11107     return dhd->prot->fixed_rate;
11108 }
11109 #endif /* BCMPCIE */
11110 
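/*
 * Free the host-memory buffers that hold the H2D write and D2H read
 * indices, presumably used when ring indices are exchanged via DMA
 * rather than read from device registers.
 */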
11111 void dhd_prot_dma_indx_free(dhd_pub_t *dhd)
11112 {
11113     dhd_prot_t *prot = dhd->prot;
11114 
11115     dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
11116     dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
11117 }
11118 
11119 void dhd_msgbuf_delay_post_ts_bufs(dhd_pub_t *dhd)
11120 {
11121     if (dhd->prot->max_tsbufpost > 0) {
11122         dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
11123     }
11124 }
11125 
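/*
 * Stub used when timesync support is not compiled in: a firmware
 * timestamp message is unexpected here, so just log it.
 */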
11126 static void BCMFASTPATH dhd_prot_process_fw_timestamp(dhd_pub_t *dhd, void *buf)
11127 {
11128     DHD_ERROR(("Timesync feature not compiled in, but got a FW TS message\n"));
11129 }
11130 
11131 uint16 dhd_prot_get_ioctl_trans_id(dhd_pub_t *dhdp)
11132 {
11133     return dhdp->prot->ioctl_trans_id;
11134 }
11135 
11136 int dhd_get_hscb_info(dhd_pub_t *dhd, void **va, uint32 *len)
11137 {
11138     if (!dhd->hscb_enable) {
11139         if (len) {
11140             /* prevent "Operation not supported" dhd message */
11141             *len = 0;
11142             return BCME_OK;
11143         }
11144         return BCME_UNSUPPORTED;
11145     }
11146 
11147     if (va) {
11148         *va = dhd->prot->host_scb_buf.va;
11149     }
11150     if (len) {
11151         *len = dhd->prot->host_scb_buf.len;
11152     }
11153 
11154     return BCME_OK;
11155 }
11156 
11157 #ifdef DHD_HP2P
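/*
 * HP2P (presumably high-priority peer-to-peer) tunables. The three
 * threshold accessors below follow the same set/get pattern as above;
 * values are stored as uint16 even though the interface passes uint32.
 */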
11158 uint32 dhd_prot_pkt_threshold(dhd_pub_t *dhd, bool set, uint32 val)
11159 {
11160     if (set) {
11161         dhd->pkt_thresh = (uint16)val;
11162     }
11163 
11164     val = dhd->pkt_thresh;
11165 
11166     return val;
11167 }
11168 
11169 uint32 dhd_prot_time_threshold(dhd_pub_t *dhd, bool set, uint32 val)
11170 {
11171     if (set) {
11172         dhd->time_thresh = (uint16)val;
11173     }
11174 
11175     val = dhd->time_thresh;
11176 
11177     return val;
11178 }
11179 
11180 uint32 dhd_prot_pkt_expiry(dhd_pub_t *dhd, bool set, uint32 val)
11181 {
11182     if (set) {
11183         dhd->pkt_expiry = (uint16)val;
11184     }
11185 
11186     val = dhd->pkt_expiry;
11187 
11188     return val;
11189 }
11190 
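/*
 * Enable/disable HP2P. The 'enable' argument packs two flags: the low
 * nibble enables HP2P itself and the next nibble enables it for infra
 * links. While enabled, flow priority switches to TID-based mapping.
 * The return value packs the current state in the same layout.
 */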
11191 uint8 dhd_prot_hp2p_enable(dhd_pub_t *dhd, bool set, int enable)
11192 {
11193     uint8 ret = 0;
11194     if (set) {
11195         dhd->hp2p_enable = (enable & 0xf) ? TRUE : FALSE;
11196         dhd->hp2p_infra_enable = ((enable >> 4) & 0xf) ? TRUE : FALSE;
11197 
11198         if (enable) {
11199             dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_TID_MAP);
11200         } else {
11201             dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_AC_MAP);
11202         }
11203     }
11204     ret = dhd->hp2p_infra_enable ? 1 : 0;
11205     ret <<= 4;
11206     ret |= dhd->hp2p_enable ? 1 : 0;
11207 
11208     return ret;
11209 }
11210 
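/*
 * Rx completion histogram: the low 10 bits of ts->high carry a
 * duration, scaled by HP2P_TIME_SCALE into a bin index and clamped to
 * the last bin, logging the raw timestamp on overflow.
 */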
11211 static void dhd_update_hp2p_rxstats(dhd_pub_t *dhd, host_rxbuf_cmpl_t *rxstatus)
11212 {
11213     ts_timestamp_t *ts = (ts_timestamp_t *)&rxstatus->ts;
11214     hp2p_info_t *hp2p_info;
11215     uint32 dur1;
11216 
11217     hp2p_info = &dhd->hp2p_info[0];
11218     dur1 = ((ts->high & 0x3FF) * HP2P_TIME_SCALE) / 100;
11219 
11220     if (dur1 > (MAX_RX_HIST_BIN - 1)) {
11221         dur1 = MAX_RX_HIST_BIN - 1;
11222         DHD_ERROR(("%s: 0x%x 0x%x\n", __FUNCTION__, ts->low, ts->high));
11223     }
11224 
11225     hp2p_info->rx_t0[dur1 % MAX_RX_HIST_BIN]++;
11226     return;
11227 }
11228 
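/*
 * Tx completion histograms: ts->high packs two 10-bit durations (bits
 * 0-9 and 10-19); each is scaled into its own histogram (tx_t0, tx_t1)
 * and clamped to the last bin on overflow.
 */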
11229 static void dhd_update_hp2p_txstats(dhd_pub_t *dhd, host_txbuf_cmpl_t *txstatus)
11230 {
11231     ts_timestamp_t *ts = (ts_timestamp_t *)&txstatus->ts;
11232     uint16 flowid = txstatus->compl_hdr.flow_ring_id;
11233     uint32 hp2p_flowid, dur1, dur2;
11234     hp2p_info_t *hp2p_info;
11235 
11236     hp2p_flowid =
11237         dhd->bus->max_submission_rings - dhd->bus->max_cmn_rings - flowid + 1;
11238     hp2p_info = &dhd->hp2p_info[hp2p_flowid];
11240 
11241     dur1 = ((ts->high & 0x3FF) * HP2P_TIME_SCALE) / 1000;
11242     if (dur1 > (MAX_TX_HIST_BIN - 1)) {
11243         dur1 = MAX_TX_HIST_BIN - 1;
11244         DHD_ERROR(("%s: 0x%x 0x%x\n", __FUNCTION__, ts->low, ts->high));
11245     }
11246     hp2p_info->tx_t0[dur1 % MAX_TX_HIST_BIN]++;
11247 
11248     dur2 = (((ts->high >> 10) & 0x3FF) * HP2P_TIME_SCALE) / 1000;
11249     if (dur2 > (MAX_TX_HIST_BIN - 1)) {
11250         dur2 = MAX_TX_HIST_BIN - 1;
11251         DHD_ERROR(("%s: 0x%x 0x%x\n", __FUNCTION__, ts->low, ts->high));
11252     }
11253 
11254     hp2p_info->tx_t1[dur2 % MAX_TX_HIST_BIN]++;
11255     return;
11256 }
11257 
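/*
 * hrtimer callback: fires when the HP2P burst timer expires and flushes
 * any tx descriptors still pending on the flow ring.
 */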
11258 enum hrtimer_restart dhd_hp2p_write(struct hrtimer *timer)
11259 {
11260     hp2p_info_t *hp2p_info;
11261     unsigned long flags;
11262     dhd_pub_t *dhdp;
11263 
11264 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
11265 #pragma GCC diagnostic push
11266 #pragma GCC diagnostic ignored "-Wcast-qual"
11267 #endif // endif
11268     hp2p_info = container_of(timer, hp2p_info_t, timer.timer);
11269 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
11270 #pragma GCC diagnostic pop
11271 #endif // endif
11272     dhdp = hp2p_info->dhd_pub;
11273     if (!dhdp) {
11274         goto done;
11275     }
11276 
11277     DHD_INFO(("%s: pend_item = %d flowid = %d\n", __FUNCTION__,
11278               ((msgbuf_ring_t *)hp2p_info->ring)->pend_items_count,
11279               hp2p_info->flowid));
11280 
11281     flags = dhd_os_hp2plock(dhdp);
11282 
11283     dhd_prot_txdata_write_flush(dhdp, hp2p_info->flowid);
11284     hp2p_info->hrtimer_init = FALSE;
11285     hp2p_info->num_timer_limit++;
11286 
11287     dhd_os_hp2punlock(dhdp, flags);
11288 done:
11289     return HRTIMER_NORESTART;
11290 }
11291 
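/*
 * Burst coalescing for HP2P tx: once pend_items_count reaches
 * pkt_thresh, flush immediately and cancel the timer; otherwise arm a
 * one-shot hrtimer (time_thresh appears to be in microseconds, hence
 * the * 1000 conversion to nanoseconds) so a partial burst still gets
 * flushed on timeout.
 */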
11292 static void dhd_calc_hp2p_burst(dhd_pub_t *dhd, msgbuf_ring_t *ring,
11293                                 uint16 flowid)
11294 {
11295     hp2p_info_t *hp2p_info;
11296     uint16 hp2p_flowid;
11297 
11298     hp2p_flowid =
11299         dhd->bus->max_submission_rings - dhd->bus->max_cmn_rings - flowid + 1;
11300     hp2p_info = &dhd->hp2p_info[hp2p_flowid];
11301 
11302     if (ring->pend_items_count == dhd->pkt_thresh) {
11303         dhd_prot_txdata_write_flush(dhd, flowid);
11304 
11305         hp2p_info->hrtimer_init = FALSE;
11306         hp2p_info->ring = NULL;
11307         hp2p_info->num_pkt_limit++;
11308         hrtimer_cancel(&hp2p_info->timer.timer);
11309 
11310         DHD_INFO(("%s: cancel hrtimer for flowid = %d\n"
11311                   "hp2p_flowid = %d pkt_thresh = %d\n",
11312                   __FUNCTION__, flowid, hp2p_flowid, dhd->pkt_thresh));
11313     } else {
11314         if (hp2p_info->hrtimer_init == FALSE) {
11315             hp2p_info->hrtimer_init = TRUE;
11316             hp2p_info->flowid = flowid;
11317             hp2p_info->dhd_pub = dhd;
11318             hp2p_info->ring = ring;
11319             hp2p_info->num_timer_start++;
11320 
11321             tasklet_hrtimer_start(&hp2p_info->timer,
11322                                   ktime_set(0, dhd->time_thresh * 1000),
11323                                   HRTIMER_MODE_REL);
11324 
11325             DHD_INFO(("%s: start hrtimer for flowid = %d hp2p_flowid = %d\n",
11326                       __FUNCTION__, flowid, hp2p_flowid));
11327         }
11328     }
11329     return;
11330 }
11331 
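/*
 * Stamp an HP2P tx descriptor with the host time in microseconds. The
 * metadata buffer is unused here (length 0), so its 64-bit address
 * field is repurposed to carry the timestamp; exp_time sets the packet
 * expiry.
 */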
11332 static void dhd_update_hp2p_txdesc(dhd_pub_t *dhd, host_txbuf_post_t *txdesc)
11333 {
11334     uint64 ts;
11335 
11336     ts = local_clock();
11337     do_div(ts, 1000);
11338 
11339     txdesc->metadata_buf_len = 0;
11340     txdesc->metadata_buf_addr.high_addr = htol32((ts >> 32) & 0xFFFFFFFF);
11341     txdesc->metadata_buf_addr.low_addr = htol32(ts & 0xFFFFFFFF);
11342     txdesc->exp_time = dhd->pkt_expiry;
11343 
11344     DHD_INFO(("%s: metadata_high = 0x%x metadata_low = 0x%x exp_time = %x\n",
11345               __FUNCTION__, txdesc->metadata_buf_addr.high_addr,
11346               txdesc->metadata_buf_addr.low_addr, txdesc->exp_time));
11347 
11348     return;
11349 }
11350 #endif /* DHD_HP2P */
11351 
11352 #ifdef DHD_MAP_LOGGING
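/*
 * On an SMMU fault, dump protocol debug state and the DMA mapping log,
 * then trigger a firmware memory dump tagged DUMP_TYPE_SMMU_FAULT.
 */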
11353 void dhd_prot_smmu_fault_dump(dhd_pub_t *dhdp)
11354 {
11355     dhd_prot_debug_info_print(dhdp);
11356     OSL_DMA_MAP_DUMP(dhdp->osh);
11357 #ifdef DHD_MAP_PKTID_LOGGING
11358     dhd_pktid_logging_dump(dhdp);
11359 #endif /* DHD_MAP_PKTID_LOGGING */
11360 #ifdef DHD_FW_COREDUMP
11361     dhdp->memdump_type = DUMP_TYPE_SMMU_FAULT;
11362 #ifdef DNGL_AXI_ERROR_LOGGING
11363     dhdp->memdump_enabled = DUMP_MEMFILE;
11364     dhd_bus_get_mem_dump(dhdp);
11365 #else
11366     dhdp->memdump_enabled = DUMP_MEMONLY;
11367     dhd_bus_mem_dump(dhdp);
11368 #endif /* DNGL_AXI_ERROR_LOGGING */
11369 #endif /* DHD_FW_COREDUMP */
11370 }
11371 #endif /* DHD_MAP_LOGGING */
11372