1 /**
2  * @file definition of host message ring functionality
3  * Provides type definitions and function prototypes used to link the
4  * DHD OS, bus, and protocol modules.
5  *
6  * Copyright (C) 1999-2017, Broadcom Corporation
7  *
8  *      Unless you and Broadcom execute a separate written software license
9  * agreement governing use of this software, this software is licensed to you
10  * under the terms of the GNU General Public License version 2 (the "GPL"),
11  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
12  * following added to such license:
13  *
14  *      As a special exception, the copyright holders of this software give you
15  * permission to link this software with independent modules, and to copy and
16  * distribute the resulting executable under terms of your choice, provided that
17  * you also meet, for each linked independent module, the terms and conditions of
18  * the license of that module.  An independent module is a module which is not
19  * derived from this software.  The special exception does not apply to any
20  * modifications of the software.
21  *
22  *      Notwithstanding the above, under no circumstances may you combine this
23  * software in any way with any other Broadcom software provided under a license
24  * other than the GPL, without Broadcom's express prior written consent.
25  *
26  *
27  * <<Broadcom-WL-IPTag/Open:>>
28  *
29  * $Id: dhd_msgbuf.c 704361 2017-06-13 08:50:38Z $
30  */
31 
32 
33 #include <typedefs.h>
34 #include <osl.h>
35 
36 #include <bcmutils.h>
37 #include <bcmmsgbuf.h>
38 #include <bcmendian.h>
39 
40 #include <dngl_stats.h>
41 #include <dhd.h>
42 #include <dhd_proto.h>
43 
44 #include <dhd_bus.h>
45 
46 #include <dhd_dbg.h>
47 #include <siutils.h>
48 #include <dhd_debug.h>
49 
50 #include <dhd_flowring.h>
51 
52 #include <pcie_core.h>
53 #include <bcmpcie.h>
54 #include <dhd_pcie.h>
55 #include <dhd_config.h>
56 #ifdef DHD_TIMESYNC
57 #include <dhd_timesync.h>
58 #endif /* DHD_TIMESYNC */
59 
60 #if defined(DHD_LB)
61 #include <linux/cpu.h>
62 #include <bcm_ring.h>
63 #define DHD_LB_WORKQ_SZ                (8192)
64 #define DHD_LB_WORKQ_SYNC           (16)
65 #define DHD_LB_WORK_SCHED           (DHD_LB_WORKQ_SYNC * 2)
66 #endif /* DHD_LB */
67 
68 #include <hnd_debug.h>
69 #include <hnd_armtrap.h>
70 
71 #ifdef DHD_PKT_LOGGING
72 #include <dhd_pktlog.h>
73 #endif /* DHD_PKT_LOGGING */
74 
75 extern char dhd_version[];
76 extern char fw_version[];
77 
78 /**
79  * Host configures a soft doorbell for d2h rings by specifying a 32bit host
80  * address where a value must be written. Host may also request interrupt
81  * coalescing on this soft doorbell.
82  * Use Case: Hosts with network processors may register with the dongle the
83  * network processor's thread wakeup register and a value corresponding to the
84  * core/thread context. Dongle will issue a write transaction <address,value>
85  * to the PCIE RC, which will need to be routed to the mapped register space
86  * by the host.
87  */
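/*
 * Illustrative sketch, not from the original source: conceptually, once a soft
 * doorbell <address, value> pair has been registered, the dongle signals new
 * D2H work by issuing a PCIe write that the RC routes to the mapped register:
 *
 *     *(volatile uint32 *)soft_doorbell_host_addr = soft_doorbell_value;
 *
 * soft_doorbell_host_addr and soft_doorbell_value are hypothetical names for
 * the registered pair (e.g. a network processor's thread wakeup register and
 * its core/thread context value).
 */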
88 
89 /* Dependency Check */
90 #if defined(IOCTLRESP_USE_CONSTMEM) && defined(DHD_USE_STATIC_CTRLBUF)
91 #error "DHD_USE_STATIC_CTRLBUF is NOT working with DHD_USE_OSLPKT_FOR_RESPBUF"
92 #endif /* IOCTLRESP_USE_CONSTMEM && DHD_USE_STATIC_CTRLBUF */
93 
94 #define RETRIES 2        /* # of retries to retrieve matching ioctl response */
95 
96 #define DEFAULT_RX_BUFFERS_TO_POST    256
97 #define RXBUFPOST_THRESHOLD            32
98 #define RX_BUF_BURST                32 /* Rx buffers for MSDU Data */
99 
100 #define DHD_STOP_QUEUE_THRESHOLD    200
101 #define DHD_START_QUEUE_THRESHOLD    100
102 
103 #define RX_DMA_OFFSET        8 /* Mem2mem DMA inserts an extra 8 bytes */
104 #define IOCT_RETBUF_SIZE    (RX_DMA_OFFSET + WLC_IOCTL_MAXLEN)
105 
106 /* flags for ioctl pending status */
107 #define MSGBUF_IOCTL_ACK_PENDING    (1<<0)
108 #define MSGBUF_IOCTL_RESP_PENDING    (1<<1)
109 
110 #define DMA_ALIGN_LEN        4
111 
112 #define DMA_D2H_SCRATCH_BUF_LEN    8
113 #define DMA_XFER_LEN_LIMIT    0x400000
114 
115 #ifdef BCM_HOST_BUF
116 #ifndef DMA_HOST_BUFFER_LEN
117 #define DMA_HOST_BUFFER_LEN    0x200000
118 #endif
119 #endif /* BCM_HOST_BUF */
120 
121 #define DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ        8192
122 
123 #define DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D        1
124 #define DHD_FLOWRING_MAX_EVENTBUF_POST            32
125 #define DHD_FLOWRING_MAX_IOCTLRESPBUF_POST        8
126 #define DHD_H2D_INFORING_MAX_BUF_POST            32
127 #define DHD_MAX_TSBUF_POST            8
128 
129 #define DHD_PROT_FUNCS    41
130 
131 /* Length of buffer in host for bus throughput measurement */
132 #define DHD_BUS_TPUT_BUF_LEN 2048
133 
134 #define TXP_FLUSH_NITEMS
135 
136 /* optimization to write "n" tx items at a time to ring */
137 #define TXP_FLUSH_MAX_ITEMS_FLUSH_CNT    48
138 
139 #define RING_NAME_MAX_LENGTH        24
140 #define CTRLSUB_HOSTTS_MEESAGE_SIZE        1024
141 /* Giving room before ioctl_trans_id rolls over. */
142 #define BUFFER_BEFORE_ROLLOVER 300
143 
144 struct msgbuf_ring; /* ring context for common and flow rings */
145 
146 /**
147  * PCIE D2H DMA Complete Sync Modes
148  *
149  * Firmware may interrupt the host before the D2H Mem2Mem DMA into host
150  * system memory completes. A WAR using one of 4 approaches is needed:
151  * 1. Dongle places a modulo-253 seqnum in the last word of each D2H message.
152  * 2. XOR Checksum, with epoch# in each work item. Dongle builds an XOR checksum
153  *    and writes it in the last word of each work item. Each work item also
154  *    carries a seqnum = sequence number % 253.
155  *
156  * 3. Read Barrier: Dongle does a host memory read access prior to posting an
157  *    interrupt, ensuring that D2H data transfer indeed completed.
158  * 4. Dongle DMA's all indices after producing items in the D2H ring, flushing
159  *    ring contents before the indices.
160  *
161  * Host does not sync for DMA to complete with option #3 or #4, and a noop sync
162  * callback (see dhd_prot_d2h_sync_none) may be bound.
163  *
164  * Dongle advertises the host side sync mechanism requirements.
165  */
166 
167 #define PCIE_D2H_SYNC_WAIT_TRIES    (512UL)
168 #define PCIE_D2H_SYNC_NUM_OF_STEPS  (5UL)
169 #define PCIE_D2H_SYNC_DELAY         (100UL)    /* in terms of usecs */
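/*
 * Illustrative worst-case estimate, not from the original source: with the
 * values above, a D2H sync callback busy-waits at most
 *     PCIE_D2H_SYNC_WAIT_TRIES * PCIE_D2H_SYNC_DELAY * (1 + 2 + 3 + 4 + 5)
 *     = 512 * 100us * 15 = 768,000us (~0.77s)
 * per message before declaring a livelock, ignoring the per-try cache
 * invalidate and cpu-relax overhead (see dhd_prot_d2h_sync_seqnum/xorcsum).
 */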
170 
171 /**
172  * Custom callback attached based upon the D2H DMA sync mode advertised by the dongle.
173  *
174  * On success: return cmn_msg_hdr_t::msg_type
175  * On failure: return 0 (invalid msg_type)
176  */
177 typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
178                                 volatile cmn_msg_hdr_t *msg, int msglen);
179 
180 /*
181  * +----------------------------------------------------------------------------
182  *
183  * RingIds and FlowId are not equivalent as ringids include D2H rings whereas
184  * flowids do not.
185  *
186  * Dongle advertises the max H2D rings as max_sub_queues = 'N', which includes
187  * the H2D common rings as well as the (N-BCMPCIE_H2D_COMMON_MSGRINGS) flowrings
188  *
189  * Here is a sample mapping (based on PCIE Full Dongle Rev5) where,
190  *  BCMPCIE_H2D_COMMON_MSGRINGS = 2, i.e. 2 H2D common rings,
191  *  BCMPCIE_COMMON_MSGRINGS     = 5, i.e. including 3 D2H common rings.
192  *
193  *  H2D Control  Submit   RingId = 0        FlowId = 0 reserved never allocated
194  *  H2D RxPost   Submit   RingId = 1        FlowId = 1 reserved never allocated
195  *
196  *  D2H Control  Complete RingId = 2
197  *  D2H Transmit Complete RingId = 3
198  *  D2H Receive  Complete RingId = 4
199  *
200  *  H2D TxPost   FLOWRING RingId = 5         FlowId = 2     (1st flowring)
201  *  H2D TxPost   FLOWRING RingId = 6         FlowId = 3     (2nd flowring)
202  *  H2D TxPost   FLOWRING RingId = 5 + (N-1) FlowId = (N-1) (Nth flowring)
203  *
204  * When TxPost FlowId(s) are allocated, the FlowIds [0..FLOWID_RESERVED) are
205  * unused, where FLOWID_RESERVED is BCMPCIE_H2D_COMMON_MSGRINGS.
206  *
207  * Example: when a system supports 4 bc/mc and 128 uc flowrings, with
208  * BCMPCIE_H2D_COMMON_MSGRINGS = 2 and BCMPCIE_COMMON_MSGRINGS = 5, the
209  * FlowId values would be in the range [2..133] and the corresponding
210  * RingId values would be in the range [5..136].
211  *
212  * The flowId allocator may choose to allocate FlowIds:
213  *   bc/mc (per virtual interface) in one consecutive range [2..(2+VIFS))
214  *   X# of uc flowids in consecutive ranges (per station Id), where X is the
215  *   packet's access category (e.g. 4 uc flowids per station).
216  *
217  * CAUTION:
218  * When DMA indices array feature is used, RingId=5, corresponding to the 0th
219  * FLOWRING, will actually use the FlowId as the index into the H2D DMA indices array,
220  * since the FlowId truly represents the index in the H2D DMA indices array.
221  *
222  * Likewise, in the D2H direction, the RingId - BCMPCIE_H2D_COMMON_MSGRINGS,
223  * will represent the index in the D2H DMA indices array.
224  *
225  * +----------------------------------------------------------------------------
226  */
227 
228 /* First TxPost Flowring Id */
229 #define DHD_FLOWRING_START_FLOWID   BCMPCIE_H2D_COMMON_MSGRINGS
230 
231 /* Determine whether a ringid belongs to a TxPost flowring */
232 #define DHD_IS_FLOWRING(ringid, max_flow_rings) \
233     ((ringid) >= BCMPCIE_COMMON_MSGRINGS && \
234     (ringid) < ((max_flow_rings) + BCMPCIE_COMMON_MSGRINGS))
235 
236 /* Convert a H2D TxPost FlowId to a MsgBuf RingId */
237 #define DHD_FLOWID_TO_RINGID(flowid) \
238     (BCMPCIE_COMMON_MSGRINGS + ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS))
239 
240 /* Convert a MsgBuf RingId to a H2D TxPost FlowId */
241 #define DHD_RINGID_TO_FLOWID(ringid) \
242     (BCMPCIE_H2D_COMMON_MSGRINGS + ((ringid) - BCMPCIE_COMMON_MSGRINGS))
243 
244 /* Convert a H2D MsgBuf RingId to an offset index into the H2D DMA indices array
245  * This may be used for the H2D DMA WR index array or H2D DMA RD index array or
246  * any array of H2D rings.
247  */
248 #define DHD_H2D_RING_OFFSET(ringid) \
249     (((ringid) >= BCMPCIE_COMMON_MSGRINGS) ? DHD_RINGID_TO_FLOWID(ringid) : (ringid))
250 
251 /* Convert a H2D MsgBuf Flowring Id to an offset index into the H2D DMA indices array
252  * This may be used for IFRM.
253  */
254 #define DHD_H2D_FRM_FLOW_RING_OFFSET(ringid) \
255     ((ringid) - BCMPCIE_COMMON_MSGRINGS)
256 
257 /* Convert a D2H MsgBuf RingId to an offset index into the D2H DMA indices array
258  * This may be used for the D2H DMA WR index array or D2H DMA RD index array or
259  * any array of D2H rings.
260  * d2h debug ring is located at the end, i.e. after all the tx flow rings and h2d debug ring
261  * max_h2d_rings: total number of h2d rings
262  */
263 #define DHD_D2H_RING_OFFSET(ringid, max_h2d_rings) \
264     ((ringid) > (max_h2d_rings) ? \
265         ((ringid) - max_h2d_rings) : \
266         ((ringid) - BCMPCIE_H2D_COMMON_MSGRINGS))
267 
268 /* Convert a D2H DMA Indices Offset to a RingId */
269 #define DHD_D2H_RINGID(offset) \
270     ((offset) + BCMPCIE_H2D_COMMON_MSGRINGS)
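/*
 * Illustrative only, not from the original source: with the sample Rev5 layout
 * described above (BCMPCIE_H2D_COMMON_MSGRINGS == 2, BCMPCIE_COMMON_MSGRINGS == 5),
 * the conversion macros evaluate as follows:
 *
 *     DHD_FLOWID_TO_RINGID(2)  == 5    1st TxPost flowring
 *     DHD_RINGID_TO_FLOWID(5)  == 2
 *     DHD_H2D_RING_OFFSET(1)   == 1    H2D RxPost common ring
 *     DHD_H2D_RING_OFFSET(5)   == 2    0th flowring, indexed by its FlowId
 *     DHD_D2H_RINGID(0)        == 2    D2H Control Complete ring
 */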
271 
272 
273 #define DHD_DMAH_NULL      ((void*)NULL)
274 
275 /*
276  * Pad a DMA-able buffer by an additional cacheline. If the end of the DMA-able
277  * buffer does not occupy the entire cacheline, and another object is placed
278  * following the DMA-able buffer, data corruption may occur if the DMA-able
279  * buffer is used for DMAing into (e.g. the D2H direction), when HW cache
280  * coherency is not available.
281  */
282 #if defined(L1_CACHE_BYTES)
283 #define DHD_DMA_PAD        (L1_CACHE_BYTES)
284 #else
285 #define DHD_DMA_PAD        (128)
286 #endif
287 
288 /* Used in loopback tests */
289 typedef struct dhd_dmaxfer {
290     dhd_dma_buf_t srcmem;
291     dhd_dma_buf_t dstmem;
292     uint32        srcdelay;
293     uint32        destdelay;
294     uint32        len;
295     bool          in_progress;
296     uint64        start_usec;
297     uint32          d11_lpbk;
298 } dhd_dmaxfer_t;
299 
300 /**
301  * msgbuf_ring : This object manages the host side ring that includes a DMA-able
302  * buffer, the WR and RD indices, ring parameters such as the max number of
303  * items and the length of each item, and other miscellaneous runtime state.
304  * A msgbuf_ring may be used to represent a H2D or D2H common ring or a
305  * H2D TxPost ring as specified in the PCIE FullDongle Spec.
306  * Ring parameters are conveyed to the dongle, which maintains its own peer end
307  * ring state. Depending on whether the DMA Indices feature is supported, the
308  * host will update the WR/RD index in the DMA indices array in host memory or
309  * directly in dongle memory.
310  */
311 typedef struct msgbuf_ring {
312     bool           inited;
313     uint16         idx;       /* ring id */
314     uint16         rd;        /* read index */
315     uint16         curr_rd;   /* read index for debug */
316     uint16         wr;        /* write index */
317     uint16         max_items; /* maximum number of items in ring */
318     uint16         item_len;  /* length of each item in the ring */
319     sh_addr_t      base_addr; /* LITTLE ENDIAN formatted: base address */
320     dhd_dma_buf_t  dma_buf;   /* DMA-able buffer: pa, va, len, dmah, secdma */
321     uint32         seqnum;    /* next expected item's sequence number */
322 #ifdef TXP_FLUSH_NITEMS
323     void           *start_addr;
324     /* # of messages on ring not yet announced to dongle */
325     uint16         pend_items_count;
326 #endif /* TXP_FLUSH_NITEMS */
327 
328     uint8   ring_type;
329     uint8   n_completion_ids;
330     bool    create_pending;
331     uint16  create_req_id;
332     uint8   current_phase;
333     uint16    compeltion_ring_ids[MAX_COMPLETION_RING_IDS_ASSOCIATED];
334     uchar        name[RING_NAME_MAX_LENGTH];
335     uint32        ring_mem_allocated;
336 } msgbuf_ring_t;
337 
338 #define DHD_RING_BGN_VA(ring)           ((ring)->dma_buf.va)
339 #define DHD_RING_END_VA(ring) \
340     ((uint8 *)(DHD_RING_BGN_VA((ring))) + \
341      (((ring)->max_items - 1) * (ring)->item_len))
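/*
 * Illustrative note, not from the original source: the address of item 'i' in
 * a ring is
 *     (uint8 *)DHD_RING_BGN_VA(ring) + (i * (ring)->item_len)
 * so DHD_RING_END_VA() points at the start of the last item in the ring, not
 * one past the end of the DMA-able buffer.
 */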
342 
343 
344 
345 /* This can be overridden by a module parameter defined in dhd_linux.c
346  * or by the dhd iovar h2d_max_txpost.
347  */
348 int h2d_max_txpost = H2DRING_TXPOST_MAX_ITEM;
349 
350 /** DHD protocol handle. Is an opaque type to other DHD software layers. */
351 typedef struct dhd_prot {
352     osl_t *osh;        /* OSL handle */
353     uint16 rxbufpost;
354     uint16 max_rxbufpost;
355     uint16 max_eventbufpost;
356     uint16 max_ioctlrespbufpost;
357     uint16 max_tsbufpost;
358     uint16 max_infobufpost;
359     uint16 infobufpost;
360     uint16 cur_event_bufs_posted;
361     uint16 cur_ioctlresp_bufs_posted;
362     uint16 cur_ts_bufs_posted;
363 
364     /* Flow control mechanism based on active transmits pending */
365     uint16 active_tx_count; /* increments on every packet tx, and decrements on tx_status */
366     uint16 h2d_max_txpost;
367     uint16 txp_threshold;  /* optimization to write "n" tx items at a time to ring */
368 
369     /* MsgBuf Ring info: has a dhd_dma_buf that is dynamically allocated */
370     msgbuf_ring_t h2dring_ctrl_subn; /* H2D ctrl message submission ring */
371     msgbuf_ring_t h2dring_rxp_subn; /* H2D RxBuf post ring */
372     msgbuf_ring_t d2hring_ctrl_cpln; /* D2H ctrl completion ring */
373     msgbuf_ring_t d2hring_tx_cpln; /* D2H Tx complete message ring */
374     msgbuf_ring_t d2hring_rx_cpln; /* D2H Rx complete message ring */
375     msgbuf_ring_t *h2dring_info_subn; /* H2D info submission ring */
376     msgbuf_ring_t *d2hring_info_cpln; /* D2H info completion ring */
377 
378     msgbuf_ring_t *h2d_flowrings_pool; /* Pool of preallocated flowrings */
379     dhd_dma_buf_t flowrings_dma_buf; /* Contiguous DMA buffer for flowrings */
380     uint16        h2d_rings_total; /* total H2D (common rings + flowrings) */
381 
382     uint32        rx_dataoffset;
383 
384     dhd_mb_ring_t    mb_ring_fn;    /* called when dongle needs to be notified of new msg */
385     dhd_mb_ring_2_t    mb_2_ring_fn;    /* called when dongle needs to be notified of new msg */
386 
387     /* ioctl related resources */
388     uint8 ioctl_state;
389     int16 ioctl_status;        /* status returned from dongle */
390     uint16 ioctl_resplen;
391     dhd_ioctl_recieved_status_t ioctl_received;
392     uint curr_ioctl_cmd;
393     dhd_dma_buf_t    retbuf;        /* For holding ioctl response */
394     dhd_dma_buf_t    ioctbuf;    /* For holding ioctl request */
395 
396     dhd_dma_buf_t    d2h_dma_scratch_buf;    /* For holding d2h scratch */
397 
398     /* DMA-able arrays for holding WR and RD indices */
399     uint32          rw_index_sz; /* Size of a RD or WR index in dongle */
400     dhd_dma_buf_t   h2d_dma_indx_wr_buf;    /* Array of H2D WR indices */
401     dhd_dma_buf_t    h2d_dma_indx_rd_buf;    /* Array of H2D RD indices */
402     dhd_dma_buf_t    d2h_dma_indx_wr_buf;    /* Array of D2H WR indices */
403     dhd_dma_buf_t    d2h_dma_indx_rd_buf;    /* Array of D2H RD indices */
404     dhd_dma_buf_t h2d_ifrm_indx_wr_buf;    /* Array of H2D WR indices for ifrm */
405 
406     dhd_dma_buf_t    host_bus_throughput_buf; /* bus throughput measure buffer */
407 
408     dhd_dma_buf_t   *flowring_buf;    /* pool of flow ring buf */
409     uint32            flowring_num;
410 
411     d2h_sync_cb_t d2h_sync_cb; /* Sync on D2H DMA done: SEQNUM or XORCSUM */
412     ulong d2h_sync_wait_max; /* max number of wait loops to receive one msg */
413     ulong d2h_sync_wait_tot; /* total wait loops */
414 
415     dhd_dmaxfer_t    dmaxfer; /* for test/DMA loopback */
416 
417     uint16        ioctl_seq_no;
418     uint16        data_seq_no;
419     uint16        ioctl_trans_id;
420     void        *pktid_ctrl_map; /* a pktid maps to a packet and its metadata */
421     void        *pktid_rx_map;    /* pktid map for rx path */
422     void        *pktid_tx_map;    /* pktid map for tx path */
423     void        *rx_lock;    /* rx pktid map and rings access protection */
424     bool        metadata_dbg;
425     void        *pktid_map_handle_ioctl;
426 
427     /* Applications/utilities can read tx and rx metadata using IOVARs */
428     uint16        rx_metadata_offset;
429     uint16        tx_metadata_offset;
430 
431 
432 #if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
433     /* Host's soft doorbell configuration */
434     bcmpcie_soft_doorbell_t soft_doorbell[BCMPCIE_D2H_COMMON_MSGRINGS];
435 #endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
436 
437     /* Work Queues to be used by the producer and the consumer, and the threshold
438      * at which the WRITE index must be synced to the consumer's workq
439      */
440 #if defined(DHD_LB_TXC)
441     uint32 tx_compl_prod_sync ____cacheline_aligned;
442     bcm_workq_t tx_compl_prod, tx_compl_cons;
443 #endif /* DHD_LB_TXC */
444 #if defined(DHD_LB_RXC)
445     uint32 rx_compl_prod_sync ____cacheline_aligned;
446     bcm_workq_t rx_compl_prod, rx_compl_cons;
447 #endif /* DHD_LB_RXC */
448 
449     dhd_dma_buf_t    fw_trap_buf; /* firmware trap buffer */
450 
451     uint32  host_ipc_version; /* Host supported IPC rev */
452     uint32  device_ipc_version; /* FW supported IPC rev */
453     uint32  active_ipc_version; /* Negotiated IPC rev in use */
454     dhd_dma_buf_t   hostts_req_buf; /* For holding host timestamp request buf */
455     bool    hostts_req_buf_inuse;
456     bool    rx_ts_log_enabled;
457     bool    tx_ts_log_enabled;
458 } dhd_prot_t;
459 
460 extern void dhd_schedule_dmaxfer_free(dhd_pub_t* dhdp, dmaxref_mem_map_t *dmmap);
461 
462 static atomic_t dhd_msgbuf_rxbuf_post_event_bufs_running = ATOMIC_INIT(0);
463 
464 /* Convert a dmaaddr_t to a base_addr with htol operations */
465 static INLINE void dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa);
466 
467 /* APIs for managing a DMA-able buffer */
468 static int  dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
469 static int  dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len);
470 static void dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
471 static void dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
472 
473 /* msgbuf ring management */
474 static int dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring,
475     const char *name, uint16 max_items, uint16 len_item, uint16 ringid);
476 static void dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring);
477 static void dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring);
478 static void dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring);
479 static void dhd_prot_process_fw_timestamp(dhd_pub_t *dhd, void* buf);
480 
481 /* Pool of pre-allocated msgbuf_ring_t with DMA-able buffers for Flowrings */
482 static int  dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd);
483 static void dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd);
484 static void dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd);
485 
486 /* Fetch and Release a flowring msgbuf_ring from flowring  pool */
487 static msgbuf_ring_t *dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd,
488     uint16 flowid);
489 /* see also dhd_prot_flowrings_pool_release() in dhd_prot.h */
490 
491 /* Producer: Allocate space in a msgbuf ring */
492 static void* dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring,
493     uint16 nitems, uint16 *alloced, bool exactly_nitems);
494 static void* dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems,
495     uint16 *alloced, bool exactly_nitems);
496 
497 /* Consumer: Determine the location where the next message may be consumed */
498 static uint8* dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring,
499     uint32 *available_len);
500 
501 /* Producer (WR index update) or Consumer (RD index update) indication */
502 static void dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t *ring,
503     void *p, uint16 len);
504 static void dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t *ring);
505 
506 static INLINE int dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
507     dhd_dma_buf_t *dma_buf, uint32 bufsz);
508 
509 /* Set/Get a RD or WR index in the array of indices */
510 /* See also: dhd_prot_dma_indx_init() */
511 void dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type,
512     uint16 ringid);
513 static uint16 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid);
514 
515 /* Locate a packet given a pktid */
516 static INLINE void *dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype,
517     bool free_pktid);
518 /* Locate a packet given a PktId and free it. */
519 static INLINE void dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send);
520 
521 static int dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
522     void *buf, uint len, uint8 action);
523 static int dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
524     void *buf, uint len, uint8 action);
525 static int dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf);
526 static int dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd,
527     void *buf, int ifidx);
528 
529 /* Post buffers for Rx, control ioctl response and events */
530 static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msgid, uint32 max_to_post);
531 static void dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *pub);
532 static void dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *pub);
533 static void dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid);
534 static int dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid);
535 static int dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *pub);
536 
537 static void dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt);
538 
539 
540 /* D2H Message handling */
541 static int dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len);
542 
543 /* D2H Message handlers */
544 static void dhd_prot_noop(dhd_pub_t *dhd, void *msg);
545 static void dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg);
546 static void dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg);
547 static void dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg);
548 static void dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg);
549 static void dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg);
550 static void dhd_prot_event_process(dhd_pub_t *dhd, void *msg);
551 
552 /* Loopback test with dongle */
553 static void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dma);
554 static int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, uint srcdelay,
555     uint destdelay, dhd_dmaxfer_t *dma);
556 static void dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg);
557 
558 /* Flowring management communication with dongle */
559 static void dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg);
560 static void dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg);
561 static void dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg);
562 static void dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg);
563 static void dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg);
564 
565 /* Monitor Mode */
566 #ifdef WL_MONITOR
567 extern bool dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx);
568 extern void dhd_rx_mon_pkt(dhd_pub_t *dhdp, host_rxbuf_cmpl_t* msg, void *pkt, int ifidx);
569 #endif /* WL_MONITOR */
570 
571 /* Configure a soft doorbell per D2H ring */
572 static void dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd);
573 static void dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg);
574 static void dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf);
575 static void dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf);
576 static void dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf);
577 static void dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf);
578 static void dhd_prot_detach_info_rings(dhd_pub_t *dhd);
579 static void dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf);
580 
581 typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void *msg);
582 
583 /** callback functions for messages generated by the dongle */
584 #define MSG_TYPE_INVALID 0
585 
586 static dhd_msgbuf_func_t table_lookup[DHD_PROT_FUNCS] = {
587     dhd_prot_noop, /* 0 is MSG_TYPE_INVALID */
588     dhd_prot_genstatus_process, /* MSG_TYPE_GEN_STATUS */
589     dhd_prot_ringstatus_process, /* MSG_TYPE_RING_STATUS */
590     NULL,
591     dhd_prot_flow_ring_create_response_process, /* MSG_TYPE_FLOW_RING_CREATE_CMPLT */
592     NULL,
593     dhd_prot_flow_ring_delete_response_process, /* MSG_TYPE_FLOW_RING_DELETE_CMPLT */
594     NULL,
595     dhd_prot_flow_ring_flush_response_process, /* MSG_TYPE_FLOW_RING_FLUSH_CMPLT */
596     NULL,
597     dhd_prot_ioctack_process, /* MSG_TYPE_IOCTLPTR_REQ_ACK */
598     NULL,
599     dhd_prot_ioctcmplt_process, /* MSG_TYPE_IOCTL_CMPLT */
600     NULL,
601     dhd_prot_event_process, /* MSG_TYPE_WL_EVENT */
602     NULL,
603     dhd_prot_txstatus_process, /* MSG_TYPE_TX_STATUS */
604     NULL,
605     NULL,    /* MSG_TYPE_RX_CMPLT use dedicated handler */
606     NULL,
607     dhd_msgbuf_dmaxfer_process, /* MSG_TYPE_LPBK_DMAXFER_CMPLT */
608     NULL, /* MSG_TYPE_FLOW_RING_RESUME */
609     dhd_prot_process_flow_ring_resume_response, /* MSG_TYPE_FLOW_RING_RESUME_CMPLT */
610     NULL, /* MSG_TYPE_FLOW_RING_SUSPEND */
611     dhd_prot_process_flow_ring_suspend_response, /* MSG_TYPE_FLOW_RING_SUSPEND_CMPLT */
612     NULL, /* MSG_TYPE_INFO_BUF_POST */
613     dhd_prot_process_infobuf_complete, /* MSG_TYPE_INFO_BUF_CMPLT */
614     NULL, /* MSG_TYPE_H2D_RING_CREATE */
615     NULL, /* MSG_TYPE_D2H_RING_CREATE */
616     dhd_prot_process_h2d_ring_create_complete, /* MSG_TYPE_H2D_RING_CREATE_CMPLT */
617     dhd_prot_process_d2h_ring_create_complete, /* MSG_TYPE_D2H_RING_CREATE_CMPLT */
618     NULL, /* MSG_TYPE_H2D_RING_CONFIG */
619     NULL, /* MSG_TYPE_D2H_RING_CONFIG */
620     NULL, /* MSG_TYPE_H2D_RING_CONFIG_CMPLT */
621     dhd_prot_process_d2h_ring_config_complete, /* MSG_TYPE_D2H_RING_CONFIG_CMPLT */
622     NULL, /* MSG_TYPE_H2D_MAILBOX_DATA */
623     dhd_prot_process_d2h_mb_data, /* MSG_TYPE_D2H_MAILBOX_DATA */
624     NULL,    /* MSG_TYPE_TIMSTAMP_BUFPOST */
625     NULL,    /* MSG_TYPE_HOSTTIMSTAMP */
626     dhd_prot_process_d2h_host_ts_complete,    /* MSG_TYPE_HOSTTIMSTAMP_CMPLT */
627     dhd_prot_process_fw_timestamp,    /* MSG_TYPE_FIRMWARE_TIMESTAMP */
628 };
629 
630 
631 #ifdef DHD_RX_CHAINING
632 
633 #define PKT_CTF_CHAINABLE(dhd, ifidx, evh, prio, h_sa, h_da, h_prio) \
634     (dhd_wet_chainable(dhd) && \
635     dhd_rx_pkt_chainable((dhd), (ifidx)) && \
636     !ETHER_ISNULLDEST(((struct ether_header *)(evh))->ether_dhost) && \
637     !ETHER_ISMULTI(((struct ether_header *)(evh))->ether_dhost) && \
638     !eacmp((h_da), ((struct ether_header *)(evh))->ether_dhost) && \
639     !eacmp((h_sa), ((struct ether_header *)(evh))->ether_shost) && \
640     ((h_prio) == (prio)) && (dhd_ctf_hotbrc_check((dhd), (evh), (ifidx))) && \
641     ((((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IP)) || \
642     (((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IPV6))))
643 
644 static INLINE void BCMFASTPATH dhd_rxchain_reset(rxchain_info_t *rxchain);
645 static void BCMFASTPATH dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx);
646 static void BCMFASTPATH dhd_rxchain_commit(dhd_pub_t *dhd);
647 
648 #define DHD_PKT_CTF_MAX_CHAIN_LEN    64
649 
650 #endif /* DHD_RX_CHAINING */
651 
652 static void dhd_prot_h2d_sync_init(dhd_pub_t *dhd);
653 
654 /**
655  * D2H DMA to completion callback handlers. Based on the mode advertised by the
656  * dongle through the PCIE shared region, the appropriate callback will be
657  * registered in the proto layer to be invoked prior to processing any message
658  * from a D2H DMA ring. If the dongle uses a read barrier or another mode that
659  * does not require host participation, then a noop callback handler will be
660  * bound that simply returns the msg_type.
661  */
662 static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring,
663                                        uint32 tries, volatile uchar *msg, int msglen);
664 static uint8 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
665                                       volatile cmn_msg_hdr_t *msg, int msglen);
666 static uint8 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
667                                        volatile cmn_msg_hdr_t *msg, int msglen);
668 static uint8 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
669                                     volatile cmn_msg_hdr_t *msg, int msglen);
670 static void dhd_prot_d2h_sync_init(dhd_pub_t *dhd);
671 static int dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create);
672 static int dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create);
673 static uint16 dhd_get_max_flow_rings(dhd_pub_t *dhd);
674 
675 bool
676 dhd_prot_is_cmpl_ring_empty(dhd_pub_t *dhd, void *prot_info)
677 {
678     msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)prot_info;
679     uint16 rd, wr;
680     bool ret;
681 
682     if (dhd->dma_d2h_ring_upd_support) {
683         wr = flow_ring->wr;
684     } else {
685         dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
686     }
687     if (dhd->dma_h2d_ring_upd_support) {
688         rd = flow_ring->rd;
689     } else {
690         dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
691     }
692     ret = (wr == rd) ? TRUE : FALSE;
693     return ret;
694 }
695 uint16
696 dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd)
697 {
698     return (uint16)h2d_max_txpost;
699 }
700 void
701 dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost)
702 {
703     h2d_max_txpost = max_txpost;
704 }
705 /**
706  * dhd_prot_d2h_sync_livelock - when the host determines that a DMA transfer has
707  * not completed, a livelock condition occurs. The host averts this livelock by
708  * dropping the message and moving on to the next one. The dropped message can
709  * lead to a packet leak, or to something far worse if it happens to be a
710  * control response.
711  * Here we log this condition. One may choose to reboot the dongle.
712  *
713  */
714 static void
715 dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring, uint32 tries,
716                            volatile uchar *msg, int msglen)
717 {
718     uint32 ring_seqnum = ring->seqnum;
719     DHD_ERROR((
720         "LIVELOCK DHD<%p> ring<%s> msg_seqnum<%u> ring_seqnum<%u:%u> tries<%u> max<%lu>"
721         " tot<%lu> dma_buf va<%p> msg<%p> curr_rd<%d>\n",
722         dhd, ring->name, msg_seqnum, ring_seqnum, ring_seqnum% D2H_EPOCH_MODULO, tries,
723         dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot,
724         ring->dma_buf.va, msg, ring->curr_rd));
725     prhex("D2H MsgBuf Failure", (volatile uchar *)msg, msglen);
726 
727     dhd_bus_dump_console_buffer(dhd->bus);
728     dhd_prot_debug_info_print(dhd);
729 
730 #ifdef DHD_FW_COREDUMP
731     if (dhd->memdump_enabled) {
732         /* collect core dump */
733         dhd->memdump_type = DUMP_TYPE_BY_LIVELOCK;
734         dhd_bus_mem_dump(dhd);
735     }
736 #endif /* DHD_FW_COREDUMP */
737 
738     dhd_schedule_reset(dhd);
739 
740 #ifdef SUPPORT_LINKDOWN_RECOVERY
741 #ifdef CONFIG_ARCH_MSM
742     dhd->bus->no_cfg_restore = 1;
743 #endif /* CONFIG_ARCH_MSM */
744     dhd->hang_reason = HANG_REASON_MSGBUF_LIVELOCK;
745     dhd_os_send_hang_message(dhd);
746 #endif /* SUPPORT_LINKDOWN_RECOVERY */
747 }
748 
749 /**
750  * dhd_prot_d2h_sync_seqnum - Sync on a D2H DMA completion using the SEQNUM
751  * mode. Sequence number is always in the last word of a message.
752  */
753 static uint8 BCMFASTPATH
754 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
755                          volatile cmn_msg_hdr_t *msg, int msglen)
756 {
757     uint32 tries;
758     uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
759     int num_words = msglen / sizeof(uint32); /* num of 32bit words */
760     volatile uint32 *marker = (volatile uint32 *)msg + (num_words - 1); /* last word */
761     dhd_prot_t *prot = dhd->prot;
762     uint32 msg_seqnum;
763     uint32 step = 0;
764     uint32 delay = PCIE_D2H_SYNC_DELAY;
765     uint32 total_tries = 0;
766 
767     ASSERT(msglen == ring->item_len);
768 
769     BCM_REFERENCE(delay);
770     /*
771      * For retries we use a stepper algorithm.
772      * We see that every time the dongle comes out of the D3
773      * Cold state, the first D2H mem2mem DMA takes more time to
774      * complete, leading to livelock issues.
775      *
776      * Case 1 - Apart from the Host CPU, some other bus master is
777      * accessing the DDR port, probably a page close to the ring,
778      * so the PCIe bridge does not get a chance to update the memory.
779      * Solution - Increase the number of tries.
780      *
781      * Case 2 - The delay given by the Host CPU is not
782      * sufficient for the PCIe RC to start its work.
783      * In this case the breathing time given by
784      * the Host CPU is not sufficient.
785      * Solution: Increase the delay in a stepper fashion.
786      * This is done to ensure that no unwanted extra delay
787      * is introduced under normal conditions.
788      */
789     for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
790         for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
791             msg_seqnum = *marker;
792             if (ltoh32(msg_seqnum) == ring_seqnum) { /* dma upto last word done */
793                 ring->seqnum++; /* next expected sequence number */
794                 goto dma_completed;
795             }
796 
797             total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
798 
799             if (total_tries > prot->d2h_sync_wait_max)
800                 prot->d2h_sync_wait_max = total_tries;
801 
802             OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
803             OSL_CPU_RELAX(); /* CPU relax for msg_seqnum  value to update */
804             OSL_DELAY(delay * step); /* Add stepper delay */
805         } /* for PCIE_D2H_SYNC_WAIT_TRIES */
806     } /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
807 
808     dhd_prot_d2h_sync_livelock(dhd, msg_seqnum, ring, total_tries,
809         (volatile uchar *) msg, msglen);
810 
811     ring->seqnum++; /* skip this message ... leak of a pktid */
812     return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
813 
814 dma_completed:
815 
816     prot->d2h_sync_wait_tot += tries;
817     return msg->msg_type;
818 }
819 
820 /**
821  * dhd_prot_d2h_sync_xorcsum - Sync on a D2H DMA completion using the XORCSUM
822  * mode. The xorcsum is placed in the last word of a message. Dongle will also
823  * place a seqnum in the epoch field of the cmn_msg_hdr.
824  */
825 static uint8 BCMFASTPATH
826 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
827                           volatile cmn_msg_hdr_t *msg, int msglen)
828 {
829     uint32 tries;
830     uint32 prot_checksum = 0; /* computed checksum */
831     int num_words = msglen / sizeof(uint32); /* num of 32bit words */
832     uint8 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
833     dhd_prot_t *prot = dhd->prot;
834     uint32 step = 0;
835     uint32 delay = PCIE_D2H_SYNC_DELAY;
836     uint32 total_tries = 0;
837 
838     ASSERT(msglen == ring->item_len);
839 
840     BCM_REFERENCE(delay);
841     /*
842      * For retries we use a stepper algorithm.
843      * We see that every time the dongle comes out of the D3
844      * Cold state, the first D2H mem2mem DMA takes more time to
845      * complete, leading to livelock issues.
846      *
847      * Case 1 - Apart from the Host CPU, some other bus master is
848      * accessing the DDR port, probably a page close to the ring,
849      * so the PCIe bridge does not get a chance to update the memory.
850      * Solution - Increase the number of tries.
851      *
852      * Case 2 - The delay given by the Host CPU is not
853      * sufficient for the PCIe RC to start its work.
854      * In this case the breathing time given by
855      * the Host CPU is not sufficient.
856      * Solution: Increase the delay in a stepper fashion.
857      * This is done to ensure that no unwanted extra delay
858      * is introduced under normal conditions.
859      */
860     for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
861         for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
862             prot_checksum = bcm_compute_xor32((volatile uint32 *)msg, num_words);
863             if (prot_checksum == 0U) { /* checksum is OK */
864                 if (msg->epoch == ring_seqnum) {
865                     ring->seqnum++; /* next expected sequence number */
866                     goto dma_completed;
867                 }
868             }
869 
870             total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
871 
872             if (total_tries > prot->d2h_sync_wait_max)
873                 prot->d2h_sync_wait_max = total_tries;
874 
875             OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
876             OSL_CPU_RELAX(); /* CPU relax for msg_seqnum  value to update */
877             OSL_DELAY(delay * step); /* Add stepper delay */
878         } /* for PCIE_D2H_SYNC_WAIT_TRIES */
879     } /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
880 
881     DHD_ERROR(("%s: prot_checksum = 0x%x\n", __FUNCTION__, prot_checksum));
882     dhd_prot_d2h_sync_livelock(dhd, msg->epoch, ring, total_tries,
883         (volatile uchar *) msg, msglen);
884 
885     ring->seqnum++; /* skip this message ... leak of a pktid */
886     return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
887 
888 dma_completed:
889 
890     prot->d2h_sync_wait_tot += tries;
891     return msg->msg_type;
892 }
893 
894 /**
895  * dhd_prot_d2h_sync_none - Dongle ensures that the DMA will complete and the
896  * host need not try to sync. This noop sync handler will be bound when the dongle
897  * advertises that neither the SEQNUM nor XORCSUM mode of DMA sync is required.
898  */
899 static uint8 BCMFASTPATH
900 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
901                        volatile cmn_msg_hdr_t *msg, int msglen)
902 {
903     return msg->msg_type;
904 }
905 
906 INLINE void
907 dhd_wakeup_ioctl_event(dhd_pub_t *dhd, dhd_ioctl_recieved_status_t reason)
908 {
909     /* To synchronize with the previous memory operations call wmb() */
910     OSL_SMP_WMB();
911     dhd->prot->ioctl_received = reason;
912     /* Call another wmb() to make sure the event value update is visible before waking up the waiter */
913     OSL_SMP_WMB();
914     dhd_os_ioctl_resp_wake(dhd);
915 }
916 
917 /**
918  * dhd_prot_d2h_sync_init - Setup the host side DMA sync mode based on what
919  * the dongle advertises.
920  */
921 static void
922 dhd_prot_d2h_sync_init(dhd_pub_t *dhd)
923 {
924     dhd_prot_t *prot = dhd->prot;
925     prot->d2h_sync_wait_max = 0UL;
926     prot->d2h_sync_wait_tot = 0UL;
927 
928     prot->d2hring_ctrl_cpln.seqnum = D2H_EPOCH_INIT_VAL;
929     prot->d2hring_ctrl_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
930 
931     prot->d2hring_tx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
932     prot->d2hring_tx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
933 
934     prot->d2hring_rx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
935     prot->d2hring_rx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
936 
937     if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) {
938         prot->d2h_sync_cb = dhd_prot_d2h_sync_seqnum;
939         DHD_ERROR(("%s(): D2H sync mechanism is SEQNUM \r\n", __FUNCTION__));
940     } else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) {
941         prot->d2h_sync_cb = dhd_prot_d2h_sync_xorcsum;
942         DHD_ERROR(("%s(): D2H sync mechanism is XORCSUM \r\n", __FUNCTION__));
943     } else {
944         prot->d2h_sync_cb = dhd_prot_d2h_sync_none;
945         DHD_ERROR(("%s(): D2H sync mechanism is NONE \r\n", __FUNCTION__));
946     }
947 }
948 
949 /**
950  * dhd_prot_h2d_sync_init - Per H2D common ring, setup the msgbuf ring seqnum
951  */
952 static void
953 dhd_prot_h2d_sync_init(dhd_pub_t *dhd)
954 {
955     dhd_prot_t *prot = dhd->prot;
956     prot->h2dring_rxp_subn.seqnum = H2D_EPOCH_INIT_VAL;
957     prot->h2dring_rxp_subn.current_phase = 0;
958 
959     prot->h2dring_ctrl_subn.seqnum = H2D_EPOCH_INIT_VAL;
960     prot->h2dring_ctrl_subn.current_phase = 0;
961 }
962 
963 /* +-----------------  End of PCIE DHD H2D DMA SYNC ------------------------+ */
964 
965 
966 /*
967  * +---------------------------------------------------------------------------+
968  * PCIE DMA-able buffer. Sets up a dhd_dma_buf_t object, which includes the
969  * virtual and physical addresses, the buffer length and the DMA handler.
970  * A secdma handler is also included in the dhd_dma_buf object.
971  * +---------------------------------------------------------------------------+
972  */
973 
974 static INLINE void
975 dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa)
976 {
977     base_addr->low_addr = htol32(PHYSADDRLO(pa));
978     base_addr->high_addr = htol32(PHYSADDRHI(pa));
979 }
980 
981 
982 /**
983  * dhd_dma_buf_audit - Perform sanity audits on a DHD DMA buffer.
984  */
985 static int
986 dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
987 {
988     uint32 pa_lowaddr, end; /* dongle uses 32bit ptr arithmetic */
989     ASSERT(dma_buf);
990     pa_lowaddr = PHYSADDRLO(dma_buf->pa);
991     ASSERT(PHYSADDRLO(dma_buf->pa) || PHYSADDRHI(dma_buf->pa));
992     ASSERT(ISALIGNED(pa_lowaddr, DMA_ALIGN_LEN));
993     ASSERT(dma_buf->len != 0);
994 
995     /* test 32bit offset arithmetic over dma buffer for loss of carry-over */
996     end = (pa_lowaddr + dma_buf->len); /* end address */
997 
998     if ((end & 0xFFFFFFFF) < (pa_lowaddr & 0xFFFFFFFF)) { /* exclude carryover */
999         DHD_ERROR(("%s: dma_buf %x len %d spans dongle 32bit ptr arithmetic\n",
1000             __FUNCTION__, pa_lowaddr, dma_buf->len));
1001         return BCME_ERROR;
1002     }
1003 
1004     return BCME_OK;
1005 }
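/*
 * Illustrative example, not from the original source: with
 * pa_lowaddr == 0xFFFFF000 and dma_buf->len == 0x2000, 'end' computes to
 * 0x00001000, which is less than pa_lowaddr; the buffer would straddle the 4GB
 * boundary of the dongle's 32-bit pointer arithmetic, so the audit fails.
 */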
1006 
1007 /**
1008  * dhd_dma_buf_alloc - Allocate a cache coherent DMA-able buffer.
1009  * returns BCME_OK=0 on success
1010  * returns non-zero negative error value on failure.
1011  */
1012 static int
1013 dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len)
1014 {
1015     uint32 dma_pad = 0;
1016     osl_t *osh = dhd->osh;
1017     uint16 dma_align = DMA_ALIGN_LEN;
1018 
1019 
1020     ASSERT(dma_buf != NULL);
1021     ASSERT(dma_buf->va == NULL);
1022     ASSERT(dma_buf->len == 0);
1023 
1024     /* Pad the buffer length by one extra cacheline size.
1025      * Required for D2H direction.
1026      */
1027     dma_pad = (buf_len % DHD_DMA_PAD) ? DHD_DMA_PAD : 0;
1028     dma_buf->va = DMA_ALLOC_CONSISTENT(osh, buf_len + dma_pad,
1029         dma_align, &dma_buf->_alloced, &dma_buf->pa, &dma_buf->dmah);
1030 
1031     if (dma_buf->va == NULL) {
1032         DHD_ERROR(("%s: buf_len %d, no memory available\n",
1033             __FUNCTION__, buf_len));
1034         return BCME_NOMEM;
1035     }
1036 
1037     dma_buf->len = buf_len; /* not including padded len */
1038 
1039     if (dhd_dma_buf_audit(dhd, dma_buf) != BCME_OK) { /* audit dma buf */
1040         dhd_dma_buf_free(dhd, dma_buf);
1041         return BCME_ERROR;
1042     }
1043 
1044     dhd_dma_buf_reset(dhd, dma_buf); /* zero out and cache flush */
1045 
1046     return BCME_OK;
1047 }
1048 
1049 /**
1050  * dhd_dma_buf_reset - Reset a cache coherent DMA-able buffer.
1051  */
1052 static void
1053 dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
1054 {
1055     if ((dma_buf == NULL) || (dma_buf->va == NULL))
1056         return;
1057 
1058     (void)dhd_dma_buf_audit(dhd, dma_buf);
1059 
1060     /* Zero out the entire buffer and cache flush */
1061     memset((void*)dma_buf->va, 0, dma_buf->len);
1062     OSL_CACHE_FLUSH((void *)dma_buf->va, dma_buf->len);
1063 }
1064 
1065 /**
1066  * dhd_dma_buf_free - Free a DMA-able buffer that was previously allocated using
1067  * dhd_dma_buf_alloc().
1068  */
1069 static void
1070 dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
1071 {
1072     osl_t *osh = dhd->osh;
1073 
1074     ASSERT(dma_buf);
1075 
1076     if (dma_buf->va == NULL)
1077         return; /* Allow for free invocation, when alloc failed */
1078 
1079     /* DEBUG: dhd_dma_buf_reset(dhd, dma_buf) */
1080     (void)dhd_dma_buf_audit(dhd, dma_buf);
1081 
1082     /* dma buffer may have been padded at allocation */
1083     DMA_FREE_CONSISTENT(osh, dma_buf->va, dma_buf->_alloced,
1084         dma_buf->pa, dma_buf->dmah);
1085 
1086     memset(dma_buf, 0, sizeof(dhd_dma_buf_t));
1087 }
1088 
1089 /**
1090  * dhd_dma_buf_init - Initialize a dhd_dma_buf with specified values.
1091  * Do not use dhd_dma_buf_init to zero out a dhd_dma_buf_t object. Use memset 0.
1092  */
1093 void
1094 dhd_dma_buf_init(dhd_pub_t *dhd, void *dhd_dma_buf,
1095     void *va, uint32 len, dmaaddr_t pa, void *dmah, void *secdma)
1096 {
1097     dhd_dma_buf_t *dma_buf;
1098     ASSERT(dhd_dma_buf);
1099     dma_buf = (dhd_dma_buf_t *)dhd_dma_buf;
1100     dma_buf->va = va;
1101     dma_buf->len = len;
1102     dma_buf->pa = pa;
1103     dma_buf->dmah = dmah;
1104     dma_buf->secdma = secdma;
1105 
1106     /* Audit user defined configuration */
1107     (void)dhd_dma_buf_audit(dhd, dma_buf);
1108 }
1109 
1110 /* +------------------  End of PCIE DHD DMA BUF ADT ------------------------+ */
1111 
1112 /*
1113  * +---------------------------------------------------------------------------+
1114  * PktId Map: Provides a native packet pointer to unique 32bit PktId mapping.
1115  * Its main purpose is to save memory on the dongle; it has other purposes as well.
1116  * The packet id map also includes storage for some packet parameters that
1117  * may be saved. A native packet pointer along with the parameters may be saved
1118  * and a unique 32bit pkt id will be returned. Later, the saved packet pointer
1119  * and the metadata may be retrieved using the previously allocated packet id.
1120  * +---------------------------------------------------------------------------+
1121  */
1122 #define DHD_PCIE_PKTID
1123 #define MAX_CTRL_PKTID        (1024) /* Maximum number of pktids supported */
1124 #define MAX_RX_PKTID        (1024)
1125 #define MAX_TX_PKTID        (3072 * 2)
1126 
1127 /* On Router, the pktptr serves as a pktid. */
1128 
1129 
1130 #if defined(PROP_TXSTATUS) && !defined(DHD_PCIE_PKTID)
1131 #error "PKTIDMAP must be supported with PROP_TXSTATUS/WLFC"
1132 #endif
1133 
1134 /* Enum for marking the buffer color based on usage */
1135 typedef enum dhd_pkttype {
1136     PKTTYPE_DATA_TX = 0,
1137     PKTTYPE_DATA_RX,
1138     PKTTYPE_IOCTL_RX,
1139     PKTTYPE_EVENT_RX,
1140     PKTTYPE_INFO_RX,
1141     /* dhd_prot_pkt_free no check, if pktid reserved and no space avail case */
1142     PKTTYPE_NO_CHECK,
1143     PKTTYPE_TSBUF_RX
1144 } dhd_pkttype_t;
1145 
1146 #define DHD_PKTID_INVALID               (0U)
1147 #define DHD_IOCTL_REQ_PKTID             (0xFFFE)
1148 #define DHD_FAKE_PKTID                  (0xFACE)
1149 #define DHD_H2D_DBGRING_REQ_PKTID    0xFFFD
1150 #define DHD_D2H_DBGRING_REQ_PKTID    0xFFFC
1151 #define DHD_H2D_HOSTTS_REQ_PKTID    0xFFFB
1152 
1153 #define IS_FLOWRING(ring) \
1154     ((strncmp(ring->name, "h2dflr", sizeof("h2dflr"))) == (0))
1155 
1156 typedef void * dhd_pktid_map_handle_t; /* opaque handle to a pktid map */
1157 
1158 /* Construct a packet id mapping table, returning an opaque map handle */
1159 static dhd_pktid_map_handle_t *dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items);
1160 
1161 /* Destroy a packet id mapping table, freeing all packets active in the table */
1162 static void dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map);
1163 
1164 #define DHD_NATIVE_TO_PKTID_INIT(dhd, items) dhd_pktid_map_init((dhd), (items))
1165 #define DHD_NATIVE_TO_PKTID_RESET(dhd, map)  dhd_pktid_map_reset((dhd), (map))
1166 #define DHD_NATIVE_TO_PKTID_FINI(dhd, map)   dhd_pktid_map_fini((dhd), (map))
1167 #define DHD_NATIVE_TO_PKTID_FINI_IOCTL(osh, map)  dhd_pktid_map_fini_ioctl((osh), (map))
1168 
1169 #ifdef MACOSX_DHD
1170 #undef DHD_PCIE_PKTID
1171 #define DHD_PCIE_PKTID 1
1172 #endif /* MACOSX_DHD */
1173 
1174 #if defined(DHD_PCIE_PKTID)
1175 #if defined(MACOSX_DHD) || defined(DHD_EFI)
1176 #define IOCTLRESP_USE_CONSTMEM
1177 static void free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf);
1178 static int  alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf);
1179 #endif
1180 
1181 /* Determine number of pktids that are available */
1182 static INLINE uint32 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle);
1183 
1184 /* Allocate a unique pktid against which a pkt and some metadata is saved */
1185 static INLINE uint32 dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
1186     void *pkt, dhd_pkttype_t pkttype);
1187 static INLINE void dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
1188     void *pkt, uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dma,
1189     void *dmah, void *secdma, dhd_pkttype_t pkttype);
1190 static uint32 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
1191     void *pkt, dmaaddr_t pa, uint32 len, uint8 dma,
1192     void *dmah, void *secdma, dhd_pkttype_t pkttype);
1193 
1194 /* Return an allocated pktid, retrieving previously saved pkt and metadata */
1195 static void *dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
1196     uint32 id, dmaaddr_t *pa, uint32 *len, void **dmah,
1197     void **secdma, dhd_pkttype_t pkttype, bool rsv_locker);
1198 
1199 /*
1200  * DHD_PKTID_AUDIT_ENABLED: Audit of PktIds in DHD for duplicate alloc and frees
1201  *
1202  * DHD_PKTID_AUDIT_MAP: Audit the LIFO or FIFO PktIdMap allocator
1203  * DHD_PKTID_AUDIT_RING: Audit the pktid during producer/consumer ring operation
1204  *
1205  * CAUTION: When DHD_PKTID_AUDIT_ENABLED is defined,
1206  *    either DHD_PKTID_AUDIT_MAP or DHD_PKTID_AUDIT_RING may be selected.
1207  */
1208 #if defined(DHD_PKTID_AUDIT_ENABLED)
1209 #define USE_DHD_PKTID_AUDIT_LOCK 1
1210 /* Audit the pktidmap allocator */
1211 
1212 /* Audit the pktid during production/consumption of workitems */
1213 #define DHD_PKTID_AUDIT_RING
1214 
1215 #if defined(DHD_PKTID_AUDIT_MAP) && defined(DHD_PKTID_AUDIT_RING)
1216 #error "May only enabled audit of MAP or RING, at a time."
1217 #endif /* DHD_PKTID_AUDIT_MAP && DHD_PKTID_AUDIT_RING */
1218 
1219 #define DHD_DUPLICATE_ALLOC     1
1220 #define DHD_DUPLICATE_FREE      2
1221 #define DHD_TEST_IS_ALLOC       3
1222 #define DHD_TEST_IS_FREE        4
1223 
1224 #ifdef USE_DHD_PKTID_AUDIT_LOCK
1225 #define DHD_PKTID_AUDIT_LOCK_INIT(osh)          dhd_os_spin_lock_init(osh)
1226 #define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock)  dhd_os_spin_lock_deinit(osh, lock)
1227 #define DHD_PKTID_AUDIT_LOCK(lock)              dhd_os_spin_lock(lock)
1228 #define DHD_PKTID_AUDIT_UNLOCK(lock, flags)     dhd_os_spin_unlock(lock, flags)
1229 #else
1230 #define DHD_PKTID_AUDIT_LOCK_INIT(osh)          (void *)(1)
1231 #define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock)  do { } while (0)
1232 #define DHD_PKTID_AUDIT_LOCK(lock)              0
1233 #define DHD_PKTID_AUDIT_UNLOCK(lock, flags)     do { } while (0)
1234 #endif /* !USE_DHD_PKTID_AUDIT_LOCK */
1235 
1236 #endif /* DHD_PKTID_AUDIT_ENABLED */
1237 
1238 #ifdef USE_DHD_PKTID_LOCK
1239 #define DHD_PKTID_LOCK_INIT(osh)                dhd_os_spin_lock_init(osh)
1240 #define DHD_PKTID_LOCK_DEINIT(osh, lock)        dhd_os_spin_lock_deinit(osh, lock)
1241 #define DHD_PKTID_LOCK(lock)                    dhd_os_spin_lock(lock)
1242 #define DHD_PKTID_UNLOCK(lock, flags)           dhd_os_spin_unlock(lock, flags)
1243 #else
1244 #define DHD_PKTID_LOCK_INIT(osh)                (void *)(1)
1245 #define DHD_PKTID_LOCK_DEINIT(osh, lock)    \
1246     do { \
1247         BCM_REFERENCE(osh); \
1248         BCM_REFERENCE(lock); \
1249     } while (0)
1250 #define DHD_PKTID_LOCK(lock)                    0
1251 #define DHD_PKTID_UNLOCK(lock, flags)           \
1252     do { \
1253         BCM_REFERENCE(lock); \
1254         BCM_REFERENCE(flags); \
1255     } while (0)
1256 #endif /* !USE_DHD_PKTID_LOCK */
1257 
1258 typedef enum dhd_locker_state {
1259     LOCKER_IS_FREE,
1260     LOCKER_IS_BUSY,
1261     LOCKER_IS_RSVD
1262 } dhd_locker_state_t;
1263 
1264 /* Packet metadata saved in packet id mapper */
1265 
1266 typedef struct dhd_pktid_item {
1267     dhd_locker_state_t state;  /* tag a locker to be free, busy or reserved */
1268     uint8       dir;      /* dma map direction (Tx=flush or Rx=invalidate) */
1269     dhd_pkttype_t pkttype; /* pktlists are maintained based on pkttype */
1270     uint16      len;      /* length of mapped packet's buffer */
1271     void        *pkt;     /* opaque native pointer to a packet */
1272     dmaaddr_t   pa;       /* physical address of mapped packet's buffer */
1273     void        *dmah;    /* handle to OS specific DMA map */
1274     void        *secdma;
1275 } dhd_pktid_item_t;
1276 
1277 typedef uint32 dhd_pktid_key_t;
1278 
1279 typedef struct dhd_pktid_map {
1280     uint32      items;    /* total items in map */
1281     uint32      avail;    /* total available items */
1282     int         failures; /* lockers unavailable count */
1283 #if defined(DHD_PKTID_AUDIT_ENABLED)
1284     void        *pktid_audit_lock;
1285     struct bcm_mwbmap *pktid_audit; /* multi word bitmap based audit */
1286 #endif /* DHD_PKTID_AUDIT_ENABLED */
1287     dhd_pktid_key_t    *keys; /* map_items +1 unique pkt ids */
1288     dhd_pktid_item_t lockers[0];           /* metadata storage */
1289 } dhd_pktid_map_t;
1290 
1291 /*
1292  * PktId (Locker) #0 is never allocated and is considered invalid.
1293  *
1294  * On request for a pktid, a value DHD_PKTID_INVALID must be treated as a
1295  * depleted pktid pool and must not be used by the caller.
1296  *
1297  * Likewise, a caller must never free a pktid of value DHD_PKTID_INVALID.
1298  */
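/*
 * Illustrative sketch (not part of the driver, never compiled) of the caller
 * contract described above. The variables used (map, pkt, pa, len, dir,
 * dmah, secdma) are placeholders for whatever the caller already holds.
 */
#if 0
	uint32 pktid;

	pktid = dhd_pktid_map_alloc(dhd, map, pkt, pa, len, dir, dmah, secdma,
		PKTTYPE_DATA_TX);
	if (pktid == DHD_PKTID_INVALID) {
		/* pktid pool depleted: do not post the work item; retry later */
		return BCME_NORESOURCE;
	}

	/* ... post the work item carrying pktid to the dongle ... */

	/* never pass DHD_PKTID_INVALID (locker #0) to dhd_pktid_map_free() */
#endif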
1299 
1300 #define DHD_PKTID_FREE_LOCKER           (FALSE)
1301 #define DHD_PKTID_RSV_LOCKER            (TRUE)
1302 
1303 #define DHD_PKTID_ITEM_SZ               (sizeof(dhd_pktid_item_t))
1304 #define DHD_PKIDMAP_ITEMS(items)        (items)
1305 #define DHD_PKTID_MAP_SZ(items)         (sizeof(dhd_pktid_map_t) + \
1306                                          (DHD_PKTID_ITEM_SZ * ((items) + 1)))
1307 #define DHD_PKTIDMAP_KEYS_SZ(items)     (sizeof(dhd_pktid_key_t) * ((items) + 1))
1308 
1309 #define DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, map)  dhd_pktid_map_reset_ioctl((dhd), (map))
1310 
1311 /* Convert a packet to a pktid, and save pkt pointer in busy locker */
1312 #define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype)    \
1313     dhd_pktid_map_reserve((dhd), (map), (pkt), (pkttype))
1314 /* Reuse a previously reserved locker to save packet params */
1315 #define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dir, dmah, secdma, pkttype) \
1316     dhd_pktid_map_save((dhd), (map), (void *)(pkt), (nkey), (pa), (uint32)(len), \
1317         (uint8)(dir), (void *)(dmah), (void *)(secdma), \
1318         (dhd_pkttype_t)(pkttype))
1319 /* Convert a packet to a pktid, and save packet params in locker */
1320 #define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dir, dmah, secdma, pkttype) \
1321     dhd_pktid_map_alloc((dhd), (map), (void *)(pkt), (pa), (uint32)(len), \
1322         (uint8)(dir), (void *)(dmah), (void *)(secdma), \
1323         (dhd_pkttype_t)(pkttype))
1324 
1325 /* Convert pktid to a packet, and free the locker */
1326 #define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
1327     dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
1328         (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
1329         (void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_FREE_LOCKER)
1330 
1331 /* Convert the pktid to a packet, empty locker, but keep it reserved */
1332 #define DHD_PKTID_TO_NATIVE_RSV(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
1333     dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
1334                        (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
1335                        (void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_RSV_LOCKER)
1336 
1337 #define DHD_PKTID_AVAIL(map)                 dhd_pktid_map_avail_cnt(map)
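/*
 * Illustrative sketch (not part of the driver, never compiled) showing how
 * the conversion macros above are typically paired: the post path maps a
 * buffer and converts the native packet into a pktid, and the completion
 * path converts the pktid back and unmaps. Ring read/write details elided.
 */
#if 0
	/* post path: check headroom in the pktid pool, then map and convert */
	if (DHD_PKTID_AVAIL(map) == 0)
		return BCME_NORESOURCE;
	pktid = DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, DMA_TX,
		dmah, secdma, PKTTYPE_DATA_TX);

	/* completion path: recover the native packet and its DMA parameters */
	pkt = DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma,
		PKTTYPE_DATA_TX);
	DMA_UNMAP(dhd->osh, pa, len, DMA_TX, 0, dmah);
#endif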
1338 
1339 #if defined(DHD_PKTID_AUDIT_ENABLED)
1340 /**
1341 * dhd_pktid_audit - Use the mwbmap to audit validity of a pktid.
1342 */
1343 static int
1344 dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
1345     const int test_for, const char *errmsg)
1346 {
1347 #define DHD_PKT_AUDIT_STR "ERROR: %16s Host PktId Audit: "
1348     struct bcm_mwbmap *handle;
1349     uint32    flags;
1350     bool ignore_audit;
1351 
1352     if (pktid_map == (dhd_pktid_map_t *)NULL) {
1353         DHD_ERROR((DHD_PKT_AUDIT_STR "Pkt id map NULL\n", errmsg));
1354         return BCME_OK;
1355     }
1356 
1357     flags = DHD_PKTID_AUDIT_LOCK(pktid_map->pktid_audit_lock);
1358 
1359     handle = pktid_map->pktid_audit;
1360     if (handle == (struct bcm_mwbmap *)NULL) {
1361         DHD_ERROR((DHD_PKT_AUDIT_STR "Handle NULL\n", errmsg));
1362         DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
1363         return BCME_OK;
1364     }
1365 
1366     /* Exclude special pktids from audit */
1367     ignore_audit = (pktid == DHD_IOCTL_REQ_PKTID) | (pktid == DHD_FAKE_PKTID);
1368     if (ignore_audit) {
1369         DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
1370         return BCME_OK;
1371     }
1372 
1373     if ((pktid == DHD_PKTID_INVALID) || (pktid > pktid_map->items)) {
1374         DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> invalid\n", errmsg, pktid));
1375         /* lock is released in "error" */
1376         goto error;
1377     }
1378 
1379     /* Perform audit */
1380     switch (test_for) {
1381         case DHD_DUPLICATE_ALLOC:
1382             if (!bcm_mwbmap_isfree(handle, pktid)) {
1383                 DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> alloc duplicate\n",
1384                            errmsg, pktid));
1385                 goto error;
1386             }
1387             bcm_mwbmap_force(handle, pktid);
1388             break;
1389 
1390         case DHD_DUPLICATE_FREE:
1391             if (bcm_mwbmap_isfree(handle, pktid)) {
1392                 DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> free duplicate\n",
1393                            errmsg, pktid));
1394                 goto error;
1395             }
1396             bcm_mwbmap_free(handle, pktid);
1397             break;
1398 
1399         case DHD_TEST_IS_ALLOC:
1400             if (bcm_mwbmap_isfree(handle, pktid)) {
1401                 DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not allocated\n",
1402                            errmsg, pktid));
1403                 goto error;
1404             }
1405             break;
1406 
1407         case DHD_TEST_IS_FREE:
1408             if (!bcm_mwbmap_isfree(handle, pktid)) {
1409                 DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not free\n",
1410                            errmsg, pktid));
1411                 goto error;
1412             }
1413             break;
1414 
1415         default:
1416             goto error;
1417     }
1418 
1419     DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
1420     return BCME_OK;
1421 
1422 error:
1423 
1424     DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
1425     /* A trap mechanism may be inserted here */
1426     dhd_pktid_error_handler(dhd);
1427 
1428     return BCME_ERROR;
1429 }
1430 
1431 #define DHD_PKTID_AUDIT(dhdp, map, pktid, test_for) \
1432     dhd_pktid_audit((dhdp), (dhd_pktid_map_t *)(map), (pktid), (test_for), __FUNCTION__)
1433 
1434 static int
1435 dhd_pktid_audit_ring_debug(dhd_pub_t *dhdp, dhd_pktid_map_t *map, uint32 pktid,
1436     const int test_for, void *msg, uint32 msg_len, const char * func)
1437 {
1438     int ret = 0;
1439     ret = DHD_PKTID_AUDIT(dhdp, map, pktid, test_for);
1440     if (ret == BCME_ERROR) {
1441         prhex(func, (uchar *)msg, msg_len);
1442     }
1443     return ret;
1444 }
1445 #define DHD_PKTID_AUDIT_RING_DEBUG(dhdp, map, pktid, test_for, msg, msg_len) \
1446     dhd_pktid_audit_ring_debug((dhdp), (dhd_pktid_map_t *)(map), \
1447         (pktid), (test_for), msg, msg_len, __FUNCTION__)
1448 
1449 #endif /* DHD_PKTID_AUDIT_ENABLED */
1450 
1451 
1452 /**
1453  * +---------------------------------------------------------------------------+
1454  * Packet to Packet Id mapper using a <numbered_key, locker> paradigm.
1455  *
1456  * dhd_pktid_map manages a set of unique Packet Ids range[1..MAX_xxx_PKTID].
1457  *
1458  * dhd_pktid_map_alloc() may be used to save some packet metadata, and a unique
1459  * packet id is returned. This unique packet id may be used to retrieve the
1460  * previously saved packet metadata, using dhd_pktid_map_free(). On invocation
1461  * of dhd_pktid_map_free(), the unique packet id is essentially freed. A
1462  * subsequent call to dhd_pktid_map_alloc() may reuse this packet id.
1463  *
1464  * Implementation Note:
1465  * Convert this into a <key,locker> abstraction and place into bcmutils !
1466  * Locker abstraction should treat contents as opaque storage, and a
1467  * callback should be registered to handle busy lockers on destructor.
1468  *
1469  * +---------------------------------------------------------------------------+
1470  */
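/*
 * Illustrative lifecycle sketch (not part of the driver, never compiled) for
 * the mapper described above. A locker moves LOCKER_IS_FREE -> LOCKER_IS_BUSY
 * on alloc and back to LOCKER_IS_FREE on free (or to LOCKER_IS_RSVD when the
 * key is kept reserved). The item count 1024 is an arbitrary example.
 */
#if 0
	dhd_pktid_map_handle_t *map;

	map = dhd_pktid_map_init(dhd, 1024);		/* keys #1..#1024 */

	pktid = dhd_pktid_map_alloc(dhd, map, pkt, pa, len, dir, dmah,
		secdma, pkttype);			/* FREE -> BUSY */

	pkt = dhd_pktid_map_free(dhd, map, pktid, &pa, &len, &dmah, &secdma,
		pkttype, DHD_PKTID_FREE_LOCKER);	/* BUSY -> FREE */

	dhd_pktid_map_fini(dhd, map);	/* unmaps and frees any still-busy lockers */
#endif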
1471 
1472 /** Allocate and initialize a mapper of num_items <numbered_key, locker> */
1473 
1474 static dhd_pktid_map_handle_t *
1475 dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items)
1476 {
1477     void* osh;
1478     uint32 nkey;
1479     dhd_pktid_map_t *map;
1480     uint32 dhd_pktid_map_sz;
1481     uint32 map_items;
1482     uint32 map_keys_sz;
1483     osh = dhd->osh;
1484 
1485     dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(num_items);
1486 
1487     map = (dhd_pktid_map_t *)VMALLOC(osh, dhd_pktid_map_sz);
1488     if (map == NULL) {
1489         DHD_ERROR(("%s:%d: VMALLOC failed for size %d\n",
1490             __FUNCTION__, __LINE__, dhd_pktid_map_sz));
1491         return (dhd_pktid_map_handle_t *)NULL;
1492     }
1493 
1494     /* Initialize the lock that protects this structure */
1495     map->items = num_items;
1496     map->avail = num_items;
1497 
1498     map_items = DHD_PKIDMAP_ITEMS(map->items);
1499 
1500     map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
1501     map->keys = (dhd_pktid_key_t *)MALLOC(osh, map_keys_sz);
1502     if (map->keys == NULL) {
1503         DHD_ERROR(("%s:%d: MALLOC failed for map->keys size %d\n",
1504             __FUNCTION__, __LINE__, map_keys_sz));
1505         goto error;
1506     }
1507 
1508 #if defined(DHD_PKTID_AUDIT_ENABLED)
1509         /* Incarnate a hierarchical multiword bitmap for auditing pktid allocator */
1510         map->pktid_audit = bcm_mwbmap_init(osh, map_items + 1);
1511         if (map->pktid_audit == (struct bcm_mwbmap *)NULL) {
1512             DHD_ERROR(("%s:%d: pktid_audit init failed\r\n", __FUNCTION__, __LINE__));
1513             goto error;
1514         } else {
1515             DHD_ERROR(("%s:%d: pktid_audit init succeeded %d\n",
1516                 __FUNCTION__, __LINE__, map_items + 1));
1517         }
1518         map->pktid_audit_lock = DHD_PKTID_AUDIT_LOCK_INIT(osh);
1519 #endif /* DHD_PKTID_AUDIT_ENABLED */
1520 
1521     for (nkey = 1; nkey <= map_items; nkey++) { /* locker #0 is reserved */
1522         map->keys[nkey] = nkey; /* populate with unique keys */
1523         map->lockers[nkey].state = LOCKER_IS_FREE;
1524         map->lockers[nkey].pkt   = NULL; /* bzero: redundant */
1525         map->lockers[nkey].len   = 0;
1526     }
1527 
1528     /* Reserve pktid #0, i.e. DHD_PKTID_INVALID, as in use */
1529     map->lockers[DHD_PKTID_INVALID].state = LOCKER_IS_BUSY; /* tag locker #0 as inuse */
1530     map->lockers[DHD_PKTID_INVALID].pkt   = NULL; /* bzero: redundant */
1531     map->lockers[DHD_PKTID_INVALID].len   = 0;
1532 
1533 #if defined(DHD_PKTID_AUDIT_ENABLED)
1534     /* do not use dhd_pktid_audit() here, use bcm_mwbmap_force directly */
1535     bcm_mwbmap_force(map->pktid_audit, DHD_PKTID_INVALID);
1536 #endif /* DHD_PKTID_AUDIT_ENABLED */
1537 
1538     return (dhd_pktid_map_handle_t *)map; /* opaque handle */
1539 
1540 error:
1541     if (map) {
1542 #if defined(DHD_PKTID_AUDIT_ENABLED)
1543         if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
1544             bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */
1545             map->pktid_audit = (struct bcm_mwbmap *)NULL;
1546             if (map->pktid_audit_lock)
1547                 DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock);
1548         }
1549 #endif /* DHD_PKTID_AUDIT_ENABLED */
1550         if (map->keys) {
1551             MFREE(osh, map->keys, map_keys_sz);
1552         }
1553         VMFREE(osh, map, dhd_pktid_map_sz);
1554     }
1555     return (dhd_pktid_map_handle_t *)NULL;
1556 }
1557 
1558 /**
1559  * Retrieve all allocated keys and free all <numbered_key, locker>.
1560  * Freeing implies: unmapping the buffers and freeing the native packet
1561  * This could have been a callback registered with the pktid mapper.
1562  */
1563 static void
1564 dhd_pktid_map_reset(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
1565 {
1566     void *osh;
1567     uint32 nkey;
1568     dhd_pktid_map_t *map;
1569     dhd_pktid_item_t *locker;
1570     uint32 map_items;
1571     uint32 flags;
1572     bool data_tx = FALSE;
1573 
1574     map = (dhd_pktid_map_t *)handle;
1575     DHD_GENERAL_LOCK(dhd, flags);
1576     osh = dhd->osh;
1577 
1578     map_items = DHD_PKIDMAP_ITEMS(map->items);
1579     /* skip reserved KEY #0, and start from 1 */
1580 
1581     for (nkey = 1; nkey <= map_items; nkey++) {
1582         if (map->lockers[nkey].state == LOCKER_IS_BUSY) {
1583             locker = &map->lockers[nkey];
1584             locker->state = LOCKER_IS_FREE;
1585             data_tx = (locker->pkttype == PKTTYPE_DATA_TX);
1586             if (data_tx) {
1587                 dhd->prot->active_tx_count--;
1588             }
1589 
1590 #ifdef DHD_PKTID_AUDIT_RING
1591             DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
1592 #endif /* DHD_PKTID_AUDIT_RING */
1593 
1594             {
1595                 if (SECURE_DMA_ENAB(dhd->osh))
1596                     SECURE_DMA_UNMAP(osh, locker->pa,
1597                         locker->len, locker->dir, 0,
1598                         locker->dmah, locker->secdma, 0);
1599                 else
1600                     DMA_UNMAP(osh, locker->pa, locker->len,
1601                         locker->dir, 0, locker->dmah);
1602             }
1603             dhd_prot_packet_free(dhd, (ulong*)locker->pkt,
1604                 locker->pkttype, data_tx);
1605         }
1606         else {
1607 #ifdef DHD_PKTID_AUDIT_RING
1608             DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
1609 #endif /* DHD_PKTID_AUDIT_RING */
1610         }
1611         map->keys[nkey] = nkey; /* populate with unique keys */
1612     }
1613 
1614     map->avail = map_items;
1615     memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items);
1616     DHD_GENERAL_UNLOCK(dhd, flags);
1617 }
1618 
1619 #ifdef IOCTLRESP_USE_CONSTMEM
1620 /** Called in detach scenario. Releasing IOCTL buffers. */
1621 static void
1622 dhd_pktid_map_reset_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
1623 {
1624     uint32 nkey;
1625     dhd_pktid_map_t *map;
1626     dhd_pktid_item_t *locker;
1627     uint32 map_items;
1628     uint32 flags;
1629 
1630     map = (dhd_pktid_map_t *)handle;
1631     DHD_GENERAL_LOCK(dhd, flags);
1632 
1633     map_items = DHD_PKIDMAP_ITEMS(map->items);
1634     /* skip reserved KEY #0, and start from 1 */
1635     for (nkey = 1; nkey <= map_items; nkey++) {
1636         if (map->lockers[nkey].state == LOCKER_IS_BUSY) {
1637             dhd_dma_buf_t retbuf;
1638 
1639 #ifdef DHD_PKTID_AUDIT_RING
1640             DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
1641 #endif /* DHD_PKTID_AUDIT_RING */
1642 
1643             locker = &map->lockers[nkey];
1644             retbuf.va = locker->pkt;
1645             retbuf.len = locker->len;
1646             retbuf.pa = locker->pa;
1647             retbuf.dmah = locker->dmah;
1648             retbuf.secdma = locker->secdma;
1649 
1650             /* This could be a callback registered with dhd_pktid_map */
1651             DHD_GENERAL_UNLOCK(dhd, flags);
1652             free_ioctl_return_buffer(dhd, &retbuf);
1653             DHD_GENERAL_LOCK(dhd, flags);
1654         }
1655         else {
1656 #ifdef DHD_PKTID_AUDIT_RING
1657             DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
1658 #endif /* DHD_PKTID_AUDIT_RING */
1659         }
1660         map->keys[nkey] = nkey; /* populate with unique keys */
1661     }
1662 
1663     map->avail = map_items;
1664     memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items);
1665     DHD_GENERAL_UNLOCK(dhd, flags);
1666 }
1667 #endif /* IOCTLRESP_USE_CONSTMEM */
1668 
1669 
1670 /**
1671  * Free the pktid map.
1672  */
1673 static void
1674 dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
1675 {
1676     dhd_pktid_map_t *map;
1677     uint32 dhd_pktid_map_sz;
1678     uint32 map_keys_sz;
1679 
1680     /* Free any pending packets */
1681     dhd_pktid_map_reset(dhd, handle);
1682 
1683     map = (dhd_pktid_map_t *)handle;
1684     dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
1685     map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
1686 
1687 #if defined(DHD_PKTID_AUDIT_ENABLED)
1688     if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
1689         bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */
1690         map->pktid_audit = (struct bcm_mwbmap *)NULL;
1691         if (map->pktid_audit_lock) {
1692             DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock);
1693         }
1694     }
1695 #endif /* DHD_PKTID_AUDIT_ENABLED */
1696     MFREE(dhd->osh, map->keys, map_keys_sz);
1697     VMFREE(dhd->osh, handle, dhd_pktid_map_sz);
1698 }
1699 #ifdef IOCTLRESP_USE_CONSTMEM
1700 static void
1701 dhd_pktid_map_fini_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
1702 {
1703     dhd_pktid_map_t *map;
1704     uint32 dhd_pktid_map_sz;
1705     uint32 map_keys_sz;
1706 
1707     /* Free any pending packets */
1708     dhd_pktid_map_reset_ioctl(dhd, handle);
1709 
1710     map = (dhd_pktid_map_t *)handle;
1711     dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
1712     map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
1713 
1714 #if defined(DHD_PKTID_AUDIT_ENABLED)
1715     if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
1716         bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */
1717         map->pktid_audit = (struct bcm_mwbmap *)NULL;
1718         if (map->pktid_audit_lock) {
1719             DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock);
1720         }
1721     }
1722 #endif /* DHD_PKTID_AUDIT_ENABLED */
1723 
1724     MFREE(dhd->osh, map->keys, map_keys_sz);
1725     VMFREE(dhd->osh, handle, dhd_pktid_map_sz);
1726 }
1727 #endif /* IOCTLRESP_USE_CONSTMEM */
1728 
1729 /** Get the pktid free count */
1730 static INLINE uint32 BCMFASTPATH
1731 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle)
1732 {
1733     dhd_pktid_map_t *map;
1734     uint32    avail;
1735 
1736     ASSERT(handle != NULL);
1737     map = (dhd_pktid_map_t *)handle;
1738 
1739     avail = map->avail;
1740 
1741     return avail;
1742 }
1743 
1744 /**
1745  * dhd_pktid_map_reserve - reserve a unique numbered key. The reserved locker
1746  * is not yet populated; invoke the pktid save api to populate the packet
1747  * parameters into the locker. This function is not reentrant; serialization
1748  * is the caller's responsibility. A returned value of DHD_PKTID_INVALID must
1749  * be treated as a failure, implying a depleted pool of pktids (sketch below).
1750  */
1751 static INLINE uint32
1752 dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
1753     void *pkt, dhd_pkttype_t pkttype)
1754 {
1755     uint32 nkey;
1756     dhd_pktid_map_t *map;
1757     dhd_pktid_item_t *locker;
1758 
1759     ASSERT(handle != NULL);
1760     map = (dhd_pktid_map_t *)handle;
1761 
1762     if ((int)(map->avail) <= 0) { /* no more pktids to allocate */
1763         map->failures++;
1764         DHD_INFO(("%s:%d: failed, no free keys\n", __FUNCTION__, __LINE__));
1765         return DHD_PKTID_INVALID; /* failed alloc request */
1766     }
1767 
1768     ASSERT(map->avail <= map->items);
1769     nkey = map->keys[map->avail]; /* fetch a free locker, pop stack */
1770 
1771     if ((map->avail > map->items) || (nkey > map->items)) {
1772         map->failures++;
1773         DHD_ERROR(("%s:%d: failed to allocate a new pktid,"
1774             " map->avail<%u>, nkey<%u>, pkttype<%u>\n",
1775             __FUNCTION__, __LINE__, map->avail, nkey,
1776             pkttype));
1777         return DHD_PKTID_INVALID; /* failed alloc request */
1778     }
1779 
1780     locker = &map->lockers[nkey]; /* save packet metadata in locker */
1781     map->avail--;
1782     locker->pkt = pkt; /* pkt is saved, other params not yet saved. */
1783     locker->len = 0;
1784     locker->state = LOCKER_IS_BUSY; /* reserve this locker */
1785 
1786     ASSERT(nkey != DHD_PKTID_INVALID);
1787     return nkey; /* return locker's numbered key */
1788 }
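/*
 * Illustrative sketch (not part of the driver, never compiled) of the
 * two-phase reserve/save sequence referred to in the comment above, as used
 * when the pktid must be obtained before the buffer is DMA-mapped. Variable
 * names (map, pkt, pktlen, pa) are placeholders.
 */
#if 0
	/* phase 1: reserve a key; bail out early if the pool is depleted */
	nkey = DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, PKTTYPE_DATA_RX);
	if (nkey == DHD_PKTID_INVALID)
		return BCME_NORESOURCE;

	pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, pkt), pktlen, DMA_RX, pkt, 0);

	/* phase 2: populate the reserved locker with the mapping parameters */
	DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, pktlen, DMA_RX,
		NULL, NULL, PKTTYPE_DATA_RX);
#endif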
1789 
1790 /*
1791  * dhd_pktid_map_save - Save a packet's parameters into a locker
1792  * corresponding to a previously reserved unique numbered key.
1793  */
1794 static INLINE void
1795 dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
1796     uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
1797     dhd_pkttype_t pkttype)
1798 {
1799     dhd_pktid_map_t *map;
1800     dhd_pktid_item_t *locker;
1801 
1802     ASSERT(handle != NULL);
1803     map = (dhd_pktid_map_t *)handle;
1804 
1805     if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
1806         DHD_ERROR(("%s:%d: Error! saving invalid pktid<%u> pkttype<%u>\n",
1807             __FUNCTION__, __LINE__, nkey, pkttype));
1808 #ifdef DHD_FW_COREDUMP
1809         if (dhd->memdump_enabled) {
1810             /* collect core dump */
1811             dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
1812             dhd_bus_mem_dump(dhd);
1813         }
1814 #else
1815         ASSERT(0);
1816 #endif /* DHD_FW_COREDUMP */
1817         return;
1818     }
1819 
1820     locker = &map->lockers[nkey];
1821 
1822     ASSERT(((locker->state == LOCKER_IS_BUSY) && (locker->pkt == pkt)) ||
1823         ((locker->state == LOCKER_IS_RSVD) && (locker->pkt == NULL)));
1824 
1825     /* store contents in locker */
1826     locker->dir = dir;
1827     locker->pa = pa;
1828     locker->len = (uint16)len; /* 16bit len */
1829     locker->dmah = dmah; /* DMA mapping handle */
1830     locker->secdma = secdma;
1831     locker->pkttype = pkttype;
1832     locker->pkt = pkt;
1833     locker->state = LOCKER_IS_BUSY; /* make this locker busy */
1834 }
1835 
1836 /**
1837  * dhd_pktid_map_alloc - Allocate a unique numbered key and save the packet
1838  * contents into the corresponding locker. Return the numbered key.
1839  */
1840 static uint32 BCMFASTPATH
1841 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
1842     dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
1843     dhd_pkttype_t pkttype)
1844 {
1845     uint32 nkey;
1846 
1847     nkey = dhd_pktid_map_reserve(dhd, handle, pkt, pkttype);
1848     if (nkey != DHD_PKTID_INVALID) {
1849         dhd_pktid_map_save(dhd, handle, pkt, nkey, pa,
1850             len, dir, dmah, secdma, pkttype);
1851     }
1852 
1853     return nkey;
1854 }
1855 
1856 /**
1857  * dhd_pktid_map_free - Given a numbered key, return the locker contents.
1858  * dhd_pktid_map_free() is not reentrant, and is the caller's responsibility.
1859  * Caller may not free a pktid value DHD_PKTID_INVALID or an arbitrary pktid
1860  * value. Only a previously allocated pktid may be freed.
1861  */
1862 static void * BCMFASTPATH
1863 dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, uint32 nkey,
1864     dmaaddr_t *pa, uint32 *len, void **dmah, void **secdma, dhd_pkttype_t pkttype,
1865     bool rsv_locker)
1866 {
1867     dhd_pktid_map_t *map;
1868     dhd_pktid_item_t *locker;
1869     void * pkt;
1870     unsigned long long locker_addr;
1871 
1872     ASSERT(handle != NULL);
1873 
1874     map = (dhd_pktid_map_t *)handle;
1875 
1876     if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
1877         DHD_ERROR(("%s:%d: Error! Try to free invalid pktid<%u>, pkttype<%d>\n",
1878                    __FUNCTION__, __LINE__, nkey, pkttype));
1879 #ifdef DHD_FW_COREDUMP
1880         if (dhd->memdump_enabled) {
1881             /* collect core dump */
1882             dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
1883             dhd_bus_mem_dump(dhd);
1884         }
1885 #else
1886         ASSERT(0);
1887 #endif /* DHD_FW_COREDUMP */
1888         return NULL;
1889     }
1890 
1891     locker = &map->lockers[nkey];
1892 
1893 #if defined(DHD_PKTID_AUDIT_MAP)
1894     DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* Audit duplicate FREE */
1895 #endif /* DHD_PKTID_AUDIT_MAP */
1896 
1897     /* Debug check for cloned numbered key */
1898     if (locker->state == LOCKER_IS_FREE) {
1899         DHD_ERROR(("%s:%d: Error! freeing already freed invalid pktid<%u>\n",
1900                    __FUNCTION__, __LINE__, nkey));
1901 #ifdef DHD_FW_COREDUMP
1902         if (dhd->memdump_enabled) {
1903             /* collect core dump */
1904             dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
1905             dhd_bus_mem_dump(dhd);
1906         }
1907 #else
1908         ASSERT(0);
1909 #endif /* DHD_FW_COREDUMP */
1910         return NULL;
1911     }
1912 
1913     /* Check the colour of the buffer: a buffer posted for TX should be
1914      * freed on TX completion; similarly, a buffer posted for IOCTL should
1915      * be freed on IOCTL completion, and so on.
1916      */
1917     if ((pkttype != PKTTYPE_NO_CHECK) && (locker->pkttype != pkttype)) {
1918         DHD_ERROR(("%s:%d: Error! Invalid Buffer Free for pktid<%u> \n",
1919             __FUNCTION__, __LINE__, nkey));
1920 #ifdef BCMDMA64OSL
1921         PHYSADDRTOULONG(locker->pa, locker_addr);
1922 #else
1923         locker_addr = PHYSADDRLO(locker->pa);
1924 #endif /* BCMDMA64OSL */
1925         DHD_ERROR(("%s:%d: locker->state <%d>, locker->pkttype <%d>,"
1926             "pkttype <%d> locker->pa <0x%llx> \n",
1927             __FUNCTION__, __LINE__, locker->state, locker->pkttype,
1928             pkttype, locker_addr));
1929 #ifdef DHD_FW_COREDUMP
1930         if (dhd->memdump_enabled) {
1931             /* collect core dump */
1932             dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
1933             dhd_bus_mem_dump(dhd);
1934         }
1935 #else
1936         ASSERT(0);
1937 #endif /* DHD_FW_COREDUMP */
1938         return NULL;
1939     }
1940 
1941     if (rsv_locker == DHD_PKTID_FREE_LOCKER) {
1942         map->avail++;
1943         map->keys[map->avail] = nkey; /* make this numbered key available */
1944         locker->state = LOCKER_IS_FREE; /* open and free Locker */
1945     } else {
1946         /* pktid will be reused, but the locker does not have a valid pkt */
1947         locker->state = LOCKER_IS_RSVD;
1948     }
1949 
1950 #if defined(DHD_PKTID_AUDIT_MAP)
1951     DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
1952 #endif /* DHD_PKTID_AUDIT_MAP */
1953 
1954     *pa = locker->pa; /* return contents of locker */
1955     *len = (uint32)locker->len;
1956     *dmah = locker->dmah;
1957     *secdma = locker->secdma;
1958 
1959     pkt = locker->pkt;
1960     locker->pkt = NULL; /* Clear pkt */
1961     locker->len = 0;
1962 
1963     return pkt;
1964 }
1965 
1966 #else /* ! DHD_PCIE_PKTID */
1967 
1968 
1969 typedef struct pktlist {
1970     PKT_LIST *tx_pkt_list;        /* list for tx packets */
1971     PKT_LIST *rx_pkt_list;        /* list for rx packets */
1972     PKT_LIST *ctrl_pkt_list;    /* list for ioctl/event buf post */
1973 } pktlists_t;
1974 
1975 /*
1976  * Given that each workitem carries only a 32bit pktid, only 32bit hosts may
1977  * use a one-to-one mapping between a 32bit pktptr and a 32bit pktid.
1978  *
1979  * - When PKTIDMAP is not used, DHD_NATIVE_TO_PKTID variants will never fail.
1980  * - Neither DHD_NATIVE_TO_PKTID nor DHD_PKTID_TO_NATIVE need to be protected by
1981  *   a lock.
1982  * - Hence DHD_PKTID_INVALID is not defined when DHD_PCIE_PKTID is undefined.
1983  */
1984 #define DHD_PKTID32(pktptr32)    ((uint32)(pktptr32))
1985 #define DHD_PKTPTR32(pktid32)    ((void *)(pktid32))
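/*
 * Illustrative sketch (not part of the driver, never compiled): with
 * DHD_PCIE_PKTID undefined the pktid is the 32bit packet pointer itself, so
 * each conversion is just a cast, which is why this path is only usable on
 * hosts whose pointers fit in 32 bits.
 */
#if 0
	uint32 pktid = DHD_PKTID32(pkt);	/* (uint32)pkt */
	void *same = DHD_PKTPTR32(pktid);	/* (void *)pktid, i.e. pkt again */
#endif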
1986 
1987 
1988 static INLINE uint32 dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
1989     dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
1990     dhd_pkttype_t pkttype);
1991 static INLINE void * dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
1992     dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
1993     dhd_pkttype_t pkttype);
1994 
1995 static dhd_pktid_map_handle_t *
1996 dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items)
1997 {
1998     osl_t *osh = dhd->osh;
1999     pktlists_t *handle = NULL;
2000 
2001     if ((handle = (pktlists_t *) MALLOCZ(osh, sizeof(pktlists_t))) == NULL) {
2002         DHD_ERROR(("%s:%d: MALLOC failed for lists allocation, size=%d\n",
2003                    __FUNCTION__, __LINE__, sizeof(pktlists_t)));
2004         goto error_done;
2005     }
2006 
2007     if ((handle->tx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
2008         DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
2009                    __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
2010         goto error;
2011     }
2012 
2013     if ((handle->rx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
2014         DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
2015                    __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
2016         goto error;
2017     }
2018 
2019     if ((handle->ctrl_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
2020         DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
2021                    __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
2022         goto error;
2023     }
2024 
2025     PKTLIST_INIT(handle->tx_pkt_list);
2026     PKTLIST_INIT(handle->rx_pkt_list);
2027     PKTLIST_INIT(handle->ctrl_pkt_list);
2028 
2029     return (dhd_pktid_map_handle_t *) handle;
2030 
2031 error:
2032     if (handle->ctrl_pkt_list) {
2033         MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
2034     }
2035 
2036     if (handle->rx_pkt_list) {
2037         MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
2038     }
2039 
2040     if (handle->tx_pkt_list) {
2041         MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
2042     }
2043 
2044     if (handle) {
2045         MFREE(osh, handle, sizeof(pktlists_t));
2046     }
2047 
2048 error_done:
2049     return (dhd_pktid_map_handle_t *)NULL;
2050 }
2051 
2052 static void
2053 dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map)
2054 {
2055     osl_t *osh = dhd->osh;
2056     pktlists_t *handle = (pktlists_t *) map;
2057 
2058     ASSERT(handle != NULL);
2059     if (handle == (pktlists_t *)NULL)
2060         return;
2061 
2062     if (handle->ctrl_pkt_list) {
2063         PKTLIST_FINI(handle->ctrl_pkt_list);
2064         MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
2065     }
2066 
2067     if (handle->rx_pkt_list) {
2068         PKTLIST_FINI(handle->rx_pkt_list);
2069         MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
2070     }
2071 
2072     if (handle->tx_pkt_list) {
2073         PKTLIST_FINI(handle->tx_pkt_list);
2074         MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
2075     }
2076 
2077     if (handle) {
2078         MFREE(osh, handle, sizeof(pktlists_t));
2079     }
2080 }
2081 
2082 /** Save dma parameters into the packet's pkttag and convert a pktptr to pktid */
2083 static INLINE uint32
2084 dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
2085     dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
2086     dhd_pkttype_t pkttype)
2087 {
2088     pktlists_t *handle = (pktlists_t *) map;
2089     ASSERT(pktptr32 != NULL);
2090     DHD_PKT_SET_DMA_LEN(pktptr32, dma_len);
2091     DHD_PKT_SET_DMAH(pktptr32, dmah);
2092     DHD_PKT_SET_PA(pktptr32, pa);
2093     DHD_PKT_SET_SECDMA(pktptr32, secdma);
2094 
2095     if (pkttype == PKTTYPE_DATA_TX) {
2096         PKTLIST_ENQ(handle->tx_pkt_list,  pktptr32);
2097     } else if (pkttype == PKTTYPE_DATA_RX) {
2098         PKTLIST_ENQ(handle->rx_pkt_list,  pktptr32);
2099     } else {
2100         PKTLIST_ENQ(handle->ctrl_pkt_list,  pktptr32);
2101     }
2102 
2103     return DHD_PKTID32(pktptr32);
2104 }
2105 
2106 /** Convert a pktid to pktptr and retrieve saved dma parameters from packet */
2107 static INLINE void *
2108 dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
2109     dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
2110     dhd_pkttype_t pkttype)
2111 {
2112     pktlists_t *handle = (pktlists_t *) map;
2113     void *pktptr32;
2114 
2115     ASSERT(pktid32 != 0U);
2116     pktptr32 = DHD_PKTPTR32(pktid32);
2117     *dma_len = DHD_PKT_GET_DMA_LEN(pktptr32);
2118     *dmah = DHD_PKT_GET_DMAH(pktptr32);
2119     *pa = DHD_PKT_GET_PA(pktptr32);
2120     *secdma = DHD_PKT_GET_SECDMA(pktptr32);
2121 
2122     if (pkttype == PKTTYPE_DATA_TX) {
2123         PKTLIST_UNLINK(handle->tx_pkt_list,  pktptr32);
2124     } else if (pkttype == PKTTYPE_DATA_RX) {
2125         PKTLIST_UNLINK(handle->rx_pkt_list,  pktptr32);
2126     } else {
2127         PKTLIST_UNLINK(handle->ctrl_pkt_list,  pktptr32);
2128     }
2129 
2130     return pktptr32;
2131 }
2132 
2133 #define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype)  DHD_PKTID32(pkt)
2134 
2135 #define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dma_dir, dmah, secdma, pkttype) \
2136     ({ BCM_REFERENCE(dhd); BCM_REFERENCE(nkey); BCM_REFERENCE(dma_dir); \
2137        dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
2138                (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
2139     })
2140 
2141 #define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dma_dir, dmah, secdma, pkttype) \
2142     ({ BCM_REFERENCE(dhd); BCM_REFERENCE(dma_dir); \
2143        dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
2144                (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
2145     })
2146 
2147 #define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
2148     ({ BCM_REFERENCE(dhd); BCM_REFERENCE(pkttype);    \
2149         dhd_pktid_to_native((dhd_pktid_map_handle_t *) map, (uint32)(pktid), \
2150                 (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
2151                 (void **)&secdma, (dhd_pkttype_t)(pkttype)); \
2152     })
2153 
2154 #define DHD_PKTID_AVAIL(map)  (~0)
2155 
2156 #endif /* ! DHD_PCIE_PKTID */
2157 
2158 /* +------------------ End of PCIE DHD PKTID MAPPER  -----------------------+ */
2159 
2160 
2161 /**
2162  * The PCIE FD protocol layer is constructed in two phases:
2163  *    Phase 1. dhd_prot_attach()
2164  *    Phase 2. dhd_prot_init()
2165  *
2166  * dhd_prot_attach() - Allocates a dhd_prot_t object and resets all its fields.
2167  * All common rings are also attached (msgbuf_ring_t objects are allocated
2168  * with DMA-able buffers).
2169  * All dhd_dma_buf_t objects are also allocated here.
2170  *
2171  * As dhd_prot_attach() is invoked before the pcie_shared object is read, any
2172  * initialization of objects that requires information advertised by the dongle
2173  * may not be performed here.
2174  * E.g. the number of TxPost flowrings is not known at this point, nor do
2175  * we know which form of D2H DMA sync mechanism is advertised by the dongle, or
2176  * whether the dongle supports DMA-ing of WR/RD indices for the H2D and/or D2H
2177  * rings (common + flow).
2178  *
2179  * dhd_prot_init() is invoked after the bus layer has fetched the information
2180  * advertised by the dongle in the pcie_shared_t.
2181  */
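/*
 * Illustrative sketch (not part of the driver, never compiled) of the call
 * ordering implied by the two-phase construction described above. The actual
 * call sites are in the bus layer (dhd_pcie.c) and may differ in detail.
 */
#if 0
	dhd_prot_attach(dhd);	/* phase 1: prot object, common rings, dma bufs */

	/* ... bus layer downloads firmware and reads pcie_shared_t ... */

	dhd_prot_init(dhd);	/* phase 2: apply dongle-advertised features */

	/* a soft reboot of the dongle repeats only phase 2 */
	dhd_prot_reset(dhd);
	dhd_prot_init(dhd);

	dhd_prot_detach(dhd);	/* teardown: frees everything from phase 1 */
#endif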
2182 int
2183 dhd_prot_attach(dhd_pub_t *dhd)
2184 {
2185     osl_t *osh = dhd->osh;
2186     dhd_prot_t *prot;
2187 
2188     /* Allocate prot structure */
2189     if (!(prot = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT,
2190         sizeof(dhd_prot_t)))) {
2191         DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
2192         goto fail;
2193     }
2194     memset(prot, 0, sizeof(*prot));
2195 
2196     prot->osh = osh;
2197     dhd->prot = prot;
2198 
2199     /* DMAing of ring completions supported? FALSE by default */
2200     dhd->dma_d2h_ring_upd_support = FALSE;
2201     dhd->dma_h2d_ring_upd_support = FALSE;
2202     dhd->dma_ring_upd_overwrite = FALSE;
2203 
2204     dhd->idma_inited = 0;
2205     dhd->ifrm_inited = 0;
2206 
2207     /* Common Ring Allocations */
2208 
2209     /* Ring  0: H2D Control Submission */
2210     if (dhd_prot_ring_attach(dhd, &prot->h2dring_ctrl_subn, "h2dctrl",
2211             H2DRING_CTRL_SUB_MAX_ITEM, H2DRING_CTRL_SUB_ITEMSIZE,
2212             BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT) != BCME_OK) {
2213         DHD_ERROR(("%s: dhd_prot_ring_attach H2D Ctrl Submission failed\n",
2214             __FUNCTION__));
2215         goto fail;
2216     }
2217 
2218     /* Ring  1: H2D Receive Buffer Post */
2219     if (dhd_prot_ring_attach(dhd, &prot->h2dring_rxp_subn, "h2drxp",
2220             H2DRING_RXPOST_MAX_ITEM, H2DRING_RXPOST_ITEMSIZE,
2221             BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT) != BCME_OK) {
2222         DHD_ERROR(("%s: dhd_prot_ring_attach H2D RxPost failed\n",
2223             __FUNCTION__));
2224         goto fail;
2225     }
2226 
2227     /* Ring  2: D2H Control Completion */
2228     if (dhd_prot_ring_attach(dhd, &prot->d2hring_ctrl_cpln, "d2hctrl",
2229             D2HRING_CTRL_CMPLT_MAX_ITEM, D2HRING_CTRL_CMPLT_ITEMSIZE,
2230             BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE) != BCME_OK) {
2231         DHD_ERROR(("%s: dhd_prot_ring_attach D2H Ctrl Completion failed\n",
2232             __FUNCTION__));
2233         goto fail;
2234     }
2235 
2236     /* Ring  3: D2H Transmit Complete */
2237     if (dhd_prot_ring_attach(dhd, &prot->d2hring_tx_cpln, "d2htxcpl",
2238             D2HRING_TXCMPLT_MAX_ITEM, D2HRING_TXCMPLT_ITEMSIZE,
2239             BCMPCIE_D2H_MSGRING_TX_COMPLETE) != BCME_OK) {
2240         DHD_ERROR(("%s: dhd_prot_ring_attach D2H Tx Completion failed\n",
2241             __FUNCTION__));
2242         goto fail;
2243     }
2244 
2245     /* Ring  4: D2H Receive Complete */
2246     if (dhd_prot_ring_attach(dhd, &prot->d2hring_rx_cpln, "d2hrxcpl",
2247             D2HRING_RXCMPLT_MAX_ITEM, D2HRING_RXCMPLT_ITEMSIZE,
2248             BCMPCIE_D2H_MSGRING_RX_COMPLETE) != BCME_OK) {
2249         DHD_ERROR(("%s: dhd_prot_ring_attach D2H Rx Completion failed\n",
2250             __FUNCTION__));
2251         goto fail;
2252     }
2253 
2254     /*
2255      * Max number of flowrings is not yet known. msgbuf_ring_t with DMA-able
2256      * buffers for flowrings will be instantiated, in dhd_prot_init() .
2257      * See dhd_prot_flowrings_pool_attach()
2258      */
2259     /* ioctl response buffer */
2260     if (dhd_dma_buf_alloc(dhd, &prot->retbuf, IOCT_RETBUF_SIZE)) {
2261         goto fail;
2262     }
2263 
2264     /* IOCTL request buffer */
2265     if (dhd_dma_buf_alloc(dhd, &prot->ioctbuf, IOCT_RETBUF_SIZE)) {
2266         goto fail;
2267     }
2268 
2269     /* Host TS request buffer; one buffer for now */
2270     if (dhd_dma_buf_alloc(dhd, &prot->hostts_req_buf, CTRLSUB_HOSTTS_MEESAGE_SIZE)) {
2271         goto fail;
2272     }
2273     prot->hostts_req_buf_inuse = FALSE;
2274 
2275     /* Scratch buffer for dma rx offset */
2276 #ifdef BCM_HOST_BUF
2277     if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf,
2278         ROUNDUP(DMA_D2H_SCRATCH_BUF_LEN, 16) + DMA_HOST_BUFFER_LEN))
2279 #else
2280     if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf, DMA_D2H_SCRATCH_BUF_LEN))
2281 #endif /* BCM_HOST_BUF */
2282     {
2283         goto fail;
2284     }
2285 
2286     /* scratch buffer for bus throughput measurement */
2287     if (dhd_dma_buf_alloc(dhd, &prot->host_bus_throughput_buf, DHD_BUS_TPUT_BUF_LEN)) {
2288         goto fail;
2289     }
2290 
2291 #ifdef DHD_RX_CHAINING
2292     dhd_rxchain_reset(&prot->rxchain);
2293 #endif
2294 
2295     prot->rx_lock = dhd_os_spin_lock_init(dhd->osh);
2296 
2297     prot->pktid_ctrl_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_CTRL_PKTID);
2298     if (prot->pktid_ctrl_map == NULL) {
2299         goto fail;
2300     }
2301 
2302     prot->pktid_rx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_RX_PKTID);
2303     if (prot->pktid_rx_map == NULL)
2304         goto fail;
2305 
2306     prot->pktid_tx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_TX_PKTID);
2307     if (prot->pktid_tx_map == NULL)
2308         goto fail;
2309 
2310 #ifdef IOCTLRESP_USE_CONSTMEM
2311     prot->pktid_map_handle_ioctl = DHD_NATIVE_TO_PKTID_INIT(dhd,
2312         DHD_FLOWRING_MAX_IOCTLRESPBUF_POST);
2313     if (prot->pktid_map_handle_ioctl == NULL) {
2314         goto fail;
2315     }
2316 #endif /* IOCTLRESP_USE_CONSTMEM */
2317 
2318     /* Initialize the work queues to be used by the Load Balancing logic */
2319 #if defined(DHD_LB_TXC)
2320     {
2321         void *buffer;
2322         buffer = MALLOC(dhd->osh, sizeof(void*) * DHD_LB_WORKQ_SZ);
2323         bcm_workq_init(&prot->tx_compl_prod, &prot->tx_compl_cons,
2324             buffer, DHD_LB_WORKQ_SZ);
2325         prot->tx_compl_prod_sync = 0;
2326         DHD_INFO(("%s: created tx_compl_workq <%p,%d>\n",
2327             __FUNCTION__, buffer, DHD_LB_WORKQ_SZ));
2328     }
2329 #endif /* DHD_LB_TXC */
2330 
2331 #if defined(DHD_LB_RXC)
2332     {
2333         void *buffer;
2334         buffer = MALLOC(dhd->osh, sizeof(void*) * DHD_LB_WORKQ_SZ);
2335         bcm_workq_init(&prot->rx_compl_prod, &prot->rx_compl_cons,
2336             buffer, DHD_LB_WORKQ_SZ);
2337         prot->rx_compl_prod_sync = 0;
2338         DHD_INFO(("%s: created rx_compl_workq <%p,%d>\n",
2339             __FUNCTION__, buffer, DHD_LB_WORKQ_SZ));
2340     }
2341 #endif /* DHD_LB_RXC */
2342     /* Initialize trap buffer */
2343     if (dhd_dma_buf_alloc(dhd, &dhd->prot->fw_trap_buf, BCMPCIE_EXT_TRAP_DATA_MAXLEN)) {
2344         DHD_ERROR(("%s: dhd_init_trap_buffer failed\n", __FUNCTION__));
2345         goto fail;
2346     }
2347 
2348     return BCME_OK;
2349 
2350 fail:
2351 
2352 #ifndef CONFIG_DHD_USE_STATIC_BUF
2353     if (prot != NULL) {
2354         dhd_prot_detach(dhd);
2355     }
2356 #endif /* CONFIG_DHD_USE_STATIC_BUF */
2357 
2358     return BCME_NOMEM;
2359 } /* dhd_prot_attach */
2360 
2361 void
2362 dhd_set_host_cap(dhd_pub_t *dhd)
2363 {
2364     uint32 data = 0;
2365     dhd_prot_t *prot = dhd->prot;
2366 
2367     if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
2368         if (dhd->h2d_phase_supported) {
2369             data |= HOSTCAP_H2D_VALID_PHASE;
2370 
2371             if (dhd->force_dongletrap_on_bad_h2d_phase) {
2372                 data |= HOSTCAP_H2D_ENABLE_TRAP_ON_BADPHASE;
2373             }
2374         }
2375         if (prot->host_ipc_version > prot->device_ipc_version) {
2376             prot->active_ipc_version = prot->device_ipc_version;
2377         } else {
2378             prot->active_ipc_version = prot->host_ipc_version;
2379         }
2380 
2381         data |= prot->active_ipc_version;
2382 
2383         if (dhdpcie_bus_get_pcie_hostready_supported(dhd->bus)) {
2384             DHD_INFO(("Advertise Hostready Capability\n"));
2385 
2386             data |= HOSTCAP_H2D_ENABLE_HOSTRDY;
2387         }
2388 #ifdef PCIE_INB_DW
2389         if (dhdpcie_bus_get_pcie_inband_dw_supported(dhd->bus)) {
2390             DHD_INFO(("Advertise Inband-DW Capability\n"));
2391             data |= HOSTCAP_DS_INBAND_DW;
2392             data |= HOSTCAP_DS_NO_OOB_DW;
2393             dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_INB);
2394         } else
2395 #endif /* PCIE_INB_DW */
2396 #ifdef PCIE_OOB
2397         if (dhdpcie_bus_get_pcie_oob_dw_supported(dhd->bus)) {
2398             dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_OOB);
2399         } else
2400 #endif /* PCIE_OOB */
2401         {
2402             /* Disable DS altogether */
2403             data |= HOSTCAP_DS_NO_OOB_DW;
2404             dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_NONE);
2405         }
2406 
2407         if (dhdpcie_bus_get_pcie_idma_supported(dhd->bus)) {
2408             DHD_ERROR(("IDMA inited\n"));
2409             data |= HOSTCAP_H2D_IDMA;
2410             dhd->idma_inited = TRUE;
2411         }
2412 
2413         if (dhdpcie_bus_get_pcie_ifrm_supported(dhd->bus)) {
2414             DHD_ERROR(("IFRM Inited\n"));
2415             data |= HOSTCAP_H2D_IFRM;
2416             dhd->ifrm_inited = TRUE;
2417             dhd->dma_h2d_ring_upd_support = FALSE;
2418             dhd_prot_dma_indx_free(dhd);
2419         }
2420 
2421         /* Indicate support for TX status metadata */
2422         data |= HOSTCAP_TXSTATUS_METADATA;
2423 
2424         /* Indicate support for extended trap data */
2425         data |= HOSTCAP_EXTENDED_TRAP_DATA;
2426 
2427         DHD_INFO(("%s:Active Ver:%d, Host Ver:%d, FW Ver:%d\n",
2428             __FUNCTION__,
2429             prot->active_ipc_version, prot->host_ipc_version,
2430             prot->device_ipc_version));
2431 
2432         dhd_bus_cmn_writeshared(dhd->bus, &data, sizeof(uint32), HOST_API_VERSION, 0);
2433         dhd_bus_cmn_writeshared(dhd->bus, &prot->fw_trap_buf.pa,
2434             sizeof(prot->fw_trap_buf.pa), DNGL_TO_HOST_TRAP_ADDR, 0);
2435     }
2436 #ifdef HOFFLOAD_MODULES
2437         dhd_bus_cmn_writeshared(dhd->bus, &dhd->hmem.data_addr,
2438             sizeof(dhd->hmem.data_addr), WRT_HOST_MODULE_ADDR, 0);
2439 #endif
2440 
2441 #ifdef DHD_TIMESYNC
2442     dhd_timesync_notify_ipc_rev(dhd->ts, prot->active_ipc_version);
2443 #endif /* DHD_TIMESYNC */
2444 }
2445 
2446 /**
2447  * dhd_prot_init - second stage of dhd_prot_attach. Now that the dongle has
2448  * completed its initialization of the pcie_shared structure, we may now fetch
2449  * the dongle-advertised features and adjust the protocol layer accordingly.
2450  *
2451  * dhd_prot_init() may be invoked again after a dhd_prot_reset().
2452  */
2453 int
2454 dhd_prot_init(dhd_pub_t *dhd)
2455 {
2456     sh_addr_t base_addr;
2457     dhd_prot_t *prot = dhd->prot;
2458     int ret = 0;
2459 
2460     /**
2461      * A user-defined value can be assigned to the global variable h2d_max_txpost via
2462      * 1. the DHD IOVAR h2d_max_txpost, before firmware download, or
2463      * 2. the module parameter h2d_max_txpost.
2464      * prot->h2d_max_txpost defaults to H2DRING_TXPOST_MAX_ITEM if the user
2465      * has not set a value by one of the above methods.
2466      */
2467     prot->h2d_max_txpost = (uint16)h2d_max_txpost;
2468 
2469     DHD_ERROR(("%s:%d: h2d_max_txpost = %d\n", __FUNCTION__, __LINE__, prot->h2d_max_txpost));
2470 
2471     /* Read max rx packets supported by dongle */
2472     dhd_bus_cmn_readshared(dhd->bus, &prot->max_rxbufpost, MAX_HOST_RXBUFS, 0);
2473     if (prot->max_rxbufpost == 0) {
2474         /* This would happen if the dongle firmware is not */
2475         /* using the latest shared structure template */
2476         prot->max_rxbufpost = DEFAULT_RX_BUFFERS_TO_POST;
2477     }
2478     DHD_INFO(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost));
2479 
2480     /* Initialize.  bzero() would blow away the dma pointers. */
2481     prot->max_eventbufpost = DHD_FLOWRING_MAX_EVENTBUF_POST;
2482     prot->max_ioctlrespbufpost = DHD_FLOWRING_MAX_IOCTLRESPBUF_POST;
2483     prot->max_infobufpost = DHD_H2D_INFORING_MAX_BUF_POST;
2484     prot->max_tsbufpost = DHD_MAX_TSBUF_POST;
2485 
2486     prot->cur_ioctlresp_bufs_posted = 0;
2487     prot->active_tx_count = 0;
2488     prot->data_seq_no = 0;
2489     prot->ioctl_seq_no = 0;
2490     prot->rxbufpost = 0;
2491     prot->cur_event_bufs_posted = 0;
2492     prot->ioctl_state = 0;
2493     prot->curr_ioctl_cmd = 0;
2494     prot->cur_ts_bufs_posted = 0;
2495     prot->infobufpost = 0;
2496 
2497     prot->dmaxfer.srcmem.va = NULL;
2498     prot->dmaxfer.dstmem.va = NULL;
2499     prot->dmaxfer.in_progress = FALSE;
2500 
2501     prot->metadata_dbg = FALSE;
2502     prot->rx_metadata_offset = 0;
2503     prot->tx_metadata_offset = 0;
2504     prot->txp_threshold = TXP_FLUSH_MAX_ITEMS_FLUSH_CNT;
2505 
2506     /* To catch any rollover issues fast, starting with higher ioctl_trans_id */
2507     prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER;
2508     prot->ioctl_state = 0;
2509     prot->ioctl_status = 0;
2510     prot->ioctl_resplen = 0;
2511     prot->ioctl_received = IOCTL_WAIT;
2512 
2513     /* Register the interrupt function upfront */
2514     /* remove corerev checks in data path */
2515     prot->mb_ring_fn = dhd_bus_get_mbintr_fn(dhd->bus);
2516 
2517     prot->mb_2_ring_fn = dhd_bus_get_mbintr_2_fn(dhd->bus);
2518 
2519     /* Initialize Common MsgBuf Rings */
2520 
2521     prot->device_ipc_version = dhd->bus->api.fw_rev;
2522     prot->host_ipc_version = PCIE_SHARED_VERSION;
2523 
2524     /* Init the host API version */
2525     dhd_set_host_cap(dhd);
2526 
2527     dhd_prot_ring_init(dhd, &prot->h2dring_ctrl_subn);
2528     dhd_prot_ring_init(dhd, &prot->h2dring_rxp_subn);
2529     dhd_prot_ring_init(dhd, &prot->d2hring_ctrl_cpln);
2530 
2531     /* Make it compatible with pre-rev7 firmware */
2532     if (prot->active_ipc_version < PCIE_SHARED_VERSION_7) {
2533         prot->d2hring_tx_cpln.item_len =
2534             D2HRING_TXCMPLT_ITEMSIZE_PREREV7;
2535         prot->d2hring_rx_cpln.item_len =
2536             D2HRING_RXCMPLT_ITEMSIZE_PREREV7;
2537     }
2538     dhd_prot_ring_init(dhd, &prot->d2hring_tx_cpln);
2539     dhd_prot_ring_init(dhd, &prot->d2hring_rx_cpln);
2540 
2541     dhd_prot_d2h_sync_init(dhd);
2542 
2543     dhd_prot_h2d_sync_init(dhd);
2544 
2545 #ifdef PCIE_INB_DW
2546     /* Set the initial DS state */
2547     if (INBAND_DW_ENAB(dhd->bus)) {
2548         dhdpcie_bus_set_pcie_inband_dw_state(dhd->bus,
2549             DW_DEVICE_DS_ACTIVE);
2550     }
2551 #endif /* PCIE_INB_DW */
2552 
2553     /* init the scratch buffer */
2554     dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_scratch_buf.pa);
2555     dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
2556         D2H_DMA_SCRATCH_BUF, 0);
2557     dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf.len,
2558         sizeof(prot->d2h_dma_scratch_buf.len), D2H_DMA_SCRATCH_BUF_LEN, 0);
2559 
2560     /* If supported by the host, indicate the memory block
2561      * for completion writes / submission reads to shared space
2562      */
2563     if (dhd->dma_d2h_ring_upd_support) {
2564         dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_wr_buf.pa);
2565         dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
2566             D2H_DMA_INDX_WR_BUF, 0);
2567         dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_rd_buf.pa);
2568         dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
2569             H2D_DMA_INDX_RD_BUF, 0);
2570     }
2571 
2572     if (dhd->dma_h2d_ring_upd_support || IDMA_ENAB(dhd)) {
2573         dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_wr_buf.pa);
2574         dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
2575             H2D_DMA_INDX_WR_BUF, 0);
2576         dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_rd_buf.pa);
2577         dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
2578             D2H_DMA_INDX_RD_BUF, 0);
2579     }
2580 
2581     /* Signal to the dongle that common ring init is complete */
2582     dhd_bus_hostready(dhd->bus);
2583 
2584     /*
2585      * If the DMA-able buffers for flowring needs to come from a specific
2586      * contiguous memory region, then setup prot->flowrings_dma_buf here.
2587      * dhd_prot_flowrings_pool_attach() will carve out DMA-able buffers from
2588      * this contiguous memory region, for each of the flowrings.
2589      */
2590 
2591     /* Pre-allocate pool of msgbuf_ring for flowrings */
2592     if (dhd_prot_flowrings_pool_attach(dhd) != BCME_OK) {
2593         return BCME_ERROR;
2594     }
2595 
2596     /* If IFRM is enabled, wait for FW to setup the DMA channel */
2597     if (IFRM_ENAB(dhd)) {
2598         dhd_base_addr_htolpa(&base_addr, prot->h2d_ifrm_indx_wr_buf.pa);
2599         dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
2600             H2D_IFRM_INDX_WR_BUF, 0);
2601     }
2602 
2603     /* See if info rings could be created */
2604     if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
2605         if ((ret = dhd_prot_init_info_rings(dhd)) != BCME_OK) {
2606             /* For now log and proceed; further clean up action may be necessary
2607              * when we have more clarity.
2608              */
2609             DHD_ERROR(("%s Info rings couldn't be created: Err Code%d",
2610                 __FUNCTION__, ret));
2611         }
2612     }
2613 
2614     /* Host should configure soft doorbells if needed ... here */
2615 
2616     /* Post host-configured soft doorbells to the dongle */
2617     dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd);
2618 
2619     /* Post buffers for packet reception and ioctl/event responses */
2620     dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
2621     dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
2622     /* Fix re-entry problem without general lock */
2623     atomic_set(&dhd_msgbuf_rxbuf_post_event_bufs_running, 0);
2624     dhd_msgbuf_rxbuf_post_event_bufs(dhd);
2625 
2626     return BCME_OK;
2627 } /* dhd_prot_init */
2628 
2629 
2630 /**
2631  * dhd_prot_detach - PCIE FD protocol layer destructor.
2632  * Unlink, frees allocated protocol memory (including dhd_prot)
2633  */
2634 void dhd_prot_detach(dhd_pub_t *dhd)
2635 {
2636     dhd_prot_t *prot = dhd->prot;
2637 
2638     /* Stop the protocol module */
2639     if (prot) {
2640         /* free up all DMA-able buffers allocated during prot attach/init */
2641 
2642         dhd_dma_buf_free(dhd, &prot->d2h_dma_scratch_buf);
2643         dhd_dma_buf_free(dhd, &prot->retbuf);
2644         dhd_dma_buf_free(dhd, &prot->ioctbuf);
2645         dhd_dma_buf_free(dhd, &prot->host_bus_throughput_buf);
2646         dhd_dma_buf_free(dhd, &prot->hostts_req_buf);
2647         dhd_dma_buf_free(dhd, &prot->fw_trap_buf);
2648 
2649         /* DMA-able buffers for DMAing H2D/D2H WR/RD indices */
2650         dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
2651         dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_rd_buf);
2652         dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_wr_buf);
2653         dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
2654 
2655         dhd_dma_buf_free(dhd, &prot->h2d_ifrm_indx_wr_buf);
2656 
2657         /* Common MsgBuf Rings */
2658         dhd_prot_ring_detach(dhd, &prot->h2dring_ctrl_subn);
2659         dhd_prot_ring_detach(dhd, &prot->h2dring_rxp_subn);
2660         dhd_prot_ring_detach(dhd, &prot->d2hring_ctrl_cpln);
2661         dhd_prot_ring_detach(dhd, &prot->d2hring_tx_cpln);
2662         dhd_prot_ring_detach(dhd, &prot->d2hring_rx_cpln);
2663 
2664         /* Detach each DMA-able buffer and free the pool of msgbuf_ring_t */
2665         dhd_prot_flowrings_pool_detach(dhd);
2666 
2667         /* detach info rings */
2668         dhd_prot_detach_info_rings(dhd);
2669 
2670         /* If IOCTLRESP_USE_CONSTMEM is defined, IOCTL PKTs use the pktid_map_handle_ioctl
2671          * handler and PKT memory is allocated using alloc_ioctl_return_buffer(). Otherwise
2672          * they are part of the pktid_ctrl_map handler and PKT memory is allocated using
2673          * PKTGET_STATIC (if DHD_USE_STATIC_CTRLBUF is defined) or PKTGET.
2674          * Similarly, for freeing PKT buffers DHD_NATIVE_TO_PKTID_FINI is used,
2675          * which calls PKTFREE_STATIC (if DHD_USE_STATIC_CTRLBUF is defined) or PKTFREE.
2676          * If IOCTLRESP_USE_CONSTMEM is defined, IOCTL PKTs are instead freed using
2677          * DHD_NATIVE_TO_PKTID_FINI_IOCTL, which calls free_ioctl_return_buffer.
2678          */
2679         DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_ctrl_map);
2680         DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_rx_map);
2681         DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_tx_map);
2682 #ifdef IOCTLRESP_USE_CONSTMEM
2683         DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, prot->pktid_map_handle_ioctl);
2684 #endif
2685 
2686         dhd_os_spin_lock_deinit(dhd->osh, prot->rx_lock);
2687 
2688 #ifndef CONFIG_DHD_USE_STATIC_BUF
2689         MFREE(dhd->osh, dhd->prot, sizeof(dhd_prot_t));
2690 #endif /* CONFIG_DHD_USE_STATIC_BUF */
2691 
2692 #if defined(DHD_LB_TXC)
2693         if (prot->tx_compl_prod.buffer)
2694             MFREE(dhd->osh, prot->tx_compl_prod.buffer,
2695                   sizeof(void*) * DHD_LB_WORKQ_SZ);
2696 #endif /* DHD_LB_TXC */
2697 #if defined(DHD_LB_RXC)
2698         if (prot->rx_compl_prod.buffer)
2699             MFREE(dhd->osh, prot->rx_compl_prod.buffer,
2700                   sizeof(void*) * DHD_LB_WORKQ_SZ);
2701 #endif /* DHD_LB_RXC */
2702 
2703         dhd->prot = NULL;
2704     }
2705 } /* dhd_prot_detach */
2706 
2707 
2708 /**
2709  * dhd_prot_reset - Reset the protocol layer without freeing any objects.
2710  * This may be invoked to soft reboot the dongle, without having to
2711  * detach and attach the entire protocol layer.
2712  *
2713  * After dhd_prot_reset(), dhd_prot_init() may be invoked
2714  * without going through a dhd_prot_attach() phase.
2715  */
2716 void
2717 dhd_prot_reset(dhd_pub_t *dhd)
2718 {
2719     struct dhd_prot *prot = dhd->prot;
2720 
2721     DHD_TRACE(("%s\n", __FUNCTION__));
2722 
2723     if (prot == NULL) {
2724         return;
2725     }
2726 
2727     dhd_prot_flowrings_pool_reset(dhd);
2728 
2729     /* Reset Common MsgBuf Rings */
2730     dhd_prot_ring_reset(dhd, &prot->h2dring_ctrl_subn);
2731     dhd_prot_ring_reset(dhd, &prot->h2dring_rxp_subn);
2732     dhd_prot_ring_reset(dhd, &prot->d2hring_ctrl_cpln);
2733     dhd_prot_ring_reset(dhd, &prot->d2hring_tx_cpln);
2734     dhd_prot_ring_reset(dhd, &prot->d2hring_rx_cpln);
2735 
2736     /* Reset info rings */
2737     if (prot->h2dring_info_subn) {
2738         dhd_prot_ring_reset(dhd, prot->h2dring_info_subn);
2739     }
2740 
2741     if (prot->d2hring_info_cpln) {
2742         dhd_prot_ring_reset(dhd, prot->d2hring_info_cpln);
2743     }
2744 
2745     /* Reset all DMA-able buffers allocated during prot attach */
2746     dhd_dma_buf_reset(dhd, &prot->d2h_dma_scratch_buf);
2747     dhd_dma_buf_reset(dhd, &prot->retbuf);
2748     dhd_dma_buf_reset(dhd, &prot->ioctbuf);
2749     dhd_dma_buf_reset(dhd, &prot->host_bus_throughput_buf);
2750     dhd_dma_buf_reset(dhd, &prot->hostts_req_buf);
2751     dhd_dma_buf_reset(dhd, &prot->fw_trap_buf);
2752 
2753     dhd_dma_buf_reset(dhd, &prot->h2d_ifrm_indx_wr_buf);
2754 
2755     /* Reset all DMA-able buffers for DMAing H2D/D2H WR/RD indices */
2756     dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_rd_buf);
2757     dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_wr_buf);
2758     dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_rd_buf);
2759     dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_wr_buf);
2760 
2761 
2762     prot->rx_metadata_offset = 0;
2763     prot->tx_metadata_offset = 0;
2764 
2765     prot->rxbufpost = 0;
2766     prot->cur_event_bufs_posted = 0;
2767     prot->cur_ioctlresp_bufs_posted = 0;
2768 
2769     prot->active_tx_count = 0;
2770     prot->data_seq_no = 0;
2771     prot->ioctl_seq_no = 0;
2772     prot->ioctl_state = 0;
2773     prot->curr_ioctl_cmd = 0;
2774     prot->ioctl_received = IOCTL_WAIT;
2775     /* To catch any rollover issues fast, starting with higher ioctl_trans_id */
2776     prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER;
2777 
2778     /* dhd_flow_rings_init is located at dhd_bus_start,
2779      * so when stopping bus, flowrings shall be deleted
2780      */
2781     if (dhd->flow_rings_inited) {
2782         dhd_flow_rings_deinit(dhd);
2783     }
2784 
2785     /* Reset PKTID map */
2786     DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_ctrl_map);
2787     DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_rx_map);
2788     DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_tx_map);
2789 #ifdef IOCTLRESP_USE_CONSTMEM
2790     DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, prot->pktid_map_handle_ioctl);
2791 #endif /* IOCTLRESP_USE_CONSTMEM */
2792 #ifdef DMAMAP_STATS
2793     dhd->dma_stats.txdata = dhd->dma_stats.txdata_sz = 0;
2794     dhd->dma_stats.rxdata = dhd->dma_stats.rxdata_sz = 0;
2795 #ifndef IOCTLRESP_USE_CONSTMEM
2796     dhd->dma_stats.ioctl_rx = dhd->dma_stats.ioctl_rx_sz = 0;
2797 #endif /* IOCTLRESP_USE_CONSTMEM */
2798     dhd->dma_stats.event_rx = dhd->dma_stats.event_rx_sz = 0;
2799     dhd->dma_stats.info_rx = dhd->dma_stats.info_rx_sz = 0;
2800     dhd->dma_stats.tsbuf_rx = dhd->dma_stats.tsbuf_rx_sz = 0;
2801 #endif /* DMAMAP_STATS */
2802 } /* dhd_prot_reset */
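/*
 * Minimal usage sketch of the soft-reboot path described in the comment
 * above dhd_prot_reset(); dhd_prot_soft_reboot_sketch() is a hypothetical
 * helper for illustration only, not a function defined in this driver:
 *
 *     static int dhd_prot_soft_reboot_sketch(dhd_pub_t *dhd)
 *     {
 *         dhd_prot_reset(dhd);        // clear rings, DMA buffers, pktid maps
 *         return dhd_prot_init(dhd);  // re-init without dhd_prot_attach()
 *     }
 */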
2803 
2804 #if defined(DHD_LB_RXP)
2805 #define DHD_LB_DISPATCH_RX_PROCESS(dhdp)    dhd_lb_dispatch_rx_process(dhdp)
2806 #else /* !DHD_LB_RXP */
2807 #define DHD_LB_DISPATCH_RX_PROCESS(dhdp)    do { } while (0)
2808 #endif /* !DHD_LB_RXP */
2809 
2810 #if defined(DHD_LB_RXC)
2811 #define DHD_LB_DISPATCH_RX_COMPL(dhdp)    dhd_lb_dispatch_rx_compl(dhdp)
2812 #else /* !DHD_LB_RXC */
2813 #define DHD_LB_DISPATCH_RX_COMPL(dhdp)    do { } while (0)
2814 #endif /* !DHD_LB_RXC */
2815 
2816 #if defined(DHD_LB_TXC)
2817 #define DHD_LB_DISPATCH_TX_COMPL(dhdp)    dhd_lb_dispatch_tx_compl(dhdp)
2818 #else /* !DHD_LB_TXC */
2819 #define DHD_LB_DISPATCH_TX_COMPL(dhdp)    do { } while (0)
2820 #endif /* !DHD_LB_TXC */
2821 
2822 
2823 #if defined(DHD_LB)
2824 /* DHD load balancing: deferral of work to another online CPU */
2825 /* DHD_LB_TXC DHD_LB_RXC DHD_LB_RXP dispatchers, in dhd_linux.c */
2826 extern void dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp);
2827 extern void dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp);
2828 extern void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp);
2829 extern void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx);
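/*
 * The load-balancing handlers below roughly follow a single-producer/
 * single-consumer work queue pattern: the producer side (tx_compl_prod /
 * rx_compl_prod) is filled during completion processing and flushed with
 * bcm_workq_prod_sync(), after which a dispatcher schedules a tasklet on
 * another online CPU; the consumer side (tx_compl_cons / rx_compl_cons)
 * drains elements with bcm_ring_cons()/WORKQ_ELEMENT() and acknowledges
 * them with bcm_workq_cons_sync().
 */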
2830 
2831 #if defined(DHD_LB_RXP)
2832 /**
2833  * dhd_lb_dispatch_rx_process - load balance by dispatching Rx processing work
2834  * to other CPU cores
2835  */
2836 static INLINE void
2837 dhd_lb_dispatch_rx_process(dhd_pub_t *dhdp)
2838 {
2839     dhd_lb_rx_napi_dispatch(dhdp); /* dispatch rx_process_napi */
2840 }
2841 #endif /* DHD_LB_RXP */
2842 
2843 #if defined(DHD_LB_TXC)
2844 /**
2845  * dhd_lb_dispatch_tx_compl - load balance by dispatching Tx completion work
2846  * to other CPU cores
2847  */
2848 static INLINE void
2849 dhd_lb_dispatch_tx_compl(dhd_pub_t *dhdp, uint16 ring_idx)
2850 {
2851     bcm_workq_prod_sync(&dhdp->prot->tx_compl_prod); /* flush WR index */
2852     dhd_lb_tx_compl_dispatch(dhdp); /* dispatch tx_compl_tasklet */
2853 }
2854 
2855 /**
2856  * DHD load balanced tx completion tasklet handler that performs the
2857  * freeing of packets on the selected CPU. Packet pointers are delivered to
2858  * this tasklet via the tx complete workq.
2859  */
2860 void
2861 dhd_lb_tx_compl_handler(unsigned long data)
2862 {
2863     int elem_ix;
2864     void *pkt, **elem;
2865     dmaaddr_t pa;
2866     uint32 pa_len;
2867     dhd_pub_t *dhd = (dhd_pub_t *)data;
2868     dhd_prot_t *prot = dhd->prot;
2869     bcm_workq_t *workq = &prot->tx_compl_cons;
2870     uint32 count = 0;
2871 
2872     int curr_cpu;
2873     curr_cpu = get_cpu();
2874     put_cpu();
2875 
2876     DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhd);
2877 
2878     while (1) {
2879         elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
2880 
2881         if (elem_ix == BCM_RING_EMPTY) {
2882             break;
2883         }
2884 
2885         elem = WORKQ_ELEMENT(void *, workq, elem_ix);
2886         pkt = *elem;
2887 
2888         DHD_INFO(("%s: tx_compl_cons pkt<%p>\n", __FUNCTION__, pkt));
2889 
2890         OSL_PREFETCH(PKTTAG(pkt));
2891         OSL_PREFETCH(pkt);
2892 
2893         pa = DHD_PKTTAG_PA((dhd_pkttag_fr_t *)PKTTAG(pkt));
2894         pa_len = DHD_PKTTAG_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt));
2895 
2896         DMA_UNMAP(dhd->osh, pa, pa_len, DMA_RX, 0, 0);
2897 #if defined(BCMPCIE)
2898         dhd_txcomplete(dhd, pkt, true);
2899 #endif
2900 
2901         PKTFREE(dhd->osh, pkt, TRUE);
2902         count++;
2903     }
2904 
2905     bcm_workq_cons_sync(workq);
2906     DHD_LB_STATS_UPDATE_TXC_HISTO(dhd, count);
2907 }
2908 #endif /* DHD_LB_TXC */
2909 
2910 #if defined(DHD_LB_RXC)
2911 
2912 /**
2913  * dhd_lb_dispatch_rx_compl - load balance by dispatching Rx completion work
2914  * to other CPU cores
2915  */
2916 static INLINE void
2917 dhd_lb_dispatch_rx_compl(dhd_pub_t *dhdp)
2918 {
2919     dhd_prot_t *prot = dhdp->prot;
2920     /* Schedule the tasklet only if we have to */
2921     if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) {
2922         /* flush WR index */
2923         bcm_workq_prod_sync(&dhdp->prot->rx_compl_prod);
2924         dhd_lb_rx_compl_dispatch(dhdp); /* dispatch rx_compl_tasklet */
2925     }
2926 }
2927 
2928 void
2929 dhd_lb_rx_compl_handler(unsigned long data)
2930 {
2931     dhd_pub_t *dhd = (dhd_pub_t *)data;
2932     bcm_workq_t *workq = &dhd->prot->rx_compl_cons;
2933 
2934     DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhd);
2935 
2936     dhd_msgbuf_rxbuf_post(dhd, TRUE); /* re-use pktids */
2937     bcm_workq_cons_sync(workq);
2938 }
2939 #endif /* DHD_LB_RXC */
2940 #endif /* DHD_LB */
2941 
2942 void
2943 dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 rx_offset)
2944 {
2945     dhd_prot_t *prot = dhd->prot;
2946     prot->rx_dataoffset = rx_offset;
2947 }
2948 
2949 static int
2950 dhd_check_create_info_rings(dhd_pub_t *dhd)
2951 {
2952     dhd_prot_t *prot = dhd->prot;
2953     int ret = BCME_ERROR;
2954     uint16 ringid = dhd->bus->max_tx_flowrings + BCMPCIE_COMMON_MSGRINGS;
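    /*
     * Worked example of the ring id assignment above (values hypothetical,
     * for illustration only): with BCMPCIE_COMMON_MSGRINGS == 5 common rings
     * and max_tx_flowrings == 40, the info submit ring created below would
     * take ringid 45 and the info completion ring would take ringid 46
     * (ringid + 1).
     */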
2955 
2956     if (prot->h2dring_info_subn && prot->d2hring_info_cpln) {
2957         return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
2958     }
2959 
2960     if (prot->h2dring_info_subn == NULL) {
2961         prot->h2dring_info_subn = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
2962 
2963         if (prot->h2dring_info_subn == NULL) {
2964             DHD_ERROR(("%s: couldn't alloc memory for h2dring_info_subn\n",
2965                 __FUNCTION__));
2966             return BCME_NOMEM;
2967         }
2968 
2969         DHD_INFO(("%s: about to create debug submit ring\n", __FUNCTION__));
2970         ret = dhd_prot_ring_attach(dhd, prot->h2dring_info_subn, "h2dinfo",
2971             H2DRING_DYNAMIC_INFO_MAX_ITEM, H2DRING_INFO_BUFPOST_ITEMSIZE,
2972             ringid);
2973         if (ret != BCME_OK) {
2974             DHD_ERROR(("%s: couldn't alloc resources for dbg submit ring\n",
2975                 __FUNCTION__));
2976             goto err;
2977         }
2978     }
2979 
2980     if (prot->d2hring_info_cpln == NULL) {
2981         prot->d2hring_info_cpln = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
2982 
2983         if (prot->d2hring_info_cpln == NULL) {
2984             DHD_ERROR(("%s: couldn't alloc memory for d2hring_info_cpln\n",
2985                 __FUNCTION__));
2986             return BCME_NOMEM;
2987         }
2988 
2989         /* create the debug info completion ring next to debug info submit ring
2990         * ringid = id next to debug info submit ring
2991         */
2992         ringid = ringid + 1;
2993 
2994         DHD_INFO(("%s: about to create debug cpl ring\n", __FUNCTION__));
2995         ret = dhd_prot_ring_attach(dhd, prot->d2hring_info_cpln, "d2hinfo",
2996             D2HRING_DYNAMIC_INFO_MAX_ITEM, D2HRING_INFO_BUFCMPLT_ITEMSIZE,
2997             ringid);
2998         if (ret != BCME_OK) {
2999             DHD_ERROR(("%s: couldn't alloc resources for dbg cpl ring\n",
3000                 __FUNCTION__));
3001             dhd_prot_ring_detach(dhd, prot->h2dring_info_subn);
3002             goto err;
3003         }
3004     }
3005 
3006     return ret;
3007 err:
3008     MFREE(prot->osh, prot->h2dring_info_subn, sizeof(msgbuf_ring_t));
3009     prot->h2dring_info_subn = NULL;
3010 
3011     if (prot->d2hring_info_cpln) {
3012         MFREE(prot->osh, prot->d2hring_info_cpln, sizeof(msgbuf_ring_t));
3013         prot->d2hring_info_cpln = NULL;
3014     }
3015     return ret;
3016 } /* dhd_check_create_info_rings */
3017 
3018 int
3019 dhd_prot_init_info_rings(dhd_pub_t *dhd)
3020 {
3021     dhd_prot_t *prot = dhd->prot;
3022     int ret = BCME_OK;
3023 
3024     if ((ret = dhd_check_create_info_rings(dhd)) != BCME_OK) {
3025         DHD_ERROR(("%s: info rings aren't created! \n",
3026             __FUNCTION__));
3027         return ret;
3028     }
3029 
3030     if ((prot->d2hring_info_cpln->inited) || (prot->d2hring_info_cpln->create_pending)) {
3031         DHD_INFO(("Info completion ring already created!\n"));
3032         return ret;
3033     }
3034 
3035     DHD_TRACE(("trying to send create d2h info ring: id %d\n", prot->d2hring_info_cpln->idx));
3036     ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_info_cpln);
3037     if (ret != BCME_OK)
3038         return ret;
3039 
3040     prot->d2hring_info_cpln->seqnum = D2H_EPOCH_INIT_VAL;
3041 
3042     DHD_TRACE(("trying to send create h2d info ring id %d\n", prot->h2dring_info_subn->idx));
3043     prot->h2dring_info_subn->n_completion_ids = 1;
3044     prot->h2dring_info_subn->compeltion_ring_ids[0] = prot->d2hring_info_cpln->idx;
3045 
3046     ret = dhd_send_h2d_ringcreate(dhd, prot->h2dring_info_subn);
3047 
3048     /* Note that there is no way to request deletion of a d2h or h2d ring in case either fails,
3049      * so we cannot clean up if one ring was created while the other failed
3050      */
3051     return ret;
3052 } /* dhd_prot_init_info_rings */
3053 
3054 static void
3055 dhd_prot_detach_info_rings(dhd_pub_t *dhd)
3056 {
3057     if (dhd->prot->h2dring_info_subn) {
3058         dhd_prot_ring_detach(dhd, dhd->prot->h2dring_info_subn);
3059         MFREE(dhd->prot->osh, dhd->prot->h2dring_info_subn, sizeof(msgbuf_ring_t));
3060         dhd->prot->h2dring_info_subn = NULL;
3061     }
3062     if (dhd->prot->d2hring_info_cpln) {
3063         dhd_prot_ring_detach(dhd, dhd->prot->d2hring_info_cpln);
3064         MFREE(dhd->prot->osh, dhd->prot->d2hring_info_cpln, sizeof(msgbuf_ring_t));
3065         dhd->prot->d2hring_info_cpln = NULL;
3066     }
3067 }
3068 
3069 /**
3070  * Initialize protocol: sync w/dongle state.
3071  * Sets dongle media info (iswl, drv_version, mac address).
3072  */
3073 int dhd_sync_with_dongle(dhd_pub_t *dhd)
3074 {
3075     int ret = 0;
3076     wlc_rev_info_t revinfo;
3077 
3078 
3079     DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3080 
3081     dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
3082 
3083     /* Post ts buffer after shim layer is attached */
3084     ret = dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
3085 
3086 
3087 #ifdef DHD_FW_COREDUMP
3088     /* Check the memdump capability */
3089     dhd_get_memdump_info(dhd);
3090 #endif /* DHD_FW_COREDUMP */
3091 #ifdef BCMASSERT_LOG
3092     dhd_get_assert_info(dhd);
3093 #endif /* BCMASSERT_LOG */
3094 
3095     /* Get the device rev info */
3096     memset(&revinfo, 0, sizeof(revinfo));
3097     ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0);
3098     if (ret < 0) {
3099         DHD_ERROR(("%s: GET revinfo FAILED\n", __FUNCTION__));
3100         goto done;
3101     }
3102     DHD_ERROR(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__,
3103         revinfo.deviceid, revinfo.vendorid, revinfo.chipnum));
3104 
3105     DHD_SSSR_DUMP_INIT(dhd);
3106 
3107     dhd_process_cid_mac(dhd, TRUE);
3108     ret = dhd_preinit_ioctls(dhd);
3109     dhd_process_cid_mac(dhd, FALSE);
3110 
3111     /* Always assumes wl for now */
3112     dhd->iswl = TRUE;
3113 done:
3114     return ret;
3115 } /* dhd_sync_with_dongle */
3116 
3117 
3118 #define DHD_DBG_SHOW_METADATA    0
3119 
3120 #if DHD_DBG_SHOW_METADATA
3121 static void BCMFASTPATH
3122 dhd_prot_print_metadata(dhd_pub_t *dhd, void *ptr, int len)
3123 {
3124     uint8 tlv_t;
3125     uint8 tlv_l;
3126     uint8 *tlv_v = (uint8 *)ptr;
3127 
3128     if (len <= BCMPCIE_D2H_METADATA_HDRLEN)
3129         return;
3130 
3131     len -= BCMPCIE_D2H_METADATA_HDRLEN;
3132     tlv_v += BCMPCIE_D2H_METADATA_HDRLEN;
3133 
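    /*
     * Each metadata entry is a TLV: one tag byte (TLV_TAG_OFF), one length
     * byte (TLV_LEN_OFF), followed by 'length' bytes of value. The loop below
     * stops on a truncated TLV, a zero tag, or a filler tag.
     */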
3134     while (len > TLV_HDR_LEN) {
3135         tlv_t = tlv_v[TLV_TAG_OFF];
3136         tlv_l = tlv_v[TLV_LEN_OFF];
3137 
3138         len -= TLV_HDR_LEN;
3139         tlv_v += TLV_HDR_LEN;
3140         if (len < tlv_l)
3141             break;
3142         if ((tlv_t == 0) || (tlv_t == WLFC_CTL_TYPE_FILLER))
3143             break;
3144 
3145         switch (tlv_t) {
3146         case WLFC_CTL_TYPE_TXSTATUS: {
3147             uint32 txs;
3148             memcpy(&txs, tlv_v, sizeof(uint32));
3149             if (tlv_l < (sizeof(wl_txstatus_additional_info_t) + sizeof(uint32))) {
3150                 printf("METADATA TX_STATUS: %08x\n", txs);
3151             } else {
3152                 wl_txstatus_additional_info_t tx_add_info;
3153                 memcpy(&tx_add_info, tlv_v + sizeof(uint32),
3154                     sizeof(wl_txstatus_additional_info_t));
3155                 printf("METADATA TX_STATUS: %08x WLFCTS[%04x | %08x - %08x - %08x]"
3156                     " rate = %08x tries = %d - %d\n", txs,
3157                     tx_add_info.seq, tx_add_info.entry_ts,
3158                     tx_add_info.enq_ts, tx_add_info.last_ts,
3159                     tx_add_info.rspec, tx_add_info.rts_cnt,
3160                     tx_add_info.tx_cnt);
3161             }
3162             } break;
3163 
3164         case WLFC_CTL_TYPE_RSSI: {
3165             if (tlv_l == 1)
3166                 printf("METADATA RX_RSSI: rssi = %d\n", *tlv_v);
3167             else
3168                 printf("METADATA RX_RSSI[%04x]: rssi = %d snr = %d\n",
3169                     (*(tlv_v + 3) << 8) | *(tlv_v + 2),
3170                     (int8)(*tlv_v), *(tlv_v + 1));
3171             } break;
3172 
3173         case WLFC_CTL_TYPE_FIFO_CREDITBACK:
3174             bcm_print_bytes("METADATA FIFO_CREDITBACK", tlv_v, tlv_l);
3175             break;
3176 
3177         case WLFC_CTL_TYPE_TX_ENTRY_STAMP:
3178             bcm_print_bytes("METADATA TX_ENTRY", tlv_v, tlv_l);
3179             break;
3180 
3181         case WLFC_CTL_TYPE_RX_STAMP: {
3182             struct {
3183                 uint32 rspec;
3184                 uint32 bus_time;
3185                 uint32 wlan_time;
3186             } rx_tmstamp;
3187             memcpy(&rx_tmstamp, tlv_v, sizeof(rx_tmstamp));
3188             printf("METADATA RX TIMESTAMP: WLFCTS[%08x - %08x] rate = %08x\n",
3189                 rx_tmstamp.wlan_time, rx_tmstamp.bus_time, rx_tmstamp.rspec);
3190             } break;
3191 
3192         case WLFC_CTL_TYPE_TRANS_ID:
3193             bcm_print_bytes("METADATA TRANS_ID", tlv_v, tlv_l);
3194             break;
3195 
3196         case WLFC_CTL_TYPE_COMP_TXSTATUS:
3197             bcm_print_bytes("METADATA COMP_TXSTATUS", tlv_v, tlv_l);
3198             break;
3199 
3200         default:
3201             bcm_print_bytes("METADATA UNKNOWN", tlv_v, tlv_l);
3202             break;
3203         }
3204 
3205         len -= tlv_l;
3206         tlv_v += tlv_l;
3207     }
3208 }
3209 #endif /* DHD_DBG_SHOW_METADATA */
3210 
3211 static INLINE void BCMFASTPATH
3212 dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send)
3213 {
3214     if (pkt) {
3215         if (pkttype == PKTTYPE_IOCTL_RX ||
3216             pkttype == PKTTYPE_EVENT_RX ||
3217             pkttype == PKTTYPE_INFO_RX ||
3218             pkttype == PKTTYPE_TSBUF_RX) {
3219 #ifdef DHD_USE_STATIC_CTRLBUF
3220             PKTFREE_STATIC(dhd->osh, pkt, send);
3221 #else
3222             PKTFREE(dhd->osh, pkt, send);
3223 #endif /* DHD_USE_STATIC_CTRLBUF */
3224         } else {
3225             PKTFREE(dhd->osh, pkt, send);
3226         }
3227     }
3228 }
3229 
3230 /* dhd_prot_packet_get should be called only for items having pktid_ctrl_map handle */
3231 static INLINE void * BCMFASTPATH
3232 dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype, bool free_pktid)
3233 {
3234     void *PKTBUF;
3235     dmaaddr_t pa;
3236     uint32 len;
3237     void *dmah;
3238     void *secdma;
3239 
3240 #ifdef DHD_PCIE_PKTID
3241     if (free_pktid) {
3242         PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map,
3243             pktid, pa, len, dmah, secdma, pkttype);
3244     } else {
3245         PKTBUF = DHD_PKTID_TO_NATIVE_RSV(dhd, dhd->prot->pktid_ctrl_map,
3246             pktid, pa, len, dmah, secdma, pkttype);
3247     }
3248 #else
3249     PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map, pktid, pa,
3250         len, dmah, secdma, pkttype);
3251 #endif /* DHD_PCIE_PKTID */
3252     if (PKTBUF) {
3253         {
3254             if (SECURE_DMA_ENAB(dhd->osh))
3255                 SECURE_DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah,
3256                     secdma, 0);
3257             else
3258                 DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
3259 #ifdef DMAMAP_STATS
3260             switch (pkttype) {
3261 #ifndef IOCTLRESP_USE_CONSTMEM
3262                 case PKTTYPE_IOCTL_RX:
3263                     dhd->dma_stats.ioctl_rx--;
3264                     dhd->dma_stats.ioctl_rx_sz -= len;
3265                     break;
3266 #endif /* IOCTLRESP_USE_CONSTMEM */
3267                 case PKTTYPE_EVENT_RX:
3268                     dhd->dma_stats.event_rx--;
3269                     dhd->dma_stats.event_rx_sz -= len;
3270                     break;
3271                 case PKTTYPE_INFO_RX:
3272                     dhd->dma_stats.info_rx--;
3273                     dhd->dma_stats.info_rx_sz -= len;
3274                     break;
3275                 case PKTTYPE_TSBUF_RX:
3276                     dhd->dma_stats.tsbuf_rx--;
3277                     dhd->dma_stats.tsbuf_rx_sz -= len;
3278                     break;
3279             }
3280 #endif /* DMAMAP_STATS */
3281         }
3282     }
3283 
3284     return PKTBUF;
3285 }
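/*
 * Hedged usage sketch for the two helpers above (control-path packets only,
 * i.e. items tracked in pktid_ctrl_map); the request_id variable is
 * hypothetical and stands for the id carried in a D2H completion message:
 *
 *     void *pkt = dhd_prot_packet_get(dhd, ltoh32(request_id),
 *                                     PKTTYPE_IOCTL_RX, TRUE);
 *     if (pkt != NULL) {
 *         ... consume the response payload ...
 *         dhd_prot_packet_free(dhd, pkt, PKTTYPE_IOCTL_RX, FALSE);
 *     }
 */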
3286 
3287 #ifdef IOCTLRESP_USE_CONSTMEM
3288 static INLINE void BCMFASTPATH
3289 dhd_prot_ioctl_ret_buffer_get(dhd_pub_t *dhd, uint32 pktid, dhd_dma_buf_t *retbuf)
3290 {
3291     memset(retbuf, 0, sizeof(dhd_dma_buf_t));
3292     retbuf->va = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle_ioctl, pktid,
3293         retbuf->pa, retbuf->len, retbuf->dmah, retbuf->secdma, PKTTYPE_IOCTL_RX);
3294 
3295     return;
3296 }
3297 #endif
3298 
3299 #ifdef PCIE_INB_DW
3300 static int
3301 dhd_prot_inc_hostactive_devwake_assert(dhd_bus_t *bus)
3302 {
3303     unsigned long flags = 0;
3304 
3305     if (INBAND_DW_ENAB(bus)) {
3306         DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
3307         bus->host_active_cnt++;
3308         DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
3309         if (dhd_bus_set_device_wake(bus, TRUE) != BCME_OK) {
3310             DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
3311             bus->host_active_cnt--;
3312             dhd_bus_inb_ack_pending_ds_req(bus);
3313             DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
3314             return BCME_ERROR;
3315         }
3316     }
3317 
3318     return BCME_OK;
3319 }
3320 
3321 static void
3322 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd_bus_t *bus)
3323 {
3324     unsigned long flags = 0;
3325     if (INBAND_DW_ENAB(bus)) {
3326         DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
3327         bus->host_active_cnt--;
3328         dhd_bus_inb_ack_pending_ds_req(bus);
3329         DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
3330     }
3331 }
3332 #endif /* PCIE_INB_DW */
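/*
 * When PCIE_INB_DW is enabled, ring producers bracket their work with the
 * two helpers above, as the posting routines below do; a minimal sketch:
 *
 *     if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
 *         return BCME_ERROR;                // device could not be woken
 *     ... post work items / ring the doorbell ...
 *     dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
 */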
3333 
3334 static void BCMFASTPATH
3335 dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid)
3336 {
3337     dhd_prot_t *prot = dhd->prot;
3338     int16 fillbufs;
3339     uint16 cnt = 256;
3340     int retcount = 0;
3341 
3342     fillbufs = prot->max_rxbufpost - prot->rxbufpost;
3343     while (fillbufs >= RX_BUF_BURST) {
3344         cnt--;
3345         if (cnt == 0) {
3346             /* find a better way to reschedule rx buf post if space not available */
3347             DHD_ERROR(("h2d rx post ring not available to post host buffers \n"));
3348             DHD_ERROR(("Current posted host buf count %d \n", prot->rxbufpost));
3349             break;
3350         }
3351 
3352         /* Post in a burst of 32 buffers at a time */
3353         fillbufs = MIN(fillbufs, RX_BUF_BURST);
3354 
3355         /* Post buffers */
3356         retcount = dhd_prot_rxbuf_post(dhd, fillbufs, use_rsv_pktid);
3357 
3358         if (retcount >= 0) {
3359             prot->rxbufpost += (uint16)retcount;
3360 #ifdef DHD_LB_RXC
3361             /* dhd_prot_rxbuf_post returns the number of buffers posted */
3362             DHD_LB_STATS_UPDATE_RXC_HISTO(dhd, retcount);
3363 #endif /* DHD_LB_RXC */
3364             /* how many more to post */
3365             fillbufs = prot->max_rxbufpost - prot->rxbufpost;
3366         } else {
3367             /* Make sure we don't run loop any further */
3368             fillbufs = 0;
3369         }
3370     }
3371 }
3372 
3373 /** Post 'count' no of rx buffers to dongle */
3374 static int BCMFASTPATH
3375 dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid)
3376 {
3377     void *p, **pktbuf;
3378     uint16 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
3379     uint8 *rxbuf_post_tmp;
3380     host_rxbuf_post_t *rxbuf_post;
3381     void *msg_start;
3382     dmaaddr_t pa, *pktbuf_pa;
3383     uint32 *pktlen;
3384     uint16 i = 0, alloced = 0;
3385     unsigned long flags;
3386     uint32 pktid;
3387     dhd_prot_t *prot = dhd->prot;
3388     msgbuf_ring_t *ring = &prot->h2dring_rxp_subn;
3389     void *lcl_buf;
3390     uint16 lcl_buf_size;
3391 
3392 #ifdef PCIE_INB_DW
3393     if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
3394         return BCME_ERROR;
3395 #endif /* PCIE_INB_DW */
3396 
3397     /* allocate a local buffer to store pkt buffer va, pa and length */
3398     lcl_buf_size = (sizeof(void *) + sizeof(dmaaddr_t) + sizeof(uint32)) *
3399         RX_BUF_BURST;
3400     lcl_buf = MALLOC(dhd->osh, lcl_buf_size);
3401     if (!lcl_buf) {
3402         DHD_ERROR(("%s: local scratch buffer allocation failed\n", __FUNCTION__));
3403 #ifdef PCIE_INB_DW
3404         dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
3405 #endif
3406         return 0;
3407     }
3408     pktbuf = lcl_buf;
3409     pktbuf_pa = (dmaaddr_t *)((uint8 *)pktbuf + sizeof(void *) * RX_BUF_BURST);
3410     pktlen = (uint32 *)((uint8 *)pktbuf_pa + sizeof(dmaaddr_t) * RX_BUF_BURST);
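    /*
     * lcl_buf layout: three back-to-back arrays sized for one burst --
     * RX_BUF_BURST packet pointers (pktbuf), then RX_BUF_BURST DMA addresses
     * (pktbuf_pa), then RX_BUF_BURST packet lengths (pktlen).
     */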
3411 
3412     for (i = 0; i < count; i++) {
3413         if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) {
3414             DHD_ERROR(("%s:%d: PKTGET for rxbuf failed\n", __FUNCTION__, __LINE__));
3415             dhd->rx_pktgetfail++;
3416             break;
3417         }
3418 
3419         pktlen[i] = PKTLEN(dhd->osh, p);
3420         if (SECURE_DMA_ENAB(dhd->osh)) {
3421             pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen[i],
3422                 DMA_RX, p, 0, ring->dma_buf.secdma, 0);
3423         }
3424 #ifndef BCM_SECURE_DMA
3425         else
3426             pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen[i], DMA_RX, p, 0);
3427 #endif /* #ifndef BCM_SECURE_DMA */
3428 
3429         if (PHYSADDRISZERO(pa)) {
3430             PKTFREE(dhd->osh, p, FALSE);
3431             DHD_ERROR(("Invalid phyaddr 0\n"));
3432             ASSERT(0);
3433             break;
3434         }
3435 #ifdef DMAMAP_STATS
3436         dhd->dma_stats.rxdata++;
3437         dhd->dma_stats.rxdata_sz += pktlen[i];
3438 #endif /* DMAMAP_STATS */
3439 
3440         PKTPULL(dhd->osh, p, prot->rx_metadata_offset);
3441         pktlen[i] = PKTLEN(dhd->osh, p);
3442         pktbuf[i] = p;
3443         pktbuf_pa[i] = pa;
3444     }
3445 
3446     /* only post what we have */
3447     count = i;
3448 
3449     /* grab the rx lock to allocate pktid and post on ring */
3450     DHD_SPIN_LOCK(prot->rx_lock, flags);
3451 
3452     /* Claim space for exactly 'count' messages, for mitigation purposes */
3453     msg_start = (void *)
3454         dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, TRUE);
3455     if (msg_start == NULL) {
3456         DHD_INFO(("%s:%d: Rxbufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
3457         goto cleanup;
3458     }
3459     /* if msg_start != NULL, we should have alloced space for at least 1 item */
3460     ASSERT(alloced > 0);
3461 
3462     rxbuf_post_tmp = (uint8*)msg_start;
3463 
3464     for (i = 0; i < alloced; i++) {
3465         rxbuf_post = (host_rxbuf_post_t *)rxbuf_post_tmp;
3466         p = pktbuf[i];
3467         pa = pktbuf_pa[i];
3468 
3469 #if defined(DHD_LB_RXC)
3470         if (use_rsv_pktid == TRUE) {
3471             bcm_workq_t *workq = &prot->rx_compl_cons;
3472             int elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
3473 
3474             if (elem_ix == BCM_RING_EMPTY) {
3475                 DHD_INFO(("%s rx_compl_cons ring is empty\n", __FUNCTION__));
3476                 pktid = DHD_PKTID_INVALID;
3477                 goto alloc_pkt_id;
3478             } else {
3479                 uint32 *elem = WORKQ_ELEMENT(uint32, workq, elem_ix);
3480                 pktid = *elem;
3481             }
3482 
3483             rxbuf_post->cmn_hdr.request_id = htol32(pktid);
3484 
3485             /* Now populate the previous locker with valid information */
3486             if (pktid != DHD_PKTID_INVALID) {
3487                 DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_rx_map,
3488                     p, pktid, pa, pktlen[i], DMA_RX, NULL, NULL,
3489                     PKTTYPE_DATA_RX);
3490             }
3491         } else
3492 #endif /* ! DHD_LB_RXC */
3493         {
3494 #if defined(DHD_LB_RXC)
3495 alloc_pkt_id:
3496 #endif /* DHD_LB_RXC */
3497         pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_rx_map, p, pa,
3498             pktlen[i], DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_RX);
3499 #if defined(DHD_PCIE_PKTID)
3500         if (pktid == DHD_PKTID_INVALID) {
3501             break;
3502         }
3503 #endif /* DHD_PCIE_PKTID */
3504         }
3505 
3506         /* Common msg header */
3507         rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_RXBUF_POST;
3508         rxbuf_post->cmn_hdr.if_id = 0;
3509         rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
3510         rxbuf_post->cmn_hdr.flags = ring->current_phase;
3511         ring->seqnum++;
3512         rxbuf_post->data_buf_len = htol16((uint16)pktlen[i]);
3513         rxbuf_post->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
3514         rxbuf_post->data_buf_addr.low_addr =
3515             htol32(PHYSADDRLO(pa) + prot->rx_metadata_offset);
3516 
3517         if (prot->rx_metadata_offset) {
3518             rxbuf_post->metadata_buf_len = prot->rx_metadata_offset;
3519             rxbuf_post->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
3520             rxbuf_post->metadata_buf_addr.low_addr  = htol32(PHYSADDRLO(pa));
3521         } else {
3522             rxbuf_post->metadata_buf_len = 0;
3523             rxbuf_post->metadata_buf_addr.high_addr = 0;
3524             rxbuf_post->metadata_buf_addr.low_addr  = 0;
3525         }
3526 
3527 #ifdef DHD_PKTID_AUDIT_RING
3528         DHD_PKTID_AUDIT(dhd, prot->pktid_rx_map, pktid, DHD_DUPLICATE_ALLOC);
3529 #endif /* DHD_PKTID_AUDIT_RING */
3530 
3531         rxbuf_post->cmn_hdr.request_id = htol32(pktid);
3532 
3533         /* Move rxbuf_post_tmp to next item */
3534         rxbuf_post_tmp = rxbuf_post_tmp + ring->item_len;
3535     }
3536 
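    /*
     * If pktid allocation stopped early (i < alloced), roll the ring's write
     * index back over the unused slots (wrapping and flipping the phase bit
     * if needed); the packets that were never posted are unmapped and freed
     * in the cleanup path below.
     */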
3537     if (i < alloced) {
3538         if (ring->wr < (alloced - i))
3539             ring->wr = ring->max_items - (alloced - i);
3540         else
3541             ring->wr -= (alloced - i);
3542 
3543         if (ring->wr == 0) {
3544             DHD_INFO(("%s: flipping the phase now\n", ring->name));
3545             ring->current_phase = ring->current_phase ?
3546                 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
3547         }
3548 
3549         alloced = i;
3550     }
3551 
3552     /* update ring's WR index and ring doorbell to dongle */
3553     if (alloced > 0) {
3554         unsigned long flags1;
3555         DHD_GENERAL_LOCK(dhd, flags1);
3556         dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
3557         DHD_GENERAL_UNLOCK(dhd, flags1);
3558     }
3559 
3560     DHD_SPIN_UNLOCK(prot->rx_lock, flags);
3561 
3562 cleanup:
3563     for (i = alloced; i < count; i++) {
3564         p = pktbuf[i];
3565         pa = pktbuf_pa[i];
3566 
3567         if (SECURE_DMA_ENAB(dhd->osh))
3568             SECURE_DMA_UNMAP(dhd->osh, pa, pktlen[i], DMA_RX, 0,
3569                 DHD_DMAH_NULL, ring->dma_buf.secdma, 0);
3570         else
3571             DMA_UNMAP(dhd->osh, pa, pktlen[i], DMA_RX, 0, DHD_DMAH_NULL);
3572         PKTFREE(dhd->osh, p, FALSE);
3573     }
3574 
3575     MFREE(dhd->osh, lcl_buf, lcl_buf_size);
3576 #ifdef PCIE_INB_DW
3577     dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
3578 #endif
3579     return alloced;
3580 } /* dhd_prot_rxbuf_post */
3581 
3582 static int
3583 dhd_prot_infobufpost(dhd_pub_t *dhd)
3584 {
3585     unsigned long flags;
3586     uint32 pktid;
3587     dhd_prot_t *prot = dhd->prot;
3588     msgbuf_ring_t *ring = prot->h2dring_info_subn;
3589     uint16 alloced = 0;
3590     uint16 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
3591     uint32 pktlen;
3592     info_buf_post_msg_t *infobuf_post;
3593     uint8 *infobuf_post_tmp;
3594     void *p;
3595     void* msg_start;
3596     uint8 i = 0;
3597     dmaaddr_t pa;
3598     int16 count;
3599 
3600     if (ring == NULL)
3601         return 0;
3602 
3603     if (ring->inited != TRUE)
3604         return 0;
3605     if (prot->max_infobufpost == 0)
3606         return 0;
3607 
3608     count = prot->max_infobufpost - prot->infobufpost;
3609 
3610     if (count <= 0) {
3611         DHD_INFO(("%s: Cannot post more than max info resp buffers\n",
3612             __FUNCTION__));
3613         return 0;
3614     }
3615 
3616 #ifdef PCIE_INB_DW
3617     if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
3618         return BCME_ERROR;
3619 #endif /* PCIE_INB_DW */
3620 
3621     DHD_GENERAL_LOCK(dhd, flags);
3622     /* Claim space for exactly 'count' messages, for mitigation purposes */
3623     msg_start = (void *) dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, FALSE);
3624     DHD_GENERAL_UNLOCK(dhd, flags);
3625 
3626     if (msg_start == NULL) {
3627         DHD_INFO(("%s:%d: infobufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
3628 #ifdef PCIE_INB_DW
3629         dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
3630 #endif
3631         return -1;
3632     }
3633 
3634     /* if msg_start != NULL, we should have alloced space for at least 1 item */
3635     ASSERT(alloced > 0);
3636 
3637     infobuf_post_tmp = (uint8*) msg_start;
3638 
3639     /* loop through each allocated message in the host ring */
3640     for (i = 0; i < alloced; i++) {
3641         infobuf_post = (info_buf_post_msg_t *) infobuf_post_tmp;
3642         /* Create a rx buffer */
3643 #ifdef DHD_USE_STATIC_CTRLBUF
3644         p = PKTGET_STATIC(dhd->osh, pktsz, FALSE);
3645 #else
3646         p = PKTGET(dhd->osh, pktsz, FALSE);
3647 #endif /* DHD_USE_STATIC_CTRLBUF */
3648         if (p == NULL) {
3649             DHD_ERROR(("%s:%d: PKTGET for infobuf failed\n", __FUNCTION__, __LINE__));
3650             dhd->rx_pktgetfail++;
3651             break;
3652         }
3653         pktlen = PKTLEN(dhd->osh, p);
3654         if (SECURE_DMA_ENAB(dhd->osh)) {
3655             pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen,
3656                 DMA_RX, p, 0, ring->dma_buf.secdma, 0);
3657         }
3658 #ifndef BCM_SECURE_DMA
3659         else
3660             pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
3661 #endif /* #ifndef BCM_SECURE_DMA */
3662         if (PHYSADDRISZERO(pa)) {
3663             if (SECURE_DMA_ENAB(dhd->osh)) {
3664                 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
3665                     ring->dma_buf.secdma, 0);
3666             }
3667             else
3668                 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
3669 #ifdef DHD_USE_STATIC_CTRLBUF
3670             PKTFREE_STATIC(dhd->osh, p, FALSE);
3671 #else
3672             PKTFREE(dhd->osh, p, FALSE);
3673 #endif /* DHD_USE_STATIC_CTRLBUF */
3674             DHD_ERROR(("Invalid phyaddr 0\n"));
3675             ASSERT(0);
3676             break;
3677         }
3678 #ifdef DMAMAP_STATS
3679         dhd->dma_stats.info_rx++;
3680         dhd->dma_stats.info_rx_sz += pktlen;
3681 #endif /* DMAMAP_STATS */
3682         pktlen = PKTLEN(dhd->osh, p);
3683 
3684         /* Common msg header */
3685         infobuf_post->cmn_hdr.msg_type = MSG_TYPE_INFO_BUF_POST;
3686         infobuf_post->cmn_hdr.if_id = 0;
3687         infobuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
3688         infobuf_post->cmn_hdr.flags = ring->current_phase;
3689         ring->seqnum++;
3690 
3691 #if defined(DHD_PCIE_PKTID)
3692         /* get the lock before calling DHD_NATIVE_TO_PKTID */
3693         DHD_GENERAL_LOCK(dhd, flags);
3694 #endif /* DHD_PCIE_PKTID */
3695 
3696         pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_ctrl_map, p, pa,
3697             pktlen, DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_INFO_RX);
3698 
3699 
3700 #if defined(DHD_PCIE_PKTID)
3701         /* free lock */
3702         DHD_GENERAL_UNLOCK(dhd, flags);
3703 
3704         if (pktid == DHD_PKTID_INVALID) {
3705             if (SECURE_DMA_ENAB(dhd->osh)) {
3706                 DHD_GENERAL_LOCK(dhd, flags);
3707                 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0,
3708                     ring->dma_buf.secdma, 0);
3709                 DHD_GENERAL_UNLOCK(dhd, flags);
3710             } else
3711                 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0);
3712 
3713 #ifdef DHD_USE_STATIC_CTRLBUF
3714             PKTFREE_STATIC(dhd->osh, p, FALSE);
3715 #else
3716             PKTFREE(dhd->osh, p, FALSE);
3717 #endif /* DHD_USE_STATIC_CTRLBUF */
3718             DHD_ERROR(("%s: Pktid pool depleted.\n", __FUNCTION__));
3719             break;
3720         }
3721 #endif /* DHD_PCIE_PKTID */
3722 
3723         infobuf_post->host_buf_len = htol16((uint16)pktlen);
3724         infobuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
3725         infobuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
3726 
3727 #ifdef DHD_PKTID_AUDIT_RING
3728         DHD_PKTID_AUDIT(dhd, prot->pktid_ctrl_map, pktid, DHD_DUPLICATE_ALLOC);
3729 #endif /* DHD_PKTID_AUDIT_RING */
3730 
3731         DHD_INFO(("ID %d, low_addr 0x%08x, high_addr 0x%08x\n",
3732             infobuf_post->cmn_hdr.request_id,  infobuf_post->host_buf_addr.low_addr,
3733             infobuf_post->host_buf_addr.high_addr));
3734 
3735         infobuf_post->cmn_hdr.request_id = htol32(pktid);
3736         /* Move rxbuf_post_tmp to next item */
3737         infobuf_post_tmp = infobuf_post_tmp + ring->item_len;
3738     }
3739 
3740     if (i < alloced) {
3741         if (ring->wr < (alloced - i))
3742             ring->wr = ring->max_items - (alloced - i);
3743         else
3744             ring->wr -= (alloced - i);
3745 
3746         alloced = i;
3747         if (alloced && ring->wr == 0) {
3748             DHD_INFO(("%s: flipping the phase now\n", ring->name));
3749             ring->current_phase = ring->current_phase ?
3750                 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
3751         }
3752     }
3753 
3754     /* Update the write pointer in TCM & ring bell */
3755     if (alloced > 0) {
3756         prot->infobufpost += alloced;
3757         DHD_INFO(("allocated %d buffers for info ring\n",  alloced));
3758         DHD_GENERAL_LOCK(dhd, flags);
3759         dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
3760         DHD_GENERAL_UNLOCK(dhd, flags);
3761     }
3762 #ifdef PCIE_INB_DW
3763     dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
3764 #endif
3765     return alloced;
3766 } /* dhd_prot_infobufpost */
3767 
3768 #ifdef IOCTLRESP_USE_CONSTMEM
3769 static int
3770 alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf)
3771 {
3772     int err;
3773     memset(retbuf, 0, sizeof(dhd_dma_buf_t));
3774 
3775     if ((err = dhd_dma_buf_alloc(dhd, retbuf, IOCT_RETBUF_SIZE)) != BCME_OK) {
3776         DHD_ERROR(("%s: dhd_dma_buf_alloc err %d\n", __FUNCTION__, err));
3777         ASSERT(0);
3778         return BCME_NOMEM;
3779     }
3780 
3781     return BCME_OK;
3782 }
3783 
3784 static void
3785 free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf)
3786 {
3787     /* retbuf (declared on stack) not fully populated ...  */
3788     if (retbuf->va) {
3789         uint32 dma_pad;
3790         dma_pad = (IOCT_RETBUF_SIZE % DHD_DMA_PAD) ? DHD_DMA_PAD : 0;
3791         retbuf->len = IOCT_RETBUF_SIZE;
3792         retbuf->_alloced = retbuf->len + dma_pad;
3793     }
3794 
3795     dhd_dma_buf_free(dhd, retbuf);
3796     return;
3797 }
3798 #endif /* IOCTLRESP_USE_CONSTMEM */
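/*
 * Pairing note: buffers obtained from alloc_ioctl_return_buffer() are
 * released with free_ioctl_return_buffer(), which re-derives len/_alloced
 * before freeing because the dhd_dma_buf_t handed back by callers (often a
 * stack copy) may not be fully populated.
 */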
3799 
3800 static int
3801 dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, uint8 msg_type)
3802 {
3803     void *p;
3804     uint16 pktsz;
3805     ioctl_resp_evt_buf_post_msg_t *rxbuf_post;
3806     dmaaddr_t pa;
3807     uint32 pktlen;
3808     dhd_prot_t *prot = dhd->prot;
3809     uint16 alloced = 0;
3810     unsigned long flags;
3811     dhd_dma_buf_t retbuf;
3812     void *dmah = NULL;
3813     uint32 pktid;
3814     void *map_handle;
3815     msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
3816     bool non_ioctl_resp_buf = 0;
3817     dhd_pkttype_t buf_type;
3818 
3819     if (dhd->busstate == DHD_BUS_DOWN) {
3820         DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
3821         return -1;
3822     }
3823     memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
3824 
3825     if (msg_type == MSG_TYPE_IOCTLRESP_BUF_POST)
3826         buf_type = PKTTYPE_IOCTL_RX;
3827     else if (msg_type == MSG_TYPE_EVENT_BUF_POST)
3828         buf_type = PKTTYPE_EVENT_RX;
3829     else if (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST)
3830         buf_type = PKTTYPE_TSBUF_RX;
3831     else {
3832         DHD_ERROR(("invalid message type to be posted to Ctrl ring %d\n", msg_type));
3833         return -1;
3834     }
3835 
3836 
3837     if ((msg_type == MSG_TYPE_EVENT_BUF_POST) || (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST))
3838         non_ioctl_resp_buf = TRUE;
3839     else
3840         non_ioctl_resp_buf = FALSE;
3841 
3842     if (non_ioctl_resp_buf) {
3843         /* Allocate packet for non-ioctl-response buffer post */
3844         pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
3845     } else {
3846         /* Allocate packet for ctrl/ioctl buffer post */
3847         pktsz = DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ;
3848     }
3849 
3850 #ifdef IOCTLRESP_USE_CONSTMEM
3851     if (!non_ioctl_resp_buf) {
3852         if (alloc_ioctl_return_buffer(dhd, &retbuf) != BCME_OK) {
3853             DHD_ERROR(("Could not allocate IOCTL response buffer\n"));
3854             return -1;
3855         }
3856         ASSERT(retbuf.len == IOCT_RETBUF_SIZE);
3857         p = retbuf.va;
3858         pktlen = retbuf.len;
3859         pa = retbuf.pa;
3860         dmah = retbuf.dmah;
3861     } else
3862 #endif /* IOCTLRESP_USE_CONSTMEM */
3863     {
3864 #ifdef DHD_USE_STATIC_CTRLBUF
3865         p = PKTGET_STATIC(dhd->osh, pktsz, FALSE);
3866 #else
3867         p = PKTGET(dhd->osh, pktsz, FALSE);
3868 #endif /* DHD_USE_STATIC_CTRLBUF */
3869         if (p == NULL) {
3870             DHD_ERROR(("%s:%d: PKTGET for %s buf failed\n",
3871                 __FUNCTION__, __LINE__, non_ioctl_resp_buf ?
3872                 "EVENT" : "IOCTL RESP"));
3873             dhd->rx_pktgetfail++;
3874             return -1;
3875         }
3876 
3877         pktlen = PKTLEN(dhd->osh, p);
3878 
3879         if (SECURE_DMA_ENAB(dhd->osh)) {
3880             DHD_GENERAL_LOCK(dhd, flags);
3881             pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen,
3882                 DMA_RX, p, 0, ring->dma_buf.secdma, 0);
3883             DHD_GENERAL_UNLOCK(dhd, flags);
3884         }
3885 #ifndef BCM_SECURE_DMA
3886         else
3887             pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
3888 #endif /* #ifndef BCM_SECURE_DMA */
3889 
3890         if (PHYSADDRISZERO(pa)) {
3891             DHD_ERROR(("Invalid physaddr 0\n"));
3892             ASSERT(0);
3893             goto free_pkt_return;
3894         }
3895 
3896 #ifdef DMAMAP_STATS
3897         switch (buf_type) {
3898 #ifndef IOCTLRESP_USE_CONSTMEM
3899             case PKTTYPE_IOCTL_RX:
3900                 dhd->dma_stats.ioctl_rx++;
3901                 dhd->dma_stats.ioctl_rx_sz += pktlen;
3902                 break;
3903 #endif /* !IOCTLRESP_USE_CONSTMEM */
3904             case PKTTYPE_EVENT_RX:
3905                 dhd->dma_stats.event_rx++;
3906                 dhd->dma_stats.event_rx_sz += pktlen;
3907                 break;
3908             case PKTTYPE_TSBUF_RX:
3909                 dhd->dma_stats.tsbuf_rx++;
3910                 dhd->dma_stats.tsbuf_rx_sz += pktlen;
3911                 break;
3912             default:
3913                 break;
3914         }
3915 #endif /* DMAMAP_STATS */
3916     }
3917 #ifdef PCIE_INB_DW
3918     if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
3919         return BCME_ERROR;
3920 #endif /* PCIE_INB_DW */
3921 
3922     DHD_GENERAL_LOCK(dhd, flags);
3923 
3924     rxbuf_post = (ioctl_resp_evt_buf_post_msg_t *)
3925         dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
3926 
3927     if (rxbuf_post == NULL) {
3928         DHD_GENERAL_UNLOCK(dhd, flags);
3929         DHD_ERROR(("%s:%d: Ctrl submit Msgbuf Not available to post buffer \n",
3930             __FUNCTION__, __LINE__));
3931 
3932 #ifdef IOCTLRESP_USE_CONSTMEM
3933         if (non_ioctl_resp_buf)
3934 #endif /* IOCTLRESP_USE_CONSTMEM */
3935         {
3936             if (SECURE_DMA_ENAB(dhd->osh)) {
3937                 DHD_GENERAL_LOCK(dhd, flags);
3938                 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
3939                     ring->dma_buf.secdma, 0);
3940                 DHD_GENERAL_UNLOCK(dhd, flags);
3941             } else {
3942                 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
3943             }
3944         }
3945         goto free_pkt_return;
3946     }
3947 
3948     /* CMN msg header */
3949     rxbuf_post->cmn_hdr.msg_type = msg_type;
3950 
3951 #ifdef IOCTLRESP_USE_CONSTMEM
3952     if (!non_ioctl_resp_buf) {
3953         map_handle = dhd->prot->pktid_map_handle_ioctl;
3954         pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, p, pa, pktlen, DMA_RX, dmah,
3955             ring->dma_buf.secdma, buf_type);
3956     } else
3957 #endif /* IOCTLRESP_USE_CONSTMEM */
3958     {
3959         map_handle = dhd->prot->pktid_ctrl_map;
3960         pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle,
3961             p, pa, pktlen, DMA_RX, dmah, ring->dma_buf.secdma,
3962             buf_type);
3963     }
3964 
3965     if (pktid == DHD_PKTID_INVALID) {
3966         if (ring->wr == 0) {
3967             ring->wr = ring->max_items - 1;
3968         } else {
3969             ring->wr--;
3970             if (ring->wr == 0) {
3971                 ring->current_phase = ring->current_phase ? 0 :
3972                     BCMPCIE_CMNHDR_PHASE_BIT_INIT;
3973             }
3974         }
3975         DHD_GENERAL_UNLOCK(dhd, flags);
3976         DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
3977         DHD_ERROR(("%s: Pktid pool depleted.\n", __FUNCTION__));
3978         goto free_pkt_return;
3979     }
3980 
3981 #ifdef DHD_PKTID_AUDIT_RING
3982     DHD_PKTID_AUDIT(dhd, map_handle, pktid, DHD_DUPLICATE_ALLOC);
3983 #endif /* DHD_PKTID_AUDIT_RING */
3984 
3985     rxbuf_post->cmn_hdr.request_id = htol32(pktid);
3986     rxbuf_post->cmn_hdr.if_id = 0;
3987     rxbuf_post->cmn_hdr.epoch =  ring->seqnum % H2D_EPOCH_MODULO;
3988     ring->seqnum++;
3989     rxbuf_post->cmn_hdr.flags = ring->current_phase;
3990 
3991 #if defined(DHD_PCIE_PKTID)
3992     if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) {
3993         if (ring->wr == 0) {
3994             ring->wr = ring->max_items - 1;
3995         } else {
            ring->wr--; /* roll back the claimed ring slot (mirrors the rollback above) */
3996             if (ring->wr == 0) {
3997                 ring->current_phase = ring->current_phase ? 0 :
3998                     BCMPCIE_CMNHDR_PHASE_BIT_INIT;
3999             }
4000         }
4001         DHD_GENERAL_UNLOCK(dhd, flags);
4002 #ifdef IOCTLRESP_USE_CONSTMEM
4003         if (non_ioctl_resp_buf)
4004 #endif /* IOCTLRESP_USE_CONSTMEM */
4005         {
4006             if (SECURE_DMA_ENAB(dhd->osh)) {
4007                 DHD_GENERAL_LOCK(dhd, flags);
4008                 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
4009                     ring->dma_buf.secdma, 0);
4010                 DHD_GENERAL_UNLOCK(dhd, flags);
4011             } else
4012                 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
4013         }
4014         goto free_pkt_return;
4015     }
4016 #endif /* DHD_PCIE_PKTID */
4017 
4018 #ifndef IOCTLRESP_USE_CONSTMEM
4019     rxbuf_post->host_buf_len = htol16((uint16)PKTLEN(dhd->osh, p));
4020 #else
4021     rxbuf_post->host_buf_len = htol16((uint16)pktlen);
4022 #endif /* IOCTLRESP_USE_CONSTMEM */
4023     rxbuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
4024     rxbuf_post->host_buf_addr.low_addr  = htol32(PHYSADDRLO(pa));
4025 
4026     /* update ring's WR index and ring doorbell to dongle */
4027     dhd_prot_ring_write_complete(dhd, ring, rxbuf_post, 1);
4028     DHD_GENERAL_UNLOCK(dhd, flags);
4029 
4030 #ifdef PCIE_INB_DW
4031     dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
4032 #endif
4033 
4034     return 1;
4035 
4036 free_pkt_return:
4037 #ifdef IOCTLRESP_USE_CONSTMEM
4038     if (!non_ioctl_resp_buf) {
4039         free_ioctl_return_buffer(dhd, &retbuf);
4040     } else
4041 #endif
4042     {
4043         dhd_prot_packet_free(dhd, p, buf_type, FALSE);
4044     }
4045 
4046 #ifdef PCIE_INB_DW
4047     dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
4048 #endif
4049 
4050     return -1;
4051 } /* dhd_prot_rxbufpost_ctrl */
4052 
4053 static uint16
4054 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msg_type, uint32 max_to_post)
4055 {
4056     uint32 i = 0;
4057     int32 ret_val;
4058 
4059     DHD_INFO(("max to post %d, event %d \n", max_to_post, msg_type));
4060 
4061     if (dhd->busstate == DHD_BUS_DOWN) {
4062         DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
4063         return 0;
4064     }
4065 
4066     while (i < max_to_post) {
4067         ret_val  = dhd_prot_rxbufpost_ctrl(dhd, msg_type);
4068         if (ret_val < 0)
4069             break;
4070         i++;
4071     }
4072     DHD_INFO(("posted %d buffers of type %d\n", i, msg_type));
4073     return (uint16)i;
4074 }
4075 
4076 static void
4077 dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *dhd)
4078 {
4079     dhd_prot_t *prot = dhd->prot;
4080     int max_to_post;
4081 
4082     DHD_INFO(("ioctl resp buf post\n"));
4083     max_to_post = prot->max_ioctlrespbufpost - prot->cur_ioctlresp_bufs_posted;
4084     if (max_to_post <= 0) {
4085         DHD_INFO(("%s: Cannot post more than max IOCTL resp buffers\n",
4086             __FUNCTION__));
4087         return;
4088     }
4089     prot->cur_ioctlresp_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
4090         MSG_TYPE_IOCTLRESP_BUF_POST, max_to_post);
4091 }
4092 
4093 static void
4094 dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *dhd)
4095 {
4096     dhd_prot_t *prot = dhd->prot;
4097     int max_to_post;
4098 
4099     /* Use atomic variable to avoid re-entry */
4100     if (atomic_read(&dhd_msgbuf_rxbuf_post_event_bufs_running) > 0) {
4101         return;
4102     }
4103     atomic_inc(&dhd_msgbuf_rxbuf_post_event_bufs_running);
4104 
4105     max_to_post = prot->max_eventbufpost - prot->cur_event_bufs_posted;
4106     if (max_to_post <= 0) {
4107         DHD_ERROR(("%s: Cannot post more than max event buffers\n",
4108             __FUNCTION__));
        /* release the re-entry guard before the early return */
        atomic_dec(&dhd_msgbuf_rxbuf_post_event_bufs_running);
4109         return;
4110     }
4111     prot->cur_event_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
4112         MSG_TYPE_EVENT_BUF_POST, max_to_post);
4113 
4114     atomic_dec(&dhd_msgbuf_rxbuf_post_event_bufs_running);
4115 }
4116 
4117 static int
4118 dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *dhd)
4119 {
4120 #ifdef DHD_TIMESYNC
4121     dhd_prot_t *prot = dhd->prot;
4122     int max_to_post;
4123 
4124     if (prot->active_ipc_version < 7) {
4125         DHD_ERROR(("not posting ts buffers: device ipc rev is %d, needs to be at least 7\n",
4126             prot->active_ipc_version));
4127         return 0;
4128     }
4129 
4130     max_to_post = prot->max_tsbufpost - prot->cur_ts_bufs_posted;
4131     if (max_to_post <= 0) {
4132         DHD_INFO(("%s: Cannot post more than max ts buffers\n",
4133             __FUNCTION__));
4134         return 0;
4135     }
4136 
4137     prot->cur_ts_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
4138         MSG_TYPE_TIMSTAMP_BUFPOST, max_to_post);
4139 #endif /* DHD_TIMESYNC */
4140     return 0;
4141 }
4142 
4143 bool BCMFASTPATH
4144 dhd_prot_process_msgbuf_infocpl(dhd_pub_t *dhd, uint bound)
4145 {
4146     dhd_prot_t *prot = dhd->prot;
4147     bool more = TRUE;
4148     uint n = 0;
4149     msgbuf_ring_t *ring = prot->d2hring_info_cpln;
4150 
4151     if (ring == NULL)
4152         return FALSE;
4153     if (ring->inited != TRUE)
4154         return FALSE;
4155 
4156     /* Process all the messages - DTOH direction */
4157     while (!dhd_is_device_removed(dhd)) {
4158         uint8 *msg_addr;
4159         uint32 msg_len;
4160 
4161         if (dhd->hang_was_sent) {
4162             more = FALSE;
4163             break;
4164         }
4165 
4166         /* Get the message from ring */
4167         msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
4168         if (msg_addr == NULL) {
4169             more = FALSE;
4170             break;
4171         }
4172 
4173         /* Prefetch data to populate the cache */
4174         OSL_PREFETCH(msg_addr);
4175 
4176         if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
4177             DHD_ERROR(("%s: Error while processing info cpl msgbuf of len %d\n",
4178                 __FUNCTION__, msg_len));
4179         }
4180 
4181         /* Update read pointer */
4182         dhd_prot_upd_read_idx(dhd, ring);
4183 
4184         /* After batch processing, check RX bound */
4185         n += msg_len / ring->item_len;
4186         if (n >= bound) {
4187             break;
4188         }
4189     }
4190 
4191     return more;
4192 }
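
/*
 * The 'bound' argument caps how many completion items are consumed per call;
 * once the cap is reached the function returns TRUE so the caller can
 * reschedule itself instead of monopolizing the CPU. The rxcpl and txcpl
 * processors below follow the same contract. A minimal caller-side sketch,
 * assuming a hypothetical budget and re-arm hook:
 *
 *     more = dhd_prot_process_msgbuf_infocpl(dhd, budget);
 *     if (more)
 *         reschedule_dpc(dhd);    (hypothetical re-arm of the caller's
 *                                  tasklet/worker to resume processing later)
 */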
4193 
4194 /** called when DHD needs to check for 'receive complete' messages from the dongle */
4195 bool BCMFASTPATH
4196 dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound)
4197 {
4198     bool more = FALSE;
4199     uint n = 0;
4200     dhd_prot_t *prot = dhd->prot;
4201     msgbuf_ring_t *ring = &prot->d2hring_rx_cpln;
4202     uint16 item_len = ring->item_len;
4203     host_rxbuf_cmpl_t *msg = NULL;
4204     uint8 *msg_addr;
4205     uint32 msg_len;
4206     uint16 pkt_cnt, pkt_cnt_newidx;
4207     unsigned long flags;
4208     dmaaddr_t pa;
4209     uint32 len;
4210     void *dmah;
4211     void *secdma;
4212     int ifidx = 0, if_newidx = 0;
4213     void *pkt, *pktqhead = NULL, *prevpkt = NULL, *pkt_newidx, *nextpkt;
4214     uint32 pktid;
4215     int i;
4216     uint8 sync;
4217 
4218     while (1) {
4219         if (dhd_is_device_removed(dhd))
4220             break;
4221 
4222         if (dhd->hang_was_sent)
4223             break;
4224 
4225         pkt_cnt = 0;
4226         pktqhead = pkt_newidx = NULL;
4227         pkt_cnt_newidx = 0;
4228 
4229         DHD_SPIN_LOCK(prot->rx_lock, flags);
4230 
4231         /* Get the address of the next message to be read from ring */
4232         msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
4233         if (msg_addr == NULL) {
4234             DHD_SPIN_UNLOCK(prot->rx_lock, flags);
4235             break;
4236         }
4237 
4238         while (msg_len > 0) {
4239             msg = (host_rxbuf_cmpl_t *)msg_addr;
4240 
4241             /* Wait until DMA completes, then fetch msg_type */
4242             sync = prot->d2h_sync_cb(dhd, ring, &msg->cmn_hdr, item_len);
4243             /*
4244              * Update the curr_rd to the current index in the ring, from where
4245              * the work item is fetched. This way if the fetched work item
4246              * fails in LIVELOCK, we can print the exact read index in the ring
4247              * that points to the corrupted work item.
4248              */
4249             if ((ring->curr_rd + 1) >= ring->max_items) {
4250                 ring->curr_rd = 0;
4251             } else {
4252                 ring->curr_rd += 1;
4253             }
4254 
4255             if (!sync) {
4256                 msg_len -= item_len;
4257                 msg_addr += item_len;
4258                 continue;
4259             }
4260 
4261             pktid = ltoh32(msg->cmn_hdr.request_id);
4262 
4263 #ifdef DHD_PKTID_AUDIT_RING
4264             DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_rx_map, pktid,
4265                 DHD_DUPLICATE_FREE, msg, D2HRING_RXCMPLT_ITEMSIZE);
4266 #endif /* DHD_PKTID_AUDIT_RING */
4267 
4268             pkt = DHD_PKTID_TO_NATIVE(dhd, prot->pktid_rx_map, pktid, pa,
4269                     len, dmah, secdma, PKTTYPE_DATA_RX);
4270             if (!pkt) {
4271                 msg_len -= item_len;
4272                 msg_addr += item_len;
4273                 continue;
4274             }
4275 
4276             if (SECURE_DMA_ENAB(dhd->osh))
4277                 SECURE_DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0,
4278                     dmah, secdma, 0);
4279             else
4280                 DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
4281 
4282 #ifdef DMAMAP_STATS
4283             dhd->dma_stats.rxdata--;
4284             dhd->dma_stats.rxdata_sz -= len;
4285 #endif /* DMAMAP_STATS */
4286             DHD_INFO(("id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, "
4287                 "pktdata %p, metalen %d\n",
4288                 ltoh32(msg->cmn_hdr.request_id),
4289                 ltoh16(msg->data_offset),
4290                 ltoh16(msg->data_len), msg->cmn_hdr.if_id,
4291                 msg->cmn_hdr.flags, PKTDATA(dhd->osh, pkt),
4292                 ltoh16(msg->metadata_len)));
4293 
4294             pkt_cnt++;
4295             msg_len -= item_len;
4296             msg_addr += item_len;
4297 
4298 #if DHD_DBG_SHOW_METADATA
4299             if (prot->metadata_dbg && prot->rx_metadata_offset &&
4300                     msg->metadata_len) {
4301                 uchar *ptr;
4302                 ptr = PKTDATA(dhd->osh, pkt) - (prot->rx_metadata_offset);
4303                 /* header followed by data */
4304                 bcm_print_bytes("rxmetadata", ptr, msg->metadata_len);
4305                 dhd_prot_print_metadata(dhd, ptr, msg->metadata_len);
4306             }
4307 #endif /* DHD_DBG_SHOW_METADATA */
4308 
4309             /* data_offset from buf start */
4310             if (ltoh16(msg->data_offset)) {
4311                 /* data offset given from dongle after split rx */
4312                 PKTPULL(dhd->osh, pkt, ltoh16(msg->data_offset));
4313             }
4314             else if (prot->rx_dataoffset) {
4315                 /* DMA RX offset updated through shared area */
4316                 PKTPULL(dhd->osh, pkt, prot->rx_dataoffset);
4317             }
4318             /* Actual length of the packet */
4319             PKTSETLEN(dhd->osh, pkt, ltoh16(msg->data_len));
4320 #if defined(WL_MONITOR)
4321             if (dhd_monitor_enabled(dhd, ifidx) &&
4322                 (msg->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11)) {
4323                 dhd_rx_mon_pkt(dhd, msg, pkt, ifidx);
4324                 continue;
4325             }
4326 #endif
4327 
4328             if (!pktqhead) {
4329                 pktqhead = prevpkt = pkt;
4330                 ifidx = msg->cmn_hdr.if_id;
4331             } else {
4332                 if (ifidx != msg->cmn_hdr.if_id) {
4333                     pkt_newidx = pkt;
4334                     if_newidx = msg->cmn_hdr.if_id;
4335                     pkt_cnt--;
4336                     pkt_cnt_newidx = 1;
4337                     break;
4338                 } else {
4339                     PKTSETNEXT(dhd->osh, prevpkt, pkt);
4340                     prevpkt = pkt;
4341                 }
4342             }
4343 
4344 #ifdef DHD_TIMESYNC
4345             if (dhd->prot->rx_ts_log_enabled) {
4346                 ts_timestamp_t *ts = (ts_timestamp_t *)&msg->ts;
4347                 dhd_timesync_log_rx_timestamp(dhd->ts, ifidx, ts->low, ts->high);
4348             }
4349 #endif /* DHD_TIMESYNC */
4350         }
4351 
4352         /* roll back read pointer for unprocessed message */
4353         if (msg_len > 0) {
4354             if (ring->rd < msg_len / item_len)
4355                 ring->rd = ring->max_items - msg_len / item_len;
4356             else
4357                 ring->rd -= msg_len / item_len;
4358         }
4359 
4360         /* Update read pointer */
4361         dhd_prot_upd_read_idx(dhd, ring);
4362 
4363         DHD_SPIN_UNLOCK(prot->rx_lock, flags);
4364 
4365         pkt = pktqhead;
4366         for (i = 0; pkt && i < pkt_cnt; i++, pkt = nextpkt) {
4367             nextpkt = PKTNEXT(dhd->osh, pkt);
4368             PKTSETNEXT(dhd->osh, pkt, NULL);
4369 #ifdef DHD_LB_RXP
4370             dhd_lb_rx_pkt_enqueue(dhd, pkt, ifidx);
4371 #elif defined(DHD_RX_CHAINING)
4372             dhd_rxchain_frame(dhd, pkt, ifidx);
4373 #else
4374             dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
4375 #endif /* DHD_LB_RXP */
4376         }
4377 
4378         if (pkt_newidx) {
4379 #ifdef DHD_LB_RXP
4380             dhd_lb_rx_pkt_enqueue(dhd, pkt_newidx, if_newidx);
4381 #elif defined(DHD_RX_CHAINING)
4382             dhd_rxchain_frame(dhd, pkt_newidx, if_newidx);
4383 #else
4384             dhd_bus_rx_frame(dhd->bus, pkt_newidx, if_newidx, 1);
4385 #endif /* DHD_LB_RXP */
4386         }
4387 
4388         pkt_cnt += pkt_cnt_newidx;
4389 
4390         /* Post another set of rxbufs to the device */
4391         dhd_prot_return_rxbuf(dhd, 0, pkt_cnt);
4392 
4393         /* After batch processing, check RX bound */
4394         n += pkt_cnt;
4395         if (n >= bound) {
4396             more = TRUE;
4397             break;
4398         }
4399     }
4400 
4401     /* Call lb_dispatch only if packets are queued */
4402     if (n) {
4403         DHD_LB_DISPATCH_RX_COMPL(dhd);
4404         DHD_LB_DISPATCH_RX_PROCESS(dhd);
4405     }
4406 
4407     return more;
4408 }
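
/*
 * Note on the batching above: completions that belong to the same interface
 * are chained with PKTSETNEXT() and handed up in one pass, which keeps the
 * rx_lock hold time short. The first completion carrying a different
 * cmn_hdr.if_id terminates the batch; that single packet is remembered in
 * pkt_newidx/if_newidx and delivered right after the chained batch, so
 * per-interface ordering is preserved.
 */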
4409 
4410 /**
4411  * Hands transmit packets (with a caller provided flow_id) over to dongle territory (the flow ring)
4412  */
4413 void
4414 dhd_prot_update_txflowring(dhd_pub_t *dhd, uint16 flowid, void *msgring)
4415 {
4416     msgbuf_ring_t *ring = (msgbuf_ring_t *)msgring;
4417 
4418     if (ring == NULL) {
4419         DHD_ERROR(("%s: NULL txflowring. exiting...\n",  __FUNCTION__));
4420         return;
4421     }
4422     /* Update read pointer */
4423     if (dhd->dma_d2h_ring_upd_support) {
4424         ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
4425     }
4426 
4427     DHD_TRACE(("ringid %d flowid %d write %d read %d \n\n",
4428         ring->idx, flowid, ring->wr, ring->rd));
4429 
4430     /* Need more logic here, but for now use it directly */
4431     dhd_bus_schedule_queue(dhd->bus, flowid, TRUE); /* from queue to flowring */
4432 }
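
/*
 * When dma_d2h_ring_upd_support is set, the dongle DMAs its ring read indices
 * into host memory instead of requiring the host to read them from device
 * TCM, and dhd_prot_dma_indx_get() above fetches the flow ring's RD index
 * from that host-resident copy. A minimal sketch of the idea, assuming a
 * hypothetical host-side index array h2d_rd_indices[]:
 *
 *     ring->rd = h2d_rd_indices[ring->idx];
 *
 * which is effectively what dhd_prot_dma_indx_get() returns, avoiding a slow
 * PCIe read on every tx queue flush.
 */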
4433 
4434 /** called when DHD needs to check for 'transmit complete' messages from the dongle */
4435 bool BCMFASTPATH
4436 dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound)
4437 {
4438     bool more = TRUE;
4439     uint n = 0;
4440     msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln;
4441 
4442     /* Process all the messages - DTOH direction */
4443     while (!dhd_is_device_removed(dhd)) {
4444         uint8 *msg_addr;
4445         uint32 msg_len;
4446 
4447         if (dhd->hang_was_sent) {
4448             more = FALSE;
4449             break;
4450         }
4451 
4452         /* Get the address of the next message to be read from ring */
4453         msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
4454         if (msg_addr == NULL) {
4455             more = FALSE;
4456             break;
4457         }
4458 
4459         /* Prefetch data to populate the cache */
4460         OSL_PREFETCH(msg_addr);
4461 
4462         if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
4463             DHD_ERROR(("%s: process %s msg addr %p len %d\n",
4464                 __FUNCTION__, ring->name, msg_addr, msg_len));
4465         }
4466 
4467         /* Write to dngl rd ptr */
4468         dhd_prot_upd_read_idx(dhd, ring);
4469 
4470         /* After batch processing, check bound */
4471         n += msg_len / ring->item_len;
4472         if (n >= bound) {
4473             break;
4474         }
4475     }
4476 
4477     DHD_LB_DISPATCH_TX_COMPL(dhd);
4478 
4479     return more;
4480 }
4481 
4482 int BCMFASTPATH
4483 dhd_prot_process_trapbuf(dhd_pub_t *dhd)
4484 {
4485     uint32 data;
4486     dhd_dma_buf_t *trap_addr = &dhd->prot->fw_trap_buf;
4487 
4488     /* Interrupts can come in before this struct
4489      *  has been initialized.
4490      */
4491     if (trap_addr->va == NULL) {
4492         DHD_ERROR(("%s: trap_addr->va is NULL\n", __FUNCTION__));
4493         return 0;
4494     }
4495 
4496     OSL_CACHE_INV((void *)trap_addr->va, sizeof(uint32));
4497     data = *(uint32 *)(trap_addr->va);
4498 
4499     if (data & D2H_DEV_FWHALT) {
4500         DHD_ERROR(("Firmware trapped and trap_data is 0x%04x\n", data));
4501         if (data & D2H_DEV_EXT_TRAP_DATA)
4502         {
4503             if (dhd->extended_trap_data) {
4504                 OSL_CACHE_INV((void *)trap_addr->va,
4505                        BCMPCIE_EXT_TRAP_DATA_MAXLEN);
4506                 memcpy(dhd->extended_trap_data, (uint32 *)trap_addr->va,
4507                        BCMPCIE_EXT_TRAP_DATA_MAXLEN);
4508             }
4509             DHD_ERROR(("Extended trap data available\n"));
4510         }
4511         return data;
4512     }
4513     return 0;
4514 }
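
/*
 * The dongle reports a firmware halt by writing a status word into
 * prot->fw_trap_buf. D2H_DEV_FWHALT marks the trap itself; when
 * D2H_DEV_EXT_TRAP_DATA is also set, the same buffer carries up to
 * BCMPCIE_EXT_TRAP_DATA_MAXLEN bytes of extended trap data, which is copied
 * into dhd->extended_trap_data for later dumping. A minimal caller-side
 * sketch, assuming the caller only needs to know whether a trap occurred
 * (recovery policy is assumed to live with the caller in the bus layer):
 *
 *     if (dhd_prot_process_trapbuf(dhd))
 *         dhd_bus_mem_dump(dhd);
 */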
4515 
4516 /** called when DHD needs to check for 'ioctl complete' messages from the dongle */
4517 int BCMFASTPATH
4518 dhd_prot_process_ctrlbuf(dhd_pub_t *dhd)
4519 {
4520     dhd_prot_t *prot = dhd->prot;
4521     msgbuf_ring_t *ring = &prot->d2hring_ctrl_cpln;
4522 
4523     /* Process all the messages - DTOH direction */
4524     while (!dhd_is_device_removed(dhd)) {
4525         uint8 *msg_addr;
4526         uint32 msg_len;
4527 
4528         if (dhd->hang_was_sent) {
4529             break;
4530         }
4531 
4532         /* Get the address of the next message to be read from ring */
4533         msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
4534         if (msg_addr == NULL) {
4535             break;
4536         }
4537 
4538         /* Prefetch data to populate the cache */
4539         OSL_PREFETCH(msg_addr);
4540         if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
4541             DHD_ERROR(("%s: process %s msg addr %p len %d\n",
4542                 __FUNCTION__, ring->name, msg_addr, msg_len));
4543         }
4544 
4545         /* Write to dngl rd ptr */
4546         dhd_prot_upd_read_idx(dhd, ring);
4547     }
4548 
4549     return 0;
4550 }
4551 
4552 /**
4553  * Consume messages out of the D2H ring. Ensure that the message's DMA to host
4554  * memory has completed, before invoking the message handler via a table lookup
4555  * of the cmn_msg_hdr::msg_type.
4556  */
4557 static int BCMFASTPATH
4558 dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len)
4559 {
4560     uint32 buf_len = len;
4561     uint16 item_len;
4562     uint8 msg_type;
4563     cmn_msg_hdr_t *msg = NULL;
4564     int ret = BCME_OK;
4565 
4566     ASSERT(ring);
4567     item_len = ring->item_len;
4568     if (item_len == 0) {
4569         DHD_ERROR(("%s: ringidx %d, item_len %d buf_len %d \n",
4570             __FUNCTION__, ring->idx, item_len, buf_len));
4571         return BCME_ERROR;
4572     }
4573 
4574     while (buf_len > 0) {
4575         if (dhd->hang_was_sent) {
4576             ret = BCME_ERROR;
4577             goto done;
4578         }
4579 
4580         msg = (cmn_msg_hdr_t *)buf;
4581 
4582         /* Wait until DMA completes, then fetch msg_type */
4583         msg_type = dhd->prot->d2h_sync_cb(dhd, ring, msg, item_len);
4584 
4585         /*
4586          * Update the curr_rd to the current index in the ring, from where
4587          * the work item is fetched. This way if the fetched work item
4588          * fails in LIVELOCK, we can print the exact read index in the ring
4589          * that points to the corrupted work item.
4590          */
4591         if ((ring->curr_rd + 1) >= ring->max_items) {
4592             ring->curr_rd = 0;
4593         } else {
4594             ring->curr_rd += 1;
4595         }
4596 
4597         /* Prefetch data to populate the cache */
4598         OSL_PREFETCH(buf + item_len);
4599 
4600         DHD_INFO(("msg_type %d item_len %d buf_len %d\n",
4601             msg_type, item_len, buf_len));
4602 
4603         if (msg_type == MSG_TYPE_LOOPBACK) {
4604             bcm_print_bytes("LPBK RESP: ", (uint8 *)msg, item_len);
4605             DHD_ERROR((" MSG_TYPE_LOOPBACK, len %d\n", item_len));
4606         }
4607 
4608         ASSERT(msg_type < DHD_PROT_FUNCS);
4609         if (msg_type >= DHD_PROT_FUNCS) {
4610             DHD_ERROR(("%s: msg_type %d, item_len %d buf_len %d\n",
4611                 __FUNCTION__, msg_type, item_len, buf_len));
4612             ret = BCME_ERROR;
4613             goto done;
4614         }
4615 
4616         if (table_lookup[msg_type]) {
4617             table_lookup[msg_type](dhd, buf);
4618         }
4619 
4620         if (buf_len < item_len) {
4621             ret = BCME_ERROR;
4622             goto done;
4623         }
4624         buf_len = buf_len - item_len;
4625         buf = buf + item_len;
4626     }
4627 
4628 done:
4629 
4630 #ifdef DHD_RX_CHAINING
4631     dhd_rxchain_commit(dhd);
4632 #endif
4633 
4634     return ret;
4635 } /* dhd_prot_process_msgtype */
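
/*
 * Dispatch model used above: every work item starts with a cmn_msg_hdr_t whose
 * msg_type indexes table_lookup[], an array of DHD_PROT_FUNCS handler pointers
 * taking (dhd, buf). A minimal sketch of the table's shape (typedef name
 * assumed; the actual initializer is defined elsewhere in this file and covers
 * all message types):
 *
 *     typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void *msg);
 *     static dhd_msgbuf_func_t table_lookup[DHD_PROT_FUNCS] = {
 *         [MSG_TYPE_GEN_STATUS]  = dhd_prot_genstatus_process,
 *         [MSG_TYPE_TX_STATUS]   = dhd_prot_txstatus_process,
 *         [MSG_TYPE_WL_EVENT]    = dhd_prot_event_process,
 *         ...
 *     };
 *
 * A msg_type at or beyond DHD_PROT_FUNCS aborts processing with BCME_ERROR,
 * and NULL table entries are simply skipped.
 */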
4636 
4637 static void
4638 dhd_prot_noop(dhd_pub_t *dhd, void *msg)
4639 {
4640     return;
4641 }
4642 
4643 /** called on MSG_TYPE_RING_STATUS message received from dongle */
4644 static void
4645 dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg)
4646 {
4647     pcie_ring_status_t *ring_status = (pcie_ring_status_t *) msg;
4648     uint32 request_id = ltoh32(ring_status->cmn_hdr.request_id);
4649     uint16 status = ltoh16(ring_status->compl_hdr.status);
4650     uint16 ring_id = ltoh16(ring_status->compl_hdr.flow_ring_id);
4651 
4652     DHD_ERROR(("ring status: request_id %d, status 0x%04x, flow ring %d, write_idx %d \n",
4653         request_id, status, ring_id, ltoh16(ring_status->write_idx)));
4654 
4655     if (ltoh16(ring_status->compl_hdr.ring_id) != BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT)
4656         return;
4657     if (status == BCMPCIE_BAD_PHASE) {
4658         /* bad phase reported by the dongle */
4659         DHD_ERROR(("Bad phase\n"));
4660     }
4661     if (status != BCMPCIE_BADOPTION)
4662         return;
4663 
4664     if (request_id == DHD_H2D_DBGRING_REQ_PKTID) {
4665         if (dhd->prot->h2dring_info_subn != NULL) {
4666             if (dhd->prot->h2dring_info_subn->create_pending == TRUE) {
4667                 DHD_ERROR(("H2D ring create failed for info ring\n"));
4668                 dhd->prot->h2dring_info_subn->create_pending = FALSE;
4669             }
4670             else
4671                 DHD_ERROR(("ring create ID for a ring, create not pending\n"));
4672         } else {
4673             DHD_ERROR(("%s info submit ring doesn't exist\n", __FUNCTION__));
4674         }
4675     }
4676     else if (request_id == DHD_D2H_DBGRING_REQ_PKTID) {
4677         if (dhd->prot->d2hring_info_cpln != NULL) {
4678             if (dhd->prot->d2hring_info_cpln->create_pending == TRUE) {
4679                 DHD_ERROR(("D2H ring create failed for info ring\n"));
4680                 dhd->prot->d2hring_info_cpln->create_pending = FALSE;
4681             }
4682             else
4683                 DHD_ERROR(("ring create ID for info ring, create not pending\n"));
4684         } else {
4685             DHD_ERROR(("%s info cpl ring doesn't exist\n", __FUNCTION__));
4686         }
4687     }
4688     else {
4689         DHD_ERROR(("don't know how to pair with original request\n"));
4690     }
4691     /* How do we track this to pair it with ??? */
4692     return;
4693 }
4694 
4695 /** called on MSG_TYPE_GEN_STATUS ('general status') message received from dongle */
4696 static void
4697 dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg)
4698 {
4699     pcie_gen_status_t *gen_status = (pcie_gen_status_t *)msg;
4700     DHD_ERROR(("ERROR: gen status: request_id %d, STATUS 0x%04x, flow ring %d \n",
4701         gen_status->cmn_hdr.request_id, gen_status->compl_hdr.status,
4702         gen_status->compl_hdr.flow_ring_id));
4703 
4704     /* How do we track this to pair it with ??? */
4705     return;
4706 }
4707 
4708 /**
4709  * Called on MSG_TYPE_IOCTLPTR_REQ_ACK ('ioctl ack') message received from dongle, meaning that the
4710  * dongle received the ioctl message in dongle memory.
4711  */
4712 static void
4713 dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg)
4714 {
4715     ioctl_req_ack_msg_t *ioct_ack = (ioctl_req_ack_msg_t *)msg;
4716     unsigned long flags;
4717 #ifdef DHD_PKTID_AUDIT_RING
4718     uint32 pktid = ltoh32(ioct_ack->cmn_hdr.request_id);
4719 
4720     /* Skip audit for DHD_IOCTL_REQ_PKTID = 0xFFFE */
4721     if (pktid != DHD_IOCTL_REQ_PKTID) {
4722 #ifndef IOCTLRESP_USE_CONSTMEM
4723         DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid,
4724             DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
4725 #else
4726         DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_map_handle_ioctl, pktid,
4727             DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
4728 #endif /* !IOCTLRESP_USE_CONSTMEM */
4729     }
4730 #endif /* DHD_PKTID_AUDIT_RING */
4731 
4732     DHD_GENERAL_LOCK(dhd, flags);
4733     if ((dhd->prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) &&
4734         (dhd->prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
4735         dhd->prot->ioctl_state &= ~MSGBUF_IOCTL_ACK_PENDING;
4736     } else {
4737         DHD_ERROR(("%s: received ioctl ACK with state %02x trans_id = %d\n",
4738             __FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
4739         prhex("dhd_prot_ioctack_process:",
4740             (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
4741     }
4742     DHD_GENERAL_UNLOCK(dhd, flags);
4743 
4744     DHD_CTL(("ioctl req ack: request_id %d, status 0x%04x, flow ring %d \n",
4745         ioct_ack->cmn_hdr.request_id, ioct_ack->compl_hdr.status,
4746         ioct_ack->compl_hdr.flow_ring_id));
4747     if (ioct_ack->compl_hdr.status != 0)  {
4748         DHD_ERROR(("got an error status for the ioctl request...need to handle that\n"));
4749     }
4750 #ifdef REPORT_FATAL_TIMEOUTS
4751     else {
4752         dhd_stop_bus_timer(dhd);
4753     }
4754 #endif /* REPORT_FATAL_TIMEOUTS */
4755 }
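
/*
 * Ioctl handshake as seen from these handlers: the submit path sets both
 * MSGBUF_IOCTL_ACK_PENDING and MSGBUF_IOCTL_RESP_PENDING before ringing the
 * doorbell. The dongle first acknowledges receipt (MSG_TYPE_IOCTLPTR_REQ_ACK,
 * handled above, clears the ACK bit) and later returns the result
 * (MSG_TYPE_IOCTL_CMPLT, handled below, clears the RESP bit and wakes the
 * waiter). The expected state sequence, assuming no errors:
 *
 *     submit:   ioctl_state = ACK_PENDING | RESP_PENDING
 *     ack:      ioctl_state = RESP_PENDING
 *     complete: ioctl_state = 0, dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_SUCCESS)
 *
 * Any other ordering is logged together with a prhex() dump of the offending
 * work item.
 */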
4756 
4757 /** called on MSG_TYPE_IOCTL_CMPLT message received from dongle */
4758 static void
4759 dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg)
4760 {
4761     dhd_prot_t *prot = dhd->prot;
4762     uint32 pkt_id, xt_id;
4763     ioctl_comp_resp_msg_t *ioct_resp = (ioctl_comp_resp_msg_t *)msg;
4764     void *pkt;
4765     unsigned long flags;
4766     dhd_dma_buf_t retbuf;
4767 #ifdef REPORT_FATAL_TIMEOUTS
4768     uint16    dhd_xt_id;
4769 #endif
4770 
4771     memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
4772 
4773     pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id);
4774 
4775 #ifdef DHD_PKTID_AUDIT_RING
4776 #ifndef IOCTLRESP_USE_CONSTMEM
4777     DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_ctrl_map, pkt_id,
4778         DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
4779 #else
4780     DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_map_handle_ioctl, pkt_id,
4781         DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
4782 #endif /* !IOCTLRESP_USE_CONSTMEM */
4783 #endif /* DHD_PKTID_AUDIT_RING */
4784 
4785     DHD_GENERAL_LOCK(dhd, flags);
4786     if ((prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) ||
4787         !(prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
4788         DHD_ERROR(("%s: received ioctl response with state %02x trans_id = %d\n",
4789             __FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
4790         prhex("dhd_prot_ioctcmplt_process:",
4791             (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
4792         DHD_GENERAL_UNLOCK(dhd, flags);
4793         return;
4794     }
4795 
4796     /* Clear Response pending bit */
4797     prot->ioctl_state &= ~MSGBUF_IOCTL_RESP_PENDING;
4798 
4799 #ifndef IOCTLRESP_USE_CONSTMEM
4800     pkt = dhd_prot_packet_get(dhd, pkt_id, PKTTYPE_IOCTL_RX, TRUE);
4801 #else
4802     dhd_prot_ioctl_ret_buffer_get(dhd, pkt_id, &retbuf);
4803     pkt = retbuf.va;
4804 #endif /* !IOCTLRESP_USE_CONSTMEM */
4805     if (!pkt) {
4806         DHD_GENERAL_UNLOCK(dhd, flags);
4807         DHD_ERROR(("%s: received ioctl response with NULL pkt\n", __FUNCTION__));
4808         prhex("dhd_prot_ioctcmplt_process:",
4809             (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
4810         return;
4811     }
4812     DHD_GENERAL_UNLOCK(dhd, flags);
4813 
4814     prot->ioctl_resplen = ltoh16(ioct_resp->resp_len);
4815     prot->ioctl_status = ltoh16(ioct_resp->compl_hdr.status);
4816     xt_id = ltoh16(ioct_resp->trans_id);
4817 
4818     if (xt_id != prot->ioctl_trans_id || prot->curr_ioctl_cmd != ioct_resp->cmd) {
4819         DHD_ERROR(("%s: transaction id(%d %d) or cmd(%d %d) mismatch\n",
4820             __FUNCTION__, xt_id, prot->ioctl_trans_id,
4821             prot->curr_ioctl_cmd, ioct_resp->cmd));
4822 #ifdef REPORT_FATAL_TIMEOUTS
4823         dhd_stop_cmd_timer(dhd);
4824 #endif /* REPORT_FATAL_TIMEOUTS */
4825         dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_ERROR);
4826         dhd_prot_debug_info_print(dhd);
4827 #ifdef DHD_FW_COREDUMP
4828         if (dhd->memdump_enabled) {
4829             /* collect core dump */
4830             dhd->memdump_type = DUMP_TYPE_TRANS_ID_MISMATCH;
4831             dhd_bus_mem_dump(dhd);
4832         }
4833 #else
4834         ASSERT(0);
4835 #endif /* DHD_FW_COREDUMP */
4836         dhd_schedule_reset(dhd);
4837         goto exit;
4838     }
4839 #ifdef REPORT_FATAL_TIMEOUTS
4840     dhd_xt_id = dhd_get_request_id(dhd);
4841     if (xt_id == dhd_xt_id) {
4842         dhd_stop_cmd_timer(dhd);
4843     } else {
4844         DHD_ERROR(("%s: Cmd timer not stopped received xt_id %d stored xt_id %d",
4845             __FUNCTION__, xt_id, dhd_xt_id));
4846     }
4847 #endif /* REPORT_FATAL_TIMEOUTS */
4848     DHD_CTL(("IOCTL_COMPLETE: req_id %x transid %d status %x resplen %d\n",
4849         pkt_id, xt_id, prot->ioctl_status, prot->ioctl_resplen));
4850 
4851     if (prot->ioctl_resplen > 0) {
4852 #ifndef IOCTLRESP_USE_CONSTMEM
4853         bcopy(PKTDATA(dhd->osh, pkt), prot->retbuf.va, prot->ioctl_resplen);
4854 #else
4855         bcopy(pkt, prot->retbuf.va, prot->ioctl_resplen);
4856 #endif /* !IOCTLRESP_USE_CONSTMEM */
4857     }
4858 
4859     /* wake up any dhd_os_ioctl_resp_wait() */
4860     dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_SUCCESS);
4861 
4862 exit:
4863 #ifndef IOCTLRESP_USE_CONSTMEM
4864     dhd_prot_packet_free(dhd, pkt,
4865         PKTTYPE_IOCTL_RX, FALSE);
4866 #else
4867     free_ioctl_return_buffer(dhd, &retbuf);
4868 #endif /* !IOCTLRESP_USE_CONSTMEM */
4869 
4870     /* Post another ioctl buf to the device */
4871     if (prot->cur_ioctlresp_bufs_posted > 0) {
4872         prot->cur_ioctlresp_bufs_posted--;
4873     }
4874 
4875     dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
4876 }
4877 
4878 /** called on MSG_TYPE_TX_STATUS message received from dongle */
4879 static void BCMFASTPATH
4880 dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg)
4881 {
4882     dhd_prot_t *prot = dhd->prot;
4883     host_txbuf_cmpl_t * txstatus;
4884     unsigned long flags;
4885     uint32 pktid;
4886     void *pkt;
4887     dmaaddr_t pa;
4888     uint32 len;
4889     void *dmah;
4890     void *secdma;
4891     bool pkt_fate;
4892 #ifdef DEVICE_TX_STUCK_DETECT
4893     flow_ring_node_t *flow_ring_node;
4894     uint16 flowid;
4895 #endif /* DEVICE_TX_STUCK_DETECT */
4896 
4897 
4898     txstatus = (host_txbuf_cmpl_t *)msg;
4899 #ifdef DEVICE_TX_STUCK_DETECT
4900     flowid = txstatus->compl_hdr.flow_ring_id;
4901     flow_ring_node = DHD_FLOW_RING(dhd, flowid);
4902     /**
4903      * Since we got a completion message on this flowid,
4904      * update tx_cmpl time stamp
4905      */
4906     flow_ring_node->tx_cmpl = OSL_SYSUPTIME();
4907 #endif /* DEVICE_TX_STUCK_DETECT */
4908 
4909     /* locks required to protect circular buffer accesses */
4910     DHD_GENERAL_LOCK(dhd, flags);
4911     pktid = ltoh32(txstatus->cmn_hdr.request_id);
4912     pkt_fate = TRUE;
4913 
4914 #ifdef DHD_PKTID_AUDIT_RING
4915     DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_tx_map, pktid,
4916             DHD_DUPLICATE_FREE, msg, D2HRING_TXCMPLT_ITEMSIZE);
4917 #endif /* DHD_PKTID_AUDIT_RING */
4918 
4919     DHD_INFO(("txstatus for pktid 0x%04x\n", pktid));
4920     if (prot->active_tx_count) {
4921         prot->active_tx_count--;
4922 
4923         /* Release the Lock when no more tx packets are pending */
4924         if (prot->active_tx_count == 0) {
4925             DHD_TXFL_WAKE_UNLOCK(dhd);
4926         }
4927     } else {
4928         DHD_ERROR(("Extra tx completion received while no tx packets are pending\n"));
4929     }
4930 
4931     ASSERT(pktid != 0);
4932 #if defined(DHD_LB_TXC) && !defined(BCM_SECURE_DMA)
4933     {
4934         int elem_ix;
4935         void **elem;
4936         bcm_workq_t *workq;
4937         dmaaddr_t pa;
4938         uint32 pa_len;
4939 
4940         pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map,
4941             pktid, pa, pa_len, dmah, secdma, PKTTYPE_DATA_TX);
4942 
4943         workq = &prot->tx_compl_prod;
4944         /*
4945          * Produce the packet into the tx_compl workq for the tx compl tasklet
4946          * to consume.
4947          */
4948         OSL_PREFETCH(PKTTAG(pkt));
4949 
4950         /* fetch next available slot in workq */
4951         elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
4952 
4953         DHD_PKTTAG_SET_PA((dhd_pkttag_fr_t *)PKTTAG(pkt), pa);
4954         DHD_PKTTAG_SET_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt), pa_len);
4955 
4956         if (elem_ix == BCM_RING_FULL) {
4957             DHD_ERROR(("tx_compl_prod BCM_RING_FULL\n"));
4958             goto workq_ring_full;
4959         }
4960 
4961         elem = WORKQ_ELEMENT(void *, &prot->tx_compl_prod, elem_ix);
4962         *elem = pkt;
4963 
4964         smp_wmb();
4965 
4966         /* Sync WR index to consumer if the SYNC threshold has been reached */
4967         if (++prot->tx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) {
4968             bcm_workq_prod_sync(workq);
4969             prot->tx_compl_prod_sync = 0;
4970         }
4971 
4972         DHD_INFO(("%s: tx_compl_prod pkt<%p> sync<%d>\n",
4973             __FUNCTION__, pkt, prot->tx_compl_prod_sync));
4974 
4975         DHD_GENERAL_UNLOCK(dhd, flags);
4976 
4977         return;
4978     }
4979 
4980 workq_ring_full:
4981 
4982 #endif /* !DHD_LB_TXC */
4983 
4984     pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid,
4985         pa, len, dmah, secdma, PKTTYPE_DATA_TX);
4986 
4987     if (pkt) {
4988         if (SECURE_DMA_ENAB(dhd->osh)) {
4989             int offset = 0;
4990             BCM_REFERENCE(offset);
4991 
4992             if (dhd->prot->tx_metadata_offset)
4993                 offset = dhd->prot->tx_metadata_offset + ETHER_HDR_LEN;
4994             SECURE_DMA_UNMAP(dhd->osh, (uint) pa,
4995                 (uint) dhd->prot->tx_metadata_offset, DMA_RX, 0, dmah,
4996                 secdma, offset);
4997         } else
4998             DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
4999 #ifdef DMAMAP_STATS
5000         dhd->dma_stats.txdata--;
5001         dhd->dma_stats.txdata_sz -= len;
5002 #endif /* DMAMAP_STATS */
5003 #if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING)
5004         if (dhd->d11_tx_status) {
5005             uint16 tx_status;
5006 
5007             tx_status = ltoh16(txstatus->compl_hdr.status) &
5008                 WLFC_CTL_PKTFLAG_MASK;
5009             pkt_fate = (tx_status == WLFC_CTL_PKTFLAG_DISCARD) ? TRUE : FALSE;
5010 
5011             DHD_DBG_PKT_MON_TX_STATUS(dhd, pkt, pktid, tx_status);
5012 #ifdef DHD_PKT_LOGGING
5013             DHD_PKTLOG_TXS(dhd, pkt, pktid, tx_status);
5014 #endif /* DHD_PKT_LOGGING */
5015         }
5016 #endif /* DBG_PKT_MON || DHD_PKT_LOGGING */
5017 #if defined(BCMPCIE)
5018         dhd_txcomplete(dhd, pkt, pkt_fate);
5019 #endif
5020 
5021 #if DHD_DBG_SHOW_METADATA
5022         if (dhd->prot->metadata_dbg &&
5023             dhd->prot->tx_metadata_offset && txstatus->metadata_len) {
5024             uchar *ptr;
5025             /* The Ethernet header of TX frame was copied and removed.
5026              * Here, move the data pointer forward by Ethernet header size.
5027              */
5028             PKTPULL(dhd->osh, pkt, ETHER_HDR_LEN);
5029             ptr = PKTDATA(dhd->osh, pkt)  - (dhd->prot->tx_metadata_offset);
5030             bcm_print_bytes("txmetadata", ptr, txstatus->metadata_len);
5031             dhd_prot_print_metadata(dhd, ptr, txstatus->metadata_len);
5032         }
5033 #endif /* DHD_DBG_SHOW_METADATA */
5034         DHD_GENERAL_UNLOCK(dhd, flags);
5035         PKTFREE(dhd->osh, pkt, TRUE);
5036         DHD_GENERAL_LOCK(dhd, flags);
5037         DHD_FLOWRING_TXSTATUS_CNT_UPDATE(dhd->bus, txstatus->compl_hdr.flow_ring_id,
5038         txstatus->tx_status);
5039 
5040 #ifdef DHD_TIMESYNC
5041         if (dhd->prot->tx_ts_log_enabled) {
5042             ts_timestamp_t *ts = (ts_timestamp_t *)&(txstatus->ts);
5043             dhd_timesync_log_tx_timestamp(dhd->ts,
5044                 txstatus->compl_hdr.flow_ring_id,
5045                 txstatus->cmn_hdr.if_id,
5046                 ts->low, ts->high);
5047         }
5048 #endif /* DHD_TIMESYNC */
5049     }
5050 
5051     DHD_GENERAL_UNLOCK(dhd, flags);
5052 
5053     return;
5054 } /* dhd_prot_txstatus_process */
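
/*
 * Tx completion lifecycle, as implemented above: the request_id carried in the
 * completion is a pktid which DHD_PKTID_TO_NATIVE() converts back into the
 * native packet pointer plus the DMA address/length saved at post time. The
 * buffer is unmapped before PKTFREE(), and active_tx_count is decremented so
 * the tx wake lock can be released once no completions remain outstanding.
 * A condensed sketch of the non-load-balanced path, assuming success:
 *
 *     pkt = DHD_PKTID_TO_NATIVE(dhd, prot->pktid_tx_map, pktid,
 *         pa, len, dmah, secdma, PKTTYPE_DATA_TX);
 *     DMA_UNMAP(dhd->osh, pa, (uint)len, DMA_RX, 0, dmah);
 *     dhd_txcomplete(dhd, pkt, pkt_fate);
 *     PKTFREE(dhd->osh, pkt, TRUE);
 */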
5055 
5056 /** called on MSG_TYPE_WL_EVENT message received from dongle */
5057 static void
5058 dhd_prot_event_process(dhd_pub_t *dhd, void *msg)
5059 {
5060     wlevent_req_msg_t *evnt;
5061     uint32 bufid;
5062     uint16 buflen;
5063     int ifidx = 0;
5064     void* pkt;
5065     unsigned long flags;
5066     dhd_prot_t *prot = dhd->prot;
5067 
5068     /* Event complete header */
5069     evnt = (wlevent_req_msg_t *)msg;
5070     bufid = ltoh32(evnt->cmn_hdr.request_id);
5071 
5072 #ifdef DHD_PKTID_AUDIT_RING
5073     DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, bufid,
5074             DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
5075 #endif /* DHD_PKTID_AUDIT_RING */
5076 
5077     buflen = ltoh16(evnt->event_data_len);
5078 
5079     ifidx = BCMMSGBUF_API_IFIDX(&evnt->cmn_hdr);
5080 
5081     /* Post another rxbuf to the device */
5082     if (prot->cur_event_bufs_posted)
5083         prot->cur_event_bufs_posted--;
5084     dhd_msgbuf_rxbuf_post_event_bufs(dhd);
5085 
5086     /* locks required to protect pktid_map */
5087     DHD_GENERAL_LOCK(dhd, flags);
5088     pkt = dhd_prot_packet_get(dhd, bufid, PKTTYPE_EVENT_RX, TRUE);
5089     DHD_GENERAL_UNLOCK(dhd, flags);
5090 
5091     if (!pkt) {
5092         DHD_ERROR(("%s: pkt is NULL for pktid %d\n", __FUNCTION__, bufid));
5093         return;
5094     }
5095 
5096     /* DMA RX offset updated through shared area */
5097     if (dhd->prot->rx_dataoffset)
5098         PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
5099 
5100     PKTSETLEN(dhd->osh, pkt, buflen);
5101 
5102     dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
5103 }
5104 
5105 /** called on MSG_TYPE_INFO_BUF_CMPLT message received from dongle */
5106 static void BCMFASTPATH
5107 dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf)
5108 {
5109     info_buf_resp_t *resp;
5110     uint32 pktid;
5111     uint16 buflen;
5112     void * pkt;
5113     unsigned long flags;
5114 
5115     resp = (info_buf_resp_t *)buf;
5116     pktid = ltoh32(resp->cmn_hdr.request_id);
5117     buflen = ltoh16(resp->info_data_len);
5118 
5119 #ifdef DHD_PKTID_AUDIT_RING
5120     DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid,
5121             DHD_DUPLICATE_FREE, buf, D2HRING_INFO_BUFCMPLT_ITEMSIZE);
5122 #endif /* DHD_PKTID_AUDIT_RING */
5123 
5124     DHD_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d, rx_dataoffset %d\n",
5125         pktid, buflen, resp->cmn_hdr.flags, ltoh16(resp->seqnum),
5126         dhd->prot->rx_dataoffset));
5127 
5128     if (!dhd->prot->infobufpost) {
5129         DHD_ERROR(("no infobufs posted, but a completion arrived\n"));
5130         return;
5131     }
5132 
5133     dhd->prot->infobufpost--;
5134     dhd_prot_infobufpost(dhd);
5135 
5136     DHD_GENERAL_LOCK(dhd, flags);
5137     pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_INFO_RX, TRUE);
5138     DHD_GENERAL_UNLOCK(dhd, flags);
5139 
5140     if (!pkt)
5141         return;
5142 
5143     /* DMA RX offset updated through shared area */
5144     if (dhd->prot->rx_dataoffset)
5145         PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
5146 
5147     PKTSETLEN(dhd->osh, pkt, buflen);
5148 
5149     /* info ring "debug" data, which is not an 802.3 frame, is passed up with a
5150      * special ifidx of -1.  This is internal to dhd, only to get the data from
5151      * here (dhd_prot_process_infobuf_complete) to dhd_linux.c:dhd_rx_frame().
5152      */
5153     dhd_bus_rx_frame(dhd->bus, pkt, DHD_EVENT_IF /* ifidx HACK */, 1);
5154 }
5155 
5156 /** Stop protocol: sync w/dongle state. */
5157 void dhd_prot_stop(dhd_pub_t *dhd)
5158 {
5159     ASSERT(dhd);
5160     DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5161 }
5162 
5163 /* Add any protocol-specific data header.
5164  * Caller must reserve prot_hdrlen prepend space.
5165  */
5166 void BCMFASTPATH
5167 dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *PKTBUF)
5168 {
5169     return;
5170 }
5171 
5172 uint
5173 dhd_prot_hdrlen(dhd_pub_t *dhd, void *PKTBUF)
5174 {
5175     return 0;
5176 }
5177 
5178 
5179 #define PKTBUF pktbuf
5180 
5181 /**
5182  * Called when a tx ethernet packet has been dequeued from a flow queue, and has to be inserted in
5183  * the corresponding flow ring.
5184  */
5185 int BCMFASTPATH
5186 dhd_prot_txdata(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx)
5187 {
5188     unsigned long flags;
5189     dhd_prot_t *prot = dhd->prot;
5190     host_txbuf_post_t *txdesc = NULL;
5191     dmaaddr_t pa, meta_pa;
5192     uint8 *pktdata;
5193     uint32 pktlen;
5194     uint32 pktid;
5195     uint8    prio;
5196     uint16 flowid = 0;
5197     uint16 alloced = 0;
5198     uint16    headroom;
5199     msgbuf_ring_t *ring;
5200     flow_ring_table_t *flow_ring_table;
5201     flow_ring_node_t *flow_ring_node;
5202 
5203     if (dhd->flow_ring_table == NULL) {
5204         return BCME_NORESOURCE;
5205     }
5206 
5207     flowid = DHD_PKT_GET_FLOWID(PKTBUF);
5208     flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
5209     flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
5210 
5211     ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
5212 
5213 #ifdef PCIE_INB_DW
5214     if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
5215         return BCME_ERROR;
5216 #endif /* PCIE_INB_DW */
5217 
5218     DHD_GENERAL_LOCK(dhd, flags);
5219 
5220     /* Create a unique 32-bit packet id */
5221     pktid = DHD_NATIVE_TO_PKTID_RSV(dhd, dhd->prot->pktid_tx_map,
5222         PKTBUF, PKTTYPE_DATA_TX);
5223 #if defined(DHD_PCIE_PKTID)
5224     if (pktid == DHD_PKTID_INVALID) {
5225         DHD_ERROR(("%s: Pktid pool depleted.\n", __FUNCTION__));
5226         /*
5227          * If we return error here, the caller would queue the packet
5228          * again. So we'll just free the skb allocated in DMA Zone.
5229          * Since we have not freed the original SKB yet the caller would
5230          * requeue the same.
5231          */
5232         goto err_no_res_pktfree;
5233     }
5234 #endif /* DHD_PCIE_PKTID */
5235 
5236     /* Reserve space in the circular buffer */
5237     txdesc = (host_txbuf_post_t *)
5238         dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
5239     if (txdesc == NULL) {
5240         DHD_INFO(("%s:%d: HTOD Msgbuf Not available TxCount = %d\n",
5241             __FUNCTION__, __LINE__, prot->active_tx_count));
5242         goto err_free_pktid;
5243     }
5244 
5245 #ifdef DBG_PKT_MON
5246     DHD_DBG_PKT_MON_TX(dhd, PKTBUF, pktid);
5247 #endif /* DBG_PKT_MON */
5248 #ifdef DHD_PKT_LOGGING
5249     DHD_PKTLOG_TX(dhd, PKTBUF, pktid);
5250 #endif /* DHD_PKT_LOGGING */
5251 
5252 
5253     /* Extract the data pointer and length information */
5254     pktdata = PKTDATA(dhd->osh, PKTBUF);
5255     pktlen  = PKTLEN(dhd->osh, PKTBUF);
5256 
5257     /* Ethernet header: Copy before we cache flush packet using DMA_MAP */
5258     bcopy(pktdata, txdesc->txhdr, ETHER_HDR_LEN);
5259 
5260     /* Extract the ethernet header and adjust the data pointer and length */
5261     pktdata = PKTPULL(dhd->osh, PKTBUF, ETHER_HDR_LEN);
5262     pktlen -= ETHER_HDR_LEN;
5263 
5264     /* Map the data pointer to a DMA-able address */
5265     if (SECURE_DMA_ENAB(dhd->osh)) {
5266         int offset = 0;
5267         BCM_REFERENCE(offset);
5268 
5269         if (prot->tx_metadata_offset)
5270             offset = prot->tx_metadata_offset + ETHER_HDR_LEN;
5271 
5272         pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen,
5273             DMA_TX, PKTBUF, 0, ring->dma_buf.secdma, offset);
5274     }
5275 #ifndef BCM_SECURE_DMA
5276     else
5277         pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, DMA_TX, PKTBUF, 0);
5278 #endif /* #ifndef BCM_SECURE_DMA */
5279 
5280     if (PHYSADDRISZERO(pa)) {
5281         DHD_ERROR(("%s: Something really bad, unless 0 is "
5282             "a valid phyaddr for pa\n", __FUNCTION__));
5283         ASSERT(0);
5284         goto err_rollback_idx;
5285     }
5286 
5287 #ifdef DMAMAP_STATS
5288     dhd->dma_stats.txdata++;
5289     dhd->dma_stats.txdata_sz += pktlen;
5290 #endif /* DMAMAP_STATS */
5291     /* No need to lock. Save the rest of the packet's metadata */
5292     DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_tx_map, PKTBUF, pktid,
5293         pa, pktlen, DMA_TX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_TX);
5294 
5295 #ifdef TXP_FLUSH_NITEMS
5296     if (ring->pend_items_count == 0)
5297         ring->start_addr = (void *)txdesc;
5298     ring->pend_items_count++;
5299 #endif
5300 
5301     /* Form the Tx descriptor message buffer */
5302 
5303     /* Common message hdr */
5304     txdesc->cmn_hdr.msg_type = MSG_TYPE_TX_POST;
5305     txdesc->cmn_hdr.if_id = ifidx;
5306     txdesc->cmn_hdr.flags = ring->current_phase;
5307 
5308     txdesc->flags = BCMPCIE_PKT_FLAGS_FRAME_802_3;
5309     prio = (uint8)PKTPRIO(PKTBUF);
5310 
5311 
5312     txdesc->flags |= (prio & 0x7) << BCMPCIE_PKT_FLAGS_PRIO_SHIFT;
5313     txdesc->seg_cnt = 1;
5314 
5315     txdesc->data_len = htol16((uint16) pktlen);
5316     txdesc->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
5317     txdesc->data_buf_addr.low_addr  = htol32(PHYSADDRLO(pa));
5318 
5319     /* Move data pointer to keep ether header in local PKTBUF for later reference */
5320     PKTPUSH(dhd->osh, PKTBUF, ETHER_HDR_LEN);
5321 
5322     /* Handle Tx metadata */
5323     headroom = (uint16)PKTHEADROOM(dhd->osh, PKTBUF);
5324     if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset))
5325         DHD_ERROR(("No headroom for Metadata tx %d %d\n",
5326         prot->tx_metadata_offset, headroom));
5327 
5328     if (prot->tx_metadata_offset && (headroom >= prot->tx_metadata_offset)) {
5329         DHD_TRACE(("Metadata in tx %d\n", prot->tx_metadata_offset));
5330 
5331         /* Adjust the data pointer to account for meta data in DMA_MAP */
5332         PKTPUSH(dhd->osh, PKTBUF, prot->tx_metadata_offset);
5333 
5334         if (SECURE_DMA_ENAB(dhd->osh)) {
5335             meta_pa = SECURE_DMA_MAP_TXMETA(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
5336                 prot->tx_metadata_offset + ETHER_HDR_LEN, DMA_RX, PKTBUF,
5337                 0, ring->dma_buf.secdma);
5338         }
5339 #ifndef BCM_SECURE_DMA
5340         else
5341             meta_pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
5342                 prot->tx_metadata_offset, DMA_RX, PKTBUF, 0);
5343 #endif /* #ifndef BCM_SECURE_DMA */
5344 
5345         if (PHYSADDRISZERO(meta_pa)) {
5346             /* Unmap the data pointer to a DMA-able address */
5347             if (SECURE_DMA_ENAB(dhd->osh)) {
5348                 int offset = 0;
5349                 BCM_REFERENCE(offset);
5350 
5351                 if (prot->tx_metadata_offset) {
5352                     offset = prot->tx_metadata_offset + ETHER_HDR_LEN;
5353                 }
5354 
5355                 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen,
5356                     DMA_TX, 0, DHD_DMAH_NULL, ring->dma_buf.secdma, offset);
5357             }
5358 #ifndef BCM_SECURE_DMA
5359             else {
5360                 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_TX, 0, DHD_DMAH_NULL);
5361             }
5362 #endif /* #ifndef BCM_SECURE_DMA */
5363 #ifdef TXP_FLUSH_NITEMS
5364             /* update pend_items_count */
5365             ring->pend_items_count--;
5366 #endif /* TXP_FLUSH_NITEMS */
5367 
5368             DHD_ERROR(("%s: Something really bad, unless 0 is "
5369                 "a valid phyaddr for meta_pa\n", __FUNCTION__));
5370             ASSERT(0);
5371             goto err_rollback_idx;
5372         }
5373 
5374         /* Adjust the data pointer back to original value */
5375         PKTPULL(dhd->osh, PKTBUF, prot->tx_metadata_offset);
5376 
5377         txdesc->metadata_buf_len = prot->tx_metadata_offset;
5378         txdesc->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(meta_pa));
5379         txdesc->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(meta_pa));
5380     } else {
5381         txdesc->metadata_buf_len = htol16(0);
5382         txdesc->metadata_buf_addr.high_addr = 0;
5383         txdesc->metadata_buf_addr.low_addr = 0;
5384     }
5385 
5386 #ifdef DHD_PKTID_AUDIT_RING
5387     DHD_PKTID_AUDIT(dhd, prot->pktid_tx_map, pktid, DHD_DUPLICATE_ALLOC);
5388 #endif /* DHD_PKTID_AUDIT_RING */
5389 
5390     txdesc->cmn_hdr.request_id = htol32(pktid);
5391 
5392     DHD_TRACE(("txpost: data_len %d, pktid 0x%04x\n", txdesc->data_len,
5393         txdesc->cmn_hdr.request_id));
5394 
5395     /* Update the write pointer in TCM & ring bell */
5396 #ifdef TXP_FLUSH_NITEMS
5397     /* Flush if we have either hit the txp_threshold or if this msg is */
5398     /* occupying the last slot in the flow_ring - before wrap around.  */
5399     if ((ring->pend_items_count == prot->txp_threshold) ||
5400         ((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring))) {
5401         dhd_prot_txdata_write_flush(dhd, flowid, TRUE);
5402     }
5403 #else
5404     /* update ring's WR index and ring doorbell to dongle */
5405     dhd_prot_ring_write_complete(dhd, ring, txdesc, 1);
5406 #endif
5407 
5408     prot->active_tx_count++;
5409 
5410     /*
5411      * Take a wake lock, do not sleep if we have at least one packet
5412      * to finish.
5413      */
5414     if (prot->active_tx_count >= 1)
5415         DHD_TXFL_WAKE_LOCK_TIMEOUT(dhd, MAX_TX_TIMEOUT);
5416 
5417     DHD_GENERAL_UNLOCK(dhd, flags);
5418 
5419 #ifdef PCIE_INB_DW
5420     dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
5421 #endif
5422 
5423     return BCME_OK;
5424 
5425 err_rollback_idx:
5426     /* roll back write pointer for unprocessed message */
5427     if (ring->wr == 0) {
5428         ring->wr = ring->max_items - 1;
5429     } else {
5430         ring->wr--;
5431         if (ring->wr == 0) {
5432             DHD_INFO(("%s: flipping the phase now\n", ring->name));
5433             ring->current_phase = ring->current_phase ?
5434                 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
5435         }
5436     }
5437 
5438 err_free_pktid:
5439 #if defined(DHD_PCIE_PKTID)
5440     {
5441         void *dmah;
5442         void *secdma;
5443         /* Free up the PKTID. physaddr and pktlen will be garbage. */
5444         DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid,
5445             pa, pktlen, dmah, secdma, PKTTYPE_NO_CHECK);
5446     }
5447 
5448 err_no_res_pktfree:
5449 #endif /* DHD_PCIE_PKTID */
5450 
5451 
5452 
5453     DHD_GENERAL_UNLOCK(dhd, flags);
5454 #ifdef PCIE_INB_DW
5455     dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
5456 #endif
5457     return BCME_NORESOURCE;
5458 } /* dhd_prot_txdata */
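
/*
 * With TXP_FLUSH_NITEMS enabled, dhd_prot_txdata() does not ring the doorbell
 * for every descriptor: it remembers the first pending descriptor in
 * ring->start_addr, counts pending items, and only pushes the write index when
 * either txp_threshold descriptors have accumulated or the descriptor occupies
 * the last slot before wrap-around. A condensed sketch of that flush
 * condition, mirroring the code above:
 *
 *     if ((ring->pend_items_count == prot->txp_threshold) ||
 *         ((uint8 *)txdesc == (uint8 *)DHD_RING_END_VA(ring)))
 *         dhd_prot_txdata_write_flush(dhd, flowid, TRUE);
 *
 * Anything still pending is expected to be pushed out by
 * dhd_prot_txdata_write_flush() below once the flow queue has been drained.
 */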
5459 
5460 /* called with a lock */
5461 /** optimization to write "n" tx items at a time to ring */
5462 void BCMFASTPATH
5463 dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flowid, bool in_lock)
5464 {
5465 #ifdef TXP_FLUSH_NITEMS
5466     unsigned long flags = 0;
5467     flow_ring_table_t *flow_ring_table;
5468     flow_ring_node_t *flow_ring_node;
5469     msgbuf_ring_t *ring;
5470 
5471     if (dhd->flow_ring_table == NULL) {
5472         return;
5473     }
5474 
5475     if (!in_lock) {
5476         DHD_GENERAL_LOCK(dhd, flags);
5477     }
5478 
5479     flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
5480     flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
5481     ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
5482 
5483     if (ring->pend_items_count) {
5484         /* update ring's WR index and ring doorbell to dongle */
5485         dhd_prot_ring_write_complete(dhd, ring, ring->start_addr,
5486             ring->pend_items_count);
5487         ring->pend_items_count = 0;
5488         ring->start_addr = NULL;
5489     }
5490 
5491     if (!in_lock) {
5492         DHD_GENERAL_UNLOCK(dhd, flags);
5493     }
5494 #endif /* TXP_FLUSH_NITEMS */
5495 }
5496 
5497 #undef PKTBUF    /* Only defined in the above routine */
5498 
5499 int BCMFASTPATH
5500 dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pkt, uchar *buf, uint *len)
5501 {
5502     return 0;
5503 }
5504 
5505 /** post a set of receive buffers to the dongle */
5506 static void BCMFASTPATH
5507 dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt)
5508 {
5509     dhd_prot_t *prot = dhd->prot;
5510 #if defined(DHD_LB_RXC)
5511     int elem_ix;
5512     uint32 *elem;
5513     bcm_workq_t *workq;
5514 
5515     workq = &prot->rx_compl_prod;
5516 
5517     /* Produce the work item */
5518     elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
5519     if (elem_ix == BCM_RING_FULL) {
5520         DHD_ERROR(("%s LB RxCompl workQ is full\n", __FUNCTION__));
5521         ASSERT(0);
5522         return;
5523     }
5524 
5525     elem = WORKQ_ELEMENT(uint32, workq, elem_ix);
5526     *elem = pktid;
5527 
5528     smp_wmb();
5529 
5530     /* Sync WR index to consumer if the SYNC threshold has been reached */
5531     if (++prot->rx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) {
5532         bcm_workq_prod_sync(workq);
5533         prot->rx_compl_prod_sync = 0;
5534     }
5535 
5536     DHD_INFO(("%s: rx_compl_prod pktid<%u> sync<%d>\n",
5537         __FUNCTION__, pktid, prot->rx_compl_prod_sync));
5538 
5539 #endif /* DHD_LB_RXC */
5540 
5541     if (prot->rxbufpost >= rxcnt) {
5542         prot->rxbufpost -= (uint16)rxcnt;
5543     } else {
5544         prot->rxbufpost = 0;
5545     }
5546 
5547 #if !defined(DHD_LB_RXC)
5548     if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD))
5549         dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
5550 #endif /* !DHD_LB_RXC */
5551     return;
5552 }
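
/*
 * Rx buffer replenishment: prot->rxbufpost tracks how many rx buffers the host
 * currently has posted to the dongle. Each batch of completions decrements it
 * by rxcnt, and once it drops RXBUFPOST_THRESHOLD below max_rxbufpost a fresh
 * post is triggered. A condensed sketch of the refill rule used above:
 *
 *     if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD))
 *         dhd_msgbuf_rxbuf_post(dhd, FALSE);
 *
 * With DHD_LB_RXC the freed pktid is instead produced into the rx_compl_prod
 * workq so a load-balanced worker performs the repost.
 */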
5553 
5554 /* called before an ioctl is sent to the dongle */
5555 static void
5556 dhd_prot_wlioctl_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf)
5557 {
5558     dhd_prot_t *prot = dhd->prot;
5559 
5560     if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "pcie_bus_tput")) {
5561         int slen = 0;
5562         pcie_bus_tput_params_t *tput_params;
5563 
5564         slen = strlen("pcie_bus_tput") + 1;
5565         tput_params = (pcie_bus_tput_params_t*)((char *)buf + slen);
5566         bcopy(&prot->host_bus_throughput_buf.pa, &tput_params->host_buf_addr,
5567             sizeof(tput_params->host_buf_addr));
5568         tput_params->host_buf_len = DHD_BUS_TPUT_BUF_LEN;
5569     }
5570 }
5571 
5572 
5573 /** Use protocol to issue ioctl to dongle. Only one ioctl may be in transit. */
5574 int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len)
5575 {
5576     int ret = -1;
5577     uint8 action;
5578 
5579     if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) {
5580         DHD_ERROR(("%s : bus is down. we have nothing to do - bs: %d, has: %d\n",
5581                 __FUNCTION__, dhd->busstate, dhd->hang_was_sent));
5582         goto done;
5583     }
5584 
5585     if (dhd->busstate == DHD_BUS_SUSPEND) {
5586         DHD_ERROR(("%s : bus is suspended\n", __FUNCTION__));
5587         goto done;
5588     }
5589 
5590     DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5591 
5592     if (ioc->cmd == WLC_SET_PM) {
5593         DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__, buf ? *(char *)buf : 0));
5594     }
5595 
5596     ASSERT(len <= WLC_IOCTL_MAXLEN);
5597 
5598     if (len > WLC_IOCTL_MAXLEN)
5599         goto done;
5600 
5601     action = ioc->set;
5602 
5603     dhd_prot_wlioctl_intercept(dhd, ioc, buf);
5604 
5605     if (action & WL_IOCTL_ACTION_SET) {
5606         ret = dhd_msgbuf_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
5607     } else {
5608         ret = dhd_msgbuf_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
5609         if (ret > 0)
5610             ioc->used = ret;
5611     }
5612 
5613     /* Too many programs assume ioctl() returns 0 on success */
5614     if (ret >= 0) {
5615         ret = 0;
5616     } else {
5617         DHD_INFO(("%s: status ret value is %d \n", __FUNCTION__, ret));
5618         dhd->dongle_error = ret;
5619     }
5620 
5621     if (!ret && ioc->cmd == WLC_SET_VAR && buf != NULL) {
5622         /* Intercept the wme_dp ioctl here */
5623         if (!strcmp(buf, "wme_dp")) {
5624             int slen, val = 0;
5625 
5626             slen = strlen("wme_dp") + 1;
5627             if (len >= (int)(slen + sizeof(int)))
5628                 bcopy(((char *)buf + slen), &val, sizeof(int));
5629             dhd->wme_dp = (uint8) ltoh32(val);
5630         }
5631     }
5632 
5633 done:
5634     return ret;
5635 } /* dhd_prot_ioctl */
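
/*
 * Typical caller usage: fill a wl_ioctl_t and let dhd_prot_ioctl() pick the
 * set or query path from ioc->set. A minimal sketch, assuming a caller that
 * already holds a valid dhd_pub_t and wants to read the firmware "ver" iovar
 * (buffer name and size are illustrative only):
 *
 *     char buf[128] = "ver";
 *     wl_ioctl_t ioc;
 *     memset(&ioc, 0, sizeof(ioc));
 *     ioc.cmd = WLC_GET_VAR;
 *     ioc.buf = buf;
 *     ioc.len = sizeof(buf);
 *     ioc.set = FALSE;
 *     if (dhd_prot_ioctl(dhd, 0, &ioc, buf, sizeof(buf)) == 0)
 *         (buf now holds the NUL-terminated version string)
 *
 * Only one ioctl may be in transit at a time; serialization is expected to be
 * handled by the callers in the wl/dhd ioctl entry points.
 */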
5636 
5637 /** test / loopback */
5638 
5639 int
5640 dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len)
5641 {
5642     unsigned long flags;
5643     dhd_prot_t *prot = dhd->prot;
5644     uint16 alloced = 0;
5645 
5646     ioct_reqst_hdr_t *ioct_rqst;
5647 
5648     uint16 hdrlen = sizeof(ioct_reqst_hdr_t);
5649     uint16 msglen = len + hdrlen;
5650     msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
5651 
5652     msglen = ALIGN_SIZE(msglen, DMA_ALIGN_LEN);
5653     msglen = LIMIT_TO_MAX(msglen, MSGBUF_MAX_MSG_SIZE);
5654 
5655 #ifdef PCIE_INB_DW
5656     if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
5657         return BCME_ERROR;
5658 #endif /* PCIE_INB_DW */
5659 
5660     DHD_GENERAL_LOCK(dhd, flags);
5661 
5662     ioct_rqst = (ioct_reqst_hdr_t *)
5663         dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
5664 
5665     if (ioct_rqst == NULL) {
5666         DHD_GENERAL_UNLOCK(dhd, flags);
5667 #ifdef PCIE_INB_DW
5668         dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
5669 #endif
5670         return 0;
5671     }
5672 
5673     {
5674         uint8 *ptr;
5675         uint16 i;
5676 
5677         ptr = (uint8 *)ioct_rqst;
5678         for (i = 0; i < msglen; i++) {
5679             ptr[i] = i % 256;
5680         }
5681     }
5682 
5683     /* Common msg buf hdr */
5684     ioct_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
5685     ring->seqnum++;
5686 
5687     ioct_rqst->msg.msg_type = MSG_TYPE_LOOPBACK;
5688     ioct_rqst->msg.if_id = 0;
5689     ioct_rqst->msg.flags = ring->current_phase;
5690 
5691     bcm_print_bytes("LPBK REQ: ", (uint8 *)ioct_rqst, msglen);
5692 
5693     /* update ring's WR index and ring doorbell to dongle */
5694     dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
5695     DHD_GENERAL_UNLOCK(dhd, flags);
5696 #ifdef PCIE_INB_DW
5697     dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
5698 #endif
5699 
5700     return 0;
5701 }
5702 
5703 /** test / loopback */
5704 void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dmaxfer)
5705 {
5706     if (dmaxfer == NULL)
5707         return;
5708 
5709     dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
5710     dhd_dma_buf_free(dhd, &dmaxfer->dstmem);
5711 }
5712 
5713 /** test / loopback */
5714 int
5715 dhd_prepare_schedule_dmaxfer_free(dhd_pub_t *dhdp)
5716 {
5717     dhd_prot_t *prot = dhdp->prot;
5718     dhd_dmaxfer_t *dmaxfer = &prot->dmaxfer;
5719     dmaxref_mem_map_t *dmap = NULL;
5720 
5721     dmap = MALLOCZ(dhdp->osh, sizeof(dmaxref_mem_map_t));
5722     if (!dmap) {
5723         DHD_ERROR(("%s: dmap alloc failed\n", __FUNCTION__));
5724         goto mem_alloc_fail;
5725     }
5726     dmap->srcmem = &(dmaxfer->srcmem);
5727     dmap->dstmem = &(dmaxfer->dstmem);
5728 
5729     DMAXFER_FREE(dhdp, dmap);
5730     return BCME_OK;
5731 
5732 mem_alloc_fail:
5733     if (dmap) {
5734         MFREE(dhdp->osh, dmap, sizeof(dmaxref_mem_map_t));
5735         dmap = NULL;
5736     }
5737     return BCME_NOMEM;
5738 } /* dhd_prepare_schedule_dmaxfer_free */
5739 
5740 
5741 /** test / loopback */
5742 void
5743 dmaxfer_free_prev_dmaaddr(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap)
5744 {
5745     dhd_dma_buf_free(dhdp, dmmap->srcmem);
5746     dhd_dma_buf_free(dhdp, dmmap->dstmem);
5747 
5748     MFREE(dhdp->osh, dmmap, sizeof(dmaxref_mem_map_t));
5749     dmmap = NULL;
5750 } /* dmaxfer_free_prev_dmaaddr */
5751 
5752 
5753 /** test / loopback */
5754 int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len,
5755     uint srcdelay, uint destdelay, dhd_dmaxfer_t *dmaxfer)
5756 {
5757     uint i;
5758     if (!dmaxfer)
5759         return BCME_ERROR;
5760 
5761     /* First free up existing buffers */
5762     dmaxfer_free_dmaaddr(dhd, dmaxfer);
5763 
5764     if (dhd_dma_buf_alloc(dhd, &dmaxfer->srcmem, len)) {
5765         return BCME_NOMEM;
5766     }
5767 
5768     if (dhd_dma_buf_alloc(dhd, &dmaxfer->dstmem, len + 8)) {
5769         dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
5770         return BCME_NOMEM;
5771     }
5772 
5773     dmaxfer->len = len;
5774 
5775     /* Populate source with a pattern */
5776     for (i = 0; i < dmaxfer->len; i++) {
5777         ((uint8*)dmaxfer->srcmem.va)[i] = i % 256;
5778     }
5779     OSL_CACHE_FLUSH(dmaxfer->srcmem.va, dmaxfer->len);
5780 
5781     dmaxfer->srcdelay = srcdelay;
5782     dmaxfer->destdelay = destdelay;
5783 
5784     return BCME_OK;
5785 } /* dmaxfer_prepare_dmaaddr */
5786 
5787 static void
5788 dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg)
5789 {
5790     dhd_prot_t *prot = dhd->prot;
5791     uint64 end_usec;
5792     pcie_dmaxfer_cmplt_t *cmplt = (pcie_dmaxfer_cmplt_t *)msg;
5793 
5794     BCM_REFERENCE(cmplt);
5795     DHD_INFO(("DMA status: %d\n", cmplt->compl_hdr.status));
5796     OSL_CACHE_INV(prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
5797     if (prot->dmaxfer.srcmem.va && prot->dmaxfer.dstmem.va) {
5798         if (memcmp(prot->dmaxfer.srcmem.va,
5799                 prot->dmaxfer.dstmem.va, prot->dmaxfer.len)) {
5800             prhex("XFER SRC: ",
5801                 prot->dmaxfer.srcmem.va, prot->dmaxfer.len);
5802             prhex("XFER DST: ",
5803                 prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
5804             DHD_ERROR(("DMA failed\n"));
5805         }
5806         else {
5807             if (prot->dmaxfer.d11_lpbk) {
5808                 DHD_ERROR(("DMA successful with d11 loopback\n"));
5809             } else {
5810                 DHD_ERROR(("DMA successful without d11 loopback\n"));
5811             }
5812         }
5813     }
5814     end_usec = OSL_SYSUPTIME_US();
5815     dhd_prepare_schedule_dmaxfer_free(dhd);
5816     end_usec -= prot->dmaxfer.start_usec;
5817     DHD_ERROR(("DMA loopback %d bytes in %llu usec, %u kBps\n",
5818         prot->dmaxfer.len, end_usec,
5819         (prot->dmaxfer.len * (1000 * 1000 / 1024) / (uint32)(end_usec + 1))));
5820     dhd->prot->dmaxfer.in_progress = FALSE;
5821 }
5822 
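/*
 * Worked example of the kBps figure logged above (illustrative numbers only):
 * (1000 * 1000 / 1024) evaluates to 976 with integer division, so a 65536
 * byte loopback that completes in 2000 usec reports roughly
 * 65536 * 976 / (2000 + 1) ~= 31965 kBps (about 31 MB/s).
 */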
5823 /** Test functionality.
5824  * Transfers bytes from host to dongle and to host again using DMA
5825  * This function is not reentrant, as prot->dmaxfer.in_progress is not protected
5826  * by a spinlock.
5827  */
5828 int
5829 dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay, uint d11_lpbk)
5830 {
5831     unsigned long flags;
5832     int ret = BCME_OK;
5833     dhd_prot_t *prot = dhd->prot;
5834     pcie_dma_xfer_params_t *dmap;
5835     uint32 xferlen = LIMIT_TO_MAX(len, DMA_XFER_LEN_LIMIT);
5836     uint16 alloced = 0;
5837     msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
5838 
5839     if (prot->dmaxfer.in_progress) {
5840         DHD_ERROR(("DMA is in progress...\n"));
5841         return ret;
5842     }
5843 
5844     prot->dmaxfer.in_progress = TRUE;
5845     if ((ret = dmaxfer_prepare_dmaaddr(dhd, xferlen, srcdelay, destdelay,
5846             &prot->dmaxfer)) != BCME_OK) {
5847         prot->dmaxfer.in_progress = FALSE;
5848         return ret;
5849     }
5850 
5851 #ifdef PCIE_INB_DW
5852     if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
5853         return BCME_ERROR;
5854 #endif /* PCIE_INB_DW */
5855 
5856     DHD_GENERAL_LOCK(dhd, flags);
5857 
5858     dmap = (pcie_dma_xfer_params_t *)
5859         dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
5860 
5861     if (dmap == NULL) {
5862         dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
5863         prot->dmaxfer.in_progress = FALSE;
5864         DHD_GENERAL_UNLOCK(dhd, flags);
5865 #ifdef PCIE_INB_DW
5866         dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
5867 #endif
5868         return BCME_NOMEM;
5869     }
5870 
5871     /* Common msg buf hdr */
5872     dmap->cmn_hdr.msg_type = MSG_TYPE_LPBK_DMAXFER;
5873     dmap->cmn_hdr.request_id = htol32(DHD_FAKE_PKTID);
5874     dmap->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
5875     dmap->cmn_hdr.flags = ring->current_phase;
5876     ring->seqnum++;
5877 
5878     dmap->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.srcmem.pa));
5879     dmap->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.srcmem.pa));
5880     dmap->host_ouput_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.dstmem.pa));
5881     dmap->host_ouput_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.dstmem.pa));
5882     dmap->xfer_len = htol32(prot->dmaxfer.len);
5883     dmap->srcdelay = htol32(prot->dmaxfer.srcdelay);
5884     dmap->destdelay = htol32(prot->dmaxfer.destdelay);
5885     prot->dmaxfer.d11_lpbk = d11_lpbk ? 1 : 0;
5886     dmap->flags = (prot->dmaxfer.d11_lpbk << PCIE_DMA_XFER_FLG_D11_LPBK_SHIFT)
5887             & PCIE_DMA_XFER_FLG_D11_LPBK_MASK;
5888 
5889     /* update ring's WR index and ring doorbell to dongle */
5890     prot->dmaxfer.start_usec = OSL_SYSUPTIME_US();
5891     dhd_prot_ring_write_complete(dhd, ring, dmap, 1);
5892     DHD_GENERAL_UNLOCK(dhd, flags);
5893 #ifdef PCIE_INB_DW
5894     dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
5895 #endif
5896 
5897     DHD_INFO(("DMA Started...\n"));
5898 
5899     return BCME_OK;
5900 } /* dhdmsgbuf_dmaxfer_req */
5901 
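/*
 * Minimal usage sketch (illustrative only, not compiled): how the loopback
 * DMA test above might be kicked off by a caller that already holds a valid
 * dhd_pub_t. The example function name is hypothetical; completion is
 * reported asynchronously via dhd_msgbuf_dmaxfer_process().
 */
#if 0
static int
dhd_dmaxfer_selftest_example(dhd_pub_t *dhd)
{
    /* 64KB host -> dongle -> host copy, no src/dst delay, no d11 loopback */
    return dhdmsgbuf_dmaxfer_req(dhd, 64 * 1024, 0, 0, 0);
}
#endif /* 0 */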
5902 /** Called in the process of submitting an ioctl to the dongle */
5903 static int
5904 dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
5905 {
5906     int ret = 0;
5907     uint copylen = 0;
5908 
5909     DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5910 
5911     if (cmd == WLC_GET_VAR && buf)
5912     {
5913         if (!len || !*(uint8 *)buf) {
5914             DHD_ERROR(("%s(): Zero length bailing\n", __FUNCTION__));
5915             ret = BCME_BADARG;
5916             goto done;
5917         }
5918 
5919         /* Respond "bcmerror" and "bcmerrorstr" with local cache */
5920         copylen = MIN(len, BCME_STRLEN);
5921 
5922         if ((len >= strlen("bcmerrorstr")) &&
5923             (!strcmp((char *)buf, "bcmerrorstr"))) {
5924             strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), copylen);
5925             *(uint8 *)((uint8 *)buf + (copylen - 1)) = '\0';
5926 
5927             goto done;
5928         } else if ((len >= strlen("bcmerror")) &&
5929             !strcmp((char *)buf, "bcmerror")) {
5930             *(uint32 *)buf = dhd->dongle_error;
5931 
5932             goto done;
5933         }
5934     }
5935 
5936 
5937     DHD_CTL(("query_ioctl: ACTION %d ifidx %d cmd %d len %d \n",
5938         action, ifidx, cmd, len));
5939 #ifdef REPORT_FATAL_TIMEOUTS
5940     /*
5941      * These timers "should" be started before sending the H2D interrupt.
5942      * Consider the scenario where the H2D interrupt is fired and the dongle
5943      * responds back immediately. From the DPC we would stop the cmd and bus
5944      * timers, but the process context could have switched out, leading to
5945      * a situation where the timers are not yet started but have already been stopped.
5946      *
5947      * Disable preemption from the time we start the timers until we are done
5948      * sending H2D interrupts.
5949      */
5950     OSL_DISABLE_PREEMPTION(dhd->osh);
5951     dhd_set_request_id(dhd, dhd->prot->ioctl_trans_id+1, cmd);
5952     dhd_start_cmd_timer(dhd);
5953     dhd_start_bus_timer(dhd);
5954 #endif /* REPORT_FATAL_TIMEOUTS */
5955 
5956     ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
5957 
5958 #ifdef REPORT_FATAL_TIMEOUTS
5959     /* If for some reason we fail to ring the doorbell, stop the timers */
5960     if (ret < 0) {
5961         DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
5962         dhd_stop_cmd_timer(dhd);
5963         dhd_stop_bus_timer(dhd);
5964         OSL_ENABLE_PREEMPTION(dhd->osh);
5965         goto done;
5966     }
5967     OSL_ENABLE_PREEMPTION(dhd->osh);
5968 #else
5969     if (ret < 0) {
5970         DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
5971         goto done;
5972     }
5973 #endif /* REPORT_FATAL_TIMEOUTS */
5974 
5975     /* wait for IOCTL completion message from dongle and get first fragment */
5976     ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
5977 
5978 done:
5979     return ret;
5980 }
5981 
5982 /**
5983  * Waits for IOCTL completion message from the dongle, copies this into caller
5984  * provided parameter 'buf'.
5985  */
5986 static int
5987 dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf)
5988 {
5989     dhd_prot_t *prot = dhd->prot;
5990     int timeleft;
5991     unsigned long flags;
5992     int ret = 0;
5993     static uint cnt = 0;
5994 
5995     DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5996 
5997     if (dhd_query_bus_erros(dhd)) {
5998         ret = -EIO;
5999         goto out;
6000     }
6001 
6002     timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received, false);
6003 
6004 #ifdef DHD_RECOVER_TIMEOUT
6005     if (prot->ioctl_received == 0) {
6006         uint32 intstatus = 0;
6007         uint32 intmask = 0;
6008         intstatus = si_corereg(dhd->bus->sih,
6009             dhd->bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
6010         intmask = si_corereg(dhd->bus->sih,
6011             dhd->bus->sih->buscoreidx, PCIMailBoxMask, 0, 0);
6012         if ((intstatus) && (!intmask) && (timeleft == 0) && (!dhd_query_bus_erros(dhd)))
6013         {
6014             DHD_ERROR(("%s: iovar timeout trying again intstatus=%x intmask=%x\n",
6015                 __FUNCTION__, intstatus, intmask));
6016             DHD_ERROR(("\n ------- DUMPING INTR enable/disable counters\r\n"));
6017             DHD_ERROR(("resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n"
6018                 "isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n"
6019                 "dpc_return_busdown_count=%lu\n",
6020                 dhd->bus->resume_intr_enable_count, dhd->bus->dpc_intr_enable_count,
6021                 dhd->bus->isr_intr_disable_count,
6022                 dhd->bus->suspend_intr_disable_count,
6023                 dhd->bus->dpc_return_busdown_count));
6024 
6025             dhd_prot_process_ctrlbuf(dhd);
6026 
6027             timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received, false);
6028             /* Enable Back Interrupts using IntMask */
6029             dhdpcie_bus_intr_enable(dhd->bus);
6030         }
6031     }
6032 #endif /* DHD_RECOVER_TIMEOUT */
6033 
6034     if (dhd->conf->ctrl_resched > 0 && timeleft == 0 && (!dhd_query_bus_erros(dhd))) {
6035         cnt++;
6036         if (cnt <= dhd->conf->ctrl_resched) {
6037             uint32 intstatus = 0, intmask = 0;
6038             intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
6039             intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxMask, 0, 0);
6040             if (intstatus) {
6041                 DHD_ERROR(("%s: reschedule dhd_dpc, cnt=%d, intstatus=0x%x, intmask=0x%x\n",
6042                     __FUNCTION__, cnt, intstatus, intmask));
6043                 dhd->bus->intstatus = intstatus;
6044                 dhd->bus->ipend = TRUE;
6045                 dhd->bus->dpc_sched = TRUE;
6046                 dhd_sched_dpc(dhd);
6047                 timeleft = dhd_os_ioctl_resp_wait(dhd, &prot->ioctl_received, true);
6048             }
6049         }
6050     } else {
6051         cnt = 0;
6052     }
6053 
6054     if (timeleft == 0 && (!dhd_query_bus_erros(dhd))) {
6055         uint32 intstatus;
6056 
6057         dhd->rxcnt_timeout++;
6058         dhd->rx_ctlerrs++;
6059         dhd->iovar_timeout_occured = TRUE;
6060         DHD_ERROR(("%s: resumed on timeout rxcnt_timeout %d ioctl_cmd %d "
6061             "trans_id %d state %d busstate=%d ioctl_received=%d\n",
6062             __FUNCTION__, dhd->rxcnt_timeout, prot->curr_ioctl_cmd,
6063             prot->ioctl_trans_id, prot->ioctl_state,
6064             dhd->busstate, prot->ioctl_received));
6065         if (prot->curr_ioctl_cmd == WLC_SET_VAR ||
6066             prot->curr_ioctl_cmd == WLC_GET_VAR) {
6067             char iovbuf[32];
6068             int i;
6069             int dump_size = 128;
6070             uint8 *ioctl_buf = (uint8 *)prot->ioctbuf.va;
6071             memset(iovbuf, 0, sizeof(iovbuf));
6072             strncpy(iovbuf, ioctl_buf, sizeof(iovbuf) - 1);
6073             iovbuf[sizeof(iovbuf) - 1] = '\0';
6074             DHD_ERROR(("Current IOVAR (%s): %s\n",
6075                 prot->curr_ioctl_cmd == WLC_SET_VAR ?
6076                 "WLC_SET_VAR" : "WLC_GET_VAR", iovbuf));
6077             DHD_ERROR(("========== START IOCTL REQBUF DUMP ==========\n"));
6078             for (i = 0; i < dump_size; i++) {
6079                 DHD_ERROR(("%02X ", ioctl_buf[i]));
6080                 if ((i % 32) == 31) {
6081                     DHD_ERROR(("\n"));
6082                 }
6083             }
6084             DHD_ERROR(("\n========== END IOCTL REQBUF DUMP ==========\n"));
6085         }
6086 
6087         /* Check the PCIe link status by reading intstatus register */
6088         intstatus = si_corereg(dhd->bus->sih,
6089             dhd->bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
6090         if (intstatus == (uint32)-1) {
6091             DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
6092             dhd->bus->is_linkdown = TRUE;
6093         }
6094 
6095         dhd_bus_dump_console_buffer(dhd->bus);
6096         dhd_prot_debug_info_print(dhd);
6097 
6098 #ifdef DHD_FW_COREDUMP
6099         /* Collect socram dump */
6100         if (dhd->memdump_enabled) {
6101             /* collect core dump */
6102             dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT;
6103             dhd_bus_mem_dump(dhd);
6104         }
6105 #endif /* DHD_FW_COREDUMP */
6106 #ifdef SUPPORT_LINKDOWN_RECOVERY
6107 #ifdef CONFIG_ARCH_MSM
6108         dhd->bus->no_cfg_restore = 1;
6109 #endif /* CONFIG_ARCH_MSM */
6110 #endif /* SUPPORT_LINKDOWN_RECOVERY */
6111         ret = -ETIMEDOUT;
6112         goto out;
6113     } else {
6114         if (prot->ioctl_received != IOCTL_RETURN_ON_SUCCESS) {
6115             DHD_ERROR(("%s: IOCTL failure due to ioctl_received = %d\n",
6116                 __FUNCTION__, prot->ioctl_received));
6117             ret = -EINVAL;
6118             goto out;
6119         }
6120         dhd->rxcnt_timeout = 0;
6121         dhd->rx_ctlpkts++;
6122         DHD_CTL(("%s: ioctl resp resumed, got %d\n",
6123             __FUNCTION__, prot->ioctl_resplen));
6124     }
6125 
6126     if (dhd->prot->ioctl_resplen > len)
6127         dhd->prot->ioctl_resplen = (uint16)len;
6128     if (buf)
6129         bcopy(dhd->prot->retbuf.va, buf, dhd->prot->ioctl_resplen);
6130 
6131     ret = (int)(dhd->prot->ioctl_status);
6132 
6133 out:
6134     DHD_GENERAL_LOCK(dhd, flags);
6135     dhd->prot->ioctl_state = 0;
6136     dhd->prot->ioctl_resplen = 0;
6137     dhd->prot->ioctl_received = IOCTL_WAIT;
6138     dhd->prot->curr_ioctl_cmd = 0;
6139     DHD_GENERAL_UNLOCK(dhd, flags);
6140 
6141     return ret;
6142 } /* dhd_msgbuf_wait_ioctl_cmplt */
6143 
6144 static int
6145 dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
6146 {
6147     int ret = 0;
6148 
6149     DHD_TRACE(("%s: Enter \n", __FUNCTION__));
6150 
6151     if (dhd->busstate == DHD_BUS_DOWN) {
6152         DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
6153         return -EIO;
6154     }
6155 
6156     /* don't talk to the dongle if fw is about to be reloaded */
6157     if (dhd->hang_was_sent) {
6158         DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
6159             __FUNCTION__));
6160         return -EIO;
6161     }
6162 
6163     DHD_CTL(("ACTION %d ifidx %d cmd %d len %d \n",
6164         action, ifidx, cmd, len));
6165 
6166 #ifdef REPORT_FATAL_TIMEOUTS
6167     /*
6168      * These timers "should" be started before sending the H2D interrupt.
6169      * Consider the scenario where the H2D interrupt is fired and the dongle
6170      * responds back immediately. From the DPC we would stop the cmd and bus
6171      * timers, but the process context could have switched out, leading to
6172      * a situation where the timers are not yet started but have already been stopped.
6173      *
6174      * Disable preemption from the time we start the timers until we are done
6175      * sending H2D interrupts.
6176      */
6177     OSL_DISABLE_PREEMPTION(dhd->osh);
6178     dhd_set_request_id(dhd, dhd->prot->ioctl_trans_id+1, cmd);
6179     dhd_start_cmd_timer(dhd);
6180     dhd_start_bus_timer(dhd);
6181 #endif /* REPORT_FATAL_TIMEOUTS */
6182 
6183     /* Fill up msgbuf for ioctl req */
6184     ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
6185 
6186 #ifdef REPORT_FATAL_TIMEOUTS
6187     /* If for some reason we fail to ring the doorbell, stop the timers */
6188     if (ret < 0) {
6189         DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
6190         dhd_stop_cmd_timer(dhd);
6191         dhd_stop_bus_timer(dhd);
6192         OSL_ENABLE_PREEMPTION(dhd->osh);
6193         goto done;
6194     }
6195 
6196     OSL_ENABLE_PREEMPTION(dhd->osh);
6197 #else
6198     if (ret < 0) {
6199         DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
6200         goto done;
6201     }
6202 #endif /* REPORT_FATAL_TIMEOUTS */
6203 
6204     ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
6205 
6206 done:
6207     return ret;
6208 }
6209 
6210 /** Called by upper DHD layer. Handles a protocol control response asynchronously. */
6211 int dhd_prot_ctl_complete(dhd_pub_t *dhd)
6212 {
6213     return 0;
6214 }
6215 
6216 /** Called by upper DHD layer. Check for and handle local prot-specific iovar commands */
6217 int dhd_prot_iovar_op(dhd_pub_t *dhd, const char *name,
6218                              void *params, int plen, void *arg, int len, bool set)
6219 {
6220     return BCME_UNSUPPORTED;
6221 }
6222 
6223 /** Add prot dump output to a buffer */
6224 void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
6225 {
6226     if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM)
6227         bcm_bprintf(b, "\nd2h_sync: SEQNUM:");
6228     else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM)
6229         bcm_bprintf(b, "\nd2h_sync: XORCSUM:");
6230     else
6231         bcm_bprintf(b, "\nd2h_sync: NONE:");
6232     bcm_bprintf(b, " d2h_sync_wait max<%lu> tot<%lu>\n",
6233         dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot);
6234 
6235     bcm_bprintf(b, "\nDongle DMA Indices: h2d %d  d2h %d index size %d bytes\n",
6236         dhd->dma_h2d_ring_upd_support,
6237         dhd->dma_d2h_ring_upd_support,
6238         dhd->prot->rw_index_sz);
6239     bcm_bprintf(b, "h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
6240         h2d_max_txpost, dhd->prot->h2d_max_txpost);
6241 }
6242 
6243 /* Update local copy of dongle statistics */
6244 void dhd_prot_dstats(dhd_pub_t *dhd)
6245 {
6246     return;
6247 }
6248 
6249 /** Called by upper DHD layer */
6250 int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf,
6251     uint reorder_info_len, void **pkt, uint32 *free_buf_count)
6252 {
6253     return 0;
6254 }
6255 
6256 /** Debug related, post a dummy message to interrupt dongle. Used to process cons commands. */
6257 int
6258 dhd_post_dummy_msg(dhd_pub_t *dhd)
6259 {
6260     unsigned long flags;
6261     hostevent_hdr_t *hevent = NULL;
6262     uint16 alloced = 0;
6263 
6264     dhd_prot_t *prot = dhd->prot;
6265     msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
6266 
6267 #ifdef PCIE_INB_DW
6268     if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
6269         return BCME_ERROR;
6270 #endif /* PCIE_INB_DW */
6271 
6272     DHD_GENERAL_LOCK(dhd, flags);
6273 
6274     hevent = (hostevent_hdr_t *)
6275         dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
6276 
6277     if (hevent == NULL) {
6278         DHD_GENERAL_UNLOCK(dhd, flags);
6279 #ifdef PCIE_INB_DW
6280         dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
6281 #endif
6282         return -1;
6283     }
6284 
6285     /* CMN msg header */
6286     hevent->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
6287     ring->seqnum++;
6288     hevent->msg.msg_type = MSG_TYPE_HOST_EVNT;
6289     hevent->msg.if_id = 0;
6290     hevent->msg.flags = ring->current_phase;
6291 
6292     /* Event payload */
6293     hevent->evnt_pyld = htol32(HOST_EVENT_CONS_CMD);
6294 
6295     /* Since we are filling the data directly into the bufptr obtained
6296      * from the msgbuf, we can call write_complete directly.
6297      */
6298     dhd_prot_ring_write_complete(dhd, ring, hevent, 1);
6299     DHD_GENERAL_UNLOCK(dhd, flags);
6300 #ifdef PCIE_INB_DW
6301     dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
6302 #endif
6303 
6304     return 0;
6305 }
6306 
6307 /**
6308  * If exactly_nitems is true, this function will allocate space for nitems or fail
6309  * If exactly_nitems is false, this function will allocate space for nitems or less
6310  */
6311 static void * BCMFASTPATH
6312 dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring,
6313     uint16 nitems, uint16 * alloced, bool exactly_nitems)
6314 {
6315     void * ret_buf;
6316 
6317     /* Alloc space for nitems in the ring */
6318     ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
6319 
6320     if (ret_buf == NULL) {
6321         /* if alloc failed, invalidate the cached read ptr */
6322         if (dhd->dma_d2h_ring_upd_support) {
6323             ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
6324         } else {
6325             dhd_bus_cmn_readshared(dhd->bus, &(ring->rd), RING_RD_UPD, ring->idx);
6326 #ifdef SUPPORT_LINKDOWN_RECOVERY
6327             /* Check if ring->rd is valid */
6328             if (ring->rd >= ring->max_items) {
6329                 dhd->bus->read_shm_fail = TRUE;
6330                 DHD_ERROR(("%s: Invalid rd idx=%d\n", ring->name, ring->rd));
6331                 return NULL;
6332             }
6333 #endif /* SUPPORT_LINKDOWN_RECOVERY */
6334         }
6335 
6336         /* Try allocating once more */
6337         ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
6338 
6339         if (ret_buf == NULL) {
6340             DHD_INFO(("%s: Ring space not available  \n", ring->name));
6341             return NULL;
6342         }
6343     }
6344 
6345     if (ret_buf == HOST_RING_BASE(ring)) {
6346         DHD_INFO(("%s: setting the phase now\n", ring->name));
6347         ring->current_phase = ring->current_phase ? 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
6348     }
6349 
6350     /* Return alloced space */
6351     return ret_buf;
6352 }
6353 
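/*
 * Illustrative only (not compiled): the canonical H2D submission sequence
 * used by callers of dhd_prot_alloc_ring_space() in this file (see e.g.
 * dhd_post_dummy_msg() above). All of these steps run under DHD_GENERAL_LOCK.
 */
#if 0
    DHD_GENERAL_LOCK(dhd, flags);
    msg = dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
    if (msg == NULL) {                               /* no space in the ring */
        DHD_GENERAL_UNLOCK(dhd, flags);
        return BCME_NOMEM;
    }
    msg->epoch = ring->seqnum % H2D_EPOCH_MODULO;    /* per-ring sequence number */
    ring->seqnum++;
    msg->flags = ring->current_phase;                /* phase bit expected by dongle */
    /* ... fill in the message-type specific fields ... */
    dhd_prot_ring_write_complete(dhd, ring, msg, 1); /* publish WR index + doorbell */
    DHD_GENERAL_UNLOCK(dhd, flags);
#endif /* 0 */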
6354 /**
6355  * Non inline ioct request.
6356  * Form a ioctl request first as per ioctptr_reqst_hdr_t header in the circular buffer
6357  * Form a separate request buffer where a 4 byte cmn header is added in the front
6358  * buf contents from parent function is copied to remaining section of this buffer
6359  */
6360 static int
6361 dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf, int ifidx)
6362 {
6363     dhd_prot_t *prot = dhd->prot;
6364     ioctl_req_msg_t *ioct_rqst;
6365     void * ioct_buf;    /* For ioctl payload */
6366     uint16  rqstlen, resplen;
6367     unsigned long flags;
6368     uint16 alloced = 0;
6369     msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
6370 
6371     if (dhd_query_bus_erros(dhd)) {
6372         return -EIO;
6373     }
6374 
6375     rqstlen = len;
6376     resplen = len;
6377 
6378     /* Limit ioct request to MSGBUF_MAX_MSG_SIZE bytes including hdrs */
6379     /* An 8K allocation of the dongle buffer fails, and */
6380     /* DHD doesn't give separate input & output buf lens, */
6381     /* so assume the input length can never be more than ~1.5K */
6382     rqstlen = MIN(rqstlen, MSGBUF_MAX_MSG_SIZE);
6383 
6384 #ifdef PCIE_INB_DW
6385     if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
6386         return BCME_ERROR;
6387 #endif /* PCIE_INB_DW */
6388 
6389     DHD_GENERAL_LOCK(dhd, flags);
6390 
6391     if (prot->ioctl_state) {
6392         DHD_ERROR(("%s: pending ioctl %02x\n", __FUNCTION__, prot->ioctl_state));
6393         DHD_GENERAL_UNLOCK(dhd, flags);
6394 #ifdef PCIE_INB_DW
6395         dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
6396 #endif
6397         return BCME_BUSY;
6398     } else {
6399         prot->ioctl_state = MSGBUF_IOCTL_ACK_PENDING | MSGBUF_IOCTL_RESP_PENDING;
6400     }
6401 
6402     /* Request for cbuf space */
6403     ioct_rqst = (ioctl_req_msg_t*)
6404         dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
6405     if (ioct_rqst == NULL) {
6406         DHD_ERROR(("couldn't allocate space on msgring to send ioctl request\n"));
6407         prot->ioctl_state = 0;
6408         prot->curr_ioctl_cmd = 0;
6409         prot->ioctl_received = IOCTL_WAIT;
6410         DHD_GENERAL_UNLOCK(dhd, flags);
6411 #ifdef PCIE_INB_DW
6412         dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
6413 #endif
6414         return -1;
6415     }
6416 
6417     /* Common msg buf hdr */
6418     ioct_rqst->cmn_hdr.msg_type = MSG_TYPE_IOCTLPTR_REQ;
6419     ioct_rqst->cmn_hdr.if_id = (uint8)ifidx;
6420     ioct_rqst->cmn_hdr.flags = ring->current_phase;
6421     ioct_rqst->cmn_hdr.request_id = htol32(DHD_IOCTL_REQ_PKTID);
6422     ioct_rqst->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
6423     ring->seqnum++;
6424 
6425     ioct_rqst->cmd = htol32(cmd);
6426     prot->curr_ioctl_cmd = cmd;
6427     ioct_rqst->output_buf_len = htol16(resplen);
6428     prot->ioctl_trans_id++;
6429     ioct_rqst->trans_id = prot->ioctl_trans_id;
6430 
6431     /* populate ioctl buffer info */
6432     ioct_rqst->input_buf_len = htol16(rqstlen);
6433     ioct_rqst->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->ioctbuf.pa));
6434     ioct_rqst->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->ioctbuf.pa));
6435     /* copy ioct payload */
6436     ioct_buf = (void *) prot->ioctbuf.va;
6437 
6438     if (buf)
6439         memcpy(ioct_buf, buf, len);
6440 
6441     OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, len);
6442 
6443     if (!ISALIGNED(ioct_buf, DMA_ALIGN_LEN))
6444         DHD_ERROR(("host ioct address unaligned !!!!! \n"));
6445 
6446     DHD_CTL(("submitted IOCTL request request_id %d, cmd %d, output_buf_len %d, tx_id %d\n",
6447         ioct_rqst->cmn_hdr.request_id, cmd, ioct_rqst->output_buf_len,
6448         ioct_rqst->trans_id));
6449 
6450     /* update ring's WR index and ring doorbell to dongle */
6451     dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
6452     DHD_GENERAL_UNLOCK(dhd, flags);
6453 #ifdef PCIE_INB_DW
6454     dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
6455 #endif
6456 
6457     return 0;
6458 } /* dhd_fillup_ioct_reqst */
6459 
6460 
6461 /**
6462  * dhd_prot_ring_attach - Initialize the msgbuf_ring object and attach a
6463  * DMA-able buffer to it. The ring is NOT tagged as inited until all the ring
6464  * information is posted to the dongle.
6465  *
6466  * Invoked in dhd_prot_attach for the common rings, and in dhd_prot_init for
6467  * each flowring in pool of flowrings.
6468  *
6469  * returns BCME_OK=0 on success
6470  * returns non-zero negative error value on failure.
6471  */
6472 static int
6473 dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring, const char *name,
6474     uint16 max_items, uint16 item_len, uint16 ringid)
6475 {
6476     int dma_buf_alloced = BCME_NOMEM;
6477     uint32 dma_buf_len = max_items * item_len;
6478     dhd_prot_t *prot = dhd->prot;
6479     uint16 max_flowrings = dhd->bus->max_tx_flowrings;
6480 
6481     ASSERT(ring);
6482     ASSERT(name);
6483     ASSERT((max_items < 0xFFFF) && (item_len < 0xFFFF) && (ringid < 0xFFFF));
6484 
6485     /* Init name */
6486     strncpy(ring->name, name, RING_NAME_MAX_LENGTH);
6487     ring->name[RING_NAME_MAX_LENGTH - 1] = '\0';
6488 
6489     ring->idx = ringid;
6490 
6491     ring->max_items = max_items;
6492     ring->item_len = item_len;
6493 
6494     /* A contiguous space may be reserved for all flowrings */
6495     if (DHD_IS_FLOWRING(ringid, max_flowrings) && (prot->flowrings_dma_buf.va)) {
6496         /* Carve out from the contiguous DMA-able flowring buffer */
6497         uint16 flowid;
6498         uint32 base_offset;
6499 
6500         dhd_dma_buf_t *dma_buf = &ring->dma_buf;
6501         dhd_dma_buf_t *rsv_buf = &prot->flowrings_dma_buf;
6502 
6503         flowid = DHD_RINGID_TO_FLOWID(ringid);
6504         base_offset = (flowid - BCMPCIE_H2D_COMMON_MSGRINGS) * dma_buf_len;
6505 
6506         ASSERT(base_offset + dma_buf_len <= rsv_buf->len);
6507 
6508         dma_buf->len = dma_buf_len;
6509         dma_buf->va = (void *)((uintptr)rsv_buf->va + base_offset);
6510         PHYSADDRHISET(dma_buf->pa, PHYSADDRHI(rsv_buf->pa));
6511         PHYSADDRLOSET(dma_buf->pa, PHYSADDRLO(rsv_buf->pa) + base_offset);
6512 
6513         /* On 64bit, contiguous space may not span across 0x00000000FFFFFFFF */
6514         ASSERT(PHYSADDRLO(dma_buf->pa) >= PHYSADDRLO(rsv_buf->pa));
6515 
6516         dma_buf->dmah   = rsv_buf->dmah;
6517         dma_buf->secdma = rsv_buf->secdma;
6518 
6519         (void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
6520     } else {
6521         /* Allocate a dhd_dma_buf */
6522         dma_buf_alloced = dhd_dma_buf_alloc(dhd, &ring->dma_buf, dma_buf_len);
6523         if (dma_buf_alloced != BCME_OK) {
6524             return BCME_NOMEM;
6525         }
6526     }
6527 
6528     /* CAUTION: Save ring::base_addr in little endian format! */
6529     dhd_base_addr_htolpa(&ring->base_addr, ring->dma_buf.pa);
6530 
6531 #ifdef BCM_SECURE_DMA
6532     if (SECURE_DMA_ENAB(prot->osh)) {
6533         ring->dma_buf.secdma = MALLOCZ(prot->osh, sizeof(sec_cma_info_t));
6534         if (ring->dma_buf.secdma == NULL) {
6535             goto free_dma_buf;
6536         }
6537     }
6538 #endif /* BCM_SECURE_DMA */
6539 
6540     DHD_INFO(("RING_ATTACH : %s Max item %d len item %d total size %d "
6541         "ring start %p buf phys addr  %x:%x \n",
6542         ring->name, ring->max_items, ring->item_len,
6543         dma_buf_len, ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
6544         ltoh32(ring->base_addr.low_addr)));
6545 
6546     return BCME_OK;
6547 
6548 #ifdef BCM_SECURE_DMA
6549 free_dma_buf:
6550     if (dma_buf_alloced == BCME_OK) {
6551         dhd_dma_buf_free(dhd, &ring->dma_buf);
6552     }
6553 #endif /* BCM_SECURE_DMA */
6554 
6555     return BCME_NOMEM;
6556 } /* dhd_prot_ring_attach */
6557 
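/*
 * Illustrative carve-out arithmetic for the flowring case above (numbers are
 * examples only): with max_items = 512 TxPost items of item_len = 48 bytes,
 * dma_buf_len = 512 * 48 = 24576 bytes per flowring, and the flowring for
 * 'flowid' is carved at byte offset
 *     (flowid - BCMPCIE_H2D_COMMON_MSGRINGS) * 24576
 * inside the pre-reserved flowrings_dma_buf, so flowrings occupy back-to-back
 * slices of one contiguous allocation.
 */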
6558 
6559 /**
6560  * dhd_prot_ring_init - Post the common ring information to dongle.
6561  *
6562  * Used only for common rings.
6563  *
6564  * The flowrings information is passed via the create flowring control message
6565  * (tx_flowring_create_request_t) sent over the H2D control submission common
6566  * ring.
6567  */
6568 static void
6569 dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring)
6570 {
6571     ring->wr = 0;
6572     ring->rd = 0;
6573     ring->curr_rd = 0;
6574 
6575     /* CAUTION: ring::base_addr already in Little Endian */
6576     dhd_bus_cmn_writeshared(dhd->bus, &ring->base_addr,
6577         sizeof(sh_addr_t), RING_BUF_ADDR, ring->idx);
6578     dhd_bus_cmn_writeshared(dhd->bus, &ring->max_items,
6579         sizeof(uint16), RING_MAX_ITEMS, ring->idx);
6580     dhd_bus_cmn_writeshared(dhd->bus, &ring->item_len,
6581         sizeof(uint16), RING_ITEM_LEN, ring->idx);
6582 
6583     dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
6584         sizeof(uint16), RING_WR_UPD, ring->idx);
6585     dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
6586         sizeof(uint16), RING_RD_UPD, ring->idx);
6587 
6588     /* ring inited */
6589     ring->inited = TRUE;
6590 } /* dhd_prot_ring_init */
6591 
6592 
6593 /**
6594  * dhd_prot_ring_reset - bzero a ring's DMA-ble buffer and cache flush
6595  * Reset WR and RD indices to 0.
6596  */
6597 static void
6598 dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring)
6599 {
6600     DHD_TRACE(("%s\n", __FUNCTION__));
6601 
6602     dhd_dma_buf_reset(dhd, &ring->dma_buf);
6603 
6604     ring->rd = ring->wr = 0;
6605     ring->curr_rd = 0;
6606     ring->inited = FALSE;
6607     ring->create_pending = FALSE;
6608 }
6609 
6610 
6611 /**
6612  * dhd_prot_ring_detach - Detach the DMA-able buffer and any other objects
6613  * hanging off the msgbuf_ring.
6614  */
6615 static void
6616 dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring)
6617 {
6618     dhd_prot_t *prot = dhd->prot;
6619     uint16 max_flowrings = dhd->bus->max_tx_flowrings;
6620     ASSERT(ring);
6621 
6622     ring->inited = FALSE;
6623     /* rd = ~0, wr = ring->rd - 1, max_items = 0, len_item = ~0 */
6624 
6625 #ifdef BCM_SECURE_DMA
6626     if (SECURE_DMA_ENAB(prot->osh)) {
6627         if (ring->dma_buf.secdma) {
6628             SECURE_DMA_UNMAP_ALL(prot->osh, ring->dma_buf.secdma);
6629             MFREE(prot->osh, ring->dma_buf.secdma, sizeof(sec_cma_info_t));
6630             ring->dma_buf.secdma = NULL;
6631         }
6632     }
6633 #endif /* BCM_SECURE_DMA */
6634 
6635     /* If the DMA-able buffer was carved out of a pre-reserved contiguous
6636      * memory, then simply stop using it.
6637      */
6638     if (DHD_IS_FLOWRING(ring->idx, max_flowrings) && (prot->flowrings_dma_buf.va)) {
6639         (void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
6640         memset(&ring->dma_buf, 0, sizeof(dhd_dma_buf_t));
6641     } else {
6642         dhd_dma_buf_free(dhd, &ring->dma_buf);
6643     }
6644 } /* dhd_prot_ring_detach */
6645 
6646 
6647 /*
6648  * +----------------------------------------------------------------------------
6649  * Flowring Pool
6650  *
6651  * Unlike common rings, which are attached very early on (dhd_prot_attach),
6652  * flowrings are dynamically instantiated. Moreover, flowrings may require a
6653  * larger DMA-able buffer. To avoid issues with fragmented cache coherent
6654  * DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once.
6655  * The DMA-able buffers are attached to these pre-allocated msgbuf_ring.
6656  *
6657  * Each DMA-able buffer may be allocated independently, or may be carved out
6658  * of a single large contiguous region that is registered with the protocol
6659  * layer into flowrings_dma_buf. On a 64bit platform, this contiguous region
6660  * may not span 0x00000000FFFFFFFF (avoid dongle side 64bit ptr arithmetic).
6661  *
6662  * No flowring pool action is performed in dhd_prot_attach(), as the number
6663  * of h2d rings is not yet known.
6664  *
6665  * In dhd_prot_init(), the dongle advertized number of h2d rings is used to
6666  * determine the number of flowrings required, and a pool of msgbuf_rings are
6667  * allocated and a DMA-able buffer (carved or allocated) is attached.
6668  * See: dhd_prot_flowrings_pool_attach()
6669  *
6670  * A flowring msgbuf_ring object may be fetched from this pool during flowring
6671  * creation, using the flowid. Likewise, flowrings may be freed back into the
6672  * pool on flowring deletion.
6673  * See: dhd_prot_flowrings_pool_fetch(), dhd_prot_flowrings_pool_release()
6674  *
6675  * In dhd_prot_detach(), the flowring pool is detached. The DMA-able buffers
6676  * are detached (returned back to the carved region or freed), and the pool of
6677  * msgbuf_ring and any objects allocated against it are freed.
6678  * See: dhd_prot_flowrings_pool_detach()
6679  *
6680  * In dhd_prot_reset(), the flowring pool is simply reset by returning it to a
6681  * state as-if upon an attach. All DMA-able buffers are retained.
6682  * Following a dhd_prot_reset(), in a subsequent dhd_prot_init(), the flowring
6683  * pool attach will notice that the pool persists and continue to use it. This
6684  * will avoid the case of a fragmented DMA-able region.
6685  *
6686  * +----------------------------------------------------------------------------
6687  */
6688 
6689 /* Conversion of a flowid to a flowring pool index */
6690 #define DHD_FLOWRINGS_POOL_OFFSET(flowid) \
6691     ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS)
6692 
6693 /* Fetch the msgbuf_ring_t from the flowring pool given a flowid */
6694 #define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid) \
6695     (msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + \
6696         DHD_FLOWRINGS_POOL_OFFSET(flowid)
6697 
6698 /* Traverse each flowring in the flowring pool, assigning ring and flowid */
6699 #define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, total_flowrings) \
6700     for ((flowid) = DHD_FLOWRING_START_FLOWID, \
6701         (ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \
6702          (flowid) < ((total_flowrings) + DHD_FLOWRING_START_FLOWID); \
6703          (ring)++, (flowid)++)
6704 
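/*
 * Example of the flowid to pool-slot mapping defined above, assuming the
 * usual two H2D common rings (BCMPCIE_H2D_COMMON_MSGRINGS == 2): flowid 2
 * maps to pool slot 0, flowid 3 to slot 1, and so on.
 * FOREACH_RING_IN_FLOWRINGS_POOL then walks flowids starting from
 * DHD_FLOWRING_START_FLOWID while the ring pointer walks the pool array in
 * lock step.
 */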
6705 /* Fetch number of H2D flowrings given the total number of h2d rings */
6706 static uint16
6707 dhd_get_max_flow_rings(dhd_pub_t *dhd)
6708 {
6709     if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
6710         return dhd->bus->max_tx_flowrings;
6711     else
6712         return (dhd->bus->max_tx_flowrings - BCMPCIE_H2D_COMMON_MSGRINGS);
6713 }
6714 
6715 /**
6716  * dhd_prot_flowrings_pool_attach - Initialize a pool of flowring msgbuf_ring_t.
6717  *
6718  * Allocate a pool of msgbuf_ring along with DMA-able buffers for flowrings.
6719  * Dongle includes common rings when it advertizes the number of H2D rings.
6720  * Allocates a pool of msgbuf_ring_t and invokes dhd_prot_ring_attach to
6721  * allocate the DMA-able buffer and initialize each msgbuf_ring_t object.
6722  *
6723  * dhd_prot_ring_attach is invoked to perform the actual initialization and
6724  * attaching the DMA-able buffer.
6725  *
6726  * Later dhd_prot_flowrings_pool_fetch() may be used to fetch a preallocated and
6727  * initialized msgbuf_ring_t object.
6728  *
6729  * returns BCME_OK=0 on success
6730  * returns non-zero negative error value on failure.
6731  */
6732 static int
6733 dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd)
6734 {
6735     uint16 flowid;
6736     msgbuf_ring_t *ring;
6737     uint16 h2d_flowrings_total; /* exclude H2D common rings */
6738     dhd_prot_t *prot = dhd->prot;
6739     char ring_name[RING_NAME_MAX_LENGTH];
6740 
6741     if (prot->h2d_flowrings_pool != NULL)
6742         return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
6743 
6744     ASSERT(prot->h2d_rings_total == 0);
6745 
6746     /* h2d_rings_total includes H2D common rings: ctrl and rxbuf subn */
6747     prot->h2d_rings_total = (uint16)dhd_bus_max_h2d_queues(dhd->bus);
6748 
6749     if (prot->h2d_rings_total < BCMPCIE_H2D_COMMON_MSGRINGS) {
6750         DHD_ERROR(("%s: h2d_rings_total advertized as %u\n",
6751             __FUNCTION__, prot->h2d_rings_total));
6752         return BCME_ERROR;
6753     }
6754 
6755     /* Subtract number of H2D common rings, to determine number of flowrings */
6756     h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
6757 
6758     DHD_ERROR(("Attach flowrings pool for %d rings\n", h2d_flowrings_total));
6759 
6760     /* Allocate pool of msgbuf_ring_t objects for all flowrings */
6761     prot->h2d_flowrings_pool = (msgbuf_ring_t *)MALLOCZ(prot->osh,
6762         (h2d_flowrings_total * sizeof(msgbuf_ring_t)));
6763 
6764     if (prot->h2d_flowrings_pool == NULL) {
6765         DHD_ERROR(("%s: flowrings pool for %d flowrings, alloc failure\n",
6766             __FUNCTION__, h2d_flowrings_total));
6767         goto fail;
6768     }
6769 
6770     /* Setup & Attach a DMA-able buffer to each flowring in the flowring pool */
6771     FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
6772         snprintf(ring_name, sizeof(ring_name), "h2dflr_%03u", flowid);
6773         if (dhd_prot_ring_attach(dhd, ring, ring_name,
6774                 prot->h2d_max_txpost, H2DRING_TXPOST_ITEMSIZE,
6775                 DHD_FLOWID_TO_RINGID(flowid)) != BCME_OK) {
6776             goto attach_fail;
6777         }
6778     }
6779 
6780     return BCME_OK;
6781 
6782 attach_fail:
6783     dhd_prot_flowrings_pool_detach(dhd); /* Free entire pool of flowrings */
6784 
6785 fail:
6786     prot->h2d_rings_total = 0;
6787     return BCME_NOMEM;
6788 } /* dhd_prot_flowrings_pool_attach */
6789 
6790 
6791 /**
6792  * dhd_prot_flowrings_pool_reset - Reset all msgbuf_ring_t objects in the pool.
6793  * Invokes dhd_prot_ring_reset to perform the actual reset.
6794  *
6795  * The DMA-able buffer is not freed during reset and neither is the flowring
6796  * pool freed.
6797  *
6798  * dhd_prot_flowrings_pool_reset will be invoked in dhd_prot_reset. Following
6799  * the dhd_prot_reset, dhd_prot_init will be re-invoked, and the flowring pool
6800  * from a previous flowring pool instantiation will be reused.
6801  *
6802  * This will avoid a fragmented DMA-able memory condition, if multiple
6803  * dhd_prot_reset were invoked to reboot the dongle without a full detach/attach
6804  * cycle.
6805  */
6806 static void
6807 dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd)
6808 {
6809     uint16 flowid, h2d_flowrings_total;
6810     msgbuf_ring_t *ring;
6811     dhd_prot_t *prot = dhd->prot;
6812 
6813     if (prot->h2d_flowrings_pool == NULL) {
6814         ASSERT(prot->h2d_rings_total == 0);
6815         return;
6816     }
6817     h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
6818     /* Reset each flowring in the flowring pool */
6819     FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
6820         dhd_prot_ring_reset(dhd, ring);
6821         ring->inited = FALSE;
6822     }
6823 
6824     /* Flowring pool state must be as-if dhd_prot_flowrings_pool_attach */
6825 }
6826 
6827 
6828 /**
6829  * dhd_prot_flowrings_pool_detach - Free pool of msgbuf_ring along with
6830  * DMA-able buffers for flowrings.
6831  * dhd_prot_ring_detach is invoked to free the DMA-able buffer and perform any
6832  * de-initialization of each msgbuf_ring_t.
6833  */
6834 static void
6835 dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd)
6836 {
6837     int flowid;
6838     msgbuf_ring_t *ring;
6839     uint16 h2d_flowrings_total; /* exclude H2D common rings */
6840     dhd_prot_t *prot = dhd->prot;
6841 
6842     if (prot->h2d_flowrings_pool == NULL) {
6843         ASSERT(prot->h2d_rings_total == 0);
6844         return;
6845     }
6846 
6847     h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
6848     /* Detach the DMA-able buffer for each flowring in the flowring pool */
6849     FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
6850         dhd_prot_ring_detach(dhd, ring);
6851     }
6852 
6853 
6854     MFREE(prot->osh, prot->h2d_flowrings_pool,
6855         (h2d_flowrings_total * sizeof(msgbuf_ring_t)));
6856 
6857     prot->h2d_flowrings_pool = (msgbuf_ring_t*)NULL;
6858     prot->h2d_rings_total = 0;
6859 } /* dhd_prot_flowrings_pool_detach */
6860 
6861 
6862 /**
6863  * dhd_prot_flowrings_pool_fetch - Fetch a preallocated and initialized
6864  * msgbuf_ring from the flowring pool, and assign it.
6865  *
6866  * Unlike common rings, which uses a dhd_prot_ring_init() to pass the common
6867  * ring information to the dongle, a flowring's information is passed via a
6868  * flowring create control message.
6869  *
6870  * Only the ring state (WR, RD) index are initialized.
6871  */
6872 static msgbuf_ring_t *
6873 dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd, uint16 flowid)
6874 {
6875     msgbuf_ring_t *ring;
6876     dhd_prot_t *prot = dhd->prot;
6877 
6878     ASSERT(flowid >= DHD_FLOWRING_START_FLOWID);
6879     ASSERT(flowid < prot->h2d_rings_total);
6880     ASSERT(prot->h2d_flowrings_pool != NULL);
6881 
6882     ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
6883 
6884     /* ASSERT flow_ring->inited == FALSE */
6885 
6886     ring->wr = 0;
6887     ring->rd = 0;
6888     ring->curr_rd = 0;
6889     ring->inited = TRUE;
6890     /**
6891      * Every time a flowring starts dynamically, initialize current_phase with 0
6892      * then flip to BCMPCIE_CMNHDR_PHASE_BIT_INIT
6893      */
6894     ring->current_phase = 0;
6895     return ring;
6896 }
6897 
6898 
6899 /**
6900  * dhd_prot_flowrings_pool_release - release a previously fetched flowring's
6901  * msgbuf_ring back to the flow_ring pool.
6902  */
6903 void
6904 dhd_prot_flowrings_pool_release(dhd_pub_t *dhd, uint16 flowid, void *flow_ring)
6905 {
6906     msgbuf_ring_t *ring;
6907     dhd_prot_t *prot = dhd->prot;
6908 
6909     ASSERT(flowid >= DHD_FLOWRING_START_FLOWID);
6910     ASSERT(flowid < prot->h2d_rings_total);
6911     ASSERT(prot->h2d_flowrings_pool != NULL);
6912 
6913     ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
6914 
6915     ASSERT(ring == (msgbuf_ring_t*)flow_ring);
6916     /* ASSERT flow_ring->inited == TRUE */
6917 
6918     (void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
6919 
6920     ring->wr = 0;
6921     ring->rd = 0;
6922     ring->inited = FALSE;
6923 
6924     ring->curr_rd = 0;
6925 }
6926 
6927 
6928 /* Assumes only one index is updated at a time */
6929 /* If exactly_nitems is true, this function will allocate space for nitems or fail */
6930 /*    Exception: when wrap around is encountered, to prevent hangup (last nitems of ring buffer) */
6931 /* If exactly_nitems is false, this function will allocate space for nitems or less */
6932 static void *BCMFASTPATH
6933 dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced,
6934     bool exactly_nitems)
6935 {
6936     void *ret_ptr = NULL;
6937     uint16 ring_avail_cnt;
6938 
6939     ASSERT(nitems <= ring->max_items);
6940 
6941     ring_avail_cnt = CHECK_WRITE_SPACE(ring->rd, ring->wr, ring->max_items);
6942 
6943     if ((ring_avail_cnt == 0) ||
6944            (exactly_nitems && (ring_avail_cnt < nitems) &&
6945            ((ring->max_items - ring->wr) >= nitems))) {
6946         DHD_INFO(("Space not available: ring %s items %d write %d read %d\n",
6947             ring->name, nitems, ring->wr, ring->rd));
6948         return NULL;
6949     }
6950     *alloced = MIN(nitems, ring_avail_cnt);
6951 
6952     /* Return next available space */
6953     ret_ptr = (char *)DHD_RING_BGN_VA(ring) + (ring->wr * ring->item_len);
6954 
6955     /* Update write index */
6956     if ((ring->wr + *alloced) == ring->max_items)
6957         ring->wr = 0;
6958     else if ((ring->wr + *alloced) < ring->max_items)
6959         ring->wr += *alloced;
6960     else {
6961         /* Should never hit this */
6962         ASSERT(0);
6963         return NULL;
6964     }
6965 
6966     return ret_ptr;
6967 } /* dhd_prot_get_ring_space */
6968 
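/*
 * Wrap-around example for the write-index update in dhd_prot_get_ring_space()
 * above (illustrative numbers): with max_items = 64, wr = 60 and *alloced = 4,
 * (wr + *alloced) == max_items so wr wraps back to 0; with *alloced = 2 it
 * simply advances to 62. When exactly_nitems is FALSE the caller may be
 * granted fewer slots than requested, so callers must honor *alloced rather
 * than assuming nitems were granted.
 */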
6969 
6970 /**
6971  * dhd_prot_ring_write_complete - Host updates the new WR index on producing
6972  * new messages in a H2D ring. The messages are flushed from cache prior to
6973  * posting the new WR index. The new WR index will be updated in the DMA index
6974  * array or directly in the dongle's ring state memory.
6975  * A PCIE doorbell will be generated to wake up the dongle.
6976  * This is a non-atomic function, make sure the callers
6977  * always hold appropriate locks.
6978  */
6979 static void BCMFASTPATH
6980 dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
6981     uint16 nitems)
6982 {
6983     dhd_prot_t *prot = dhd->prot;
6984     uint8 db_index;
6985     uint16 max_flowrings = dhd->bus->max_tx_flowrings;
6986 
6987     /* cache flush */
6988     OSL_CACHE_FLUSH(p, ring->item_len * nitems);
6989 
6990     if (IDMA_DS_ACTIVE(dhd) && IDMA_ACTIVE(dhd)) {
6991         dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
6992             sizeof(uint16), RING_WR_UPD, ring->idx);
6993     } else if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
6994             dhd_prot_dma_indx_set(dhd, ring->wr,
6995                                   H2D_DMA_INDX_WR_UPD, ring->idx);
6996     } else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings)) {
6997             dhd_prot_dma_indx_set(dhd, ring->wr,
6998             H2D_IFRM_INDX_WR_UPD, ring->idx);
6999     } else {
7000             dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
7001                 sizeof(uint16), RING_WR_UPD, ring->idx);
7002     }
7003 
7004     /* raise h2d interrupt */
7005     if (IDMA_ACTIVE(dhd) ||
7006         (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings))) {
7007         if (IDMA_DS_ACTIVE(dhd)) {
7008             prot->mb_ring_fn(dhd->bus, ring->wr);
7009         } else {
7010             db_index = IDMA_IDX0;
7011             prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
7012         }
7013     } else {
7014         prot->mb_ring_fn(dhd->bus, ring->wr);
7015     }
7016 }
7017 
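/*
 * Note on the publication paths above: the new WR value is pushed either into
 * the host DMA index array (dhd_prot_dma_indx_set) or directly into the
 * dongle's ring state in TCM (dhd_bus_cmn_writeshared), depending on
 * IDMA/IFRM/host-index support; the doorbell then either carries the WR value
 * itself (mb_ring_fn) or only a doorbell index (mb_2_ring_fn).
 */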
7018 /**
7019  * dhd_prot_upd_read_idx - Host updates the new RD index on consuming messages
7020  * from a D2H ring. The new RD index will be updated in the DMA Index array or
7021  * directly in dongle's ring state memory.
7022  */
7023 static void
7024 dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring)
7025 {
7026     dhd_prot_t *prot = dhd->prot;
7027     uint8 db_index;
7028 
7029     /* update read index */
7030     /* If DMA'ing of indices is supported,
7031      * update the RD index in host memory,
7032      * otherwise write it into TCM.
7033      */
7034     if (IDMA_ACTIVE(dhd)) {
7035         dhd_prot_dma_indx_set(dhd, ring->rd,
7036                               D2H_DMA_INDX_RD_UPD, ring->idx);
7037         if (IDMA_DS_ACTIVE(dhd)) {
7038             dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
7039                 sizeof(uint16), RING_RD_UPD, ring->idx);
7040         } else {
7041             db_index = IDMA_IDX1;
7042             prot->mb_2_ring_fn(dhd->bus, db_index, FALSE);
7043         }
7044     } else if (dhd->dma_h2d_ring_upd_support) {
7045         dhd_prot_dma_indx_set(dhd, ring->rd,
7046                               D2H_DMA_INDX_RD_UPD, ring->idx);
7047     } else {
7048         dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
7049             sizeof(uint16), RING_RD_UPD, ring->idx);
7050     }
7051 }
7052 
7053 static int
7054 dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create)
7055 {
7056     unsigned long flags;
7057     d2h_ring_create_req_t  *d2h_ring;
7058     uint16 alloced = 0;
7059     int ret = BCME_OK;
7060     uint16 max_h2d_rings = dhd->bus->max_submission_rings;
7061 
7062 #ifdef PCIE_INB_DW
7063     if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
7064         return BCME_ERROR;
7065 #endif /* PCIE_INB_DW */
7066     DHD_GENERAL_LOCK(dhd, flags);
7067 
7068     DHD_TRACE(("%s trying to send D2H ring create Req\n", __FUNCTION__));
7069 
7070     if (ring_to_create == NULL) {
7071         DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__));
7072         ret = BCME_ERROR;
7073         goto err;
7074     }
7075 
7076     /* Request for ring buffer space */
7077     d2h_ring = (d2h_ring_create_req_t *) dhd_prot_alloc_ring_space(dhd,
7078         &dhd->prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
7079         &alloced, FALSE);
7080 
7081     if (d2h_ring == NULL) {
7082         DHD_ERROR(("%s: FATAL: No space in control ring to send D2H ring create\n",
7083             __FUNCTION__));
7084         ret = BCME_NOMEM;
7085         goto err;
7086     }
7087     ring_to_create->create_req_id = DHD_D2H_DBGRING_REQ_PKTID;
7088     ring_to_create->create_pending = TRUE;
7089 
7090     /* Common msg buf hdr */
7091     d2h_ring->msg.msg_type = MSG_TYPE_D2H_RING_CREATE;
7092     d2h_ring->msg.if_id = 0;
7093     d2h_ring->msg.flags = dhd->prot->h2dring_ctrl_subn.current_phase;
7094     d2h_ring->msg.request_id = htol32(ring_to_create->create_req_id);
7095     d2h_ring->ring_id = htol16(DHD_D2H_RING_OFFSET(ring_to_create->idx, max_h2d_rings));
7096     d2h_ring->ring_type = BCMPCIE_D2H_RING_TYPE_DBGBUF_CPL;
7097     d2h_ring->max_items = htol16(D2HRING_DYNAMIC_INFO_MAX_ITEM);
7098     d2h_ring->len_item = htol16(D2HRING_INFO_BUFCMPLT_ITEMSIZE);
7099     d2h_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr;
7100     d2h_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr;
7101 
7102     d2h_ring->flags = 0;
7103     d2h_ring->msg.epoch =
7104         dhd->prot->h2dring_ctrl_subn.seqnum % H2D_EPOCH_MODULO;
7105     dhd->prot->h2dring_ctrl_subn.seqnum++;
7106 
7107     /* Update the flow_ring's WRITE index */
7108     dhd_prot_ring_write_complete(dhd, &dhd->prot->h2dring_ctrl_subn, d2h_ring,
7109         DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
7110 
7111 err:
7112     DHD_GENERAL_UNLOCK(dhd, flags);
7113 #ifdef PCIE_INB_DW
7114     dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
7115 #endif
7116     return ret;
7117 }
7118 
7119 static int
7120 dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create)
7121 {
7122     unsigned long flags;
7123     h2d_ring_create_req_t  *h2d_ring;
7124     uint16 alloced = 0;
7125     uint8 i = 0;
7126     int ret = BCME_OK;
7127 
7128 
7129 #ifdef PCIE_INB_DW
7130     if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
7131         return BCME_ERROR;
7132 #endif /* PCIE_INB_DW */
7133     DHD_GENERAL_LOCK(dhd, flags);
7134 
7135     DHD_TRACE(("%s trying to send H2D ring create Req\n", __FUNCTION__));
7136 
7137     if (ring_to_create == NULL) {
7138         DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__));
7139         ret = BCME_ERROR;
7140         goto err;
7141     }
7142 
7143     /* Request for ring buffer space */
7144     h2d_ring = (h2d_ring_create_req_t *)dhd_prot_alloc_ring_space(dhd,
7145         &dhd->prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
7146         &alloced, FALSE);
7147 
7148     if (h2d_ring == NULL) {
7149         DHD_ERROR(("%s: FATAL: No space in control ring to send H2D ring create\n",
7150             __FUNCTION__));
7151         ret = BCME_NOMEM;
7152         goto err;
7153     }
7154     ring_to_create->create_req_id = DHD_H2D_DBGRING_REQ_PKTID;
7155     ring_to_create->create_pending = TRUE;
7156 
7157     /* Common msg buf hdr */
7158     h2d_ring->msg.msg_type = MSG_TYPE_H2D_RING_CREATE;
7159     h2d_ring->msg.if_id = 0;
7160     h2d_ring->msg.request_id = htol32(ring_to_create->create_req_id);
7161     h2d_ring->msg.flags = dhd->prot->h2dring_ctrl_subn.current_phase;
7162     h2d_ring->ring_id = htol16(DHD_H2D_RING_OFFSET(ring_to_create->idx));
7163     h2d_ring->ring_type = BCMPCIE_H2D_RING_TYPE_DBGBUF_SUBMIT;
7164     h2d_ring->max_items = htol16(H2DRING_DYNAMIC_INFO_MAX_ITEM);
7165     h2d_ring->n_completion_ids = ring_to_create->n_completion_ids;
7166     h2d_ring->len_item = htol16(H2DRING_INFO_BUFPOST_ITEMSIZE);
7167     h2d_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr;
7168     h2d_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr;
7169 
7170     for (i = 0; i < ring_to_create->n_completion_ids; i++) {
7171         h2d_ring->completion_ring_ids[i] = htol16(ring_to_create->compeltion_ring_ids[i]);
7172     }
7173 
7174     h2d_ring->flags = 0;
7175     h2d_ring->msg.epoch =
7176         dhd->prot->h2dring_ctrl_subn.seqnum % H2D_EPOCH_MODULO;
7177     dhd->prot->h2dring_ctrl_subn.seqnum++;
7178 
7179     /* Update the flow_ring's WRITE index */
7180     dhd_prot_ring_write_complete(dhd, &dhd->prot->h2dring_ctrl_subn, h2d_ring,
7181         DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
7182 
7183 err:
7184     DHD_GENERAL_UNLOCK(dhd, flags);
7185 #ifdef PCIE_INB_DW
7186     dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
7187 #endif
7188     return ret;
7189 }
7190 
7191 /**
7192  * dhd_prot_dma_indx_set - set a new WR or RD index in the DMA index array.
7193  * Dongle will DMA the entire array (if DMA_INDX feature is enabled).
7194  * See dhd_prot_dma_indx_init()
7195  */
7196 void
7197 dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type, uint16 ringid)
7198 {
7199     uint8 *ptr;
7200     uint16 offset;
7201     dhd_prot_t *prot = dhd->prot;
7202     uint16 max_h2d_rings = dhd->bus->max_submission_rings;
7203 
7204     switch (type) {
7205         case H2D_DMA_INDX_WR_UPD:
7206             ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
7207             offset = DHD_H2D_RING_OFFSET(ringid);
7208             break;
7209 
7210         case D2H_DMA_INDX_RD_UPD:
7211             ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
7212             offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
7213             break;
7214 
7215         case H2D_IFRM_INDX_WR_UPD:
7216             ptr = (uint8 *)(prot->h2d_ifrm_indx_wr_buf.va);
7217             offset = DHD_H2D_FRM_FLOW_RING_OFFSET(ringid);
7218             break;
7219 
7220         default:
7221             DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
7222                 __FUNCTION__));
7223             return;
7224     }
7225 
7226     ASSERT(prot->rw_index_sz != 0);
7227     ptr += offset * prot->rw_index_sz;
7228 
7229     *(uint16*)ptr = htol16(new_index);
7230 
7231     OSL_CACHE_FLUSH((void *)ptr, prot->rw_index_sz);
7232 
7233     DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n",
7234         __FUNCTION__, new_index, type, ringid, ptr, offset));
7235 } /* dhd_prot_dma_indx_set */
7236 
7237 
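/*
 * Layout note (illustrative, not from the original comments): each ring owns one slot of
 * prot->rw_index_sz bytes in its index array, so with a 2-byte index size the slot for
 * ring offset 5 starts at byte 10 of the buffer. dhd_prot_dma_indx_set() above and
 * dhd_prot_dma_indx_get() below both locate the slot as va + offset * rw_index_sz and
 * flush or invalidate only that slot.
 */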
7238 /**
7239  * dhd_prot_dma_indx_get - Fetch a WR or RD index from the dongle DMA-ed index
7240  * array.
7241  * Dongle DMAes an entire array to host memory (if the feature is enabled).
7242  * See dhd_prot_dma_indx_init()
7243  */
7244 static uint16
7245 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid)
7246 {
7247     uint8 *ptr;
7248     uint16 data;
7249     uint16 offset;
7250     dhd_prot_t *prot = dhd->prot;
7251     uint16 max_h2d_rings = dhd->bus->max_submission_rings;
7252 
7253     switch (type) {
7254         case H2D_DMA_INDX_WR_UPD:
7255             ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
7256             offset = DHD_H2D_RING_OFFSET(ringid);
7257             break;
7258 
7259         case H2D_DMA_INDX_RD_UPD:
7260             ptr = (uint8 *)(prot->h2d_dma_indx_rd_buf.va);
7261             offset = DHD_H2D_RING_OFFSET(ringid);
7262             break;
7263 
7264         case D2H_DMA_INDX_WR_UPD:
7265             ptr = (uint8 *)(prot->d2h_dma_indx_wr_buf.va);
7266             offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
7267             break;
7268 
7269         case D2H_DMA_INDX_RD_UPD:
7270             ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
7271             offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
7272             break;
7273 
7274         default:
7275             DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
7276                 __FUNCTION__));
7277             return 0;
7278     }
7279 
7280     ASSERT(prot->rw_index_sz != 0);
7281     ptr += offset * prot->rw_index_sz;
7282 
7283     OSL_CACHE_INV((void *)ptr, prot->rw_index_sz);
7284 
7285     data = LTOH16(*((uint16*)ptr));
7286 
7287     DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n",
7288         __FUNCTION__, data, type, ringid, ptr, offset));
7289 
7290     return (data);
7291 } /* dhd_prot_dma_indx_get */
7292 
7293 /**
7294  * An array of DMA read/write indices, containing information about host rings, can be maintained
7295  * either in host memory or in device memory, depending on preprocessor options. Depending on these
7296  * options, this function is called during driver initialization. It reserves and initializes
7297  * blocks of DMA'able host memory containing an array of DMA read or DMA write indices. The physical
7298  * addresses of these host memory blocks are communicated to the dongle later on. By reading this
7299  * host memory, the dongle learns about the state of the host rings.
7300  */
7301 
7302 static INLINE int
7303 dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
7304     dhd_dma_buf_t *dma_buf, uint32 bufsz)
7305 {
7306     int rc;
7307 
7308     if ((dma_buf->len == bufsz) || (dma_buf->va != NULL))
7309         return BCME_OK;
7310 
7311     rc = dhd_dma_buf_alloc(dhd, dma_buf, bufsz);
7312 
7313     return rc;
7314 }
7315 
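/**
 * dhd_prot_dma_indx_init - a brief note (added description, grounded in the code below):
 * records the dongle-advertised index size in prot->rw_index_sz and allocates, if not
 * already allocated, the host DMA-able buffer selected by type, sized rw_index_sz * length.
 */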
7316 int
7317 dhd_prot_dma_indx_init(dhd_pub_t *dhd, uint32 rw_index_sz, uint8 type, uint32 length)
7318 {
7319     uint32 bufsz;
7320     dhd_prot_t *prot = dhd->prot;
7321     dhd_dma_buf_t *dma_buf;
7322 
7323     if (prot == NULL) {
7324         DHD_ERROR(("prot is not inited\n"));
7325         return BCME_ERROR;
7326     }
7327 
7328     /* Dongle advertises a 2B or 4B RW index size */
7329     ASSERT(rw_index_sz != 0);
7330     prot->rw_index_sz = rw_index_sz;
7331 
7332     bufsz = rw_index_sz * length;
7333 
7334     switch (type) {
7335         case H2D_DMA_INDX_WR_BUF:
7336             dma_buf = &prot->h2d_dma_indx_wr_buf;
7337             if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
7338                 goto ret_no_mem;
7339             DHD_ERROR(("H2D DMA WR INDX : array size %d = %d * %d\n",
7340                 dma_buf->len, rw_index_sz, length));
7341             break;
7342 
7343         case H2D_DMA_INDX_RD_BUF:
7344             dma_buf = &prot->h2d_dma_indx_rd_buf;
7345             if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
7346                 goto ret_no_mem;
7347             DHD_ERROR(("H2D DMA RD INDX : array size %d = %d * %d\n",
7348                 dma_buf->len, rw_index_sz, length));
7349             break;
7350 
7351         case D2H_DMA_INDX_WR_BUF:
7352             dma_buf = &prot->d2h_dma_indx_wr_buf;
7353             if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
7354                 goto ret_no_mem;
7355             DHD_ERROR(("D2H DMA WR INDX : array size %d = %d * %d\n",
7356                 dma_buf->len, rw_index_sz, length));
7357             break;
7358 
7359         case D2H_DMA_INDX_RD_BUF:
7360             dma_buf = &prot->d2h_dma_indx_rd_buf;
7361             if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
7362                 goto ret_no_mem;
7363             DHD_ERROR(("D2H DMA RD INDX : array size %d = %d * %d\n",
7364                 dma_buf->len, rw_index_sz, length));
7365             break;
7366 
7367         case H2D_IFRM_INDX_WR_BUF:
7368             dma_buf = &prot->h2d_ifrm_indx_wr_buf;
7369             if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
7370                 goto ret_no_mem;
7371             DHD_ERROR(("H2D IFRM WR INDX : array size %d = %d * %d\n",
7372                 dma_buf->len, rw_index_sz, length));
7373             break;
7374 
7375         default:
7376             DHD_ERROR(("%s: Unexpected option\n", __FUNCTION__));
7377             return BCME_BADOPTION;
7378     }
7379 
7380     return BCME_OK;
7381 
7382 ret_no_mem:
7383     DHD_ERROR(("%s: dhd_prot_dma_indx_alloc type %d buf_sz %d failure\n",
7384         __FUNCTION__, type, bufsz));
7385     return BCME_NOMEM;
7386 } /* dhd_prot_dma_indx_init */
7387 
7388 
7389 /**
7390  * Called on checking for 'completion' messages from the dongle. Returns next host buffer to read
7391  * from, or NULL if there are no more messages to read.
7392  */
7393 static uint8*
7394 dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 *available_len)
7395 {
7396     uint16 wr;
7397     uint16 rd;
7398     uint16 depth;
7399     uint16 items;
7400     void  *read_addr = NULL; /* address of next msg to be read in ring */
7401     uint16 d2h_wr = 0;
7402 
7403     DHD_TRACE(("%s: d2h_dma_indx_rd_buf %p, d2h_dma_indx_wr_buf %p\n",
7404         __FUNCTION__, (uint32 *)(dhd->prot->d2h_dma_indx_rd_buf.va),
7405         (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va)));
7406 
7407     /* Remember the read index in a variable.
7408      * This is because ring->rd gets updated at the end of this function,
7409      * so the exact read index from which the message was read would
7410      * otherwise not be available for printing.
7411      */
7412     ring->curr_rd = ring->rd;
7413 
7414     /* update write pointer */
7415     if (dhd->dma_d2h_ring_upd_support) {
7416         /* DMAing write/read indices supported */
7417         d2h_wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
7418         ring->wr = d2h_wr;
7419     } else {
7420         dhd_bus_cmn_readshared(dhd->bus, &(ring->wr), RING_WR_UPD, ring->idx);
7421     }
7422 
7423     wr = ring->wr;
7424     rd = ring->rd;
7425     depth = ring->max_items;
7426 
7427     /* check for avail space, in number of ring items */
7428     items = READ_AVAIL_SPACE(wr, rd, depth);
7429     if (items == 0)
7430         return NULL;
7431 
7432     /*
7433      * Note that there are builds where ASSERT translates to just a printk,
7434      * so even if we hit this condition we would never halt; in that
7435      * case dhd_prot_process_msgtype could get into a big loop if this
7436      * happens.
7437      */
7438     if (items > ring->max_items) {
7439         DHD_ERROR(("\r\n======================= \r\n"));
7440         DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n",
7441             __FUNCTION__, ring, ring->name, ring->max_items, items));
7442         DHD_ERROR(("wr: %d,  rd: %d,  depth: %d  \r\n", wr, rd, depth));
7443         DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n",
7444             dhd->busstate, dhd->bus->wait_for_d3_ack));
7445         DHD_ERROR(("\r\n======================= \r\n"));
7446 #ifdef SUPPORT_LINKDOWN_RECOVERY
7447         if (wr >= ring->max_items) {
7448             dhd->bus->read_shm_fail = TRUE;
7449         }
7450 #else
7451 #ifdef DHD_FW_COREDUMP
7452         if (dhd->memdump_enabled) {
7453             /* collect core dump */
7454             dhd->memdump_type = DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR;
7455             dhd_bus_mem_dump(dhd);
7456         }
7457 #endif /* DHD_FW_COREDUMP */
7458 #endif /* SUPPORT_LINKDOWN_RECOVERY */
7459 
7460         *available_len = 0;
7461         dhd_schedule_reset(dhd);
7462 
7463         return NULL;
7464     }
7465 
7466     /* if space is available, calculate address to be read */
7467     read_addr = (char*)ring->dma_buf.va + (rd * ring->item_len);
7468 
7469     /* update read pointer */
7470     if ((ring->rd + items) >= ring->max_items)
7471         ring->rd = 0;
7472     else
7473         ring->rd += items;
7474 
7475     ASSERT(ring->rd < ring->max_items);
7476 
7477     /* convert items to bytes : available_len must be 32bits */
7478     *available_len = (uint32)(items * ring->item_len);
7479 
7480     OSL_CACHE_INV(read_addr, *available_len);
7481 
7482     /* return read address */
7483     return read_addr;
7484 } /* dhd_prot_get_read_addr */
7485 
7486 /**
7487  * dhd_prot_h2d_mbdata_send_ctrlmsg is a non-atomic function,
7488  * make sure the callers always hold appropriate locks.
7489  */
7490 int dhd_prot_h2d_mbdata_send_ctrlmsg(dhd_pub_t *dhd, uint32 mb_data)
7491 {
7492     h2d_mailbox_data_t *h2d_mb_data;
7493     uint16 alloced = 0;
7494     int num_post = 1;
7495     int i;
7496 
7497     DHD_INFO(("%s Sending H2D MB data Req data 0x%04x\n",
7498         __FUNCTION__, mb_data));
7499     if (!dhd->prot->h2dring_ctrl_subn.inited) {
7500         DHD_ERROR(("%s: Ctrl Submit Ring: not inited\n", __FUNCTION__));
7501         return BCME_ERROR;
7502     }
7503 #ifdef PCIE_INB_DW
7504     if ((INBAND_DW_ENAB(dhd->bus)) &&
7505         (dhdpcie_bus_get_pcie_inband_dw_state(dhd->bus) ==
7506             DW_DEVICE_DS_DEV_SLEEP)) {
7507         if (mb_data == H2D_HOST_CONS_INT) {
7508             /* One additional device_wake post needed */
7509             num_post = 2;
7510         }
7511     }
7512 #endif /* PCIE_INB_DW */
7513 
7514     for (i = 0; i < num_post; i ++) {
7515         /* Request for ring buffer space */
7516         h2d_mb_data = (h2d_mailbox_data_t *)dhd_prot_alloc_ring_space(dhd,
7517             &dhd->prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
7518             &alloced, FALSE);
7519 
7520         if (h2d_mb_data == NULL) {
7521             DHD_ERROR(("%s: FATAL: No space in control ring to send H2D Mb data\n",
7522                 __FUNCTION__));
7523             return BCME_NOMEM;
7524         }
7525 
7526         memset(h2d_mb_data, 0, sizeof(h2d_mailbox_data_t));
7527         /* Common msg buf hdr */
7528         h2d_mb_data->msg.msg_type = MSG_TYPE_H2D_MAILBOX_DATA;
7529         h2d_mb_data->msg.flags = dhd->prot->h2dring_ctrl_subn.current_phase;
7530 
7531         h2d_mb_data->msg.epoch =
7532             dhd->prot->h2dring_ctrl_subn.seqnum % H2D_EPOCH_MODULO;
7533         dhd->prot->h2dring_ctrl_subn.seqnum++;
7534 
7535 #ifdef PCIE_INB_DW
7536         /* post device_wake first */
7537         if ((num_post == 2) && (i == 0)) {
7538             h2d_mb_data->mail_box_data = htol32(H2DMB_DS_DEVICE_WAKE);
7539         } else
7540 #endif /* PCIE_INB_DW */
7541         {
7542             h2d_mb_data->mail_box_data = htol32(mb_data);
7543         }
7544 
7545         DHD_INFO(("%s Send H2D MB data Req data 0x%04x\n", __FUNCTION__, mb_data));
7546 
7547         /* upd wrt ptr and raise interrupt */
7548         /* caller of dhd_prot_h2d_mbdata_send_ctrlmsg already holding general lock */
7549         dhd_prot_ring_write_complete(dhd, &dhd->prot->h2dring_ctrl_subn, h2d_mb_data,
7550             DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
7551 #ifdef PCIE_INB_DW
7552         /* Add a delay if device_wake is posted */
7553         if ((num_post == 2) && (i == 0)) {
7554             OSL_DELAY(1000);
7555         }
7556 #endif /* PCIE_INB_DW */
7557     }
7558 
7559     return 0;
7560 }
7561 
7562 /** Creates a flow ring and informs dongle of this event */
7563 int
7564 dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
7565 {
7566     tx_flowring_create_request_t *flow_create_rqst;
7567     msgbuf_ring_t *flow_ring;
7568     dhd_prot_t *prot = dhd->prot;
7569     unsigned long flags;
7570     uint16 alloced = 0;
7571     msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
7572     uint16 max_flowrings = dhd->bus->max_tx_flowrings;
7573 
7574     /* Fetch a pre-initialized msgbuf_ring from the flowring pool */
7575     flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
7576     if (flow_ring == NULL) {
7577         DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n",
7578             __FUNCTION__, flow_ring_node->flowid));
7579         return BCME_NOMEM;
7580     }
7581 
7582 #ifdef PCIE_INB_DW
7583     if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
7584         return BCME_ERROR;
7585 #endif /* PCIE_INB_DW */
7586     DHD_GENERAL_LOCK(dhd, flags);
7587 
7588     /* Request for ctrl_ring buffer space */
7589     flow_create_rqst = (tx_flowring_create_request_t *)
7590         dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE);
7591 
7592     if (flow_create_rqst == NULL) {
7593         dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
7594         DHD_ERROR(("%s: Flow Create Req flowid %d - failure ring space\n",
7595             __FUNCTION__, flow_ring_node->flowid));
7596         DHD_GENERAL_UNLOCK(dhd, flags);
7597 #ifdef PCIE_INB_DW
7598         dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
7599 #endif
7600         return BCME_NOMEM;
7601     }
7602 
7603     flow_ring_node->prot_info = (void *)flow_ring;
7604 
7605     /* Common msg buf hdr */
7606     flow_create_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_CREATE;
7607     flow_create_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
7608     flow_create_rqst->msg.request_id = htol32(0); /* TBD */
7609     flow_create_rqst->msg.flags = ctrl_ring->current_phase;
7610 
7611     flow_create_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
7612     ctrl_ring->seqnum++;
7613 
7614     /* Update flow create message */
7615     flow_create_rqst->tid = flow_ring_node->flow_info.tid;
7616     flow_create_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
7617     memcpy(flow_create_rqst->sa, flow_ring_node->flow_info.sa, sizeof(flow_create_rqst->sa));
7618     memcpy(flow_create_rqst->da, flow_ring_node->flow_info.da, sizeof(flow_create_rqst->da));
7619     /* CAUTION: ring::base_addr already in Little Endian */
7620     flow_create_rqst->flow_ring_ptr.low_addr = flow_ring->base_addr.low_addr;
7621     flow_create_rqst->flow_ring_ptr.high_addr = flow_ring->base_addr.high_addr;
7622     flow_create_rqst->max_items = htol16(prot->h2d_max_txpost);
7623     flow_create_rqst->len_item = htol16(H2DRING_TXPOST_ITEMSIZE);
7624 
7625     /* Definition of the ifrm mask: bit0 = d11ac core, bit1 = d11ad core.
7626      * It is currently not used for priority, so it serves solely as the ifrm mask.
7627      */
7628     if (IFRM_ACTIVE(dhd))
7629         flow_create_rqst->priority_ifrmmask = (1 << IFRM_DEV_0);
7630 
7631     DHD_ERROR(("%s: Send Flow Create Req flow ID %d for peer " MACDBG
7632         " prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid,
7633         MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid,
7634         flow_ring_node->flow_info.ifindex));
7635 
7636     /* Update the flow_ring's WRITE index */
7637     if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
7638         dhd_prot_dma_indx_set(dhd, flow_ring->wr,
7639                               H2D_DMA_INDX_WR_UPD, flow_ring->idx);
7640     } else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(flow_ring->idx, max_flowrings)) {
7641         dhd_prot_dma_indx_set(dhd, flow_ring->wr,
7642             H2D_IFRM_INDX_WR_UPD, flow_ring->idx);
7643     } else {
7644         dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
7645             sizeof(uint16), RING_WR_UPD, flow_ring->idx);
7646     }
7647 
7648     /* update control subn ring's WR index and ring doorbell to dongle */
7649     dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_create_rqst, 1);
7650 
7651     DHD_GENERAL_UNLOCK(dhd, flags);
7652 #ifdef PCIE_INB_DW
7653     dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
7654 #endif
7655 
7656     return BCME_OK;
7657 } /* dhd_prot_flow_ring_create */
7658 
7659 /** called on receiving MSG_TYPE_FLOW_RING_CREATE_CMPLT message from dongle */
7660 static void
7661 dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg)
7662 {
7663     tx_flowring_create_response_t *flow_create_resp = (tx_flowring_create_response_t *)msg;
7664 
7665     DHD_ERROR(("%s: Flow Create Response status = %d Flow %d\n", __FUNCTION__,
7666         ltoh16(flow_create_resp->cmplt.status),
7667         ltoh16(flow_create_resp->cmplt.flow_ring_id)));
7668 
7669     dhd_bus_flow_ring_create_response(dhd->bus,
7670         ltoh16(flow_create_resp->cmplt.flow_ring_id),
7671         ltoh16(flow_create_resp->cmplt.status));
7672 }
7673 
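/**
 * A brief note (added description, grounded in the code below): called on receiving the
 * H2D ring create completion from the dongle for the info submission ring. Validates the
 * request id and status, marks the ring as created and inited, and posts info buffers via
 * dhd_prot_infobufpost().
 */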
7674 static void
7675 dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf)
7676 {
7677     h2d_ring_create_response_t *resp = (h2d_ring_create_response_t *)buf;
7678     DHD_INFO(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__,
7679         ltoh16(resp->cmplt.status),
7680         ltoh16(resp->cmplt.ring_id),
7681         ltoh32(resp->cmn_hdr.request_id)));
7682     if (ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_DBGRING_REQ_PKTID) {
7683         DHD_ERROR(("invalid request ID with h2d ring create complete\n"));
7684         return;
7685     }
7686     if (!dhd->prot->h2dring_info_subn->create_pending) {
7687         DHD_ERROR(("info ring create status for not pending submit ring\n"));
7688     }
7689 
7690     if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
7691         DHD_ERROR(("info ring create failed with status %d\n",
7692             ltoh16(resp->cmplt.status)));
7693         return;
7694     }
7695     dhd->prot->h2dring_info_subn->create_pending = FALSE;
7696     dhd->prot->h2dring_info_subn->inited = TRUE;
7697     dhd_prot_infobufpost(dhd);
7698 }
7699 
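/**
 * A brief note (added description, grounded in the code below): called on receiving the
 * D2H ring create completion from the dongle for the info completion ring. Validates the
 * request id and status, then marks the ring as created and inited.
 */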
7700 static void
7701 dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf)
7702 {
7703     d2h_ring_create_response_t *resp = (d2h_ring_create_response_t *)buf;
7704     DHD_INFO(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__,
7705         ltoh16(resp->cmplt.status),
7706         ltoh16(resp->cmplt.ring_id),
7707         ltoh32(resp->cmn_hdr.request_id)));
7708     if (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_DBGRING_REQ_PKTID) {
7709         DHD_ERROR(("invalid request ID with d2h ring create complete\n"));
7710         return;
7711     }
7712     if (!dhd->prot->d2hring_info_cpln->create_pending) {
7713         DHD_ERROR(("info ring create status for not pending cpl ring\n"));
7714         return;
7715     }
7716 
7717     if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
7718         DHD_ERROR(("info cpl ring create failed with status %d\n",
7719             ltoh16(resp->cmplt.status)));
7720         return;
7721     }
7722     dhd->prot->d2hring_info_cpln->create_pending = FALSE;
7723     dhd->prot->d2hring_info_cpln->inited = TRUE;
7724 }
7725 
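/**
 * A brief note (added description, grounded in the code below): called on receiving a D2H
 * mailbox data message; hands the mailbox payload to the bus layer via
 * dhd_bus_handle_mb_data().
 */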
7726 static void
7727 dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf)
7728 {
7729     d2h_mailbox_data_t *d2h_data;
7730 
7731     d2h_data = (d2h_mailbox_data_t *)buf;
7732     DHD_INFO(("%s dhd_prot_process_d2h_mb_data, 0x%04x\n", __FUNCTION__,
7733         d2h_data->d2h_mailbox_data));
7734     dhd_bus_handle_mb_data(dhd->bus, d2h_data->d2h_mailbox_data);
7735 }
7736 
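/**
 * A brief note (added description, grounded in the code below): called on receiving a host
 * timestamp completion from the dongle. With DHD_TIMESYNC compiled in, validates that a
 * request is pending and that the request id matches, then forwards the completion to
 * dhd_timesync_handle_host_ts_complete(); otherwise only logs an error.
 */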
7737 static void
7738 dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf)
7739 {
7740 #ifdef DHD_TIMESYNC
7741     host_timestamp_msg_cpl_t  *host_ts_cpl;
7742     uint32 pktid;
7743     dhd_prot_t *prot = dhd->prot;
7744 
7745     host_ts_cpl = (host_timestamp_msg_cpl_t *)buf;
7746     DHD_INFO(("%s host TS cpl: status %d, req_ID: 0x%04x, xt_id %d \n", __FUNCTION__,
7747         host_ts_cpl->cmplt.status, host_ts_cpl->msg.request_id, host_ts_cpl->xt_id));
7748 
7749     pktid = ltoh32(host_ts_cpl->msg.request_id);
7750     if (prot->hostts_req_buf_inuse == FALSE) {
7751         DHD_ERROR(("No Pending Host TS req, but completion\n"));
7752         return;
7753     }
7754     prot->hostts_req_buf_inuse = FALSE;
7755     if (pktid != DHD_H2D_HOSTTS_REQ_PKTID) {
7756         DHD_ERROR(("Host TS req CPL, but req ID different 0x%04x, exp 0x%04x\n",
7757             pktid, DHD_H2D_HOSTTS_REQ_PKTID));
7758         return;
7759     }
7760     dhd_timesync_handle_host_ts_complete(dhd->ts, host_ts_cpl->xt_id,
7761         host_ts_cpl->cmplt.status);
7762 #else /* DHD_TIMESYNC */
7763     DHD_ERROR(("Timesunc feature not compiled in but GOT HOST_TS_COMPLETE\n"));
7764 #endif /* DHD_TIMESYNC */
7765 }
7766 
7767 /** called on e.g. flow ring delete */
7768 void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info)
7769 {
7770     msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
7771     dhd_prot_ring_detach(dhd, flow_ring);
7772     DHD_INFO(("%s Cleaning up Flow \n", __FUNCTION__));
7773 }
7774 
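/**
 * A brief note (added description, grounded in the code below): dumps one ring's RD/WR
 * indices (read back from shared memory) and its DMA buffer geometry into strbuf; a
 * default format string is used when fmt is NULL.
 */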
7775 void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info,
7776     struct bcmstrbuf *strbuf, const char * fmt)
7777 {
7778     const char *default_fmt = "RD %d WR %d BASE(VA) %p BASE(PA) %x:%x"
7779         " WORK ITEM SIZE %d MAX WORK ITEMS %d SIZE %d\n";
7780     msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
7781     uint16 rd, wr;
7782     uint32 dma_buf_len = flow_ring->max_items * flow_ring->item_len;
7783 
7784     if (fmt == NULL) {
7785         fmt = default_fmt;
7786     }
7787     dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
7788     dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
7789     bcm_bprintf(strbuf, fmt, rd, wr, flow_ring->dma_buf.va,
7790         ltoh32(flow_ring->base_addr.high_addr),
7791         ltoh32(flow_ring->base_addr.low_addr),
7792         flow_ring->item_len, flow_ring->max_items, dma_buf_len);
7793 }
7794 
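/**
 * A brief note (added description, grounded in the code below): dumps protocol-layer state
 * into strbuf, including IPC revisions, buffer post counters, the common H2D/D2H rings
 * (and the info rings when present), the active tx count and available packet ids.
 */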
7795 void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
7796 {
7797     dhd_prot_t *prot = dhd->prot;
7798     bcm_bprintf(strbuf, "IPCrevs: Dev %d, \t Host %d, \tactive %d\n",
7799         dhd->prot->device_ipc_version,
7800         dhd->prot->host_ipc_version,
7801         dhd->prot->active_ipc_version);
7802 
7803     bcm_bprintf(strbuf, "max Host TS bufs to post: %d, \t posted %d \n",
7804         dhd->prot->max_tsbufpost, dhd->prot->cur_ts_bufs_posted);
7805     bcm_bprintf(strbuf, "max INFO bufs to post: %d, \t posted %d \n",
7806         dhd->prot->max_infobufpost, dhd->prot->infobufpost);
7807     bcm_bprintf(strbuf, "max event bufs to post: %d, \t posted %d \n",
7808         dhd->prot->max_eventbufpost, dhd->prot->cur_event_bufs_posted);
7809     bcm_bprintf(strbuf, "max ioctlresp bufs to post: %d, \t posted %d \n",
7810         dhd->prot->max_ioctlrespbufpost, dhd->prot->cur_ioctlresp_bufs_posted);
7811     bcm_bprintf(strbuf, "max RX bufs to post: %d, \t posted %d \n",
7812         dhd->prot->max_rxbufpost, dhd->prot->rxbufpost);
7813 
7814     bcm_bprintf(strbuf,
7815         "%14s %5s %5s %17s %17s %14s %14s %10s\n",
7816         "Type", "RD", "WR", "BASE(VA)", "BASE(PA)",
7817         "WORK_ITEM_SIZE", "MAX_WORK_ITEMS", "TOTAL_SIZE");
7818     bcm_bprintf(strbuf, "%14s", "H2DCtrlPost");
7819     dhd_prot_print_flow_ring(dhd, &prot->h2dring_ctrl_subn, strbuf,
7820         " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
7821     bcm_bprintf(strbuf, "%14s", "D2HCtrlCpl");
7822     dhd_prot_print_flow_ring(dhd, &prot->d2hring_ctrl_cpln, strbuf,
7823         " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
7824     bcm_bprintf(strbuf, "%14s", "H2DRxPost", prot->rxbufpost);
7825     dhd_prot_print_flow_ring(dhd, &prot->h2dring_rxp_subn, strbuf,
7826         " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
7827     bcm_bprintf(strbuf, "%14s", "D2HRxCpl");
7828     dhd_prot_print_flow_ring(dhd, &prot->d2hring_rx_cpln, strbuf,
7829         " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
7830     bcm_bprintf(strbuf, "%14s", "D2HTxCpl");
7831     dhd_prot_print_flow_ring(dhd, &prot->d2hring_tx_cpln, strbuf,
7832         " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
7833     if (dhd->prot->h2dring_info_subn != NULL && dhd->prot->d2hring_info_cpln != NULL) {
7834         bcm_bprintf(strbuf, "%14s", "H2DRingInfoSub");
7835         dhd_prot_print_flow_ring(dhd, prot->h2dring_info_subn, strbuf,
7836             " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
7837         bcm_bprintf(strbuf, "%14s", "D2HRingInfoCpl");
7838         dhd_prot_print_flow_ring(dhd, prot->d2hring_info_cpln, strbuf,
7839             " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
7840     }
7841 
7842     bcm_bprintf(strbuf, "active_tx_count %d pktidmap_avail(ctrl/rx/tx) %d %d %d\n",
7843         dhd->prot->active_tx_count,
7844         DHD_PKTID_AVAIL(dhd->prot->pktid_ctrl_map),
7845         DHD_PKTID_AVAIL(dhd->prot->pktid_rx_map),
7846         DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map));
7847 }
7848 
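/**
 * A brief note (added description, grounded in the code below): sends a
 * MSG_TYPE_FLOW_RING_DELETE request to the dongle for the given flow ring; the response is
 * handled in dhd_prot_flow_ring_delete_response_process().
 */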
7849 int
7850 dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
7851 {
7852     tx_flowring_delete_request_t *flow_delete_rqst;
7853     dhd_prot_t *prot = dhd->prot;
7854     unsigned long flags;
7855     uint16 alloced = 0;
7856     msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
7857 
7858 #ifdef PCIE_INB_DW
7859     if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
7860         return BCME_ERROR;
7861 #endif /* PCIE_INB_DW */
7862 
7863     DHD_GENERAL_LOCK(dhd, flags);
7864 
7865     /* Request for ring buffer space */
7866     flow_delete_rqst = (tx_flowring_delete_request_t *)
7867         dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
7868 
7869     if (flow_delete_rqst == NULL) {
7870         DHD_GENERAL_UNLOCK(dhd, flags);
7871         DHD_ERROR(("%s: Flow Delete Req - failure ring space\n", __FUNCTION__));
7872 #ifdef PCIE_INB_DW
7873         dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
7874 #endif
7875         return BCME_NOMEM;
7876     }
7877 
7878     /* Common msg buf hdr */
7879     flow_delete_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_DELETE;
7880     flow_delete_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
7881     flow_delete_rqst->msg.request_id = htol32(0); /* TBD */
7882     flow_delete_rqst->msg.flags = ring->current_phase;
7883 
7884     flow_delete_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
7885     ring->seqnum++;
7886 
7887     /* Update Delete info */
7888     flow_delete_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
7889     flow_delete_rqst->reason = htol16(BCME_OK);
7890 
7891     DHD_ERROR(("%s: Send Flow Delete Req RING ID %d for peer " MACDBG
7892         " prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid,
7893         MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid,
7894         flow_ring_node->flow_info.ifindex));
7895 
7896     /* update ring's WR index and ring doorbell to dongle */
7897     dhd_prot_ring_write_complete(dhd, ring, flow_delete_rqst, 1);
7898     DHD_GENERAL_UNLOCK(dhd, flags);
7899 #ifdef PCIE_INB_DW
7900     dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
7901 #endif
7902 
7903     return BCME_OK;
7904 }
7905 
7906 static void
7907 dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg)
7908 {
7909     tx_flowring_delete_response_t *flow_delete_resp = (tx_flowring_delete_response_t *)msg;
7910 
7911     DHD_ERROR(("%s: Flow Delete Response status = %d Flow %d\n", __FUNCTION__,
7912         flow_delete_resp->cmplt.status, flow_delete_resp->cmplt.flow_ring_id));
7913 
7914     dhd_bus_flow_ring_delete_response(dhd->bus, flow_delete_resp->cmplt.flow_ring_id,
7915         flow_delete_resp->cmplt.status);
7916 }
7917 
7918 static void
7919 dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg)
7920 {
7921 #ifdef IDLE_TX_FLOW_MGMT
7922     tx_idle_flowring_resume_response_t    *flow_resume_resp =
7923         (tx_idle_flowring_resume_response_t *)msg;
7924 
7925     DHD_ERROR(("%s Flow resume Response status = %d Flow %d\n", __FUNCTION__,
7926         flow_resume_resp->cmplt.status, flow_resume_resp->cmplt.flow_ring_id));
7927 
7928     dhd_bus_flow_ring_resume_response(dhd->bus, flow_resume_resp->cmplt.flow_ring_id,
7929         flow_resume_resp->cmplt.status);
7930 #endif /* IDLE_TX_FLOW_MGMT */
7931 }
7932 
7933 static void
7934 dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg)
7935 {
7936 #ifdef IDLE_TX_FLOW_MGMT
7937     int16 status;
7938     tx_idle_flowring_suspend_response_t    *flow_suspend_resp =
7939         (tx_idle_flowring_suspend_response_t *)msg;
7940     status = flow_suspend_resp->cmplt.status;
7941 
7942     DHD_ERROR(("%s Flow id %d suspend Response status = %d\n",
7943         __FUNCTION__, flow_suspend_resp->cmplt.flow_ring_id,
7944         status));
7945 
7946     if (status != BCME_OK) {
7947         DHD_ERROR(("%s Error in Suspending Flow rings!!"
7948             "Dongle will still be polling idle rings!!Status = %d \n",
7949             __FUNCTION__, status));
7950     }
7951 #endif /* IDLE_TX_FLOW_MGMT */
7952 }
7953 
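/**
 * A brief note (added description, grounded in the code below): sends a
 * MSG_TYPE_FLOW_RING_FLUSH request to the dongle for the given flow ring; the response is
 * handled in dhd_prot_flow_ring_flush_response_process().
 */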
7954 int
7955 dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
7956 {
7957     tx_flowring_flush_request_t *flow_flush_rqst;
7958     dhd_prot_t *prot = dhd->prot;
7959     unsigned long flags;
7960     uint16 alloced = 0;
7961     msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
7962 
7963 #ifdef PCIE_INB_DW
7964     if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
7965         return BCME_ERROR;
7966 #endif /* PCIE_INB_DW */
7967 
7968     DHD_GENERAL_LOCK(dhd, flags);
7969 
7970     /* Request for ring buffer space */
7971     flow_flush_rqst = (tx_flowring_flush_request_t *)
7972         dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
7973     if (flow_flush_rqst == NULL) {
7974         DHD_GENERAL_UNLOCK(dhd, flags);
7975         DHD_ERROR(("%s: Flow Flush Req - failure ring space\n", __FUNCTION__));
7976 #ifdef PCIE_INB_DW
7977         dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
7978 #endif
7979         return BCME_NOMEM;
7980     }
7981 
7982     /* Common msg buf hdr */
7983     flow_flush_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_FLUSH;
7984     flow_flush_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
7985     flow_flush_rqst->msg.request_id = htol32(0); /* TBD */
7986     flow_flush_rqst->msg.flags = ring->current_phase;
7987     flow_flush_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
7988     ring->seqnum++;
7989 
7990     flow_flush_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
7991     flow_flush_rqst->reason = htol16(BCME_OK);
7992 
7993     DHD_INFO(("%s: Send Flow Flush Req\n", __FUNCTION__));
7994 
7995     /* update ring's WR index and ring doorbell to dongle */
7996     dhd_prot_ring_write_complete(dhd, ring, flow_flush_rqst, 1);
7997     DHD_GENERAL_UNLOCK(dhd, flags);
7998 #ifdef PCIE_INB_DW
7999     dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
8000 #endif
8001 
8002     return BCME_OK;
8003 } /* dhd_prot_flow_ring_flush */
8004 
8005 static void
8006 dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg)
8007 {
8008     tx_flowring_flush_response_t *flow_flush_resp = (tx_flowring_flush_response_t *)msg;
8009 
8010     DHD_INFO(("%s: Flow Flush Response status = %d\n", __FUNCTION__,
8011         flow_flush_resp->cmplt.status));
8012 
8013     dhd_bus_flow_ring_flush_response(dhd->bus, flow_flush_resp->cmplt.flow_ring_id,
8014         flow_flush_resp->cmplt.status);
8015 }
8016 
8017 /**
8018  * Request dongle to configure soft doorbells for D2H rings. Host populated soft
8019  * doorbell information is transferred to dongle via the d2h ring config control
8020  * message.
8021  */
8022 void
8023 dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd)
8024 {
8025 #if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
8026     uint16 ring_idx;
8027     uint8 *msg_next;
8028     void *msg_start;
8029     uint16 alloced = 0;
8030     unsigned long flags;
8031     dhd_prot_t *prot = dhd->prot;
8032     ring_config_req_t *ring_config_req;
8033     bcmpcie_soft_doorbell_t *soft_doorbell;
8034     msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
8035     const uint16 d2h_rings = BCMPCIE_D2H_COMMON_MSGRINGS;
8036 
8037 #ifdef PCIE_INB_DW
8038     if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
8039         return; /* void function: cannot propagate BCME_ERROR */
8040 #endif /* PCIE_INB_DW */
8041     /* Claim space for d2h_ring number of d2h_ring_config_req_t messages */
8042     DHD_GENERAL_LOCK(dhd, flags);
8043 
8044     msg_start = dhd_prot_alloc_ring_space(dhd, ctrl_ring, d2h_rings, &alloced, TRUE);
8045 
8046     if (msg_start == NULL) {
8047         DHD_ERROR(("%s Msgbuf no space for %d D2H ring config soft doorbells\n",
8048             __FUNCTION__, d2h_rings));
8049         DHD_GENERAL_UNLOCK(dhd, flags);
8050 #ifdef PCIE_INB_DW
8051         dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
8052 #endif
8053         return;
8054     }
8055 
8056     msg_next = (uint8*)msg_start;
8057 
8058     for (ring_idx = 0; ring_idx < d2h_rings; ring_idx++) {
8059         /* position the ring_config_req into the ctrl subm ring */
8060         ring_config_req = (ring_config_req_t *)msg_next;
8061 
8062         /* Common msg header */
8063         ring_config_req->msg.msg_type = MSG_TYPE_D2H_RING_CONFIG;
8064         ring_config_req->msg.if_id = 0;
8065         ring_config_req->msg.flags = 0;
8066 
8067         ring_config_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
8068         ctrl_ring->seqnum++;
8069 
8070         ring_config_req->msg.request_id = htol32(DHD_FAKE_PKTID); /* unused */
8071 
8072         /* Ring Config subtype and d2h ring_id */
8073         ring_config_req->subtype = htol16(D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL);
8074         ring_config_req->ring_id = htol16(DHD_D2H_RINGID(ring_idx));
8075 
8076         /* Host soft doorbell configuration */
8077         soft_doorbell = &prot->soft_doorbell[ring_idx];
8078 
8079         ring_config_req->soft_doorbell.value = htol32(soft_doorbell->value);
8080         ring_config_req->soft_doorbell.haddr.high =
8081             htol32(soft_doorbell->haddr.high);
8082         ring_config_req->soft_doorbell.haddr.low =
8083             htol32(soft_doorbell->haddr.low);
8084         ring_config_req->soft_doorbell.items = htol16(soft_doorbell->items);
8085         ring_config_req->soft_doorbell.msecs = htol16(soft_doorbell->msecs);
8086 
8087         DHD_INFO(("%s: Soft doorbell haddr 0x%08x 0x%08x value 0x%08x\n",
8088             __FUNCTION__, ring_config_req->soft_doorbell.haddr.high,
8089             ring_config_req->soft_doorbell.haddr.low,
8090             ring_config_req->soft_doorbell.value));
8091 
8092         msg_next = msg_next + ctrl_ring->item_len;
8093     }
8094 
8095     /* update control subn ring's WR index and ring doorbell to dongle */
8096     dhd_prot_ring_write_complete(dhd, ctrl_ring, msg_start, d2h_rings);
8097     DHD_GENERAL_UNLOCK(dhd, flags);
8098 #ifdef PCIE_INB_DW
8099     dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
8100 #endif
8101 #endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
8102 }
8103 
8104 static void
8105 dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg)
8106 {
8107     DHD_INFO(("%s: Ring Config Response - status %d ringid %d\n",
8108         __FUNCTION__, ltoh16(((ring_config_resp_t *)msg)->compl_hdr.status),
8109         ltoh16(((ring_config_resp_t *)msg)->compl_hdr.flow_ring_id)));
8110 }
8111 
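/**
 * A brief note (added description, grounded in the code below): dumps the PCIe core DMA
 * engine registers (H2D and D2H transmit/receive control, pointer, address and status
 * registers), skipping the dump entirely when the PCIe link is down.
 */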
8112 int
8113 dhd_prot_debug_dma_info_print(dhd_pub_t *dhd)
8114 {
8115     if (dhd->bus->is_linkdown) {
8116         DHD_ERROR(("\n ------- SKIP DUMPING DMA Registers "
8117             "due to PCIe link down ------- \r\n"));
8118         return 0;
8119     }
8120 
8121     DHD_ERROR(("\n ------- DUMPING DMA Registers ------- \r\n"));
8122 
8123     //HostToDev
8124     DHD_ERROR(("HostToDev TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
8125         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x200, 0, 0),
8126         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x204, 0, 0)));
8127     DHD_ERROR(("            : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
8128         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x208, 0, 0),
8129         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x20C, 0, 0)));
8130     DHD_ERROR(("            : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
8131         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x210, 0, 0),
8132         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x214, 0, 0)));
8133 
8134     DHD_ERROR(("HostToDev RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
8135         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x220, 0, 0),
8136         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x224, 0, 0)));
8137     DHD_ERROR(("            : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
8138         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x228, 0, 0),
8139         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x22C, 0, 0)));
8140     DHD_ERROR(("            : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
8141         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x230, 0, 0),
8142         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x234, 0, 0)));
8143 
8144     //DevToHost
8145     DHD_ERROR(("DevToHost TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
8146         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x240, 0, 0),
8147         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x244, 0, 0)));
8148     DHD_ERROR(("            : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
8149         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x248, 0, 0),
8150         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x24C, 0, 0)));
8151     DHD_ERROR(("            : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
8152         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x250, 0, 0),
8153         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x254, 0, 0)));
8154 
8155     DHD_ERROR(("DevToHost RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
8156         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x260, 0, 0),
8157         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x264, 0, 0)));
8158     DHD_ERROR(("            : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
8159         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x268, 0, 0),
8160         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x26C, 0, 0)));
8161     DHD_ERROR(("            : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
8162         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x270, 0, 0),
8163         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x274, 0, 0)));
8164 
8165     return 0;
8166 }
8167 
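/**
 * A brief note (added description, grounded in the code below): dumps driver and firmware
 * versions, protocol state, the control/info ring RD/WR pointers (both from host memory
 * and from shared memory), interrupt status/masks, selected PCIe config registers, and the
 * DMA registers; may also trigger an SSSR dump when enabled.
 */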
8168 int
8169 dhd_prot_debug_info_print(dhd_pub_t *dhd)
8170 {
8171     dhd_prot_t *prot = dhd->prot;
8172     msgbuf_ring_t *ring;
8173     uint16 rd, wr;
8174     uint32 intstatus = 0;
8175     uint32 intmask = 0;
8176     uint32 mbintstatus = 0;
8177     uint32 d2h_mb_data = 0;
8178     uint32 dma_buf_len;
8179 
8180     DHD_ERROR(("\n ------- DUMPING VERSION INFORMATION ------- \r\n"));
8181     DHD_ERROR(("DHD: %s\n", dhd_version));
8182     DHD_ERROR(("Firmware: %s\n", fw_version));
8183 
8184     DHD_ERROR(("\n ------- DUMPING PROTOCOL INFORMATION ------- \r\n"));
8185     DHD_ERROR(("ICPrevs: Dev %d, Host %d, active %d\n",
8186         prot->device_ipc_version,
8187         prot->host_ipc_version,
8188         prot->active_ipc_version));
8189     DHD_ERROR(("max Host TS bufs to post: %d, posted %d\n",
8190         prot->max_tsbufpost, prot->cur_ts_bufs_posted));
8191     DHD_ERROR(("max INFO bufs to post: %d, posted %d\n",
8192         prot->max_infobufpost, prot->infobufpost));
8193     DHD_ERROR(("max event bufs to post: %d, posted %d\n",
8194         prot->max_eventbufpost, prot->cur_event_bufs_posted));
8195     DHD_ERROR(("max ioctlresp bufs to post: %d, posted %d\n",
8196         prot->max_ioctlrespbufpost, prot->cur_ioctlresp_bufs_posted));
8197     DHD_ERROR(("max RX bufs to post: %d, posted %d\n",
8198         prot->max_rxbufpost, prot->rxbufpost));
8199     DHD_ERROR(("h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
8200         h2d_max_txpost, prot->h2d_max_txpost));
8201 
8202     DHD_ERROR(("\n ------- DUMPING IOCTL RING RD WR Pointers ------- \r\n"));
8203 
8204     ring = &prot->h2dring_ctrl_subn;
8205     dma_buf_len = ring->max_items * ring->item_len;
8206     DHD_ERROR(("CtrlPost: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
8207         ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
8208         ltoh32(ring->base_addr.low_addr), dma_buf_len));
8209     DHD_ERROR(("CtrlPost: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
8210     dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
8211     dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
8212     DHD_ERROR(("CtrlPost: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
8213     DHD_ERROR(("CtrlPost: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
8214 
8215     ring = &prot->d2hring_ctrl_cpln;
8216     dma_buf_len = ring->max_items * ring->item_len;
8217     DHD_ERROR(("CtrlCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
8218         ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
8219         ltoh32(ring->base_addr.low_addr), dma_buf_len));
8220     DHD_ERROR(("CtrlCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
8221     dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
8222     dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
8223     DHD_ERROR(("CtrlCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
8224     DHD_ERROR(("CtrlCpl: Expected seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
8225 
8226     ring = prot->h2dring_info_subn;
8227     if (ring) {
8228         dma_buf_len = ring->max_items * ring->item_len;
8229         DHD_ERROR(("InfoSub: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
8230             ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
8231             ltoh32(ring->base_addr.low_addr), dma_buf_len));
8232         DHD_ERROR(("InfoSub: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
8233         dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
8234         dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
8235         DHD_ERROR(("InfoSub: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
8236         DHD_ERROR(("InfoSub: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
8237     }
8238     ring = prot->d2hring_info_cpln;
8239     if (ring) {
8240         dma_buf_len = ring->max_items * ring->item_len;
8241         DHD_ERROR(("InfoCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
8242             ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
8243             ltoh32(ring->base_addr.low_addr), dma_buf_len));
8244         DHD_ERROR(("InfoCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
8245         dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
8246         dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
8247         DHD_ERROR(("InfoCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
8248         DHD_ERROR(("InfoCpl: Expected seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
8249     }
8250 
8251     DHD_ERROR(("%s: cur_ioctlresp_bufs_posted %d cur_event_bufs_posted %d\n",
8252         __FUNCTION__, prot->cur_ioctlresp_bufs_posted, prot->cur_event_bufs_posted));
8253 
8254     if (!dhd->bus->is_linkdown && dhd->bus->intstatus != (uint32)-1) {
8255         DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n"));
8256         intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
8257             PCIMailBoxInt, 0, 0);
8258         intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
8259             PCIMailBoxMask, 0, 0);
8260         mbintstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
8261             PCID2H_MailBox, 0, 0);
8262         dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
8263 
8264         DHD_ERROR(("intstatus=0x%x intmask=0x%x mbintstatus=0x%x\n",
8265             intstatus, intmask, mbintstatus));
8266         DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data,
8267             dhd->bus->def_intmask));
8268 
8269         DHD_ERROR(("host pcie_irq enabled = %d\n", dhdpcie_irq_enabled(dhd->bus)));
8270 
8271         DHD_ERROR(("\n ------- DUMPING PCIE Registers ------- \r\n"));
8272         /* hwnbu-twiki.sj.broadcom.com/bin/view/Mwgroup/CurrentPcieGen2ProgramGuide */
8273         DHD_ERROR(("Status Command(0x%x)=0x%x, BaseAddress0(0x%x)=0x%x\n",
8274             PCIECFGREG_STATUS_CMD,
8275             dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_STATUS_CMD, sizeof(uint32)),
8276             PCIECFGREG_BASEADDR0,
8277             dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR0, sizeof(uint32))));
8278         DHD_ERROR(("LinkCtl(0x%x)=0x%x DeviceStatusControl2(0x%x)=0x%x "
8279             "L1SSControl(0x%x)=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL,
8280             dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_LINK_STATUS_CTRL,
8281             sizeof(uint32)), PCIECFGGEN_DEV_STATUS_CTRL2,
8282             dhd_pcie_config_read(dhd->bus->osh, PCIECFGGEN_DEV_STATUS_CTRL2,
8283             sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL1,
8284             dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL1,
8285             sizeof(uint32))));
8286 
8287         /* hwnbu-twiki.sj.broadcom.com/twiki/pub/Mwgroup/
8288          * CurrentPcieGen2ProgramGuide/pcie_ep.htm
8289          */
8290         DHD_ERROR(("ClkReq0(0x%x)=0x%x ClkReq1(0x%x)=0x%x ClkReq2(0x%x)=0x%x "
8291             "ClkReq3(0x%x)=0x%x\n", PCIECFGREG_PHY_DBG_CLKREQ0,
8292             dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ0),
8293             PCIECFGREG_PHY_DBG_CLKREQ1,
8294             dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ1),
8295             PCIECFGREG_PHY_DBG_CLKREQ2,
8296             dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ2),
8297             PCIECFGREG_PHY_DBG_CLKREQ3,
8298             dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ3)));
8299 
8300 #if defined(PCIE_RC_VENDOR_ID) && defined(PCIE_RC_DEVICE_ID)
8301         DHD_ERROR(("Pcie RC Error Status Val=0x%x\n",
8302             dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
8303             PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0)));
8304 
8305         DHD_ERROR(("RootPort PCIe linkcap=0x%08x\n",
8306             dhd_debug_get_rc_linkcap(dhd->bus)));
8307 #endif
8308 
8309         DHD_ERROR(("\n ------- DUMPING INTR enable/disable counters  ------- \r\n"));
8310         DHD_ERROR(("resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n"
8311             "isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n"
8312             "dpc_return_busdown_count=%lu\n",
8313             dhd->bus->resume_intr_enable_count, dhd->bus->dpc_intr_enable_count,
8314             dhd->bus->isr_intr_disable_count, dhd->bus->suspend_intr_disable_count,
8315             dhd->bus->dpc_return_busdown_count));
8316     }
8317     dhd_prot_debug_dma_info_print(dhd);
8318 #ifdef DHD_FW_COREDUMP
8319     if (dhd->memdump_enabled) {
8320 #ifdef DHD_SSSR_DUMP
8321         if (dhd->sssr_inited) {
8322             dhdpcie_sssr_dump(dhd);
8323         }
8324 #endif /* DHD_SSSR_DUMP */
8325     }
8326 #endif /* DHD_FW_COREDUMP */
8327     return 0;
8328 }
8329 
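/**
 * A brief note (added description, grounded in the code below): dumps the contents of the
 * host-resident DMA index arrays for the common rings and flow rings into the given
 * strbuf, invalidating the cache over each buffer before reading it.
 */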
8330 int
8331 dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
8332 {
8333     uint32 *ptr;
8334     uint32 value;
8335     uint32 i;
8336     uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus);
8337 
8338     OSL_CACHE_INV((void *)dhd->prot->d2h_dma_indx_wr_buf.va,
8339         dhd->prot->d2h_dma_indx_wr_buf.len);
8340 
8341     ptr = (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va);
8342 
8343     bcm_bprintf(b, "\n max_tx_queues %d\n", max_h2d_queues);
8344 
8345     bcm_bprintf(b, "\nRPTR block H2D common rings, 0x%04x\n", ptr);
8346     value = ltoh32(*ptr);
8347     bcm_bprintf(b, "\tH2D CTRL: value 0x%04x\n", value);
8348     ptr++;
8349     value = ltoh32(*ptr);
8350     bcm_bprintf(b, "\tH2D RXPOST: value 0x%04x\n", value);
8351 
8352     ptr++;
8353     bcm_bprintf(b, "RPTR block Flow rings , 0x%04x\n", ptr);
8354     for (i = BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) {
8355         value = ltoh32(*ptr);
8356         bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value);
8357         ptr++;
8358     }
8359 
8360     OSL_CACHE_INV((void *)dhd->prot->h2d_dma_indx_rd_buf.va,
8361         dhd->prot->h2d_dma_indx_rd_buf.len);
8362 
8363     ptr = (uint32 *)(dhd->prot->h2d_dma_indx_rd_buf.va);
8364 
8365     bcm_bprintf(b, "\nWPTR block D2H common rings, 0x%04x\n", ptr);
8366     value = ltoh32(*ptr);
8367     bcm_bprintf(b, "\tD2H CTRLCPLT: value 0x%04x\n", value);
8368     ptr++;
8369     value = ltoh32(*ptr);
8370     bcm_bprintf(b, "\tD2H TXCPLT: value 0x%04x\n", value);
8371     ptr++;
8372     value = ltoh32(*ptr);
8373     bcm_bprintf(b, "\tD2H RXCPLT: value 0x%04x\n", value);
8374 
8375     return 0;
8376 }
8377 
8378 uint32
8379 dhd_prot_metadata_dbg_set(dhd_pub_t *dhd, bool val)
8380 {
8381     dhd_prot_t *prot = dhd->prot;
8382 #if DHD_DBG_SHOW_METADATA
8383     prot->metadata_dbg = val;
8384 #endif
8385     return (uint32)prot->metadata_dbg;
8386 }
8387 
8388 uint32
8389 dhd_prot_metadata_dbg_get(dhd_pub_t *dhd)
8390 {
8391     dhd_prot_t *prot = dhd->prot;
8392     return (uint32)prot->metadata_dbg;
8393 }
8394 
8395 uint32
8396 dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx)
8397 {
8398     dhd_prot_t *prot = dhd->prot;
8399     if (rx)
8400         prot->rx_metadata_offset = (uint16)val;
8401     else
8402         prot->tx_metadata_offset = (uint16)val;
8403     return dhd_prot_metadatalen_get(dhd, rx);
8404 }
8405 
8406 uint32
8407 dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx)
8408 {
8409     dhd_prot_t *prot = dhd->prot;
8410     if (rx)
8411         return prot->rx_metadata_offset;
8412     else
8413         return prot->tx_metadata_offset;
8414 }
8415 
8416 /** optimization to write "n" tx items at a time to ring */
8417 uint32
8418 dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val)
8419 {
8420     dhd_prot_t *prot = dhd->prot;
8421     if (set)
8422         prot->txp_threshold = (uint16)val;
8423     val = prot->txp_threshold;
8424     return val;
8425 }
8426 
8427 #ifdef DHD_RX_CHAINING
8428 
8429 static INLINE void BCMFASTPATH
8430 dhd_rxchain_reset(rxchain_info_t *rxchain)
8431 {
8432     rxchain->pkt_count = 0;
8433 }
8434 
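/**
 * Receive-side packet chaining: dhd_rxchain_frame() accumulates received
 * packets that belong to the same flow (same interface, source/destination
 * MAC and IP precedence) into a PKTSETCLINK()-linked chain so they can be
 * handed to the network stack (or a CTF bridge) in a single call. The chain
 * is flushed via dhd_rxchain_commit() when a packet from a different flow
 * arrives, when a packet is not chainable (multicast or non-IP), or when
 * DHD_PKT_CTF_MAX_CHAIN_LEN packets have been collected.
 */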
8435 static void BCMFASTPATH
8436 dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx)
8437 {
8438     uint8 *eh;
8439     uint8 prio;
8440     dhd_prot_t *prot = dhd->prot;
8441     rxchain_info_t *rxchain = &prot->rxchain;
8442 
8443     ASSERT(!PKTISCHAINED(pkt));
8444     ASSERT(PKTCLINK(pkt) == NULL);
8445     ASSERT(PKTCGETATTR(pkt) == 0);
8446 
8447     eh = PKTDATA(dhd->osh, pkt);
8448     prio = IP_TOS46(eh + ETHER_HDR_LEN) >> IPV4_TOS_PREC_SHIFT;
8449 
8450     if (rxchain->pkt_count && !(PKT_CTF_CHAINABLE(dhd, ifidx, eh, prio, rxchain->h_sa,
8451         rxchain->h_da, rxchain->h_prio))) {
8452         /* Different flow - First release the existing chain */
8453         dhd_rxchain_commit(dhd);
8454     }
8455 
8456     /* For routers with HNDCTF, link the packets using PKTSETCLINK so that
8457      * the chain can be handed off to the CTF bridge as-is. */
8458     if (rxchain->pkt_count == 0) {
8459         /* First packet in chain */
8460         rxchain->pkthead = rxchain->pkttail = pkt;
8461 
8462         /* Keep a copy of ptr to ether_da, ether_sa and prio */
8463         rxchain->h_da = ((struct ether_header *)eh)->ether_dhost;
8464         rxchain->h_sa = ((struct ether_header *)eh)->ether_shost;
8465         rxchain->h_prio = prio;
8466         rxchain->ifidx = ifidx;
8467         rxchain->pkt_count++;
8468     } else {
8469         /* Same flow - keep chaining */
8470         PKTSETCLINK(rxchain->pkttail, pkt);
8471         rxchain->pkttail = pkt;
8472         rxchain->pkt_count++;
8473     }
8474 
8475     if ((dhd_rx_pkt_chainable(dhd, ifidx)) && (!ETHER_ISMULTI(rxchain->h_da)) &&
8476         ((((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IP)) ||
8477         (((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IPV6)))) {
8478         PKTSETCHAINED(dhd->osh, pkt);
8479         PKTCINCRCNT(rxchain->pkthead);
8480         PKTCADDLEN(rxchain->pkthead, PKTLEN(dhd->osh, pkt));
8481     } else {
8482         dhd_rxchain_commit(dhd);
8483         return;
8484     }
8485 
8486     /* If we have hit the max chain length, dispatch the chain and reset */
8487     if (rxchain->pkt_count >= DHD_PKT_CTF_MAX_CHAIN_LEN) {
8488         dhd_rxchain_commit(dhd);
8489     }
8490 }
8491 
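/** Hand the accumulated rx chain to dhd_bus_rx_frame() and reset the chain state */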
8492 static void BCMFASTPATH
8493 dhd_rxchain_commit(dhd_pub_t *dhd)
8494 {
8495     dhd_prot_t *prot = dhd->prot;
8496     rxchain_info_t *rxchain = &prot->rxchain;
8497 
8498     if (rxchain->pkt_count == 0)
8499         return;
8500 
8501     /* Release the packets to dhd_linux */
8502     dhd_bus_rx_frame(dhd->bus, rxchain->pkthead, rxchain->ifidx, rxchain->pkt_count);
8503 
8504     /* Reset the chain */
8505     dhd_rxchain_reset(rxchain);
8506 }
8507 
8508 #endif /* DHD_RX_CHAINING */
8509 
8510 
8511 #ifdef IDLE_TX_FLOW_MGMT
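/**
 * Resume an idle TX flowring: re-fetch a pre-initialized msgbuf_ring from the
 * flowring pool, post a MSG_TYPE_FLOW_RING_RESUME request on the H2D control
 * submission ring, publish the flowring's WRITE index (via DMA index, IFRM
 * index or a shared-memory write, depending on what is supported) and ring
 * the doorbell to the dongle.
 */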
8512 int
8513 dhd_prot_flow_ring_resume(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
8514 {
8515     tx_idle_flowring_resume_request_t *flow_resume_rqst;
8516     msgbuf_ring_t *flow_ring;
8517     dhd_prot_t *prot = dhd->prot;
8518     unsigned long flags;
8519     uint16 alloced = 0;
8520     msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
8521 
8522     /* Fetch a pre-initialized msgbuf_ring from the flowring pool */
8523     flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
8524     if (flow_ring == NULL) {
8525         DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n",
8526             __FUNCTION__, flow_ring_node->flowid));
8527         return BCME_NOMEM;
8528     }
8529 #ifdef PCIE_INB_DW
8530     if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
8531         return BCME_ERROR;
8532 #endif /* PCIE_INB_DW */
8533 
8534     DHD_GENERAL_LOCK(dhd, flags);
8535 
8536     /* Request for ctrl_ring buffer space */
8537     flow_resume_rqst = (tx_idle_flowring_resume_request_t *)
8538         dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE);
8539 
8540     if (flow_resume_rqst == NULL) {
8541         dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
8542         DHD_ERROR(("%s: flow resume request for flowid %d failed: no ring space\n",
8543             __FUNCTION__, flow_ring_node->flowid));
8544         DHD_GENERAL_UNLOCK(dhd, flags);
8545 #ifdef PCIE_INB_DW
8546         dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
8547 #endif
8548         return BCME_NOMEM;
8549     }
8550 
8551     flow_ring_node->prot_info = (void *)flow_ring;
8552 
8553     /* Common msg buf hdr */
8554     flow_resume_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_RESUME;
8555     flow_resume_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
8556     flow_resume_rqst->msg.request_id = htol32(0); /* TBD */
8557 
8558     flow_resume_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
8559     ctrl_ring->seqnum++;
8560 
8561     flow_resume_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
8562     DHD_ERROR(("%s: sending flow resume request for flow ID %d\n",
8563         __FUNCTION__, flow_ring_node->flowid));
8564 
8565     /* Update the flow_ring's WRITE index */
8566     if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
8567         dhd_prot_dma_indx_set(dhd, flow_ring->wr,
8568                               H2D_DMA_INDX_WR_UPD, flow_ring->idx);
8569     } else if (IFRM_ACTIVE(dhd) && (flow_ring->idx >= BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START)) {
8570         dhd_prot_dma_indx_set(dhd, flow_ring->wr,
8571             H2D_IFRM_INDX_WR_UPD,
8572             (flow_ring->idx - BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START));
8573     } else {
8574         dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
8575             sizeof(uint16), RING_WR_UPD, flow_ring->idx);
8576     }
8577 
8578     /* update control subn ring's WR index and ring doorbell to dongle */
8579     dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_resume_rqst, 1);
8580 
8581     DHD_GENERAL_UNLOCK(dhd, flags);
8582 #ifdef PCIE_INB_DW
8583     dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
8584 #endif
8585 
8586     return BCME_OK;
8587 } /* dhd_prot_flow_ring_resume */
8588 
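/**
 * Suspend up to 'count' idle TX flowrings with a single
 * MSG_TYPE_FLOW_RING_SUSPEND request: the flowring IDs are copied into the
 * request and the control submission ring's doorbell is rung once for the
 * whole batch.
 */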
8589 int
8590 dhd_prot_flow_ring_batch_suspend_request(dhd_pub_t *dhd, uint16 *ringid, uint16 count)
8591 {
8592     tx_idle_flowring_suspend_request_t *flow_suspend_rqst;
8593     dhd_prot_t *prot = dhd->prot;
8594     unsigned long flags;
8595     uint16 index;
8596     uint16 alloced = 0;
8597     msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
8598 
8599 #ifdef PCIE_INB_DW
8600     if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
8601         return BCME_ERROR;
8602 #endif /* PCIE_INB_DW */
8603 
8604     DHD_GENERAL_LOCK(dhd, flags);
8605 
8606     /* Request for ring buffer space */
8607     flow_suspend_rqst = (tx_idle_flowring_suspend_request_t *)
8608         dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
8609 
8610     if (flow_suspend_rqst == NULL) {
8611         DHD_GENERAL_UNLOCK(dhd, flags);
8612         DHD_ERROR(("%s: flow suspend request failed: no ring space\n", __FUNCTION__));
8613 #ifdef PCIE_INB_DW
8614         dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
8615 #endif
8616         return BCME_NOMEM;
8617     }
8618 
8619     /* Common msg buf hdr */
8620     flow_suspend_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_SUSPEND;
8621     flow_suspend_rqst->msg.request_id = htol32(0); /* TBD */
8622 
8623     flow_suspend_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
8624     ring->seqnum++;
8625 
8626     /* Update flow id  info */
8627     for (index = 0; index < count; index++)
8628     {
8629         flow_suspend_rqst->ring_id[index] = ringid[index];
8630     }
8631     flow_suspend_rqst->num = count;
8632 
8633     DHD_ERROR(("%s: sending batch suspend request for %d flowrings\n", __FUNCTION__, count));
8634 
8635     /* update ring's WR index and ring doorbell to dongle */
8636     dhd_prot_ring_write_complete(dhd, ring, flow_suspend_rqst, 1);
8637     DHD_GENERAL_UNLOCK(dhd, flags);
8638 #ifdef PCIE_INB_DW
8639     dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
8640 #endif
8641 
8642     return BCME_OK;
8643 }
8644 #endif /* IDLE_TX_FLOW_MGMT */
8645 
8646 
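/**
 * Print the dongle's extended trap data. The buffer starts with the original
 * 32-bit trap_data word, followed by an hnd_ext_trap_hdr_t whose payload is a
 * sequence of TLVs. In raw mode the whole blob is hex dumped; otherwise the
 * TAG_TRAP_SIGNATURE (ARM register file), TAG_TRAP_STACK (stack words) and
 * TAG_TRAP_BACKPLANE (backplane error registers) TLVs are decoded.
 */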
8647 int dhd_prot_dump_extended_trap(dhd_pub_t *dhdp, struct bcmstrbuf *b, bool raw)
8648 {
8649     uint32 i;
8650     uint32 *ext_data;
8651     hnd_ext_trap_hdr_t *hdr;
8652     bcm_tlv_t *tlv;
8653     trap_t *tr;
8654     uint32 *stack;
8655     hnd_ext_trap_bp_err_t *bpe;
8656     uint32 raw_len;
8657 
8658     ext_data = dhdp->extended_trap_data;
8659 
8660     /* return if there is no extended trap data */
8661     if (!ext_data || !(dhdp->dongle_trap_data & D2H_DEV_EXT_TRAP_DATA))
8662     {
8663         bcm_bprintf(b, "%d (0x%x)", dhdp->dongle_trap_data, dhdp->dongle_trap_data);
8664         return BCME_OK;
8665     }
8666 
8667     bcm_bprintf(b, "Extended trap data\n");
8668 
8669     /* First word is original trap_data */
8670     bcm_bprintf(b, "trap_data = 0x%08x\n", *ext_data);
8671     ext_data++;
8672 
8673     /* Followed by the extended trap data header */
8674     hdr = (hnd_ext_trap_hdr_t *)ext_data;
8675     bcm_bprintf(b, "version: %d, len: %d\n", hdr->version, hdr->len);
8676 
8677     if (raw)
8678     {
8679         raw_len = sizeof(hnd_ext_trap_hdr_t) + (hdr->len / 4) + (hdr->len % 4 ? 1 : 0);
8680         for (i = 0; i < raw_len; i++)
8681         {
8682             bcm_bprintf(b, "0x%08x ", ext_data[i]);
8683             if (i % 4 == 3)
8684                 bcm_bprintf(b, "\n");
8685         }
8686         return BCME_OK;
8687     }
8688 
8689     /* Extract the various supported TLVs from the extended trap data */
8690     tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
8691     if (tlv)
8692     {
8693         bcm_bprintf(b, "\nTAG_TRAP_SIGNATURE len: %d\n", tlv->len);
8694         tr = (trap_t *)tlv->data;
8695 
8696         bcm_bprintf(b, "TRAP %x: pc %x, lr %x, sp %x, cpsr %x, spsr %x\n",
8697                tr->type, tr->pc, tr->r14, tr->r13, tr->cpsr, tr->spsr);
8698         bcm_bprintf(b, "  r0 %x, r1 %x, r2 %x, r3 %x, r4 %x, r5 %x, r6 %x\n",
8699                tr->r0, tr->r1, tr->r2, tr->r3, tr->r4, tr->r5, tr->r6);
8700         bcm_bprintf(b, "  r7 %x, r8 %x, r9 %x, r10 %x, r11 %x, r12 %x\n",
8701                tr->r7, tr->r8, tr->r9, tr->r10, tr->r11, tr->r12);
8702     }
8703 
8704     tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
8705     if (tlv)
8706     {
8707         bcm_bprintf(b, "\nTAG_TRAP_STACK len: %d\n", tlv->len);
8708         stack = (uint32 *)tlv->data;
8709         for (i = 0; i < (uint32)(tlv->len / 4); i++)
8710         {
8711             bcm_bprintf(b, "  0x%08x\n", *stack);
8712             stack++;
8713         }
8714     }
8715 
8716     tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BACKPLANE);
8717     if (tlv)
8718     {
8719         bcm_bprintf(b, "\nTAG_TRAP_BACKPLANE len: %d\n", tlv->len);
8720         bpe = (hnd_ext_trap_bp_err_t *)tlv->data;
8721         bcm_bprintf(b, " error: %x\n", bpe->error);
8722         bcm_bprintf(b, " coreid: %x\n", bpe->coreid);
8723         bcm_bprintf(b, " baseaddr: %x\n", bpe->baseaddr);
8724         bcm_bprintf(b, " ioctrl: %x\n", bpe->ioctrl);
8725         bcm_bprintf(b, " iostatus: %x\n", bpe->iostatus);
8726         bcm_bprintf(b, " resetctrl: %x\n", bpe->resetctrl);
8727         bcm_bprintf(b, " resetstatus: %x\n", bpe->resetstatus);
8728         bcm_bprintf(b, " errlogctrl: %x\n", bpe->errlogctrl);
8729         bcm_bprintf(b, " errlogdone: %x\n", bpe->errlogdone);
8730         bcm_bprintf(b, " errlogstatus: %x\n", bpe->errlogstatus);
8731         bcm_bprintf(b, " errlogaddrlo: %x\n", bpe->errlogaddrlo);
8732         bcm_bprintf(b, " errlogaddrhi: %x\n", bpe->errlogaddrhi);
8733         bcm_bprintf(b, " errlogid: %x\n", bpe->errlogid);
8734         bcm_bprintf(b, " errloguser: %x\n", bpe->errloguser);
8735         bcm_bprintf(b, " errlogflags: %x\n", bpe->errlogflags);
8736     }
8737 
8738     return BCME_OK;
8739 }
8740 
8741 
8742 #ifdef BCMPCIE
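/**
 * Send a host timestamp request to the dongle: the TLV payload is copied into
 * the pre-allocated DMA-able hostts_req_buf and a MSG_TYPE_HOSTTIMSTAMP work
 * item carrying the buffer address, length, sequence number and xt_id is
 * posted on the H2D control submission ring. Only one request may be
 * outstanding at a time (tracked via hostts_req_buf_inuse).
 */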
8743 int
8744 dhd_prot_send_host_timestamp(dhd_pub_t *dhdp, uchar *tlvs, uint16 tlv_len,
8745     uint16 seqnum, uint16 xt_id)
8746 {
8747     dhd_prot_t *prot = dhdp->prot;
8748     host_timestamp_msg_t *ts_req;
8749     unsigned long flags;
8750     uint16 alloced = 0;
8751     uchar *ts_tlv_buf;
8752 
8753     if ((tlvs == NULL) || (tlv_len == 0)) {
8754         DHD_ERROR(("%s: argument error tlv: %p, tlv_len %d\n",
8755             __FUNCTION__, tlvs, tlv_len));
8756         return -1;
8757     }
8758 #ifdef PCIE_INB_DW
8759     if (dhd_prot_inc_hostactive_devwake_assert(dhdp->bus) != BCME_OK)
8760         return BCME_ERROR;
8761 #endif /* PCIE_INB_DW */
8762 
8763     DHD_GENERAL_LOCK(dhdp, flags);
8764 
8765     /* bail out if a host TS request is already pending */
8766     if (prot->hostts_req_buf_inuse == TRUE) {
8767         DHD_ERROR(("a host TS request is already pending at the device\n"));
8768         DHD_GENERAL_UNLOCK(dhdp, flags);
8769 #ifdef PCIE_INB_DW
8770         dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
8771 #endif
8772         return -1;
8773     }
8774 
8775     /* Request for cbuf space */
8776     ts_req = (host_timestamp_msg_t*)dhd_prot_alloc_ring_space(dhdp, &prot->h2dring_ctrl_subn,
8777         DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced, FALSE);
8778     if (ts_req == NULL) {
8779         DHD_ERROR(("couldn't allocate space on msgring to send host TS request\n"));
8780         DHD_GENERAL_UNLOCK(dhdp, flags);
8781 #ifdef PCIE_INB_DW
8782         dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
8783 #endif
8784         return -1;
8785     }
8786 
8787     /* Common msg buf hdr */
8788     ts_req->msg.msg_type = MSG_TYPE_HOSTTIMSTAMP;
8789     ts_req->msg.if_id = 0;
8790     ts_req->msg.flags =  prot->h2dring_ctrl_subn.current_phase;
8791     ts_req->msg.request_id = DHD_H2D_HOSTTS_REQ_PKTID;
8792 
8793     ts_req->msg.epoch = prot->h2dring_ctrl_subn.seqnum % H2D_EPOCH_MODULO;
8794     prot->h2dring_ctrl_subn.seqnum++;
8795 
8796     ts_req->xt_id = xt_id;
8797     ts_req->seqnum = seqnum;
8798     /* populate TS req buffer info */
8799     ts_req->input_data_len = htol16(tlv_len);
8800     ts_req->host_buf_addr.high = htol32(PHYSADDRHI(prot->hostts_req_buf.pa));
8801     ts_req->host_buf_addr.low = htol32(PHYSADDRLO(prot->hostts_req_buf.pa));
8802     /* copy the TS TLV payload into the DMA-able request buffer */
8803     ts_tlv_buf = (void *) prot->hostts_req_buf.va;
8804     prot->hostts_req_buf_inuse = TRUE;
8805     memcpy(ts_tlv_buf, tlvs, tlv_len);
8806 
8807     OSL_CACHE_FLUSH((void *) prot->hostts_req_buf.va, tlv_len);
8808 
8809     if (ISALIGNED(ts_tlv_buf, DMA_ALIGN_LEN) == FALSE) {
8810         DHD_ERROR(("host TS req buffer address is not DMA-aligned\n"));
8811     }
8812 
8813     DHD_CTL(("submitted host TS request: request_id %d, data_len %d, xt_id %d, seq %d\n",
8814         ts_req->msg.request_id, ts_req->input_data_len,
8815         ts_req->xt_id, ts_req->seqnum));
8816 
8817 
8818     /* upd wrt ptr and raise interrupt */
8819     dhd_prot_ring_write_complete(dhdp, &prot->h2dring_ctrl_subn, ts_req,
8820         DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
8821     DHD_GENERAL_UNLOCK(dhdp, flags);
8822 #ifdef PCIE_INB_DW
8823     dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
8824 #endif
8825 
8826     return 0;
8827 } /* dhd_prot_send_host_timestamp */
8828 
8829 
8830 bool
8831 dhd_prot_data_path_tx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set)
8832 {
8833     if (set)
8834         dhd->prot->tx_ts_log_enabled = enable;
8835 
8836     return dhd->prot->tx_ts_log_enabled;
8837 }
8838 
8839 bool
8840 dhd_prot_data_path_rx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set)
8841 {
8842     if (set)
8843         dhd->prot->rx_ts_log_enabled = enable;
8844 
8845     return dhd->prot->rx_ts_log_enabled;
8846 }
8847 #endif /* BCMPCIE */
8848 
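/** Free the DMA-able H2D write-index and D2H read-index buffers */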
8849 void
8850 dhd_prot_dma_indx_free(dhd_pub_t *dhd)
8851 {
8852     dhd_prot_t *prot = dhd->prot;
8853 
8854     dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
8855     dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
8856 }
8857 
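/**
 * Completion handler for firmware timestamp events (DHD_TIMESYNC builds
 * only): looks up the posted TS rx buffer by pktid, replenishes the TS buffer
 * pool if needed, hands the payload to dhd_timesync_handle_fw_timestamp() and
 * frees the packet.
 */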
8858 static void BCMFASTPATH
8859 dhd_prot_process_fw_timestamp(dhd_pub_t *dhd, void *buf)
8860 {
8861 #ifdef DHD_TIMESYNC
8862     fw_timestamp_event_msg_t *resp;
8863     uint32 pktid;
8864     uint16 buflen, seqnum;
8865     void * pkt;
8866     unsigned long flags;
8867 
8868     resp = (fw_timestamp_event_msg_t *)buf;
8869     pktid = ltoh32(resp->msg.request_id);
8870     buflen = ltoh16(resp->buf_len);
8871     seqnum = ltoh16(resp->seqnum);
8872 
8873 #if defined(DHD_PKTID_AUDIT_RING)
8874     DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_ctrl_map, pktid,
8875         DHD_DUPLICATE_FREE);
8876 #endif /* DHD_PKTID_AUDIT_RING */
8877 
8878     DHD_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d\n",
8879         pktid, buflen, resp->msg.flags, ltoh16(resp->seqnum)));
8880 
8881     if (!dhd->prot->cur_ts_bufs_posted) {
8882         DHD_ERROR(("no TS buffers posted, but received a completion\n"));
8883         return;
8884     }
8885 
8886     dhd->prot->cur_ts_bufs_posted--;
8887     if (dhd->prot->max_tsbufpost > 0)
8888         dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
8889 
8890     DHD_GENERAL_LOCK(dhd, flags);
8891     pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_TSBUF_RX, TRUE);
8892     DHD_GENERAL_UNLOCK(dhd, flags);
8893 
8894     if (!pkt) {
8895         DHD_ERROR(("no ts buffer associated with pktid 0x%04x\n", pktid));
8896         return;
8897     }
8898 
8899     PKTSETLEN(dhd->osh, pkt, buflen);
8900     dhd_timesync_handle_fw_timestamp(dhd->ts, PKTDATA(dhd->osh, pkt), buflen, seqnum);
8901 #ifdef DHD_USE_STATIC_CTRLBUF
8902     PKTFREE_STATIC(dhd->osh, pkt, TRUE);
8903 #else
8904     PKTFREE(dhd->osh, pkt, TRUE);
8905 #endif /* DHD_USE_STATIC_CTRLBUF */
8906 #else /* DHD_TIMESYNC */
8907     DHD_ERROR(("Timesync feature not compiled in, but received a FW TS message\n"));
8908 #endif /* DHD_TIMESYNC */
8909 }
8910