1 /*
2  * DHD Bus Module for PCIE
3  *
4  * Copyright (C) 1999-2019, Broadcom.
5  *
6  *      Unless you and Broadcom execute a separate written software license
7  * agreement governing use of this software, this software is licensed to you
8  * under the terms of the GNU General Public License version 2 (the "GPL"),
9  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10  * following added to such license:
11  *
12  *      As a special exception, the copyright holders of this software give you
13  * permission to link this software with independent modules, and to copy and
14  * distribute the resulting executable under terms of your choice, provided that
15  * you also meet, for each linked independent module, the terms and conditions
16  * of the license of that module.  An independent module is a module which is
17  * not derived from this software.  The special exception does not apply to any
18  * modifications of the software.
19  *
20  *      Notwithstanding the above, under no circumstances may you combine this
21  * software in any way with any other Broadcom software provided under a license
22  * other than the GPL, without Broadcom's express prior written consent.
23  *
24  *
25  * <<Broadcom-WL-IPTag/Open:>>
26  *
27  * $Id: dhd_pcie.c 825481 2019-06-14 10:06:03Z $
28  */
29 
30 /* include files */
31 #include <typedefs.h>
32 #include <bcmutils.h>
33 #include <bcmdevs.h>
34 #include <siutils.h>
35 #include <hndoobr.h>
36 #include <hndsoc.h>
37 #include <hndpmu.h>
38 #include <etd.h>
39 #include <hnd_debug.h>
40 #include <sbchipc.h>
41 #include <sbhndarm.h>
42 #include <hnd_armtrap.h>
43 #if defined(DHD_DEBUG)
44 #include <hnd_cons.h>
45 #endif /* defined(DHD_DEBUG) */
46 #include <dngl_stats.h>
47 #include <pcie_core.h>
48 #include <dhd.h>
49 #include <dhd_bus.h>
50 #include <dhd_flowring.h>
51 #include <dhd_proto.h>
52 #include <dhd_dbg.h>
53 #include <dhd_debug.h>
54 #include <dhd_daemon.h>
55 #include <dhdioctl.h>
56 #include <sdiovar.h>
57 #include <bcmmsgbuf.h>
58 #include <pcicfg.h>
59 #include <dhd_pcie.h>
60 #include <bcmpcie.h>
61 #include <bcmendian.h>
62 #include <bcmstdlib_s.h>
63 #ifdef DHDTCPACK_SUPPRESS
64 #include <dhd_ip.h>
65 #endif /* DHDTCPACK_SUPPRESS */
66 #include <bcmevent.h>
67 #include <dhd_config.h>
68 
69 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
70 #include <linux/pm_runtime.h>
71 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
72 
73 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
74 #include <debugger.h>
75 #endif /* DEBUGGER || DHD_DSCOPE */
76 
77 #ifdef DNGL_AXI_ERROR_LOGGING
78 #include <dhd_linux_wq.h>
79 #include <dhd_linux.h>
80 #endif /* DNGL_AXI_ERROR_LOGGING */
81 
82 #if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
83 #include <dhd_linux_priv.h>
84 #endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
85 
86 #include <otpdefs.h>
87 #define EXTENDED_PCIE_DEBUG_DUMP 1 /* Enable extended PCIe register dump */
88 
89 #define MEMBLOCK 2048 /* Block size used for downloading the dongle image */
90 #define MAX_WKLK_IDLE_CHECK                                                    \
91     3 /* times wake_lock checked before deciding not to suspend */
92 
93 #define DHD_MAX_ITEMS_HPP_TXCPL_RING 512
94 #define DHD_MAX_ITEMS_HPP_RXCPL_RING 512
95 
96 #define ARMCR4REG_CORECAP (0x4 / sizeof(uint32))
97 #define ARMCR4REG_MPUCTRL (0x90 / sizeof(uint32))
98 #define ACC_MPU_SHIFT 25
99 #define ACC_MPU_MASK (0x1u << ACC_MPU_SHIFT)
100 
101 #define REG_WORK_AROUND (0x1e4 / sizeof(uint32))
102 
103 #define ARMCR4REG_BANKIDX (0x40 / sizeof(uint32))
104 #define ARMCR4REG_BANKPDA (0x4C / sizeof(uint32))
105 /* Temporary WAR to fix precommit until the sync issue between the trunk &
106  * precommit branches is resolved */
107 
108 /* CTO Prevention Recovery */
109 #ifdef BCMQT_HW
110 #define CTO_TO_CLEAR_WAIT_MS 10000
111 #define CTO_TO_CLEAR_WAIT_MAX_CNT 100
112 #else
113 #define CTO_TO_CLEAR_WAIT_MS 1000
114 #define CTO_TO_CLEAR_WAIT_MAX_CNT 10
115 #endif // endif
116 
117 /* Fetch address of a member in the pciedev_shared structure in dongle memory */
118 #define DHD_PCIE_SHARED_MEMBER_ADDR(bus, member)                               \
119     (bus)->shared_addr + OFFSETOF(pciedev_shared_t, member)
120 
121 /* Fetch address of a member in rings_info_ptr structure in dongle memory */
122 #define DHD_RING_INFO_MEMBER_ADDR(bus, member)                                 \
123     (bus)->pcie_sh->rings_info_ptr + OFFSETOF(ring_info_t, member)
124 
125 /* Fetch address of a member in the ring_mem structure in dongle memory */
126 #define DHD_RING_MEM_MEMBER_ADDR(bus, ringid, member)                          \
127     (bus)->ring_sh[ringid].ring_mem_addr + OFFSETOF(ring_mem_t, member)
128 
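/* Usage sketch (illustrative, not a definitive call site): the three macros
 * above only compute dongle-side addresses; the actual access goes through the
 * TCM byte accessors. Assuming pciedev_shared_t carries a console_addr member
 * (as in stock DHD), reading it would look like:
 *
 *   uint32 console_addr;
 *   dhdpcie_bus_membytes(bus, FALSE,
 *                        DHD_PCIE_SHARED_MEMBER_ADDR(bus, console_addr),
 *                        (uint8 *)&console_addr, sizeof(console_addr));
 */
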
129 #if defined(SUPPORT_MULTIPLE_BOARD_REV)
130 extern unsigned int system_rev;
131 #endif /* SUPPORT_MULTIPLE_BOARD_REV */
132 
133 #ifdef EWP_EDL
134 extern int host_edl_support;
135 #endif // endif
136 
137 /* This can be overridden by the module parameter (dma_ring_indices) defined
138  * in dhd_linux.c */
139 uint dma_ring_indices = 0;
140 /* This can be overridden by the module parameter (h2d_phase) defined in
141  * dhd_linux.c */
142 bool h2d_phase = 0;
143 /* This can be overridden by the module parameter (force_trap_bad_h2d_phase)
144  * defined in dhd_linux.c
145  */
146 bool force_trap_bad_h2d_phase = 0;
147 
148 int dhd_dongle_memsize;
149 int dhd_dongle_ramsize;
150 struct dhd_bus *g_dhd_bus = NULL;
151 #ifdef DNGL_AXI_ERROR_LOGGING
152 static void dhd_log_dump_axi_error(uint8 *axi_err);
153 #endif /* DNGL_AXI_ERROR_LOGGING */
154 
155 static int dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size);
156 static int dhdpcie_bus_readconsole(dhd_bus_t *bus);
157 #if defined(DHD_FW_COREDUMP)
158 static int dhdpcie_mem_dump(dhd_bus_t *bus);
159 static int dhdpcie_get_mem_dump(dhd_bus_t *bus);
160 #endif /* DHD_FW_COREDUMP */
161 
162 static int dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address,
163                                 uint8 *data, uint size);
164 static int dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi,
165                                uint32 actionid, const char *name, void *params,
166                                int plen, void *arg, int len, int val_size);
167 static int dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 intval);
168 static int dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus, uint32 len,
169                                    uint32 srcdelay, uint32 destdelay,
170                                    uint32 d11_lpbk, uint32 core_num,
171                                    uint32 wait);
172 static int dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter);
173 static int _dhdpcie_download_firmware(struct dhd_bus *bus);
174 static int dhdpcie_download_firmware(dhd_bus_t *bus, osl_t *osh);
175 static int dhdpcie_bus_write_vars(dhd_bus_t *bus);
176 static bool dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus);
177 static bool dhdpci_bus_read_frames(dhd_bus_t *bus);
178 static int dhdpcie_readshared(dhd_bus_t *bus);
179 static void dhdpcie_init_shared_addr(dhd_bus_t *bus);
180 static bool dhdpcie_dongle_attach(dhd_bus_t *bus);
181 static void dhdpcie_bus_dongle_setmemsize(dhd_bus_t *bus, int mem_size);
182 static void dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh,
183                                        bool dongle_isolation, bool reset_flag);
184 static void dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh);
185 static int dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len);
186 static void dhdpcie_setbar1win(dhd_bus_t *bus, uint32 addr);
187 static uint8 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset);
188 static void dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data);
189 static void dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data);
190 static uint16 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset);
191 static void dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data);
192 static uint32 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset);
193 #ifdef DHD_SUPPORT_64BIT
194 static void dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data)
195     __attribute__((used));
196 static uint64 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset)
197     __attribute__((used));
198 #endif /* DHD_SUPPORT_64BIT */
199 static void dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data);
200 static void dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size);
201 static int dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b);
202 static void dhdpcie_fw_trap(dhd_bus_t *bus);
203 static void dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus,
204                                            ring_info_t *ring_info);
205 static void dhdpcie_handle_mb_data(dhd_bus_t *bus);
206 extern void dhd_dpc_enable(dhd_pub_t *dhdp);
207 extern void dhd_dpc_kill(dhd_pub_t *dhdp);
208 
209 #ifdef IDLE_TX_FLOW_MGMT
210 static void dhd_bus_check_idle_scan(dhd_bus_t *bus);
211 static void dhd_bus_idle_scan(dhd_bus_t *bus);
212 #endif /* IDLE_TX_FLOW_MGMT */
213 
214 #ifdef EXYNOS_PCIE_DEBUG
215 extern void exynos_pcie_register_dump(int ch_num);
216 #endif /* EXYNOS_PCIE_DEBUG */
217 
218 #if defined(DHD_H2D_LOG_TIME_SYNC)
219 static void dhdpci_bus_rte_log_time_sync_poll(dhd_bus_t *bus);
220 #endif /* DHD_H2D_LOG_TIME_SYNC */
221 
222 #define PCI_VENDOR_ID_BROADCOM 0x14e4
223 
224 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
225 #define MAX_D3_ACK_TIMEOUT 100
226 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
227 
228 #define DHD_DEFAULT_DOORBELL_TIMEOUT 200 /* ms */
229 static bool dhdpcie_check_firmware_compatible(uint32 f_api_version,
230                                               uint32 h_api_version);
231 static int dhdpcie_cto_error_recovery(struct dhd_bus *bus);
232 
233 static int dhdpcie_init_d11status(struct dhd_bus *bus);
234 
235 static int dhdpcie_wrt_rnd(struct dhd_bus *bus);
236 
237 extern uint16 dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd);
238 extern void dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost);
239 
240 #ifdef DHD_HP2P
241 extern enum hrtimer_restart dhd_hp2p_write(struct hrtimer *timer);
242 static uint16 dhd_bus_set_hp2p_ring_max_size(struct dhd_bus *bus, bool tx,
243                                              uint16 val);
244 #endif // endif
245 #define NUM_PATTERNS 2
246 static bool dhd_bus_tcm_test(struct dhd_bus *bus);
247 
248 /* IOVar table */
249 enum {
250     IOV_INTR = 1,
251     IOV_MEMSIZE,
252     IOV_SET_DOWNLOAD_STATE,
253     IOV_DEVRESET,
254     IOV_VARS,
255     IOV_MSI_SIM,
256     IOV_PCIE_LPBK,
257     IOV_CC_NVMSHADOW,
258     IOV_RAMSIZE,
259     IOV_RAMSTART,
260     IOV_SLEEP_ALLOWED,
261     IOV_PCIE_DMAXFER,
262     IOV_PCIE_SUSPEND,
263     IOV_DONGLEISOLATION,
264     IOV_LTRSLEEPON_UNLOOAD,
265     IOV_METADATA_DBG,
266     IOV_RX_METADATALEN,
267     IOV_TX_METADATALEN,
268     IOV_TXP_THRESHOLD,
269     IOV_BUZZZ_DUMP,
270     IOV_DUMP_RINGUPD_BLOCK,
271     IOV_DMA_RINGINDICES,
272     IOV_FORCE_FW_TRAP,
273     IOV_DB1_FOR_MB,
274     IOV_FLOW_PRIO_MAP,
275     IOV_RXBOUND,
276     IOV_TXBOUND,
277     IOV_HANGREPORT,
278     IOV_H2D_MAILBOXDATA,
279     IOV_INFORINGS,
280     IOV_H2D_PHASE,
281     IOV_H2D_ENABLE_TRAP_BADPHASE,
282     IOV_H2D_TXPOST_MAX_ITEM,
283     IOV_TRAPDATA,
284     IOV_TRAPDATA_RAW,
285     IOV_CTO_PREVENTION,
286     IOV_PCIE_WD_RESET,
287     IOV_DUMP_DONGLE,
288     IOV_HWA_ENAB_BMAP,
289     IOV_IDMA_ENABLE,
290     IOV_IFRM_ENABLE,
291     IOV_CLEAR_RING,
292     IOV_DAR_ENABLE,
293     IOV_DNGL_CAPS, /**< returns string with dongle capabilities */
294 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
295     IOV_GDB_SERVER, /**< starts gdb server on given interface */
296 #endif              /* DEBUGGER || DHD_DSCOPE */
297     IOV_INB_DW_ENABLE,
298     IOV_CTO_THRESHOLD,
299     IOV_HSCBSIZE, /* get HSCB buffer size */
300     IOV_HP2P_ENABLE,
301     IOV_HP2P_PKT_THRESHOLD,
302     IOV_HP2P_TIME_THRESHOLD,
303     IOV_HP2P_PKT_EXPIRY,
304     IOV_HP2P_TXCPL_MAXITEMS,
305     IOV_HP2P_RXCPL_MAXITEMS,
306     IOV_EXTDTXS_IN_TXCPL,
307     IOV_HOSTRDY_AFTER_INIT,
308     IOV_PCIE_LAST /**< unused IOVAR */
309 };
310 
311 const bcm_iovar_t dhdpcie_iovars[] = {
312     {"intr", IOV_INTR, 0, 0, IOVT_BOOL, 0},
313     {"memsize", IOV_MEMSIZE, 0, 0, IOVT_UINT32, 0},
314     {"dwnldstate", IOV_SET_DOWNLOAD_STATE, 0, 0, IOVT_BOOL, 0},
315     {"vars", IOV_VARS, 0, 0, IOVT_BUFFER, 0},
316     {"devreset", IOV_DEVRESET, 0, 0, IOVT_UINT8, 0},
317     {"pcie_device_trap", IOV_FORCE_FW_TRAP, 0, 0, 0, 0},
318     {"pcie_lpbk", IOV_PCIE_LPBK, 0, 0, IOVT_UINT32, 0},
319     {"cc_nvmshadow", IOV_CC_NVMSHADOW, 0, 0, IOVT_BUFFER, 0},
320     {"ramsize", IOV_RAMSIZE, 0, 0, IOVT_UINT32, 0},
321     {"ramstart", IOV_RAMSTART, 0, 0, IOVT_UINT32, 0},
322     {"pcie_dmaxfer", IOV_PCIE_DMAXFER, 0, 0, IOVT_BUFFER,
323      sizeof(dma_xfer_info_t)},
324     {"pcie_suspend", IOV_PCIE_SUSPEND, DHD_IOVF_PWRREQ_BYPASS, 0, IOVT_UINT32,
325      0},
326     {"sleep_allowed", IOV_SLEEP_ALLOWED, 0, 0, IOVT_BOOL, 0},
327     {"dngl_isolation", IOV_DONGLEISOLATION, 0, 0, IOVT_UINT32, 0},
328     {"ltrsleep_on_unload", IOV_LTRSLEEPON_UNLOOAD, 0, 0, IOVT_UINT32, 0},
329     {"dump_ringupdblk", IOV_DUMP_RINGUPD_BLOCK, 0, 0, IOVT_BUFFER, 0},
330     {"dma_ring_indices", IOV_DMA_RINGINDICES, 0, 0, IOVT_UINT32, 0},
331     {"metadata_dbg", IOV_METADATA_DBG, 0, 0, IOVT_BOOL, 0},
332     {"rx_metadata_len", IOV_RX_METADATALEN, 0, 0, IOVT_UINT32, 0},
333     {"tx_metadata_len", IOV_TX_METADATALEN, 0, 0, IOVT_UINT32, 0},
334     {"db1_for_mb", IOV_DB1_FOR_MB, 0, 0, IOVT_UINT32, 0},
335     {"txp_thresh", IOV_TXP_THRESHOLD, 0, 0, IOVT_UINT32, 0},
336     {"buzzz_dump", IOV_BUZZZ_DUMP, 0, 0, IOVT_UINT32, 0},
337     {"flow_prio_map", IOV_FLOW_PRIO_MAP, 0, 0, IOVT_UINT32, 0},
338     {"rxbound", IOV_RXBOUND, 0, 0, IOVT_UINT32, 0},
339     {"txbound", IOV_TXBOUND, 0, 0, IOVT_UINT32, 0},
340     {"fw_hang_report", IOV_HANGREPORT, 0, 0, IOVT_BOOL, 0},
341     {"h2d_mb_data", IOV_H2D_MAILBOXDATA, 0, 0, IOVT_UINT32, 0},
342     {"inforings", IOV_INFORINGS, 0, 0, IOVT_UINT32, 0},
343     {"h2d_phase", IOV_H2D_PHASE, 0, 0, IOVT_UINT32, 0},
344     {"force_trap_bad_h2d_phase", IOV_H2D_ENABLE_TRAP_BADPHASE, 0, 0,
345      IOVT_UINT32, 0},
346     {"h2d_max_txpost", IOV_H2D_TXPOST_MAX_ITEM, 0, 0, IOVT_UINT32, 0},
347     {"trap_data", IOV_TRAPDATA, 0, 0, IOVT_BUFFER, 0},
348     {"trap_data_raw", IOV_TRAPDATA_RAW, 0, 0, IOVT_BUFFER, 0},
349     {"cto_prevention", IOV_CTO_PREVENTION, 0, 0, IOVT_UINT32, 0},
350     {"pcie_wd_reset", IOV_PCIE_WD_RESET, 0, 0, IOVT_BOOL, 0},
351     {"dump_dongle", IOV_DUMP_DONGLE, 0, 0, IOVT_BUFFER,
352      MAX(sizeof(dump_dongle_in_t), sizeof(dump_dongle_out_t))},
353     {"clear_ring", IOV_CLEAR_RING, 0, 0, IOVT_UINT32, 0},
354     {"hwa_enab_bmap", IOV_HWA_ENAB_BMAP, 0, 0, IOVT_UINT32, 0},
355     {"idma_enable", IOV_IDMA_ENABLE, 0, 0, IOVT_UINT32, 0},
356     {"ifrm_enable", IOV_IFRM_ENABLE, 0, 0, IOVT_UINT32, 0},
357     {"dar_enable", IOV_DAR_ENABLE, 0, 0, IOVT_UINT32, 0},
358     {"cap", IOV_DNGL_CAPS, 0, 0, IOVT_BUFFER, 0},
359 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
360     {"gdb_server", IOV_GDB_SERVER, 0, 0, IOVT_UINT32, 0},
361 #endif /* DEBUGGER || DHD_DSCOPE */
362     {"inb_dw_enable", IOV_INB_DW_ENABLE, 0, 0, IOVT_UINT32, 0},
363     {"cto_threshold", IOV_CTO_THRESHOLD, 0, 0, IOVT_UINT32, 0},
364     {"hscbsize", IOV_HSCBSIZE, 0, 0, IOVT_UINT32, 0},
365 #ifdef DHD_HP2P
366     {"hp2p_enable", IOV_HP2P_ENABLE, 0, 0, IOVT_UINT32, 0},
367     {"hp2p_pkt_thresh", IOV_HP2P_PKT_THRESHOLD, 0, 0, IOVT_UINT32, 0},
368     {"hp2p_time_thresh", IOV_HP2P_TIME_THRESHOLD, 0, 0, IOVT_UINT32, 0},
369     {"hp2p_pkt_expiry", IOV_HP2P_PKT_EXPIRY, 0, 0, IOVT_UINT32, 0},
370     {"hp2p_txcpl_maxitems", IOV_HP2P_TXCPL_MAXITEMS, 0, 0, IOVT_UINT32, 0},
371     {"hp2p_rxcpl_maxitems", IOV_HP2P_RXCPL_MAXITEMS, 0, 0, IOVT_UINT32, 0},
372 #endif // endif
373     {"extdtxs_in_txcpl", IOV_EXTDTXS_IN_TXCPL, 0, 0, IOVT_UINT32, 0},
374     {"hostrdy_after_init", IOV_HOSTRDY_AFTER_INIT, 0, 0, IOVT_UINT32, 0},
375     {NULL, 0, 0, 0, 0, 0}};
376 
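/* Sketch of how the table above is consumed (see dhdpcie_bus_doiovar()): the
 * iovar name arriving from the IOCTL path is matched against dhdpcie_iovars
 * with bcm_iovar_lookup() from bcmutils, e.g.:
 *
 *   const bcm_iovar_t *vi = bcm_iovar_lookup(dhdpcie_iovars, "memsize");
 *   // vi->varid == IOV_MEMSIZE, vi->type == IOVT_UINT32 (field names as in
 *   // bcmutils' bcm_iovar_t)
 */
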
377 #define MAX_READ_TIMEOUT (2 * 1000 * 1000)
378 
379 #ifndef DHD_RXBOUND
380 #define DHD_RXBOUND 64
381 #endif // endif
382 #ifndef DHD_TXBOUND
383 #define DHD_TXBOUND 64
384 #endif // endif
385 
386 #define DHD_INFORING_BOUND 32
387 #define DHD_BTLOGRING_BOUND 32
388 
389 uint dhd_rxbound = DHD_RXBOUND;
390 uint dhd_txbound = DHD_TXBOUND;
391 
392 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
393 /** the GDB debugger layer will call back into this (bus) layer to read/write
394  * dongle memory */
395 static struct dhd_gdb_bus_ops_s bus_ops = {
396     .read_u16 = dhdpcie_bus_rtcm16,
397     .read_u32 = dhdpcie_bus_rtcm32,
398     .write_u32 = dhdpcie_bus_wtcm32,
399 };
400 #endif /* DEBUGGER || DHD_DSCOPE */
401 
402 bool dhd_bus_get_flr_force_fail(struct dhd_bus *bus)
403 {
404     return bus->flr_force_fail;
405 }
406 
407 /**
408  * Register/Unregister functions are called by the main DHD entry point (e.g.
409  * module insertion) to link with the bus driver, in order to look for or await
410  * the device.
411  */
412 int dhd_bus_register(void)
413 {
414     DHD_TRACE(("%s: Enter\n", __FUNCTION__));
415 
416     return dhdpcie_bus_register();
417 }
418 
419 void dhd_bus_unregister(void)
420 {
421     DHD_TRACE(("%s: Enter\n", __FUNCTION__));
422 
423     dhdpcie_bus_unregister();
424     return;
425 }
426 
427 /** returns a host virtual address */
428 uint32 *dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size)
429 {
430     return (uint32 *)REG_MAP(addr, size);
431 }
432 
433 void dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size)
434 {
435     REG_UNMAP(addr);
436     return;
437 }
438 
439 /**
440  * return H2D doorbell register address
441  * use DAR registers instead of enum register for corerev >= 23 (4347B0)
442  */
443 static INLINE uint dhd_bus_db0_addr_get(struct dhd_bus *bus)
444 {
445     uint addr = PCIH2D_MailBox;
446     uint dar_addr = DAR_PCIH2D_DB0_0(bus->sih->buscorerev);
447 
448     return ((DAR_ACTIVE(bus->dhd)) ? dar_addr : addr);
449 }
450 
451 static INLINE uint dhd_bus_db0_addr_2_get(struct dhd_bus *bus)
452 {
453     return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB2_0(bus->sih->buscorerev)
454                                    : PCIH2D_MailBox_2);
455 }
456 
457 static INLINE uint dhd_bus_db1_addr_get(struct dhd_bus *bus)
458 {
459     return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB0_1(bus->sih->buscorerev)
460                                    : PCIH2D_DB1);
461 }
462 
463 static INLINE uint dhd_bus_db1_addr_1_get(struct dhd_bus *bus)
464 {
465     return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB1_1(bus->sih->buscorerev)
466                                    : PCIH2D_DB1_1);
467 }
468 
469 /*
470  * WAR for SWWLAN-215055 - [4378B0] ARM fails to boot without DAR WL domain
471  * request
472  */
473 static INLINE void dhd_bus_pcie_pwr_req_wl_domain(struct dhd_bus *bus,
474                                                   uint offset, bool enable)
475 {
476     if (enable) {
477         si_corereg(bus->sih, bus->sih->buscoreidx, offset,
478                    SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT,
479                    SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT);
480     } else {
481         si_corereg(bus->sih, bus->sih->buscoreidx, offset,
482                    SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT, 0);
483     }
484 }
485 
486 static INLINE void _dhd_bus_pcie_pwr_req_clear_cmn(struct dhd_bus *bus)
487 {
488     uint mask;
489 
490     /*
491      * If multiple de-asserts, decrement ref and return
492      * Clear power request when only one pending
493      * so initial request is not removed unexpectedly
494      */
495     if (bus->pwr_req_ref > 1) {
496         bus->pwr_req_ref--;
497         return;
498     }
499 
500     ASSERT(bus->pwr_req_ref == 1);
501 
502     if (MULTIBP_ENAB(bus->sih)) {
503         /* Common BP controlled by HW so only need to toggle WL/ARM backplane */
504         mask = SRPWR_DMN1_ARMBPSD_MASK;
505     } else {
506         mask = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
507     }
508 
509     si_srpwr_request(bus->sih, mask, 0);
510     bus->pwr_req_ref = 0;
511 }
512 
513 static INLINE void dhd_bus_pcie_pwr_req_clear(struct dhd_bus *bus)
514 {
515     unsigned long flags = 0;
516 
517     DHD_GENERAL_LOCK(bus->dhd, flags);
518     _dhd_bus_pcie_pwr_req_clear_cmn(bus);
519     DHD_GENERAL_UNLOCK(bus->dhd, flags);
520 }
521 
522 static INLINE void dhd_bus_pcie_pwr_req_clear_nolock(struct dhd_bus *bus)
523 {
524     _dhd_bus_pcie_pwr_req_clear_cmn(bus);
525 }
526 
527 static INLINE void _dhd_bus_pcie_pwr_req_cmn(struct dhd_bus *bus)
528 {
529     uint mask, val;
530 
531     /* If multiple request entries, increment reference and return */
532     if (bus->pwr_req_ref > 0) {
533         bus->pwr_req_ref++;
534         return;
535     }
536 
537     ASSERT(bus->pwr_req_ref == 0);
538 
539     if (MULTIBP_ENAB(bus->sih)) {
540         /* Common BP controlled by HW so only need to toggle WL/ARM backplane */
541         mask = SRPWR_DMN1_ARMBPSD_MASK;
542         val = SRPWR_DMN1_ARMBPSD_MASK;
543     } else {
544         mask = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
545         val = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
546     }
547 
548     si_srpwr_request(bus->sih, mask, val);
549 
550     bus->pwr_req_ref = 1;
551 }
552 
553 static INLINE void dhd_bus_pcie_pwr_req(struct dhd_bus *bus)
554 {
555     unsigned long flags = 0;
556 
557     DHD_GENERAL_LOCK(bus->dhd, flags);
558     _dhd_bus_pcie_pwr_req_cmn(bus);
559     DHD_GENERAL_UNLOCK(bus->dhd, flags);
560 }
561 
562 static INLINE void _dhd_bus_pcie_pwr_req_pd0123_cmn(struct dhd_bus *bus)
563 {
564     uint mask, val;
565 
566     mask = SRPWR_DMN_ALL_MASK(bus->sih);
567     val = SRPWR_DMN_ALL_MASK(bus->sih);
568 
569     si_srpwr_request(bus->sih, mask, val);
570 }
571 
572 static INLINE void dhd_bus_pcie_pwr_req_reload_war(struct dhd_bus *bus)
573 {
574     unsigned long flags = 0;
575 
576     DHD_GENERAL_LOCK(bus->dhd, flags);
577     _dhd_bus_pcie_pwr_req_pd0123_cmn(bus);
578     DHD_GENERAL_UNLOCK(bus->dhd, flags);
579 }
580 
581 static INLINE void _dhd_bus_pcie_pwr_req_clear_pd0123_cmn(struct dhd_bus *bus)
582 {
583     uint mask;
584 
585     mask = SRPWR_DMN_ALL_MASK(bus->sih);
586 
587     si_srpwr_request(bus->sih, mask, 0);
588 }
589 
590 static INLINE void dhd_bus_pcie_pwr_req_clear_reload_war(struct dhd_bus *bus)
591 {
592     unsigned long flags = 0;
593 
594     DHD_GENERAL_LOCK(bus->dhd, flags);
595     _dhd_bus_pcie_pwr_req_clear_pd0123_cmn(bus);
596     DHD_GENERAL_UNLOCK(bus->dhd, flags);
597 }
598 
599 static INLINE void dhd_bus_pcie_pwr_req_nolock(struct dhd_bus *bus)
600 {
601     _dhd_bus_pcie_pwr_req_cmn(bus);
602 }
603 
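/* Usage contract for the power-request helpers above: every
 * dhd_bus_pcie_pwr_req() (or its _nolock variant) must be balanced by a
 * dhd_bus_pcie_pwr_req_clear(); pwr_req_ref makes sure the SRPWR request is
 * dropped only when the last holder releases it. A typical guarded access:
 *
 *   if (MULTIBP_ENAB(bus->sih))
 *       dhd_bus_pcie_pwr_req(bus);
 *   ... access ARM/WL backplane registers ...
 *   if (MULTIBP_ENAB(bus->sih))
 *       dhd_bus_pcie_pwr_req_clear(bus);
 */
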
604 bool dhdpcie_chip_support_msi(dhd_bus_t *bus)
605 {
606     DHD_INFO(("%s: buscorerev=%d chipid=0x%x\n", __FUNCTION__,
607               bus->sih->buscorerev, si_chipid(bus->sih)));
608     if (bus->sih->buscorerev <= 0xE || si_chipid(bus->sih) == BCM4375_CHIP_ID ||
609         si_chipid(bus->sih) == BCM4362_CHIP_ID ||
610         si_chipid(bus->sih) == BCM43751_CHIP_ID ||
611         si_chipid(bus->sih) == BCM4361_CHIP_ID ||
612         si_chipid(bus->sih) == BCM4359_CHIP_ID) {
613         return FALSE;
614     } else {
615         return TRUE;
616     }
617 }
618 
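/* The result of dhdpcie_chip_support_msi() feeds the d2h interrupt-method
 * selection in dhdpcie_bus_attach(): the chips rejected above are forced to
 * legacy INTx even when the enable_msi module parameter is set.
 */
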
619 /**
620  * Called once for each hardware (dongle) instance that this DHD manages.
621  *
622  * 'regs' is the host virtual address that maps to the start of the PCIe BAR0
623  * window. The first 4096 bytes in this window are mapped to the backplane
624  * address in the PCIEBAR0Window register. The precondition is that the
625  * PCIEBAR0Window register 'points' at the PCIe core.
626  *
627  * 'tcm' is the *host* virtual address at which tcm is mapped.
628  */
629 int dhdpcie_bus_attach(osl_t *osh, dhd_bus_t **bus_ptr, volatile char *regs,
630                        volatile char *tcm, void *pci_dev,
631                        wifi_adapter_info_t *adapter)
632 {
633     dhd_bus_t *bus = NULL;
634     int ret = BCME_OK;
635 
636     DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
637 
638     do {
639         if (!(bus = MALLOCZ(osh, sizeof(dhd_bus_t)))) {
640             DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
641             ret = BCME_NORESOURCE;
642             break;
643         }
644         bus->bus = adapter->bus_type;
645         bus->bus_num = adapter->bus_num;
646         bus->slot_num = adapter->slot_num;
647 
648         bus->regs = regs;
649         bus->tcm = tcm;
650         bus->osh = osh;
651         /* Save pci_dev into dhd_bus, as it may be needed in dhd_attach */
652         bus->dev = (struct pci_dev *)pci_dev;
653 
654         dll_init(&bus->flowring_active_list);
655 #ifdef IDLE_TX_FLOW_MGMT
656         bus->active_list_last_process_ts = OSL_SYSUPTIME();
657 #endif /* IDLE_TX_FLOW_MGMT */
658 
659         /* Attach pcie shared structure */
660         if (!(bus->pcie_sh = MALLOCZ(osh, sizeof(pciedev_shared_t)))) {
661             DHD_ERROR(("%s: MALLOC of bus->pcie_sh failed\n", __FUNCTION__));
662             ret = BCME_NORESOURCE;
663             break;
664         }
665 
666         if (dhdpcie_dongle_attach(bus)) {
667             DHD_ERROR(("%s: dhdpcie_probe_attach failed\n", __FUNCTION__));
668             ret = BCME_NOTREADY;
669             break;
670         }
671 
672         /* software resources */
673         if (!(bus->dhd = dhd_attach(osh, bus, PCMSGBUF_HDRLEN))) {
674             DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));
675             ret = BCME_NORESOURCE;
676             break;
677         }
678 #if defined(GET_OTP_MAC_ENABLE) || defined(GET_OTP_MODULE_NAME)
679         dhd_conf_get_otp(bus->dhd, bus->sih);
680 #endif
681         DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
682         bus->dhd->busstate = DHD_BUS_DOWN;
683         bus->dhd->hostrdy_after_init = TRUE;
684         bus->db1_for_mb = TRUE;
685         bus->dhd->hang_report = TRUE;
686         bus->use_mailbox = FALSE;
687         bus->use_d0_inform = FALSE;
688         bus->intr_enabled = FALSE;
689         bus->flr_force_fail = FALSE;
690         /* By default disable HWA and enable it via iovar */
691         bus->hwa_enab_bmap = 0;
692         /* update the dma indices if set through module parameter. */
693         if (dma_ring_indices != 0) {
694             dhdpcie_set_dma_ring_indices(bus->dhd, dma_ring_indices);
695         }
696         /* update h2d phase support if set through module parameter */
697         bus->dhd->h2d_phase_supported = h2d_phase ? TRUE : FALSE;
698         /* update force trap on bad phase if set through module parameter */
699         bus->dhd->force_dongletrap_on_bad_h2d_phase =
700             force_trap_bad_h2d_phase ? TRUE : FALSE;
701 #ifdef IDLE_TX_FLOW_MGMT
702         bus->enable_idle_flowring_mgmt = FALSE;
703 #endif /* IDLE_TX_FLOW_MGMT */
704         bus->irq_registered = FALSE;
705 
706 #ifdef DHD_MSI_SUPPORT
707         bus->d2h_intr_method =
708             enable_msi && dhdpcie_chip_support_msi(bus) ? PCIE_MSI : PCIE_INTX;
709         if (bus->dhd->conf->d2h_intr_method >= 0) {
710             bus->d2h_intr_method = bus->dhd->conf->d2h_intr_method;
711         }
712 #else
713         bus->d2h_intr_method = PCIE_INTX;
714 #endif /* DHD_MSI_SUPPORT */
715 
716 #ifdef DHD_HP2P
717         bus->hp2p_txcpl_max_items = DHD_MAX_ITEMS_HPP_TXCPL_RING;
718         bus->hp2p_rxcpl_max_items = DHD_MAX_ITEMS_HPP_RXCPL_RING;
719 #endif /* DHD_HP2P */
720 
721         DHD_TRACE(("%s: EXIT SUCCESS\n", __FUNCTION__));
722         g_dhd_bus = bus;
723         *bus_ptr = bus;
724         return ret;
725     } while (0);
726 
727     DHD_TRACE(("%s: EXIT FAILURE\n", __FUNCTION__));
728 
729     if (bus && bus->pcie_sh) {
730         MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
731     }
732 
733     if (bus) {
734         MFREE(osh, bus, sizeof(dhd_bus_t));
735     }
736 
737     return ret;
738 }
739 
740 bool dhd_bus_skip_clm(dhd_pub_t *dhdp)
741 {
742     switch (dhd_bus_chip_id(dhdp)) {
743         case BCM4369_CHIP_ID:
744             return TRUE;
745         default:
746             return FALSE;
747     }
748 }
749 
750 uint dhd_bus_chip(struct dhd_bus *bus)
751 {
752     ASSERT(bus->sih != NULL);
753     return bus->sih->chip;
754 }
755 
756 uint dhd_bus_chiprev(struct dhd_bus *bus)
757 {
758     ASSERT(bus);
759     ASSERT(bus->sih != NULL);
760     return bus->sih->chiprev;
761 }
762 
763 void *dhd_bus_pub(struct dhd_bus *bus)
764 {
765     return bus->dhd;
766 }
767 
768 void *dhd_bus_sih(struct dhd_bus *bus)
769 {
770     return (void *)bus->sih;
771 }
772 
773 void *dhd_bus_txq(struct dhd_bus *bus)
774 {
775     return &bus->txq;
776 }
777 
778 /** Get Chip ID version */
779 uint dhd_bus_chip_id(dhd_pub_t *dhdp)
780 {
781     dhd_bus_t *bus = dhdp->bus;
782     return bus->sih->chip;
783 }
784 
785 /** Get Chip Rev ID version */
786 uint dhd_bus_chiprev_id(dhd_pub_t *dhdp)
787 {
788     dhd_bus_t *bus = dhdp->bus;
789     return bus->sih->chiprev;
790 }
791 
792 /** Get Chip Pkg ID version */
793 uint dhd_bus_chippkg_id(dhd_pub_t *dhdp)
794 {
795     dhd_bus_t *bus = dhdp->bus;
796     return bus->sih->chippkg;
797 }
798 
799 int dhd_bus_get_ids(struct dhd_bus *bus, uint32 *bus_type, uint32 *bus_num,
800                     uint32 *slot_num)
801 {
802     *bus_type = bus->bus;
803     *bus_num = bus->bus_num;
804     *slot_num = bus->slot_num;
805     return 0;
806 }
807 
808 /** Conduct Loopback test */
809 int dhd_bus_dmaxfer_lpbk(dhd_pub_t *dhdp, uint32 type)
810 {
811     dma_xfer_info_t dmaxfer_lpbk;
812     int ret = BCME_OK;
813 
814 #define PCIE_DMAXFER_LPBK_LENGTH 4096
815     memset(&dmaxfer_lpbk, 0, sizeof(dma_xfer_info_t));
816     dmaxfer_lpbk.version = DHD_DMAXFER_VERSION;
817     dmaxfer_lpbk.length = (uint16)sizeof(dma_xfer_info_t);
818     dmaxfer_lpbk.num_bytes = PCIE_DMAXFER_LPBK_LENGTH;
819     dmaxfer_lpbk.type = type;
820     dmaxfer_lpbk.should_wait = TRUE;
821 
822     ret = dhd_bus_iovar_op(dhdp, "pcie_dmaxfer", NULL, 0, (char *)&dmaxfer_lpbk,
823                            sizeof(dma_xfer_info_t), IOV_SET);
824     if (ret < 0) {
825         DHD_ERROR(("failed to start PCIe Loopback Test!!! "
826                    "Type:%d Reason:%d\n",
827                    type, ret));
828         return ret;
829     }
830 
831     if (dmaxfer_lpbk.status != DMA_XFER_SUCCESS) {
832         DHD_ERROR(("PCIe Loopback Test failed!!! "
833                    "Type:%d Status:%d Error code:%d\n",
834                    type, dmaxfer_lpbk.status, dmaxfer_lpbk.error_code));
835         ret = BCME_ERROR;
836     } else {
837         DHD_ERROR(("PCIe Loopback Test passed."
838                    " Type:%d\n",
839                    type));
840     }
841 #undef PCIE_DMAXFER_LPBK_LENGTH
842 
843     return ret;
844 }
845 
846 /* Log the latest DPC schedule time */
847 void dhd_bus_set_dpc_sched_time(dhd_pub_t *dhdp)
848 {
849     dhdp->bus->dpc_sched_time = OSL_LOCALTIME_NS();
850 }
851 
852 /* Check for DPC scheduling errors */
853 bool dhd_bus_query_dpc_sched_errors(dhd_pub_t *dhdp)
854 {
855     dhd_bus_t *bus = dhdp->bus;
856     bool sched_err;
857 
858     if (bus->dpc_entry_time < bus->isr_exit_time) {
859         /* Kernel doesn't schedule the DPC after processing PCIe IRQ */
860         sched_err = TRUE;
861     } else if (bus->dpc_entry_time < bus->resched_dpc_time) {
862         /* Kernel doesn't schedule the DPC after DHD tries to reschedule
863          * the DPC due to pending work items to be processed.
864          */
865         sched_err = TRUE;
866     } else {
867         sched_err = FALSE;
868     }
869 
870     if (sched_err) {
871         /* print out minimum timestamp info */
872         DHD_ERROR((
873             "isr_entry_time=" SEC_USEC_FMT " isr_exit_time=" SEC_USEC_FMT
874             " dpc_entry_time=" SEC_USEC_FMT "\ndpc_exit_time=" SEC_USEC_FMT
875             " dpc_sched_time=" SEC_USEC_FMT " resched_dpc_time=" SEC_USEC_FMT
876             "\n",
877             GET_SEC_USEC(bus->isr_entry_time), GET_SEC_USEC(bus->isr_exit_time),
878             GET_SEC_USEC(bus->dpc_entry_time), GET_SEC_USEC(bus->dpc_exit_time),
879             GET_SEC_USEC(bus->dpc_sched_time),
880             GET_SEC_USEC(bus->resched_dpc_time)));
881     }
882 
883     return sched_err;
884 }
885 
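/* On a healthy system the timestamps checked above advance in the order
 * isr_entry -> isr_exit -> dpc_entry -> dpc_exit, and any reschedule request
 * (resched_dpc_time) is followed by a newer dpc_entry_time; either inequality
 * flagged in dhd_bus_query_dpc_sched_errors() therefore means the kernel
 * failed to run the DPC after it was asked to.
 */
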
886 /** Read and clear intstatus. This should be called with interrupts disabled or
887  * inside isr */
888 uint32 dhdpcie_bus_intstatus(dhd_bus_t *bus)
889 {
890     uint32 intstatus = 0;
891     uint32 intmask = 0;
892 
893     if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
894         DHD_ERROR(
895             ("%s: trying to clear intstatus after D3 Ack\n", __FUNCTION__));
896         return intstatus;
897     }
898     if ((bus->sih->buscorerev == 0x6) || (bus->sih->buscorerev == 0x4) ||
899         (bus->sih->buscorerev == 0x2)) {
900         intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 0x4);
901         dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 0x4, intstatus);
902         intstatus &= I_MB;
903     } else {
904         /* this is a PCIE core register..not a config register... */
905         intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
906                                bus->pcie_mailbox_int, 0, 0);
907 
908         /* this is a PCIE core register..not a config register... */
909         intmask = si_corereg(bus->sih, bus->sih->buscoreidx,
910                              bus->pcie_mailbox_mask, 0, 0);
911         /* If the device was removed, intstatus and intmask read as 0xffffffff */
912         if (intstatus == (uint32)-1 || intmask == (uint32)-1) {
913             DHD_ERROR(
914                 ("%s: Device is removed or Link is down.\n", __FUNCTION__));
915             DHD_ERROR(("%s: INTSTAT : 0x%x INTMASK : 0x%x.\n", __FUNCTION__,
916                        intstatus, intmask));
917             bus->is_linkdown = TRUE;
918             dhd_pcie_debug_info_dump(bus->dhd);
919             return intstatus;
920         }
921 
922 #ifndef DHD_READ_INTSTATUS_IN_DPC
923         intstatus &= intmask;
924 #endif /* DHD_READ_INTSTATUS_IN_DPC */
925 
926         /*
927          * The fourth argument to si_corereg is the "mask" of register fields
928          * to update and the fifth is the "value" to write. When only a few
929          * fields of the "mask" bit map are of interest, we must not write
930          * back everything we read; doing so could clear/ack interrupts that
931          * have not been handled yet.
932          */
933         si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int,
934                    bus->def_intmask, intstatus);
935 
936         intstatus &= bus->def_intmask;
937     }
938 
939     return intstatus;
940 }
941 
942 void dhdpcie_cto_recovery_handler(dhd_pub_t *dhd)
943 {
944     dhd_bus_t *bus = dhd->bus;
945     int ret;
946 
947     /* Disable PCIe Runtime PM to avoid D3_ACK timeout.
948      */
949     DHD_DISABLE_RUNTIME_PM(dhd);
950 
951     /* Sleep for 1 second so that any AXI timeout,
952      * if running on the ALP clock, is also captured
953      */
954     OSL_SLEEP(0x3E8);
955 
956     /* reset backplane and cto,
957      * then access through pcie is recovered.
958      */
959     ret = dhdpcie_cto_error_recovery(bus);
960     if (!ret) {
961         /* Waiting for backplane reset */
962         OSL_SLEEP(0xA);
963         /* Dump debug Info */
964         dhd_prot_debug_info_print(bus->dhd);
965         /* Dump console buffer */
966         dhd_bus_dump_console_buffer(bus);
967 #if defined(DHD_FW_COREDUMP)
968         /* save core dump or write to a file */
969         if (!bus->is_linkdown && bus->dhd->memdump_enabled) {
970 #ifdef DHD_SSSR_DUMP
971             bus->dhd->collect_sssr = TRUE;
972 #endif /* DHD_SSSR_DUMP */
973             bus->dhd->memdump_type = DUMP_TYPE_CTO_RECOVERY;
974             dhdpcie_mem_dump(bus);
975         }
976 #endif /* DHD_FW_COREDUMP */
977     }
978     bus->is_linkdown = TRUE;
979     bus->dhd->hang_reason = HANG_REASON_PCIE_CTO_DETECT;
980     /* Send HANG event */
981     dhd_os_send_hang_message(bus->dhd);
982 }
983 
984 /**
985  * Name:  dhdpcie_bus_isr
986  * Parameters
987  * 1: IN int irq   -- interrupt vector
988  * 2: IN void *arg      -- handle to private data structure
989  * Return value:
990  * Status (TRUE or FALSE)
991  *
992  * Description:
993  * Interrupt Service routine checks for the status register,
994  * disable interrupt and queue DPC if mail box interrupts are raised.
995  */
996 int32 dhdpcie_bus_isr(dhd_bus_t *bus)
997 {
998     uint32 intstatus = 0;
999 
1000     do {
1001         DHD_INTR(("%s: Enter\n", __FUNCTION__));
1002         /* verify argument */
1003         if (!bus) {
1004             DHD_LOG_MEM(("%s : bus is null pointer, exit \n", __FUNCTION__));
1005             break;
1006         }
1007 
1008         if (bus->dhd->dongle_reset) {
1009             DHD_LOG_MEM(("%s : dongle is reset\n", __FUNCTION__));
1010             break;
1011         }
1012 
1013         if (bus->dhd->busstate == DHD_BUS_DOWN) {
1014             DHD_LOG_MEM(("%s : bus is down \n", __FUNCTION__));
1015             break;
1016         }
1017 
1018         /* avoid processing of interrupts until msgbuf prot is inited */
1019         if (!bus->intr_enabled) {
1020             DHD_INFO(("%s, not ready to receive interrupts\n", __FUNCTION__));
1021             break;
1022         }
1023 
1024         if (PCIECTO_ENAB(bus)) {
1025             /* read pci_intstatus */
1026             intstatus = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_STATUS, 0x4);
1027             if (intstatus == (uint32)-1) {
1028                 DHD_ERROR(("%s : Invalid intstatus for cto recovery\n",
1029                            __FUNCTION__));
1030                 dhdpcie_disable_irq_nosync(bus);
1031                 break;
1032             }
1033 
1034             if (intstatus & PCI_CTO_INT_MASK) {
1035                 DHD_ERROR(("%s: ##### CTO RECOVERY REPORTED BY DONGLE "
1036                            "intstat=0x%x enab=%d\n",
1037                            __FUNCTION__, intstatus, bus->cto_enable));
1038                 bus->cto_triggered = 1;
1039                 /*
1040                  * DAR still accessible
1041                  */
1042                 dhd_bus_dump_dar_registers(bus);
1043 
1044                 /* Disable further PCIe interrupts */
1045                 dhdpcie_disable_irq_nosync(bus); /* Disable interrupt!! */
1046                 /* Stop Tx flow */
1047                 dhd_bus_stop_queue(bus);
1048 
1049                 /* Schedule CTO recovery */
1050                 dhd_schedule_cto_recovery(bus->dhd);
1051 
1052                 return TRUE;
1053             }
1054         }
1055 
1056         if (bus->d2h_intr_method == PCIE_MSI &&
1057             !dhd_conf_legacy_msi_chip(bus->dhd)) {
1058             /* For MSI, as intstatus is cleared by firmware, no need to read */
1059             goto skip_intstatus_read;
1060         }
1061 
1062 #ifndef DHD_READ_INTSTATUS_IN_DPC
1063         intstatus = dhdpcie_bus_intstatus(bus);
1064         /* Check if the interrupt is ours or not */
1065         if (intstatus == 0) {
1066             /* in EFI since we poll for interrupt, this message will flood the
1067              * logs so disable this for EFI
1068              */
1069             DHD_LOG_MEM(("%s : this interrupt is not ours\n", __FUNCTION__));
1070             bus->non_ours_irq_count++;
1071             bus->last_non_ours_irq_time = OSL_LOCALTIME_NS();
1072             break;
1073         }
1074 
1075         /* save the intstatus */
1076         /* read interrupt status register!! Status bits will be cleared in DPC
1077          * !! */
1078         bus->intstatus = intstatus;
1079 
1080         /* return error for 0xFFFFFFFF */
1081         if (intstatus == (uint32)-1) {
1082             DHD_LOG_MEM(("%s : wrong interrupt status val : 0x%x\n",
1083                          __FUNCTION__, intstatus));
1084             dhdpcie_disable_irq_nosync(bus);
1085             break;
1086         }
1087 
1088     skip_intstatus_read:
1089         /*  Overall operation:
1090          *    - Mask further interrupts
1091          *    - Read/ack intstatus
1092          *    - Take action based on bits and state
1093          *    - Reenable interrupts (as per state)
1094          */
1095 
1096         /* Count the interrupt call */
1097         bus->intrcount++;
1098 #endif /* DHD_READ_INTSTATUS_IN_DPC */
1099 
1100         bus->ipend = TRUE;
1101 
1102         bus->isr_intr_disable_count++;
1103 
1104 #ifdef CHIP_INTR_CONTROL
1105         dhdpcie_bus_intr_disable(bus); /* Disable interrupt using IntMask!! */
1106 #else
1107         /* For Linux, macOS, etc. (other than NDIS), instead of disabling
1108          * the dongle interrupt by clearing the IntMask, disable the
1109          * interrupt directly on the host side, so that the host receives
1110          * no interrupts at all, even though the dongle raises them
1111          */
1112         dhdpcie_disable_irq_nosync(bus); /* Disable interrupt!! */
1113 #endif /* CHIP_INTR_CONTROL */
1114 
1115         bus->intdis = TRUE;
1116 
1117 #if defined(PCIE_ISR_THREAD)
1118 
1119         DHD_TRACE(("Calling dhd_bus_dpc() from %s\n", __FUNCTION__));
1120         DHD_OS_WAKE_LOCK(bus->dhd);
1121         while (dhd_bus_dpc(bus)) {
1122             ;
1123         }
1124         DHD_OS_WAKE_UNLOCK(bus->dhd);
1125 #else
1126         bus->dpc_sched = TRUE;
1127         dhd_sched_dpc(bus->dhd); /* queue DPC now!! */
1128 #endif /* defined(PCIE_ISR_THREAD) */
1129 
1130         DHD_INTR(("%s: Exit Success DPC Queued\n", __FUNCTION__));
1131         return TRUE;
1132     } while (0);
1133 
1134     DHD_INTR(("%s: Exit Failure\n", __FUNCTION__));
1135     return FALSE;
1136 }
1137 
1138 int dhdpcie_set_pwr_state(dhd_bus_t *bus, uint state)
1139 {
1140     uint32 cur_state = 0;
1141     uint32 pm_csr = 0;
1142     osl_t *osh = bus->osh;
1143 
1144     pm_csr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
1145     cur_state = pm_csr & PCIECFGREG_PM_CSR_STATE_MASK;
1146 
1147     if (cur_state == state) {
1148         DHD_ERROR(("%s: Already in state %u \n", __FUNCTION__, cur_state));
1149         return BCME_OK;
1150     }
1151 
1152     if (state > PCIECFGREG_PM_CSR_STATE_D3_HOT) {
1153         return BCME_ERROR;
1154     }
1155 
1156     /* Validate the state transition
1157      * if already in a lower power state, return error
1158      */
1159     if (state != PCIECFGREG_PM_CSR_STATE_D0 &&
1160         cur_state <= PCIECFGREG_PM_CSR_STATE_D3_COLD && cur_state > state) {
1161         DHD_ERROR(("%s: Invalid power state transition !\n", __FUNCTION__));
1162         return BCME_ERROR;
1163     }
1164 
1165     pm_csr &= ~PCIECFGREG_PM_CSR_STATE_MASK;
1166     pm_csr |= state;
1167 
1168     OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32), pm_csr);
1169 
1170     /* need to wait for the specified mandatory pcie power transition delay time
1171      */
1172     if (state == PCIECFGREG_PM_CSR_STATE_D3_HOT ||
1173         cur_state == PCIECFGREG_PM_CSR_STATE_D3_HOT) {
1174         OSL_DELAY(DHDPCIE_PM_D3_DELAY);
1175     } else if (state == PCIECFGREG_PM_CSR_STATE_D2 ||
1176                cur_state == PCIECFGREG_PM_CSR_STATE_D2) {
1177         OSL_DELAY(DHDPCIE_PM_D2_DELAY);
1178     }
1179 
1180     /* read back the power state and verify */
1181     pm_csr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
1182     cur_state = pm_csr & PCIECFGREG_PM_CSR_STATE_MASK;
1183     if (cur_state != state) {
1184         DHD_ERROR(("%s: power transition failed ! Current state is %u \n",
1185                    __FUNCTION__, cur_state));
1186         return BCME_ERROR;
1187     } else {
1188         DHD_ERROR(
1189             ("%s: power transition to %u success \n", __FUNCTION__, cur_state));
1190     }
1191 
1192     return BCME_OK;
1193 }
1194 
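/* A minimal sketch (hypothetical call site) of driving a D3hot/D0 cycle with
 * the helper above, e.g. around a suspend/resume path:
 *
 *   dhdpcie_set_pwr_state(bus, PCIECFGREG_PM_CSR_STATE_D3_HOT);
 *   ... device suspended ...
 *   dhdpcie_set_pwr_state(bus, PCIECFGREG_PM_CSR_STATE_D0);
 */
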
1195 int dhdpcie_config_check(dhd_bus_t *bus)
1196 {
1197     uint32 i, val;
1198     int ret = BCME_ERROR;
1199 
1200     for (i = 0; i < DHDPCIE_CONFIG_CHECK_RETRY_COUNT; i++) {
1201         val = OSL_PCI_READ_CONFIG(bus->osh, PCI_CFG_VID, sizeof(uint32));
1202         if ((val & 0xFFFF) == VENDOR_BROADCOM) {
1203             ret = BCME_OK;
1204             break;
1205         }
1206         OSL_DELAY(DHDPCIE_CONFIG_CHECK_DELAY_MS * 0x3E8);
1207     }
1208 
1209     return ret;
1210 }
1211 
1212 int dhdpcie_config_restore(dhd_bus_t *bus, bool restore_pmcsr)
1213 {
1214     uint32 i;
1215     osl_t *osh = bus->osh;
1216 
1217     if (BCME_OK != dhdpcie_config_check(bus)) {
1218         return BCME_ERROR;
1219     }
1220 
1221     for (i = PCI_CFG_REV >> 0x2; i < DHDPCIE_CONFIG_HDR_SIZE; i++) {
1222         OSL_PCI_WRITE_CONFIG(osh, i << 0x2, sizeof(uint32),
1223                              bus->saved_config.header[i]);
1224     }
1225     OSL_PCI_WRITE_CONFIG(osh, PCI_CFG_CMD, sizeof(uint32),
1226                          bus->saved_config.header[1]);
1227 
1228     if (restore_pmcsr) {
1229         OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32),
1230                              bus->saved_config.pmcsr);
1231     }
1232 
1233     OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_CAP, sizeof(uint32),
1234                          bus->saved_config.msi_cap);
1235     OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_ADDR_L, sizeof(uint32),
1236                          bus->saved_config.msi_addr0);
1237     OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_ADDR_H, sizeof(uint32),
1238                          bus->saved_config.msi_addr1);
1239     OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_DATA, sizeof(uint32),
1240                          bus->saved_config.msi_data);
1241 
1242     OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_DEV_STATUS_CTRL, sizeof(uint32),
1243                          bus->saved_config.exp_dev_ctrl_stat);
1244     OSL_PCI_WRITE_CONFIG(osh, PCIECFGGEN_DEV_STATUS_CTRL2, sizeof(uint32),
1245                          bus->saved_config.exp_dev_ctrl_stat2);
1246     OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(uint32),
1247                          bus->saved_config.exp_link_ctrl_stat);
1248     OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL2, sizeof(uint32),
1249                          bus->saved_config.exp_link_ctrl_stat2);
1250 
1251     OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL1, sizeof(uint32),
1252                          bus->saved_config.l1pm0);
1253     OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2, sizeof(uint32),
1254                          bus->saved_config.l1pm1);
1255 
1256     OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, sizeof(uint32),
1257                          bus->saved_config.bar0_win);
1258     dhdpcie_setbar1win(bus, bus->saved_config.bar1_win);
1259 
1260     return BCME_OK;
1261 }
1262 
1263 int dhdpcie_config_save(dhd_bus_t *bus)
1264 {
1265     uint32 i;
1266     osl_t *osh = bus->osh;
1267 
1268     if (BCME_OK != dhdpcie_config_check(bus)) {
1269         return BCME_ERROR;
1270     }
1271 
1272     for (i = 0; i < DHDPCIE_CONFIG_HDR_SIZE; i++) {
1273         bus->saved_config.header[i] =
1274             OSL_PCI_READ_CONFIG(osh, i << 0x2, sizeof(uint32));
1275     }
1276 
1277     bus->saved_config.pmcsr =
1278         OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
1279 
1280     bus->saved_config.msi_cap =
1281         OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_CAP, sizeof(uint32));
1282     bus->saved_config.msi_addr0 =
1283         OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_L, sizeof(uint32));
1284     bus->saved_config.msi_addr1 =
1285         OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_H, sizeof(uint32));
1286     bus->saved_config.msi_data =
1287         OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_DATA, sizeof(uint32));
1288 
1289     bus->saved_config.exp_dev_ctrl_stat =
1290         OSL_PCI_READ_CONFIG(osh, PCIECFGREG_DEV_STATUS_CTRL, sizeof(uint32));
1291     bus->saved_config.exp_dev_ctrl_stat2 =
1292         OSL_PCI_READ_CONFIG(osh, PCIECFGGEN_DEV_STATUS_CTRL2, sizeof(uint32));
1293     bus->saved_config.exp_link_ctrl_stat =
1294         OSL_PCI_READ_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(uint32));
1295     bus->saved_config.exp_link_ctrl_stat2 =
1296         OSL_PCI_READ_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL2, sizeof(uint32));
1297 
1298     bus->saved_config.l1pm0 =
1299         OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL1, sizeof(uint32));
1300     bus->saved_config.l1pm1 =
1301         OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2, sizeof(uint32));
1302 
1303     bus->saved_config.bar0_win =
1304         OSL_PCI_READ_CONFIG(osh, PCI_BAR0_WIN, sizeof(uint32));
1305     bus->saved_config.bar1_win =
1306         OSL_PCI_READ_CONFIG(osh, PCI_BAR1_WIN, sizeof(uint32));
1307 
1308     return BCME_OK;
1309 }
1310 
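/* dhdpcie_config_save() and dhdpcie_config_restore() are meant to be used as
 * a pair around operations that can clobber PCIe config space (e.g. FLR or
 * link-down recovery): save first, then restore - optionally including PM_CSR
 * - once dhdpcie_config_check() sees the device again:
 *
 *   dhdpcie_config_save(bus);
 *   ... disruptive reset / recovery ...
 *   dhdpcie_config_restore(bus, TRUE);
 */
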
1311 #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
1312 dhd_pub_t *link_recovery = NULL;
1313 #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
1314 
1315 static void dhdpcie_bus_intr_init(dhd_bus_t *bus)
1316 {
1317     uint buscorerev = bus->sih->buscorerev;
1318     bus->pcie_mailbox_int = PCIMailBoxInt(buscorerev);
1319     bus->pcie_mailbox_mask = PCIMailBoxMask(buscorerev);
1320     bus->d2h_mb_mask = PCIE_MB_D2H_MB_MASK(buscorerev);
1321     bus->def_intmask = PCIE_MB_D2H_MB_MASK(buscorerev);
1322     if (buscorerev < 0x40) {
1323         bus->def_intmask |= PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1;
1324     }
1325 }
1326 
1327 static void dhdpcie_cc_watchdog_reset(dhd_bus_t *bus)
1328 {
1329     uint32 wd_en = (bus->sih->buscorerev >= 66)
1330                        ? WD_SSRESET_PCIE_F0_EN
1331                        : (WD_SSRESET_PCIE_F0_EN | WD_SSRESET_PCIE_ALL_FN_EN);
1332     pcie_watchdog_reset(bus->osh, bus->sih, WD_ENABLE_MASK, wd_en);
1333 }
1334 
1335 void dhdpcie_dongle_reset(dhd_bus_t *bus)
1336 {
1337     /* if the pcie link is down, watchdog reset
1338      * should not be done, as it may hang
1339      */
1340     if (bus->is_linkdown) {
1341         return;
1342     }
1343 
1344     /* dhd_bus_perform_flr will return BCME_UNSUPPORTED if chip is not FLR
1345      * capable */
1346     if (dhd_bus_perform_flr(bus, FALSE) == BCME_UNSUPPORTED) {
1347 #ifdef DHD_USE_BP_RESET
1348         /* Backplane reset using SPROM cfg register(0x88) for buscorerev <= 24
1349          */
1350         dhd_bus_perform_bp_reset(bus);
1351 #else
1352         /* Legacy chipcommon watchdog reset */
1353         dhdpcie_cc_watchdog_reset(bus);
1354 #endif /* DHD_USE_BP_RESET */
1355     }
1356 }
1357 
1358 static bool dhdpcie_dongle_attach(dhd_bus_t *bus)
1359 {
1360     osl_t *osh = bus->osh;
1361     volatile void *regsva = (volatile void *)bus->regs;
1362     uint16 devid;
1363     uint32 val;
1364     sbpcieregs_t *sbpcieregs;
1365     bool dongle_isolation;
1366 
1367     DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
1368 
1369 #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
1370     link_recovery = bus->dhd;
1371 #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
1372 
1373     bus->alp_only = TRUE;
1374     bus->sih = NULL;
1375 
1376     /* Checking PCIe bus status with reading configuration space */
1377     val = OSL_PCI_READ_CONFIG(osh, PCI_CFG_VID, sizeof(uint32));
1378     if ((val & 0xFFFF) != VENDOR_BROADCOM) {
1379         DHD_ERROR(
1380             ("%s : failed to read PCI configuration space!\n", __FUNCTION__));
1381         goto fail;
1382     }
1383     devid = (val >> 0x10) & 0xFFFF;
1384     bus->cl_devid = devid;
1385 
1386     /* Set bar0 window to si_enum_base */
1387     dhdpcie_bus_cfg_set_bar0_win(bus, si_enum_base(devid));
1388 
1389     /*
1390      * Checking PCI_SPROM_CONTROL register for preventing invalid address access
1391      * due to switch address space from PCI_BUS to SI_BUS.
1392      */
1393     val = OSL_PCI_READ_CONFIG(osh, PCI_SPROM_CONTROL, sizeof(uint32));
1394     if (val == 0xffffffff) {
1395         DHD_ERROR(
1396             ("%s : failed to read SPROM control register\n", __FUNCTION__));
1397         goto fail;
1398     }
1399 
1400     /* si_attach() will provide an SI handle and scan the backplane */
1401     if (!(bus->sih = si_attach((uint)devid, osh, regsva, PCI_BUS, bus,
1402                                &bus->vars, &bus->varsz))) {
1403         DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__));
1404         goto fail;
1405     }
1406 
1407     /* Configure CTO Prevention functionality */
1408 #if defined(BCMFPGA_HW)
1409     DHD_ERROR(("Disable CTO\n"));
1410     bus->cto_enable = FALSE;
1411 #else
1412 #if defined(BCMPCIE_CTO_PREVENTION)
1413     if (bus->sih->buscorerev >= 0x18) {
1414         DHD_ERROR(("Enable CTO\n"));
1415         bus->cto_enable = TRUE;
1416     } else
1417 #endif /* BCMPCIE_CTO_PREVENTION */
1418     {
1419         DHD_ERROR(("Disable CTO\n"));
1420         bus->cto_enable = FALSE;
1421     }
1422 #endif /* BCMFPGA_HW */
1423 
1424     if (PCIECTO_ENAB(bus)) {
1425         dhdpcie_cto_init(bus, TRUE);
1426     }
1427 
1428     if (MULTIBP_ENAB(bus->sih) && (bus->sih->buscorerev >= 0x42)) {
1429         /*
1430          * HW JIRA - CRWLPCIEGEN2-672
1431          * Producer Index Feature which is used by F1 gets reset on F0 FLR
1432          * fixed in REV68
1433          */
1434         if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) {
1435             dhdpcie_ssreset_dis_enum_rst(bus);
1436         }
1437 
1438         /* IOV_DEVRESET could exercise si_detach()/si_attach() again so reset
1439          *   dhdpcie_bus_release_dongle() --> si_detach()
1440          *   dhdpcie_dongle_attach() --> si_attach()
1441          */
1442         bus->pwr_req_ref = 0;
1443     }
1444 
1445     if (MULTIBP_ENAB(bus->sih)) {
1446         dhd_bus_pcie_pwr_req_nolock(bus);
1447     }
1448 
1449     /* Get info on the ARM and SOCRAM cores... */
1450     /* Should really be qualified by device id */
1451     if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
1452         (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) ||
1453         (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) ||
1454         (si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
1455         bus->armrev = si_corerev(bus->sih);
1456         bus->coreid = si_coreid(bus->sih);
1457     } else {
1458         DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
1459         goto fail;
1460     }
1461 
1462     /* CA7 requires coherent bits on */
1463     if (bus->coreid == ARMCA7_CORE_ID) {
1464         val = dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 0x4);
1465         dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 0x4,
1466                                     (val | PCIE_BARCOHERENTACCEN_MASK));
1467     }
1468 
1469     /* Olympic EFI requirement - stop the driver load if FW is already
1470      * running. This must be done here, before pcie_watchdog_reset, because
1471      * pcie_watchdog_reset puts the ARM back into the halted state.
1472      */
1473     if (!dhdpcie_is_arm_halted(bus)) {
1474         DHD_ERROR(("%s: ARM is not halted, FW is already running! Abort.\n",
1475                    __FUNCTION__));
1476         goto fail;
1477     }
1478 
1479     BCM_REFERENCE(dongle_isolation);
1480 
1481     /* For in-built drivers the PCIe CLKREQ is handled by the RC,
1482      * so do not issue clkreq from DHD.
1483      */
1484     if (dhd_download_fw_on_driverload) {
1485         /* Enable CLKREQ# */
1486         dhdpcie_clkreq(bus->osh, 1, 1);
1487     }
1488 
1489     /*
1490      * bus->dhd is NULL when this is called from dhd_bus_attach, so reset
1491      * without checking the dongle_isolation flag; when called via another
1492      * path such as quiesce FLR, issue the watchdog reset based on the
1493      * dongle_isolation flag.
1494      */
1495     if (bus->dhd == NULL) {
1496         /* dhd_attach not yet happened, do watchdog reset */
1497         dongle_isolation = FALSE;
1498     } else {
1499         dongle_isolation = bus->dhd->dongle_isolation;
1500     }
1501 
1502 #ifndef DHD_SKIP_DONGLE_RESET_IN_ATTACH
1503     /*
1504      * Issue a CC watchdog to reset all cores on the chip, similar to rmmod
1505      * dhd. This is required to avoid spurious interrupts to the host and to
1506      * bring the dongle back to a sane state (on host soft-/watchdog-reboot).
1507      */
1508     if (dongle_isolation == FALSE) {
1509         dhdpcie_dongle_reset(bus);
1510     }
1511 #endif /* !DHD_SKIP_DONGLE_RESET_IN_ATTACH */
1512 
1513     /* need to set the force_bt_quiesce flag here
1514      * before calling dhdpcie_dongle_flr_or_pwr_toggle
1515      */
1516     bus->force_bt_quiesce = TRUE;
1517     /*
1518      * For buscorerev 66 and later, F0 FLR is done independently of F1,
1519      * so BT quiesce is not needed.
1520      */
1521     if (bus->sih->buscorerev >= 0x42) {
1522         bus->force_bt_quiesce = FALSE;
1523     }
1524 
1525     dhdpcie_dongle_flr_or_pwr_toggle(bus);
1526 
1527     si_setcore(bus->sih, PCIE2_CORE_ID, 0);
1528     sbpcieregs = (sbpcieregs_t *)(bus->regs);
1529 
1530     /* WAR for cases where the BAR1 window may not be sized properly */
1531     W_REG(osh, &sbpcieregs->configaddr, 0x4e0);
1532     val = R_REG(osh, &sbpcieregs->configdata);
1533     W_REG(osh, &sbpcieregs->configdata, val);
1534 
1535     if (si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
1536         /* Only set dongle RAMSIZE to default value when BMC vs ARM usage of
1537          * SYSMEM is not adjusted.
1538          */
1539         if (!bus->ramsize_adjusted) {
1540             if (!(bus->orig_ramsize = si_sysmem_size(bus->sih))) {
1541                 DHD_ERROR(
1542                     ("%s: failed to find SYSMEM memory!\n", __FUNCTION__));
1543                 goto fail;
1544             }
1545             switch ((uint16)bus->sih->chip) {
1546                 default:
1547                     /* also populate base address */
1548                     bus->dongle_ram_base = CA7_4365_RAM_BASE;
1549                     bus->orig_ramsize = 0x1c0000; /* Reserve 1.75MB for CA7 */
1550                     break;
1551             }
1552         }
1553     } else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
1554         if (!(bus->orig_ramsize = si_socram_size(bus->sih))) {
1555             DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__));
1556             goto fail;
1557         }
1558     } else {
1559         /* CR4 has a different way to find the RAM size, from the TCMs */
1560         if (!(bus->orig_ramsize = si_tcm_size(bus->sih))) {
1561             DHD_ERROR(("%s: failed to find CR4-TCM memory!\n", __FUNCTION__));
1562             goto fail;
1563         }
1564         /* also populate base address */
1565         switch ((uint16)bus->sih->chip) {
1566             case BCM4339_CHIP_ID:
1567             case BCM4335_CHIP_ID:
1568                 bus->dongle_ram_base = CR4_4335_RAM_BASE;
1569                 break;
1570             case BCM4358_CHIP_ID:
1571             case BCM4354_CHIP_ID:
1572             case BCM43567_CHIP_ID:
1573             case BCM43569_CHIP_ID:
1574             case BCM4350_CHIP_ID:
1575             case BCM43570_CHIP_ID:
1576                 bus->dongle_ram_base = CR4_4350_RAM_BASE;
1577                 break;
1578             case BCM4360_CHIP_ID:
1579                 bus->dongle_ram_base = CR4_4360_RAM_BASE;
1580                 break;
1581 
1582             case BCM4364_CHIP_ID:
1583                 bus->dongle_ram_base = CR4_4364_RAM_BASE;
1584                 break;
1585 
1586             CASE_BCM4345_CHIP:
1587                 bus->dongle_ram_base =
1588                     (bus->sih->chiprev < 0x6) /* changed at 4345C0 */
1589                         ? CR4_4345_LT_C0_RAM_BASE
1590                         : CR4_4345_GE_C0_RAM_BASE;
1591                 break;
1592             CASE_BCM43602_CHIP:
1593                 bus->dongle_ram_base = CR4_43602_RAM_BASE;
1594                 break;
1595             case BCM4349_CHIP_GRPID:
1596                 /* RAM base changed from 4349C0 (revid=9) onwards */
1597                 bus->dongle_ram_base =
1598                     ((bus->sih->chiprev < 0x9) ? CR4_4349_RAM_BASE
1599                                              : CR4_4349_RAM_BASE_FROM_REV_9);
1600                 break;
1601             case BCM4347_CHIP_ID:
1602             case BCM4357_CHIP_ID:
1603             case BCM4361_CHIP_ID:
1604                 bus->dongle_ram_base = CR4_4347_RAM_BASE;
1605                 break;
1606             case BCM4362_CHIP_ID:
1607                 bus->dongle_ram_base = CR4_4362_RAM_BASE;
1608                 break;
1609             case BCM43751_CHIP_ID:
1610                 bus->dongle_ram_base = CR4_43751_RAM_BASE;
1611                 break;
1612             case BCM43752_CHIP_ID:
1613                 bus->dongle_ram_base = CR4_43752_RAM_BASE;
1614                 break;
1615             case BCM4375_CHIP_ID:
1616             case BCM4369_CHIP_ID:
1617                 bus->dongle_ram_base = CR4_4369_RAM_BASE;
1618                 break;
1619             default:
1620                 bus->dongle_ram_base = 0;
1621                 DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n",
1622                            __FUNCTION__, bus->dongle_ram_base));
1623         }
1624     }
1625     bus->ramsize = bus->orig_ramsize;
1626     if (dhd_dongle_memsize) {
1627         dhdpcie_bus_dongle_setmemsize(bus, dhd_dongle_memsize);
1628     }
1629 
1630     if (bus->ramsize > DONGLE_TCM_MAP_SIZE) {
1631         DHD_ERROR(("%s : invalid ramsize %d(0x%x) is returned from dongle\n",
1632                    __FUNCTION__, bus->ramsize, bus->ramsize));
1633         goto fail;
1634     }
1635 
1636     DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n",
1637                bus->ramsize, bus->orig_ramsize, bus->dongle_ram_base));
1638 
1639     bus->srmemsize = si_socram_srmem_size(bus->sih);
1640 
1641     dhdpcie_bus_intr_init(bus);
1642 
1643     /* Set the poll and/or interrupt flags */
1644     bus->intr = (bool)dhd_intr;
1645     if ((bus->poll = (bool)dhd_poll)) {
1646         bus->pollrate = 1;
1647     }
1648 #ifdef DHD_DISABLE_ASPM
1649     dhd_bus_aspm_enable_rc_ep(bus, FALSE);
1650 #endif /* DHD_DISABLE_ASPM */
1651 
1652     bus->idma_enabled = TRUE;
1653     bus->ifrm_enabled = TRUE;
1654     DHD_TRACE(("%s: EXIT: SUCCESS\n", __FUNCTION__));
1655 
1656     if (MULTIBP_ENAB(bus->sih)) {
1657         dhd_bus_pcie_pwr_req_clear_nolock(bus);
1658 
1659         /*
1660          * One-time clearing of the Common Power Domain, since the HW default
1661          * is set. This must happen after FLR, because FLR resets the PCIe
1662          * enum back to HW defaults on 4378B0 (rev 68). On 4378A0 (rev 66),
1663          * the PCIe enum reset is disabled due to CRWLPCIEGEN2-672.
1664          */
1665         si_srpwr_request(bus->sih, SRPWR_DMN0_PCIE_MASK, 0);
1666 
1667         /*
1668          * WAR to fix ARM cold boot: asserting the WL domain in DAR helps,
1669          * but asserting it via enum space does not.
1670          */
1671         if (bus->sih->buscorerev >= 0x44) {
1672             dhd_bus_pcie_pwr_req_wl_domain(
1673                 bus, DAR_PCIE_PWR_CTRL((bus->sih)->buscorerev), TRUE);
1674         }
1675     }
1676 
1677     return 0;
1678 
1679 fail:
1680     if (bus->sih != NULL) {
1681         if (MULTIBP_ENAB(bus->sih)) {
1682             dhd_bus_pcie_pwr_req_clear_nolock(bus);
1683         }
1684         /* For EFI, even if there is an error the load still succeeds,
1685          * so si_detach should not be called here; it is called during unload.
1686          */
1687         si_detach(bus->sih);
1688         bus->sih = NULL;
1689     }
1690     DHD_TRACE(("%s: EXIT: FAILURE\n", __FUNCTION__));
1691     return -1;
1692 }
1693 
1694 int dhpcie_bus_unmask_interrupt(dhd_bus_t *bus)
1695 {
1696     dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 0x4, I_MB);
1697     return 0;
1698 }
1699 int dhpcie_bus_mask_interrupt(dhd_bus_t *bus)
1700 {
1701     dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 0x4, 0x0);
1702     return 0;
1703 }
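/*
 * Illustrative sketch (not part of the driver): the two helpers above simply
 * write the PCI interrupt-mask config register via dhdpcie_bus_cfg_write_dword,
 * so masking and unmasking reduce to writing 0 or I_MB. A hypothetical caller
 * quiescing mailbox interrupts around a critical section could do:
 *
 *     dhpcie_bus_mask_interrupt(bus);    // write 0x0 to PCIIntmask
 *     ... touch state with mailbox interrupts masked ...
 *     dhpcie_bus_unmask_interrupt(bus);  // write I_MB back to PCIIntmask
 */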
1704 
1705 /* Non atomic function, caller should hold appropriate lock */
1706 void dhdpcie_bus_intr_enable(dhd_bus_t *bus)
1707 {
1708     DHD_TRACE(("%s Enter\n", __FUNCTION__));
1709     if (bus) {
1710         if (bus->sih && !bus->is_linkdown) {
1711             /* Skip after receiving D3 ACK */
1712             if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
1713                 return;
1714             }
1715             if ((bus->sih->buscorerev == 0x2) || (bus->sih->buscorerev == 0x6) ||
1716                 (bus->sih->buscorerev == 0x4)) {
1717                 dhpcie_bus_unmask_interrupt(bus);
1718             } else {
1719 #if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP)
1720                 dhd_bus_mmio_trace(bus, bus->pcie_mailbox_mask,
1721                                    bus->def_intmask, TRUE);
1722 #endif
1723                 si_corereg(bus->sih, bus->sih->buscoreidx,
1724                            bus->pcie_mailbox_mask, bus->def_intmask,
1725                            bus->def_intmask);
1726             }
1727         }
1728     }
1729 
1730     DHD_TRACE(("%s Exit\n", __FUNCTION__));
1731 }
1732 
1733 /* Non atomic function, caller should hold appropriate lock */
1734 void dhdpcie_bus_intr_disable(dhd_bus_t *bus)
1735 {
1736     DHD_TRACE(("%s Enter\n", __FUNCTION__));
1737     if (bus && bus->sih && !bus->is_linkdown) {
1738         /* Skip after receiving D3 ACK */
1739         if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
1740             return;
1741         }
1742         if ((bus->sih->buscorerev == 0x2) || (bus->sih->buscorerev == 0x6) ||
1743             (bus->sih->buscorerev == 0x4)) {
1744             dhpcie_bus_mask_interrupt(bus);
1745         } else {
1746             si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask,
1747                        bus->def_intmask, 0);
1748         }
1749     }
1750 
1751     DHD_TRACE(("%s Exit\n", __FUNCTION__));
1752 }
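/*
 * Usage sketch (mirrors the call site in dhdpcie_bus_release below): these
 * enable/disable helpers are non-atomic, so callers hold the bus spinlock
 * around them:
 *
 *     unsigned long flags_bus;
 *     DHD_BUS_LOCK(bus->bus_lock, flags_bus);
 *     dhdpcie_bus_intr_disable(bus);
 *     DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
 */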
1753 
1754 /*
1755  * dhdpcie_advertise_bus_cleanup advertises that cleanup is in progress to
1756  * the other bus user contexts (Tx, Rx, IOVAR, WD, etc.) and waits for those
1757  * contexts to exit gracefully. Before marking busstate as busy, every bus
1758  * usage context checks whether the busstate is DHD_BUS_DOWN or
1759  * DHD_BUS_DOWN_IN_PROGRESS; if so, it exits right there without marking
1760  * dhd_bus_busy_state as BUSY.
1761  */
1762 void dhdpcie_advertise_bus_cleanup(dhd_pub_t *dhdp)
1763 {
1764     unsigned long flags;
1765     int timeleft;
1766 
1767     dhdp->dhd_watchdog_ms_backup = dhd_watchdog_ms;
1768     if (dhdp->dhd_watchdog_ms_backup) {
1769         DHD_ERROR(("%s: Disabling wdtick before dhd deinit\n", __FUNCTION__));
1770         dhd_os_wd_timer(dhdp, 0);
1771     }
1772     if (dhdp->busstate != DHD_BUS_DOWN) {
1773         DHD_GENERAL_LOCK(dhdp, flags);
1774         dhdp->busstate = DHD_BUS_DOWN_IN_PROGRESS;
1775         DHD_GENERAL_UNLOCK(dhdp, flags);
1776     }
1777 
1778     timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state);
1779     if ((timeleft == 0) || (timeleft == 1)) {
1780         DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n",
1781                    __FUNCTION__, dhdp->dhd_bus_busy_state));
1782         ASSERT(0);
1783     }
1784 
1785     return;
1786 }
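/*
 * Consumer-side sketch of the protocol described above (illustrative only,
 * modeled on dhd_bus_watchdog below): a bus usage context bails out early if
 * cleanup has been advertised, and otherwise marks itself busy:
 *
 *     DHD_GENERAL_LOCK(dhdp, flags);
 *     if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
 *         DHD_GENERAL_UNLOCK(dhdp, flags);
 *         return;                        // exit without setting a busy bit
 *     }
 *     DHD_BUS_BUSY_SET_IN_WD(dhdp);      // or the bit for this context
 *     DHD_GENERAL_UNLOCK(dhdp, flags);
 */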
1787 
1788 static void dhdpcie_advertise_bus_remove(dhd_pub_t *dhdp)
1789 {
1790     unsigned long flags;
1791     int timeleft;
1792 
1793     DHD_GENERAL_LOCK(dhdp, flags);
1794     dhdp->busstate = DHD_BUS_REMOVE;
1795     DHD_GENERAL_UNLOCK(dhdp, flags);
1796 
1797     timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state);
1798     if ((timeleft == 0) || (timeleft == 1)) {
1799         DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n",
1800                    __FUNCTION__, dhdp->dhd_bus_busy_state));
1801         ASSERT(0);
1802     }
1803 
1804     return;
1805 }
1806 
1807 static void dhdpcie_bus_remove_prep(dhd_bus_t *bus)
1808 {
1809     unsigned long flags;
1810     DHD_TRACE(("%s Enter\n", __FUNCTION__));
1811 
1812     DHD_GENERAL_LOCK(bus->dhd, flags);
1813     DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
1814     bus->dhd->busstate = DHD_BUS_DOWN;
1815     DHD_GENERAL_UNLOCK(bus->dhd, flags);
1816 
1817     dhd_os_sdlock(bus->dhd);
1818 
1819     if (bus->sih && !bus->dhd->dongle_isolation) {
1820         if (PCIE_RELOAD_WAR_ENAB(bus->sih->buscorerev)) {
1821             dhd_bus_pcie_pwr_req_reload_war(bus);
1822         }
1823 
1824         /* WAR for the insmod-fails-after-rmmod issue on Brix Android */
1825 
1826         /* If the PCIe link is down, the watchdog reset should not be
1827          * done, as it may hang.
1828          */
1829 
1830         if (!bus->is_linkdown) {
1831 #ifndef DHD_SKIP_DONGLE_RESET_IN_ATTACH
1832             /* For EFI, depending on the BT-over-PCIe mode, we either
1833              * power toggle or do an F0 FLR from
1834              * dhdpcie_bus_release_dongle, so there is no need to do a
1835              * dongle reset from here.
1836              */
1837             dhdpcie_dongle_reset(bus);
1838 #endif /* !DHD_SKIP_DONGLE_RESET_IN_ATTACH */
1839         }
1840 
1841         bus->dhd->is_pcie_watchdog_reset = TRUE;
1842     }
1843 
1844     dhd_os_sdunlock(bus->dhd);
1845 
1846     DHD_TRACE(("%s Exit\n", __FUNCTION__));
1847 }
1848 
1849 void dhd_init_bus_lock(dhd_bus_t *bus)
1850 {
1851     if (!bus->bus_lock) {
1852         bus->bus_lock = dhd_os_spin_lock_init(bus->dhd->osh);
1853     }
1854 }
1855 
1856 void dhd_deinit_bus_lock(dhd_bus_t *bus)
1857 {
1858     if (bus->bus_lock) {
1859         dhd_os_spin_lock_deinit(bus->dhd->osh, bus->bus_lock);
1860         bus->bus_lock = NULL;
1861     }
1862 }
1863 
1864 void dhd_init_backplane_access_lock(dhd_bus_t *bus)
1865 {
1866     if (!bus->backplane_access_lock) {
1867         bus->backplane_access_lock = dhd_os_spin_lock_init(bus->dhd->osh);
1868     }
1869 }
1870 
1871 void dhd_deinit_backplane_access_lock(dhd_bus_t *bus)
1872 {
1873     if (bus->backplane_access_lock) {
1874         dhd_os_spin_lock_deinit(bus->dhd->osh, bus->backplane_access_lock);
1875         bus->backplane_access_lock = NULL;
1876     }
1877 }
1878 
1879 /** Detach and free everything */
1880 void dhdpcie_bus_release(dhd_bus_t *bus)
1881 {
1882     bool dongle_isolation = FALSE;
1883     osl_t *osh = NULL;
1884     unsigned long flags_bus;
1885 
1886     DHD_TRACE(("%s: Enter\n", __FUNCTION__));
1887 
1888     if (bus) {
1889         osh = bus->osh;
1890         ASSERT(osh);
1891 
1892         if (bus->dhd) {
1893 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
1894             debugger_close();
1895 #endif /* DEBUGGER || DHD_DSCOPE */
1896             dhdpcie_advertise_bus_remove(bus->dhd);
1897             dongle_isolation = bus->dhd->dongle_isolation;
1898             bus->dhd->is_pcie_watchdog_reset = FALSE;
1899             dhdpcie_bus_remove_prep(bus);
1900 
1901             if (bus->intr) {
1902                 DHD_BUS_LOCK(bus->bus_lock, flags_bus);
1903                 dhdpcie_bus_intr_disable(bus);
1904                 DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
1905                 dhdpcie_free_irq(bus);
1906             }
1907             dhd_deinit_bus_lock(bus);
1908             dhd_deinit_backplane_access_lock(bus);
1909             /**
1910              * dhdpcie_bus_release_dongle frees the bus->sih handle, which
1911              * is needed to access dongle registers. dhd_detach communicates
1912              * with the dongle to delete flowrings etc., so
1913              * dhdpcie_bus_release_dongle must be called only after dhd_detach.
1914              */
1915             dhd_detach(bus->dhd);
1916             dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
1917             dhd_free(bus->dhd);
1918             bus->dhd = NULL;
1919         }
1920         /* unmap the regs and tcm here!! */
1921         if (bus->regs) {
1922             dhdpcie_bus_reg_unmap(osh, bus->regs, DONGLE_REG_MAP_SIZE);
1923             bus->regs = NULL;
1924         }
1925         if (bus->tcm) {
1926             dhdpcie_bus_reg_unmap(osh, bus->tcm, DONGLE_TCM_MAP_SIZE);
1927             bus->tcm = NULL;
1928         }
1929 
1930         dhdpcie_bus_release_malloc(bus, osh);
1931         /* Detach pcie shared structure */
1932         if (bus->pcie_sh) {
1933             MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
1934             bus->pcie_sh = NULL;
1935         }
1936 
1937         if (bus->console.buf != NULL) {
1938             MFREE(osh, bus->console.buf, bus->console.bufsize);
1939         }
1940 
1941         /* Finally free bus info */
1942         MFREE(osh, bus, sizeof(dhd_bus_t));
1943 
1944         g_dhd_bus = NULL;
1945     }
1946 
1947     DHD_TRACE(("%s: Exit\n", __FUNCTION__));
1948 } /* dhdpcie_bus_release */
1949 
1950 void dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh,
1951                                 bool dongle_isolation, bool reset_flag)
1952 {
1953     DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n",
1954                __FUNCTION__, bus->dhd, bus->dhd->dongle_reset));
1955 
1956     if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag) {
1957         DHD_TRACE(("%s Exit\n", __FUNCTION__));
1958         return;
1959     }
1960 
1961     if (bus->is_linkdown) {
1962         DHD_ERROR(
1963             ("%s : Skip release dongle due to linkdown \n", __FUNCTION__));
1964         return;
1965     }
1966 
1967     if (bus->sih) {
1968         if (!dongle_isolation &&
1969             (bus->dhd && !bus->dhd->is_pcie_watchdog_reset)) {
1970             dhdpcie_dongle_reset(bus);
1971         }
1972 
1973         dhdpcie_dongle_flr_or_pwr_toggle(bus);
1974 
1975         if (bus->ltrsleep_on_unload) {
1976             si_corereg(bus->sih, bus->sih->buscoreidx,
1977                        OFFSETOF(sbpcieregs_t, u.pcie2.ltr_state), ~0, 0);
1978         }
1979 
1980         if (bus->sih->buscorerev == 0xD) {
1981             pcie_serdes_iddqdisable(bus->osh, bus->sih,
1982                                     (sbpcieregs_t *)bus->regs);
1983         }
1984 
1985         /* For in-built drivers the PCIe CLKREQ is handled by the RC,
1986          * so do not issue clkreq from DHD.
1987          */
1988         if (dhd_download_fw_on_driverload) {
1989             /* Disable CLKREQ# */
1990             dhdpcie_clkreq(bus->osh, 1, 0);
1991         }
1992 
1993         if (bus->sih != NULL) {
1994             si_detach(bus->sih);
1995             bus->sih = NULL;
1996         }
1997         if (bus->vars && bus->varsz) {
1998             MFREE(osh, bus->vars, bus->varsz);
1999         }
2000         bus->vars = NULL;
2001     }
2002 
2003     DHD_TRACE(("%s Exit\n", __FUNCTION__));
2004 }
2005 
2006 uint32 dhdpcie_bus_cfg_read_dword(dhd_bus_t *bus, uint32 addr, uint32 size)
2007 {
2008     uint32 data = OSL_PCI_READ_CONFIG(bus->osh, addr, size);
2009     return data;
2010 }
2011 
2012 /** 32 bit config write */
2013 void dhdpcie_bus_cfg_write_dword(dhd_bus_t *bus, uint32 addr, uint32 size,
2014                                  uint32 data)
2015 {
2016     OSL_PCI_WRITE_CONFIG(bus->osh, addr, size, data);
2017 }
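/*
 * Read-modify-write sketch using the two config-space helpers above
 * (illustrative; mirrors the CA7 coherent-bit update in dhdpcie_dongle_attach):
 *
 *     uint32 val;
 *     val = dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 0x4);
 *     dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 0x4,
 *                                 val | PCIE_BARCOHERENTACCEN_MASK);
 */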
2018 
2019 void dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data)
2020 {
2021     OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, 0x4, data);
2022 }
2023 
2024 void dhdpcie_bus_dongle_setmemsize(struct dhd_bus *bus, int mem_size)
2025 {
2026     int32 min_size = DONGLE_MIN_MEMSIZE;
2027     /* Restrict the memsize to user specified limit */
2028     DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n",
2029                dhd_dongle_memsize, min_size));
2030     if ((dhd_dongle_memsize > min_size) &&
2031         (dhd_dongle_memsize < (int32)bus->orig_ramsize)) {
2032         bus->ramsize = dhd_dongle_memsize;
2033     }
2034 }
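/*
 * Worked example of the clamp above (hypothetical values): with
 * bus->orig_ramsize = 0x1c0000 and dhd_dongle_memsize = 0x100000, the user
 * value lies strictly between DONGLE_MIN_MEMSIZE and orig_ramsize, so
 * bus->ramsize becomes 0x100000; a request of 0 or of >= orig_ramsize
 * leaves bus->ramsize unchanged.
 */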
2035 
2036 void dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh)
2037 {
2038     DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2039 
2040     if (bus->dhd && bus->dhd->dongle_reset) {
2041         return;
2042     }
2043 
2044     if (bus->vars && bus->varsz) {
2045         MFREE(osh, bus->vars, bus->varsz);
2046         bus->vars = NULL;
2047     }
2048 
2049     DHD_TRACE(("%s: Exit\n", __FUNCTION__));
2050     return;
2051 }
2052 
2053 /** Stop bus module: clear pending frames, disable data flow */
2054 void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
2055 {
2056     unsigned long flags, flags_bus;
2057 
2058     DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2059 
2060     if (!bus->dhd) {
2061         return;
2062     }
2063 
2064     if (bus->dhd->busstate == DHD_BUS_DOWN) {
2065         DHD_ERROR(("%s: already down by net_dev_reset\n", __FUNCTION__));
2066         goto done;
2067     }
2068 
2069     DHD_DISABLE_RUNTIME_PM(bus->dhd);
2070 
2071     DHD_GENERAL_LOCK(bus->dhd, flags);
2072     DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
2073     bus->dhd->busstate = DHD_BUS_DOWN;
2074     DHD_GENERAL_UNLOCK(bus->dhd, flags);
2075 
2076 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
2077     atomic_set(&bus->dhd->block_bus, TRUE);
2078 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
2079 
2080     DHD_BUS_LOCK(bus->bus_lock, flags_bus);
2081     dhdpcie_bus_intr_disable(bus);
2082     DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
2083 
2084     if (!bus->is_linkdown) {
2085         uint32 status;
2086         status = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 0x4);
2087         dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 0x4, status);
2088     }
2089 
2090     if (!dhd_download_fw_on_driverload) {
2091         dhd_dpc_kill(bus->dhd);
2092     }
2093 
2094 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
2095     pm_runtime_disable(dhd_bus_to_dev(bus));
2096     pm_runtime_set_suspended(dhd_bus_to_dev(bus));
2097     pm_runtime_enable(dhd_bus_to_dev(bus));
2098 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
2099 
2100     /* Clear rx control and wake any waiters */
2101     dhd_os_set_ioctl_resp_timeout(IOCTL_DISABLE_TIMEOUT);
2102     dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_BUS_STOP);
2103 
2104 done:
2105     return;
2106 }
2107 
2108 /**
2109  * Watchdog timer function.
2110  * @param dhd   Represents a specific hardware (dongle) instance that this DHD
2111  * manages
2112  */
2113 bool dhd_bus_watchdog(dhd_pub_t *dhd)
2114 {
2115     unsigned long flags;
2116     dhd_bus_t *bus = dhd->bus;
2117 
2118     DHD_GENERAL_LOCK(dhd, flags);
2119     if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd) ||
2120         DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd)) {
2121         DHD_GENERAL_UNLOCK(dhd, flags);
2122         return FALSE;
2123     }
2124     DHD_BUS_BUSY_SET_IN_WD(dhd);
2125     DHD_GENERAL_UNLOCK(dhd, flags);
2126 
2127     /* Poll for console output periodically */
2128     if (dhd->busstate == DHD_BUS_DATA && dhd->dhd_console_ms != 0 &&
2129         bus->bus_low_power_state == DHD_BUS_NO_LOW_POWER_STATE) {
2130         bus->console.count += dhd_watchdog_ms;
2131         if (bus->console.count >= dhd->dhd_console_ms) {
2132             bus->console.count -= dhd->dhd_console_ms;
2133 
2134             if (MULTIBP_ENAB(bus->sih)) {
2135                 dhd_bus_pcie_pwr_req(bus);
2136             }
2137 
2138             /* Make sure backplane clock is on */
2139             if (dhdpcie_bus_readconsole(bus) < 0) {
2140                 dhd->dhd_console_ms = 0; /* On error, stop trying */
2141             }
2142 
2143             if (MULTIBP_ENAB(bus->sih)) {
2144                 dhd_bus_pcie_pwr_req_clear(bus);
2145             }
2146         }
2147     }
2148 
2149 #ifdef DHD_READ_INTSTATUS_IN_DPC
2150     if (bus->poll) {
2151         bus->ipend = TRUE;
2152         bus->dpc_sched = TRUE;
2153         dhd_sched_dpc(bus->dhd); /* queue DPC now!! */
2154     }
2155 #endif /* DHD_READ_INTSTATUS_IN_DPC */
2156 
2157     DHD_GENERAL_LOCK(dhd, flags);
2158     DHD_BUS_BUSY_CLEAR_IN_WD(dhd);
2159     dhd_os_busbusy_wake(dhd);
2160     DHD_GENERAL_UNLOCK(dhd, flags);
2161 
2162     return TRUE;
2163 } /* dhd_bus_watchdog */
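/*
 * Timing sketch for the console-poll arithmetic above (assumed values): with
 * dhd_watchdog_ms = 10 and dhd->dhd_console_ms = 250, console.count grows by
 * 10 per watchdog tick, so dhdpcie_bus_readconsole() runs on every 25th tick.
 * Subtracting dhd_console_ms (rather than zeroing the counter) preserves any
 * remainder, keeping the average poll period at dhd_console_ms.
 */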
2164 
2165 #if defined(SUPPORT_MULTIPLE_REVISION)
2166 static int concate_revision_bcm4358(dhd_bus_t *bus, char *fw_path,
2167                                     char *nv_path)
2168 {
2169     uint32 chiprev;
2170 #if defined(SUPPORT_MULTIPLE_CHIPS)
2171     char chipver_tag[20] = "_4358";
2172 #else
2173     char chipver_tag[10] = {
2174         0,
2175     };
2176 #endif /* SUPPORT_MULTIPLE_CHIPS */
2177 
2178     chiprev = dhd_bus_chiprev(bus);
2179     if (chiprev == 0) {
2180         DHD_ERROR(("----- CHIP 4358 A0 -----\n"));
2181         strcat(chipver_tag, "_a0");
2182     } else if (chiprev == 1) {
2183         DHD_ERROR(("----- CHIP 4358 A1 -----\n"));
2184 #if defined(SUPPORT_MULTIPLE_CHIPS) || defined(SUPPORT_MULTIPLE_MODULE_CIS)
2185         strcat(chipver_tag, "_a1");
2186 #endif /* defined(SUPPORT_MULTIPLE_CHIPS) ||                                   \
2187           defined(SUPPORT_MULTIPLE_MODULE_CIS) */
2188     } else if (chiprev == 0x3) {
2189         DHD_ERROR(("----- CHIP 4358 A3 -----\n"));
2190 #if defined(SUPPORT_MULTIPLE_CHIPS)
2191         strcat(chipver_tag, "_a3");
2192 #endif /* SUPPORT_MULTIPLE_CHIPS */
2193     } else {
2194         DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chiprev));
2195     }
2196 
2197     strcat(fw_path, chipver_tag);
2198 
2199 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK)
2200     if (chiprev == 1 || chiprev == 0x3) {
2201         int ret = dhd_check_module_b85a();
2202         if ((chiprev == 1) && (ret < 0)) {
2203             memset(chipver_tag, 0x00, sizeof(chipver_tag));
2204             strcat(chipver_tag, "_b85");
2205             strcat(chipver_tag, "_a1");
2206         }
2207     }
2208 
2209     DHD_ERROR(("%s: chipver_tag %s \n", __FUNCTION__, chipver_tag));
2210 #endif /* defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) */
2211 
2212 #if defined(SUPPORT_MULTIPLE_BOARD_REV)
2213     if (system_rev >= 0xA) {
2214         DHD_ERROR(("----- Board Rev  [%d]-----\n", system_rev));
2215         strcat(chipver_tag, "_r10");
2216     }
2217 #endif /* SUPPORT_MULTIPLE_BOARD_REV */
2218     strcat(nv_path, chipver_tag);
2219 
2220     return 0;
2221 }
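/*
 * Path example for the suffix logic above (hypothetical names): on a 4358 A0
 * built with SUPPORT_MULTIPLE_CHIPS, chipver_tag becomes "_4358_a0", so a
 * fw_path of ".../fw_bcm4358" is extended in place to ".../fw_bcm4358_4358_a0",
 * and nv_path gets the same tag. Note that the strcat() calls assume the
 * caller's buffers have room for the extra tag bytes.
 */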
2222 
2223 static int concate_revision_bcm4359(dhd_bus_t *bus, char *fw_path,
2224                                     char *nv_path)
2225 {
2226     uint32 chip_ver;
2227     char chipver_tag[10] = {
2228         0,
2229     };
2230 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) &&          \
2231     defined(SUPPORT_BCM4359_MIXED_MODULES)
2232     int module_type = -1;
2233 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK &&                      \
2234           SUPPORT_BCM4359_MIXED_MODULES */
2235 
2236     chip_ver = bus->sih->chiprev;
2237     if (chip_ver == 0x4) {
2238         DHD_ERROR(("----- CHIP 4359 B0 -----\n"));
2239         strncat(chipver_tag, "_b0", strlen("_b0"));
2240     } else if (chip_ver == 0x5) {
2241         DHD_ERROR(("----- CHIP 4359 B1 -----\n"));
2242         strncat(chipver_tag, "_b1", strlen("_b1"));
2243     } else if (chip_ver == 0x9) {
2244         DHD_ERROR(("----- CHIP 4359 C0 -----\n"));
2245         strncat(chipver_tag, "_c0", strlen("_c0"));
2246     } else {
2247         DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chip_ver));
2248         return -1;
2249     }
2250 
2251 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) &&          \
2252     defined(SUPPORT_BCM4359_MIXED_MODULES)
2253     module_type = dhd_check_module_b90();
2254 
2255     switch (module_type) {
2256         case BCM4359_MODULE_TYPE_B90B:
2257             strcat(fw_path, chipver_tag);
2258             break;
2259         case BCM4359_MODULE_TYPE_B90S:
2260         default:
2261             /*
2262              * If the .cid.info file does not exist, force loading the
2263              * B90S FW for the initial MFG boot-up.
2264              */
2265             if (chip_ver == 0x5) {
2266                 strncat(fw_path, "_b90s", strlen("_b90s"));
2267             }
2268             strcat(fw_path, chipver_tag);
2269             strcat(nv_path, chipver_tag);
2270             break;
2271     }
2272 #else  /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK &&                      \
2273           SUPPORT_BCM4359_MIXED_MODULES */
2274     strcat(fw_path, chipver_tag);
2275     strcat(nv_path, chipver_tag);
2276 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK &&                      \
2277           SUPPORT_BCM4359_MIXED_MODULES */
2278 
2279     return 0;
2280 }
2281 
2282 #if defined(USE_CID_CHECK)
2283 
2284 #define MAX_EXTENSION 20
2285 #define MODULE_BCM4361_INDEX 3
2286 #define CHIP_REV_A0 1
2287 #define CHIP_REV_A1 2
2288 #define CHIP_REV_B0 3
2289 #define CHIP_REV_B1 4
2290 #define CHIP_REV_B2 5
2291 #define CHIP_REV_C0 6
2292 #define BOARD_TYPE_EPA 0x080f
2293 #define BOARD_TYPE_IPA 0x0827
2294 #define BOARD_TYPE_IPA_OLD 0x081a
2295 #define DEFAULT_CIDINFO_FOR_EPA "r00a_e000_a0_ePA"
2296 #define DEFAULT_CIDINFO_FOR_IPA "r00a_e000_a0_iPA"
2297 #define DEFAULT_CIDINFO_FOR_A1 "r01a_e30a_a1"
2298 #define DEFAULT_CIDINFO_FOR_B0 "r01i_e32_b0"
2299 #define MAX_VID_LEN 8
2300 #define CIS_TUPLE_HDR_LEN 2
2301 #if defined(BCM4361_CHIP)
2302 #define CIS_TUPLE_START_ADDRESS 0x18011110
2303 #define CIS_TUPLE_END_ADDRESS 0x18011167
2304 #elif defined(BCM4375_CHIP)
2305 #define CIS_TUPLE_START_ADDRESS 0x18011120
2306 #define CIS_TUPLE_END_ADDRESS 0x18011177
2307 #endif /* defined(BCM4361_CHIP) */
2308 #define CIS_TUPLE_MAX_COUNT                                                    \
2309     (uint32)((CIS_TUPLE_END_ADDRESS - CIS_TUPLE_START_ADDRESS + 1) /           \
2310              sizeof(uint32))
2311 #define CIS_TUPLE_TAG_START 0x80
2312 #define CIS_TUPLE_TAG_VENDOR 0x81
2313 #define CIS_TUPLE_TAG_BOARDTYPE 0x1b
2314 #define CIS_TUPLE_TAG_LENGTH 1
2315 #define NVRAM_FEM_MURATA "_murata"
2316 #define CID_FEM_MURATA "_mur_"
2317 
2318 typedef struct cis_tuple_format {
2319     uint8 id;
2320     uint8 len; /* total length of tag and data */
2321     uint8 tag;
2322     uint8 data[1];
2323 } cis_tuple_format_t;
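/*
 * Byte-layout sketch of one CIS tuple as parsed below (example bytes are made
 * up): a raw stream of { 0x80, 0x05, 0x81, 'A', 'B', 'C', 'D' } reads as
 * id = 0x80 (CIS_TUPLE_TAG_START), len = 5 (tag plus four data bytes),
 * tag = 0x81 (CIS_TUPLE_TAG_VENDOR) and data = "ABCD"; the next tuple then
 * starts len + CIS_TUPLE_HDR_LEN = 7 bytes after this one.
 */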
2324 
2325 typedef struct {
2326     char cid_ext[MAX_EXTENSION];
2327     char nvram_ext[MAX_EXTENSION];
2328     char fw_ext[MAX_EXTENSION];
2329 } naming_info_t;
2330 
2331 naming_info_t bcm4361_naming_table[] = {
2332     {{""}, {""}, {""}},
2333     {{"r00a_e000_a0_ePA"}, {"_a0_ePA"}, {"_a0_ePA"}},
2334     {{"r00a_e000_a0_iPA"}, {"_a0"}, {"_a1"}},
2335     {{"r01a_e30a_a1"}, {"_r01a_a1"}, {"_a1"}},
2336     {{"r02a_e30a_a1"}, {"_r02a_a1"}, {"_a1"}},
2337     {{"r02c_e30a_a1"}, {"_r02c_a1"}, {"_a1"}},
2338     {{"r01d_e31_b0"}, {"_r01d_b0"}, {"_b0"}},
2339     {{"r01f_e31_b0"}, {"_r01f_b0"}, {"_b0"}},
2340     {{"r02g_e31_b0"}, {"_r02g_b0"}, {"_b0"}},
2341     {{"r01h_e32_b0"}, {"_r01h_b0"}, {"_b0"}},
2342     {{"r01i_e32_b0"}, {"_r01i_b0"}, {"_b0"}},
2343     {{"r02j_e32_b0"}, {"_r02j_b0"}, {"_b0"}},
2344     {{"r012_1kl_a1"}, {"_r012_a1"}, {"_a1"}},
2345     {{"r013_1kl_b0"}, {"_r013_b0"}, {"_b0"}},
2346     {{"r013_1kl_b0"}, {"_r013_b0"}, {"_b0"}},
2347     {{"r014_1kl_b0"}, {"_r014_b0"}, {"_b0"}},
2348     {{"r015_1kl_b0"}, {"_r015_b0"}, {"_b0"}},
2349     {{"r020_1kl_b0"}, {"_r020_b0"}, {"_b0"}},
2350     {{"r021_1kl_b0"}, {"_r021_b0"}, {"_b0"}},
2351     {{"r022_1kl_b0"}, {"_r022_b0"}, {"_b0"}},
2352     {{"r023_1kl_b0"}, {"_r023_b0"}, {"_b0"}},
2353     {{"r024_1kl_b0"}, {"_r024_b0"}, {"_b0"}},
2354     {{"r030_1kl_b0"}, {"_r030_b0"}, {"_b0"}},
2355     {{"r031_1kl_b0"},
2356      {"_r030_b0"},
2357      {"_b0"}}, /* exceptional case : r31 -> r30 */
2358     {{"r032_1kl_b0"}, {"_r032_b0"}, {"_b0"}},
2359     {{"r033_1kl_b0"}, {"_r033_b0"}, {"_b0"}},
2360     {{"r034_1kl_b0"}, {"_r034_b0"}, {"_b0"}},
2361     {{"r02a_e32a_b2"}, {"_r02a_b2"}, {"_b2"}},
2362     {{"r02b_e32a_b2"}, {"_r02b_b2"}, {"_b2"}},
2363     {{"r020_1qw_b2"}, {"_r020_b2"}, {"_b2"}},
2364     {{"r021_1qw_b2"}, {"_r021_b2"}, {"_b2"}},
2365     {{"r022_1qw_b2"}, {"_r022_b2"}, {"_b2"}},
2366     {{"r031_1qw_b2"}, {"_r031_b2"}, {"_b2"}},
2367     {{"r032_1qw_b2"}, {"_r032_b2"}, {"_b2"}},
2368     {{"r041_1qw_b2"}, {"_r041_b2"}, {"_b2"}}};
2369 
2370 #define MODULE_BCM4375_INDEX 3
2371 
2372 naming_info_t bcm4375_naming_table[] = {
2373     {{""}, {""}, {""}},
2374     {{"e41_es11"}, {"_ES00_semco_b0"}, {"_b0"}},
2375     {{"e43_es33"}, {"_ES01_semco_b0"}, {"_b0"}},
2376     {{"e43_es34"}, {"_ES02_semco_b0"}, {"_b0"}},
2377     {{"e43_es35"}, {"_ES02_semco_b0"}, {"_b0"}},
2378     {{"e43_es36"}, {"_ES03_semco_b0"}, {"_b0"}},
2379     {{"e43_cs41"}, {"_CS00_semco_b1"}, {"_b1"}},
2380     {{"e43_cs51"}, {"_CS01_semco_b1"}, {"_b1"}},
2381     {{"e43_cs53"}, {"_CS01_semco_b1"}, {"_b1"}},
2382     {{"e43_cs61"}, {"_CS00_skyworks_b1"}, {"_b1"}},
2383     {{"1rh_es10"}, {"_1rh_es10_b0"}, {"_b0"}},
2384     {{"1rh_es11"}, {"_1rh_es11_b0"}, {"_b0"}},
2385     {{"1rh_es12"}, {"_1rh_es12_b0"}, {"_b0"}},
2386     {{"1rh_es13"}, {"_1rh_es13_b0"}, {"_b0"}},
2387     {{"1rh_es20"}, {"_1rh_es20_b0"}, {"_b0"}},
2388     {{"1rh_es32"}, {"_1rh_es32_b0"}, {"_b0"}},
2389     {{"1rh_es41"}, {"_1rh_es41_b1"}, {"_b1"}},
2390     {{"1rh_es42"}, {"_1rh_es42_b1"}, {"_b1"}},
2391     {{"1rh_es43"}, {"_1rh_es43_b1"}, {"_b1"}},
2392     {{"1rh_es44"}, {"_1rh_es44_b1"}, {"_b1"}}};
2393 
2394 static naming_info_t *dhd_find_naming_info(naming_info_t table[],
2395                                            int table_size, char *module_type)
2396 {
2397     int index_found = 0, i = 0;
2398 
2399     if (module_type && strlen(module_type) > 0) {
2400         for (i = 1; i < table_size; i++) {
2401             if (!strncmp(table[i].cid_ext, module_type,
2402                          strlen(table[i].cid_ext))) {
2403                 index_found = i;
2404                 break;
2405             }
2406         }
2407     }
2408 
2409     DHD_INFO(("%s: index_found=%d\n", __FUNCTION__, index_found));
2410 
2411     return &table[index_found];
2412 }
2413 
2414 static naming_info_t *dhd_find_naming_info_by_cid(naming_info_t table[],
2415                                                   int table_size,
2416                                                   char *cid_info)
2417 {
2418     int index_found = 0, i = 0;
2419     char *ptr;
2420 
2421     /* truncate extension */
2422     for (i = 1, ptr = cid_info; i < MODULE_BCM4361_INDEX && ptr; i++) {
2423         ptr = bcmstrstr(ptr, "_");
2424         if (ptr) {
2425             ptr++;
2426         }
2427     }
2428 
2429     for (i = 1; i < table_size && ptr; i++) {
2430         if (!strncmp(table[i].cid_ext, ptr, strlen(table[i].cid_ext))) {
2431             index_found = i;
2432             break;
2433         }
2434     }
2435 
2436     DHD_INFO(("%s: index_found=%d\n", __FUNCTION__, index_found));
2437 
2438     return &table[index_found];
2439 }
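/*
 * Truncation example for the loop above (hypothetical cid string): with
 * MODULE_BCM4361_INDEX = 3, the first loop skips past two '_' separators, so
 * a cid_info of "semco_sky_r02g_e31_b0" leaves ptr at "r02g_e31_b0", which
 * then prefix-matches the "r02g_e31_b0" entry in bcm4361_naming_table.
 */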
2440 
2441 static int dhd_parse_board_information_bcm(dhd_bus_t *bus, int *boardtype,
2442                                            unsigned char *vid, int *vid_length)
2443 {
2444     int boardtype_backplane_addr[] = {
2445         0x18010324, /* OTP Control 1 */
2446         0x18012618, /* PMU min resource mask */
2447     };
2448     int boardtype_backplane_data[] = {
2449         0x00fa0000, 0x0e4fffff /* Keep on ARMHTAVAIL */
2450     };
2451     int int_val = 0, i = 0;
2452     cis_tuple_format_t *tuple;
2453     int totlen, len;
2454     uint32 raw_data[CIS_TUPLE_MAX_COUNT];
2455 
2456     for (i = 0; i < ARRAYSIZE(boardtype_backplane_addr); i++) {
2457         /* Write new OTP and PMU configuration */
2458         if (si_backplane_access(bus->sih, boardtype_backplane_addr[i],
2459                                 sizeof(int), &boardtype_backplane_data[i],
2460                                 FALSE) != BCME_OK) {
2461             DHD_ERROR(("invalid size/addr combination\n"));
2462             return BCME_ERROR;
2463         }
2464 
2465         if (si_backplane_access(bus->sih, boardtype_backplane_addr[i],
2466                                 sizeof(int), &int_val, TRUE) != BCME_OK) {
2467             DHD_ERROR(("invalid size/addr combination\n"));
2468             return BCME_ERROR;
2469         }
2470 
2471         DHD_INFO(("%s: boardtype_backplane_addr 0x%08x rdata 0x%04x\n",
2472                   __FUNCTION__, boardtype_backplane_addr[i], int_val));
2473     }
2474 
2475     /* read tuple raw data */
2476     for (i = 0; i < CIS_TUPLE_MAX_COUNT; i++) {
2477         if (si_backplane_access(
2478                 bus->sih, CIS_TUPLE_START_ADDRESS + i * sizeof(uint32),
2479                 sizeof(uint32), &raw_data[i], TRUE) != BCME_OK) {
2480             break;
2481         }
2482     }
2483 
2484     totlen = i * sizeof(uint32);
2485     tuple = (cis_tuple_format_t *)raw_data;
2486 
2487     /* check the first tuple has tag 'start' */
2488     if (tuple->id != CIS_TUPLE_TAG_START) {
2489         return BCME_ERROR;
2490     }
2491 
2492     *vid_length = *boardtype = 0;
2493 
2494     /* find tagged parameter */
2495     while ((totlen >= (tuple->len + CIS_TUPLE_HDR_LEN)) &&
2496            (*vid_length == 0 || *boardtype == 0)) {
2497         len = tuple->len;
2498 
2499         if ((tuple->tag == CIS_TUPLE_TAG_VENDOR) &&
2500             (totlen >= (int)(len + CIS_TUPLE_HDR_LEN))) {
2501             /* found VID */
2502             memcpy(vid, tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH);
2503             *vid_length = tuple->len - CIS_TUPLE_TAG_LENGTH;
2504             prhex("OTP VID", tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH);
2505         } else if ((tuple->tag == CIS_TUPLE_TAG_BOARDTYPE) &&
2506                    (totlen >= (int)(len + CIS_TUPLE_HDR_LEN))) {
2507             /* found boardtype */
2508             *boardtype = (int)tuple->data[0];
2509             prhex("OTP boardtype", tuple->data,
2510                   tuple->len - CIS_TUPLE_TAG_LENGTH);
2511         }
2512 
2513         tuple =
2514             (cis_tuple_format_t *)((uint8 *)tuple + (len + CIS_TUPLE_HDR_LEN));
2515         totlen -= (len + CIS_TUPLE_HDR_LEN);
2516     }
2517 
2518     if (*vid_length <= 0 || *boardtype <= 0) {
2519         DHD_ERROR(("failed to parse information (vid=%d, boardtype=%d)\n",
2520                    *vid_length, *boardtype));
2521         return BCME_ERROR;
2522     }
2523 
2524     return BCME_OK;
2525 }
2526 
2527 static naming_info_t *dhd_find_naming_info_by_chip_rev(naming_info_t table[],
2528                                                        int table_size,
2529                                                        dhd_bus_t *bus,
2530                                                        bool *is_murata_fem)
2531 {
2532     int board_type = 0, chip_rev = 0, vid_length = 0;
2533     unsigned char vid[MAX_VID_LEN];
2534     naming_info_t *info = &table[0];
2535     char *cid_info = NULL;
2536 
2537     if (!bus || !bus->sih) {
2538         DHD_ERROR(("%s:bus(%p) or bus->sih is NULL\n", __FUNCTION__, bus));
2539         return NULL;
2540     }
2541     chip_rev = bus->sih->chiprev;
2542 
2543     if (dhd_parse_board_information_bcm(bus, &board_type, vid, &vid_length) !=
2544         BCME_OK) {
2545         DHD_ERROR(("%s:failed to parse board information\n", __FUNCTION__));
2546         return NULL;
2547     }
2548 
2549     DHD_INFO(("%s:chip version %d\n", __FUNCTION__, chip_rev));
2550 
2551 #if defined(BCM4361_CHIP)
2552     /* A0 chipset has exception only */
2553     if (chip_rev == CHIP_REV_A0) {
2554         if (board_type == BOARD_TYPE_EPA) {
2555             info = dhd_find_naming_info(table, table_size,
2556                                         DEFAULT_CIDINFO_FOR_EPA);
2557         } else if ((board_type == BOARD_TYPE_IPA) ||
2558                    (board_type == BOARD_TYPE_IPA_OLD)) {
2559             info = dhd_find_naming_info(table, table_size,
2560                                         DEFAULT_CIDINFO_FOR_IPA);
2561         }
2562     } else {
2563         cid_info = dhd_get_cid_info(vid, vid_length);
2564         if (cid_info) {
2565             info = dhd_find_naming_info_by_cid(table, table_size, cid_info);
2566             if (strstr(cid_info, CID_FEM_MURATA)) {
2567                 *is_murata_fem = TRUE;
2568             }
2569         }
2570     }
2571 #else
2572     cid_info = dhd_get_cid_info(vid, vid_length);
2573     if (cid_info) {
2574         info = dhd_find_naming_info_by_cid(table, table_size, cid_info);
2575         if (strstr(cid_info, CID_FEM_MURATA)) {
2576             *is_murata_fem = TRUE;
2577         }
2578     }
2579 #endif /* BCM4361_CHIP */
2580 
2581     return info;
2582 }
2583 #endif /* USE_CID_CHECK */
2584 
2585 static int concate_revision_bcm4361(dhd_bus_t *bus, char *fw_path,
2586                                     char *nv_path)
2587 {
2588     int ret = BCME_OK;
2589 #if defined(SUPPORT_BCM4361_MIXED_MODULES) && defined(USE_CID_CHECK)
2590     char module_type[MAX_VNAME_LEN];
2591     naming_info_t *info = NULL;
2592     bool is_murata_fem = FALSE;
2593 
2594     memset(module_type, 0, sizeof(module_type));
2595 
2596     if (dhd_check_module_bcm(module_type, MODULE_BCM4361_INDEX,
2597                              &is_murata_fem) == BCME_OK) {
2598         info = dhd_find_naming_info(
2599             bcm4361_naming_table, ARRAYSIZE(bcm4361_naming_table), module_type);
2600     } else {
2601         /* in case the .cid.info file doesn't exist */
2602         info = dhd_find_naming_info_by_chip_rev(bcm4361_naming_table,
2603                                                 ARRAYSIZE(bcm4361_naming_table),
2604                                                 bus, &is_murata_fem);
2605     }
2606 
2607     if (bcmstrnstr(nv_path, PATH_MAX, "_murata", 0x7)) {
2608         is_murata_fem = FALSE;
2609     }
2610 
2611     if (info) {
2612         if (is_murata_fem) {
2613             strncat(nv_path, NVRAM_FEM_MURATA, strlen(NVRAM_FEM_MURATA));
2614         }
2615         strncat(nv_path, info->nvram_ext, strlen(info->nvram_ext));
2616         strncat(fw_path, info->fw_ext, strlen(info->fw_ext));
2617     } else {
2618         DHD_ERROR(("%s:failed to find extension for nvram and firmware\n",
2619                    __FUNCTION__));
2620         ret = BCME_ERROR;
2621     }
2622 #else  /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK */
2623     char chipver_tag[10] = {
2624         0,
2625     };
2626 
2627     strcat(fw_path, chipver_tag);
2628     strcat(nv_path, chipver_tag);
2629 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK */
2630 
2631     return ret;
2632 }
2633 
2634 static int concate_revision_bcm4375(dhd_bus_t *bus, char *fw_path,
2635                                     char *nv_path)
2636 {
2637     int ret = BCME_OK;
2638 #if defined(SUPPORT_BCM4375_MIXED_MODULES) && defined(USE_CID_CHECK)
2639     char module_type[MAX_VNAME_LEN];
2640     naming_info_t *info = NULL;
2641     bool is_murata_fem = FALSE;
2642 
2643     memset(module_type, 0, sizeof(module_type));
2644 
2645     if (dhd_check_module_bcm(module_type, MODULE_BCM4375_INDEX,
2646                              &is_murata_fem) == BCME_OK) {
2647         info = dhd_find_naming_info(
2648             bcm4375_naming_table, ARRAYSIZE(bcm4375_naming_table), module_type);
2649     } else {
2650         /* in case the .cid.info file doesn't exist */
2651         info = dhd_find_naming_info_by_chip_rev(bcm4375_naming_table,
2652                                                 ARRAYSIZE(bcm4375_naming_table),
2653                                                 bus, &is_murata_fem);
2654     }
2655 
2656     if (info) {
2657         strncat(nv_path, info->nvram_ext, strlen(info->nvram_ext));
2658         strncat(fw_path, info->fw_ext, strlen(info->fw_ext));
2659     } else {
2660         DHD_ERROR(("%s:failed to find extension for nvram and firmware\n",
2661                    __FUNCTION__));
2662         ret = BCME_ERROR;
2663     }
2664 #else  /* SUPPORT_BCM4375_MIXED_MODULES && USE_CID_CHECK */
2665     char chipver_tag[10] = {
2666         0,
2667     };
2668 
2669     strcat(fw_path, chipver_tag);
2670     strcat(nv_path, chipver_tag);
2671 #endif /* SUPPORT_BCM4375_MIXED_MODULES && USE_CID_CHECK */
2672 
2673     return ret;
2674 }
2675 
2676 int concate_revision(dhd_bus_t *bus, char *fw_path, char *nv_path)
2677 {
2678     int res = 0;
2679 
2680     if (!bus || !bus->sih) {
2681         DHD_ERROR(("%s:Bus is Invalid\n", __FUNCTION__));
2682         return -1;
2683     }
2684 
2685     if (!fw_path || !nv_path) {
2686         DHD_ERROR(("fw_path or nv_path is null.\n"));
2687         return res;
2688     }
2689 
2690     switch (si_chipid(bus->sih)) {
2691         case BCM43569_CHIP_ID:
2692         case BCM4358_CHIP_ID:
2693             res = concate_revision_bcm4358(bus, fw_path, nv_path);
2694             break;
2695         case BCM4355_CHIP_ID:
2696         case BCM4359_CHIP_ID:
2697             res = concate_revision_bcm4359(bus, fw_path, nv_path);
2698             break;
2699         case BCM4361_CHIP_ID:
2700         case BCM4347_CHIP_ID:
2701             res = concate_revision_bcm4361(bus, fw_path, nv_path);
2702             break;
2703         case BCM4375_CHIP_ID:
2704             res = concate_revision_bcm4375(bus, fw_path, nv_path);
2705             break;
2706         default:
2707             DHD_ERROR(("REVISION SPECIFIC feature is not required\n"));
2708             return res;
2709     }
2710 
2711     return res;
2712 }
2713 #endif /* SUPPORT_MULTIPLE_REVISION */
2714 
2715 uint16 dhd_get_chipid(dhd_pub_t *dhd)
2716 {
2717     dhd_bus_t *bus = dhd->bus;
2718 
2719     if (bus && bus->sih) {
2720         return (uint16)si_chipid(bus->sih);
2721     } else {
2722         return 0;
2723     }
2724 }
2725 
2726 /**
2727  * Loads the firmware given by the caller-supplied path, and the nvram
2728  * image, into the PCIe dongle.
2729  *
2730  * BCM_REQUEST_FW specific:
2731  * Given the chip type, determines the file paths to be used within
2732  * /lib/firmware/brcm/ containing the firmware and nvm for that chip. If the
2733  * download fails, retries the download with a different nvm file.
2734  *
2735  * BCMEMBEDIMAGE specific:
2736  * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware
2737  * contained in header file will be used instead.
2738  *
2739  * @return BCME_OK on success
2740  */
2741 int dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh, char *pfw_path,
2742                               char *pnv_path, char *pclm_path, char *pconf_path)
2743 {
2744     int ret;
2745 
2746     bus->fw_path = pfw_path;
2747     bus->nv_path = pnv_path;
2748     bus->dhd->clm_path = pclm_path;
2749     bus->dhd->conf_path = pconf_path;
2750 
2751 #if defined(SUPPORT_MULTIPLE_REVISION)
2752     if (concate_revision(bus, bus->fw_path, bus->nv_path) != 0) {
2753         DHD_ERROR(("%s: failed to concatenate revision\n", __FUNCTION__));
2754         return BCME_BADARG;
2755     }
2756 #endif /* SUPPORT_MULTIPLE_REVISION */
2757 
2758 #if defined(DHD_BLOB_EXISTENCE_CHECK)
2759     dhd_set_blob_support(bus->dhd, bus->fw_path);
2760 #endif /* DHD_BLOB_EXISTENCE_CHECK */
2761 
2762     DHD_ERROR(("%s: firmware path=%s, nvram path=%s\n", __FUNCTION__,
2763                bus->fw_path, bus->nv_path));
2764     dhdpcie_dump_resource(bus);
2765 
2766     ret = dhdpcie_download_firmware(bus, osh);
2767 
2768     return ret;
2769 }
2770 
2771 void dhd_set_bus_params(struct dhd_bus *bus)
2772 {
2773     if (bus->dhd->conf->dhd_poll >= 0) {
2774         bus->poll = bus->dhd->conf->dhd_poll;
2775         if (!bus->pollrate) {
2776             bus->pollrate = 1;
2777         }
2778         printf("%s: set polling mode %d\n", __FUNCTION__,
2779                bus->dhd->conf->dhd_poll);
2780     }
2781 }
2782 
2783 /**
2784  * Loads firmware given by 'bus->fw_path' into PCIe dongle.
2785  *
2786  * BCM_REQUEST_FW specific:
2787  * Given the chip type, determines the file paths to be used within
2788  * /lib/firmware/brcm/ containing the firmware and nvm for that chip. If the
2789  * download fails, retries the download with a different nvm file.
2790  *
2791  * BCMEMBEDIMAGE specific:
2792  * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware
2793  * contained in header file will be used instead.
2794  *
2795  * @return BCME_OK on success
2796  */
2797 static int dhdpcie_download_firmware(struct dhd_bus *bus, osl_t *osh)
2798 {
2799     int ret = 0;
2800 #if defined(BCM_REQUEST_FW)
2801     uint chipid = bus->sih->chip;
2802     uint revid = bus->sih->chiprev;
2803     char fw_path[64] = "/lib/firmware/brcm/bcm"; /* path to firmware image */
2804     char nv_path[64];                            /* path to nvram vars file */
2805     bus->fw_path = fw_path;
2806     bus->nv_path = nv_path;
2807     switch (chipid) {
2808         case BCM43570_CHIP_ID:
2809             bcmstrncat(fw_path, "43570", 0x5);
2810             switch (revid) {
2811                 case 0:
2812                     bcmstrncat(fw_path, "a0", 0x2);
2813                     break;
2814                 case 0x2:
2815                     bcmstrncat(fw_path, "a2", 0x2);
2816                     break;
2817                 default:
2818                     DHD_ERROR(
2819                         ("%s: revid is not found %x\n", __FUNCTION__, revid));
2820                     break;
2821             }
2822             break;
2823         default:
2824             DHD_ERROR(("%s: unsupported device %x\n", __FUNCTION__, chipid));
2825             return 0;
2826     }
2827     /* load board specific nvram file */
2828     snprintf(bus->nv_path, sizeof(nv_path), "%s.nvm", fw_path);
2829     /* load firmware */
2830     snprintf(bus->fw_path, sizeof(fw_path), "%s-firmware.bin", fw_path);
2831 #endif /* BCM_REQUEST_FW */
2832 
2833     DHD_OS_WAKE_LOCK(bus->dhd);
2834 
2835     dhd_conf_set_path_params(bus->dhd, bus->fw_path, bus->nv_path);
2836     dhd_set_bus_params(bus);
2837 
2838     ret = _dhdpcie_download_firmware(bus);
2839 
2840     DHD_OS_WAKE_UNLOCK(bus->dhd);
2841     return ret;
2842 } /* dhdpcie_download_firmware */
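/*
 * Path-construction example for the BCM_REQUEST_FW branch above (hypothetical
 * chip, a 43570 rev 2): fw_path is built up as "/lib/firmware/brcm/bcm" +
 * "43570" + "a2", after which the two snprintf() calls derive
 * nv_path = "/lib/firmware/brcm/bcm43570a2.nvm" and
 * fw_path = "/lib/firmware/brcm/bcm43570a2-firmware.bin".
 */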
2843 
2844 #define DHD_MEMORY_SET_PATTERN 0xAA
2845 
2846 /**
2847  * Downloads a file containing firmware into dongle memory. In case of a .bea
2848  * file, the DHD is updated with the event logging partitions within that file
2849  * as well.
2850  *
2851  * @param pfw_path    Path to .bin or .bea file
2852  */
2853 static int dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path)
2854 {
2855     int bcmerror = BCME_ERROR;
2856     int offset = 0;
2857     int len = 0;
2858     bool store_reset;
2859     char *imgbuf = NULL;
2860     uint8 *memblock = NULL, *memptr = NULL;
2861 #ifdef CHECK_DOWNLOAD_FW
2862     uint8 *memptr_tmp = NULL; // terence: check downloaded firmware is correct
2863 #endif
2864     int offset_end = bus->ramsize;
2865     uint32 file_size = 0, read_len = 0;
2866 
2867 #if defined(DHD_FW_MEM_CORRUPTION)
2868     if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
2869         dhd_tcm_test_enable = TRUE;
2870     } else {
2871         dhd_tcm_test_enable = FALSE;
2872     }
2873 #endif /* DHD_FW_MEM_CORRUPTION */
2874     DHD_ERROR(
2875         ("%s: dhd_tcm_test_enable %u\n", __FUNCTION__, dhd_tcm_test_enable));
2876     /* TCM check */
2877     if (dhd_tcm_test_enable && !dhd_bus_tcm_test(bus)) {
2878         DHD_ERROR(("dhd_bus_tcm_test failed\n"));
2879         bcmerror = BCME_ERROR;
2880         goto err;
2881     }
2882     DHD_ERROR(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
2883 
2884     /* Opening the image should succeed if it was actually supplied via a
2885      * registry entry or a module parameter.
2886      */
2887     imgbuf = dhd_os_open_image1(bus->dhd, pfw_path);
2888     if (imgbuf == NULL) {
2889         printf("%s: Open firmware file failed %s\n", __FUNCTION__, pfw_path);
2890         goto err;
2891     }
2892 
2893     file_size = dhd_os_get_image_size(imgbuf);
2894     if (!file_size) {
2895         DHD_ERROR(("%s: failed to get the file size!\n", __FUNCTION__));
2896         goto err;
2897     }
2898 
2899     memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
2900     if (memblock == NULL) {
2901         DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__,
2902                    MEMBLOCK));
2903         bcmerror = BCME_NOMEM;
2904         goto err;
2905     }
2906 #ifdef CHECK_DOWNLOAD_FW
2907     if (bus->dhd->conf->fwchk) {
2908         memptr_tmp = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
2909         if (memptr_tmp == NULL) {
2910             DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__,
2911                        MEMBLOCK));
2912             goto err;
2913         }
2914     }
2915 #endif
2916     if ((uint32)(uintptr)memblock % DHD_SDALIGN) {
2917         memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
2918     }
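    /*
     * Alignment example for the adjustment above (hypothetical addresses):
     * with DHD_SDALIGN = 32 and memblock ending in ...0x1004, the remainder
     * is 4, so memptr advances by 32 - 4 = 28 bytes to the next 32-byte
     * boundary; the MALLOC above over-allocates by DHD_SDALIGN for this.
     */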
2919 
2920     /* check if CR4/CA7 */
2921     store_reset = (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
2922                    si_setcore(bus->sih, ARMCA7_CORE_ID, 0));
2923     /* Download image with MEMBLOCK size */
2924     while ((len = dhd_os_get_image_block((char *)memptr, MEMBLOCK, imgbuf))) {
2925         if (len < 0) {
2926             DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__,
2927                        len));
2928             bcmerror = BCME_ERROR;
2929             goto err;
2930         }
2931         read_len += len;
2932         if (read_len > file_size) {
2933             DHD_ERROR(("%s: WARNING! reading beyond EOF, len=%d; read_len=%u;"
2934                        " file_size=%u truncating len to %d \n",
2935                        __FUNCTION__, len, read_len, file_size,
2936                        (len - (read_len - file_size))));
2937             len -= (read_len - file_size);
2938         }
2939 
2940         /* If address is 0, save the reset instruction to be written at address 0 */
2941         if (store_reset) {
2942             ASSERT(offset == 0);
2943             bus->resetinstr = *(((uint32 *)memptr));
2944             /* Add start of RAM address to the address given by user */
2945             offset += bus->dongle_ram_base;
2946             offset_end += offset;
2947             store_reset = FALSE;
2948         }
2949 
2950         bcmerror =
2951             dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len);
2952         if (bcmerror) {
2953             DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
2954                        __FUNCTION__, bcmerror, len, offset));
2955             goto err;
2956         }
2957 
2958 #ifdef CHECK_DOWNLOAD_FW
2959         if (bus->dhd->conf->fwchk) {
2960             bcmerror =
2961                 dhdpcie_bus_membytes(bus, FALSE, offset, memptr_tmp, len);
2962             if (bcmerror) {
2963                 DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
2964                            __FUNCTION__, bcmerror, len, offset));
2965                 goto err;
2966             }
2967             if (memcmp(memptr_tmp, memptr, len)) {
2968                 DHD_ERROR(("%s: Downloaded image is corrupted at 0x%08x\n",
2969                            __FUNCTION__, offset));
2970                 bcmerror = BCME_ERROR;
2971                 goto err;
2972             } else {
2973                 DHD_INFO(("%s: Download, Upload and compare succeeded.\n",
2974                           __FUNCTION__));
2975             }
2976         }
2977 #endif
2978         offset += MEMBLOCK;
2979 
2980         if (offset >= offset_end) {
2981             DHD_ERROR(("%s: invalid address access to %x (offset end: %x)\n",
2982                        __FUNCTION__, offset, offset_end));
2983             bcmerror = BCME_ERROR;
2984             goto err;
2985         }
2986 
2987         if (read_len >= file_size) {
2988             break;
2989         }
2990     }
2991 err:
2992     if (memblock) {
2993         MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
2994 #ifdef CHECK_DOWNLOAD_FW
2995         if (memptr_tmp) {
2996             MFREE(bus->dhd->osh, memptr_tmp, MEMBLOCK + DHD_SDALIGN);
2997         }
2998 #endif
2999     }
3000 
3001     if (imgbuf) {
3002         dhd_os_close_image1(bus->dhd, imgbuf);
3003     }
3004 
3005     return bcmerror;
3006 } /* dhdpcie_download_code_file */
3007 
3008 static int dhdpcie_download_nvram(struct dhd_bus *bus)
3009 {
3010     int bcmerror = BCME_ERROR;
3011     uint len;
3012     char *memblock = NULL;
3013     char *bufp;
3014     char *pnv_path;
3015     bool nvram_file_exists;
3016     bool nvram_uefi_exists = FALSE;
3017     bool local_alloc = FALSE;
3018     pnv_path = bus->nv_path;
3019 
3020     nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0'));
3021 
3022     /* First try UEFI */
3023     len = MAX_NVRAMBUF_SIZE;
3024     dhd_get_download_buffer(bus->dhd, NULL, NVRAM, &memblock, (int *)&len);
3025 
3026     /* If UEFI empty, then read from file system */
3027     if ((len <= 0) || (memblock == NULL)) {
3028         if (nvram_file_exists) {
3029             len = MAX_NVRAMBUF_SIZE;
3030             dhd_get_download_buffer(bus->dhd, pnv_path, NVRAM, &memblock,
3031                                     (int *)&len);
3032             if ((len <= 0 || len > MAX_NVRAMBUF_SIZE)) {
3033                 goto err;
3034             }
3035         } else {
3036             /* For SROM OTP no external file or UEFI required */
3037             bcmerror = BCME_OK;
3038         }
3039     } else {
3040         nvram_uefi_exists = TRUE;
3041     }
3042 
3043     DHD_ERROR(("%s: dhd_get_download_buffer len %d\n", __FUNCTION__, len));
3044 
3045     if (len > 0 && len <= MAX_NVRAMBUF_SIZE && memblock != NULL) {
3046         bufp = (char *)memblock;
3047 
3048         {
3049             bufp[len] = 0;
3050             if (nvram_uefi_exists || nvram_file_exists) {
3051                 len = process_nvram_vars(bufp, len);
3052             }
3053         }
3054 
3055         DHD_ERROR(("%s: process_nvram_vars len %d\n", __FUNCTION__, len));
3056 
3057         if (len % 0x4) {
3058             len += 0x4 - (len % 0x4);
3059         }
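        /* Example of the rounding above: the NVRAM image is padded to a
         * 4-byte multiple before download, so len = 13 becomes 16. The NUL
         * terminator appended below is covered by downloading len + 1 bytes.
         */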
3060         bufp += len;
3061         *bufp++ = 0;
3062         if (len) {
3063             bcmerror = dhdpcie_downloadvars(bus, memblock, len + 1);
3064         }
3065         if (bcmerror) {
3066             DHD_ERROR(
3067                 ("%s: error downloading vars: %d\n", __FUNCTION__, bcmerror));
3068         }
3069     }
3070 
3071 err:
3072     if (memblock) {
3073         if (local_alloc) {
3074             MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE);
3075         } else {
3076             dhd_free_download_buffer(bus->dhd, memblock, MAX_NVRAMBUF_SIZE);
3077         }
3078     }
3079 
3080     return bcmerror;
3081 }
3082 
3083 static int dhdpcie_ramsize_read_image(struct dhd_bus *bus, char *buf, int len)
3084 {
3085     int bcmerror = BCME_ERROR;
3086     char *imgbuf = NULL;
3087 
3088     if (buf == NULL || len == 0) {
3089         goto err;
3090     }
3091 
3092     /* External image takes precedence if specified */
3093     if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
3094         // opens and seeks to correct file offset:
3095         imgbuf = dhd_os_open_image1(bus->dhd, bus->fw_path);
3096         if (imgbuf == NULL) {
3097             DHD_ERROR(("%s: Failed to open firmware file\n", __FUNCTION__));
3098             goto err;
3099         }
3100 
3101         /* Read it */
3102         if (len != dhd_os_get_image_block(buf, len, imgbuf)) {
3103             DHD_ERROR(
3104                 ("%s: Failed to read %d bytes data\n", __FUNCTION__, len));
3105             goto err;
3106         }
3107 
3108         bcmerror = BCME_OK;
3109     }
3110 
3111 err:
3112     if (imgbuf) {
3113         dhd_os_close_image1(bus->dhd, imgbuf);
3114     }
3115 
3116     return bcmerror;
3117 }
3118 
3119 /* The ramsize can be changed in the dongle image. For example, the 4365 chip
3120  * shares its sysmem with the BMC, and how much of the sysmem belongs to the
3121  * CA7 can be adjusted at dongle compilation time. DHD therefore needs to
3122  * detect this case and update the dongle RAMSIZE accordingly.
3123  */
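/* A sketch of the lookup performed below, based on the code that follows:
 * the dongle image may embed a hnd_ramsize_ptr_t record (a magic word plus
 * a ram_size field) at one of the candidate offsets in RAMSIZE_PTR_PTR_LIST.
 * dhdpcie_ramsize_adj() reads enough of the image to cover the largest
 * candidate offset, then checks each offset for HND_RAMSIZE_PTR_MAGIC and,
 * on a match, adopts the recorded RAM size.
 */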
3124 static void dhdpcie_ramsize_adj(struct dhd_bus *bus)
3125 {
3126     int i, search_len = 0;
3127     uint8 *memptr = NULL;
3128     uint8 *ramsizeptr = NULL;
3129     uint ramsizelen;
3130     uint32 ramsize_ptr_ptr[] = {RAMSIZE_PTR_PTR_LIST};
3131     hnd_ramsize_ptr_t ramsize_info;
3132 
3133     DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3134 
3135     /* Adjust dongle RAMSIZE already called. */
3136     if (bus->ramsize_adjusted) {
3137         return;
3138     }
3139 
3140     /* Success or failure, we don't want to be here
3141      * more than once.
3142      */
3143     bus->ramsize_adjusted = TRUE;
3144 
3145     /* Skip adjustment if the user has restricted the dongle RAM size */
3146     if (dhd_dongle_memsize) {
3147         DHD_ERROR(("%s: user restrict dongle ram size to %d.\n", __FUNCTION__,
3148                    dhd_dongle_memsize));
3149         return;
3150     }
3151 
3152     /* Out immediately if no image to download */
3153     if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
3154         DHD_ERROR(("%s: no fimrware file\n", __FUNCTION__));
3155         return;
3156     }
3157 
3158     /* Get maximum RAMSIZE info search length */
3159     for (i = 0;; i++) {
3160         if (ramsize_ptr_ptr[i] == RAMSIZE_PTR_PTR_END) {
3161             break;
3162         }
3163 
3164         if (search_len < (int)ramsize_ptr_ptr[i]) {
3165             search_len = (int)ramsize_ptr_ptr[i];
3166         }
3167     }
3168 
3169     if (!search_len) {
3170         return;
3171     }
3172 
3173     search_len += sizeof(hnd_ramsize_ptr_t);
3174 
3175     memptr = MALLOC(bus->dhd->osh, search_len);
3176     if (memptr == NULL) {
3177         DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__,
3178                    search_len));
3179         return;
3180     }
3181 
3182     /* External image takes precedence if specified */
3183     if (dhdpcie_ramsize_read_image(bus, (char *)memptr, search_len) !=
3184         BCME_OK) {
3185         goto err;
3186     } else {
3187         ramsizeptr = memptr;
3188         ramsizelen = search_len;
3189     }
3190 
3191     if (ramsizeptr) {
3192         /* Check Magic */
3193         for (i = 0;; i++) {
3194             if (ramsize_ptr_ptr[i] == RAMSIZE_PTR_PTR_END) {
3195                 break;
3196             }
3197 
3198             if (ramsize_ptr_ptr[i] + sizeof(hnd_ramsize_ptr_t) > ramsizelen) {
3199                 continue;
3200             }
3201 
3202             memcpy((char *)&ramsize_info, ramsizeptr + ramsize_ptr_ptr[i],
3203                    sizeof(hnd_ramsize_ptr_t));
3204 
3205             if (ramsize_info.magic == HTOL32(HND_RAMSIZE_PTR_MAGIC)) {
3206                 bus->orig_ramsize = LTOH32(ramsize_info.ram_size);
3207                 bus->ramsize = LTOH32(ramsize_info.ram_size);
3208                 DHD_ERROR(("%s: Adjust dongle RAMSIZE to 0x%x\n", __FUNCTION__,
3209                            bus->ramsize));
3210                 break;
3211             }
3212         }
3213     }
3214 
3215 err:
3216     if (memptr) {
3217         MFREE(bus->dhd->osh, memptr, search_len);
3218     }
3219 
3220     return;
3221 } /* dhdpcie_ramsize_adj */
3222 
3223 /**
3224  * Downloads firmware file given by 'bus->fw_path' into PCIe dongle
3225  *
3226  * BCMEMBEDIMAGE specific:
3227  * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware
3228  * contained in header file will be used instead.
3229  *
3230  */
3231 static int _dhdpcie_download_firmware(struct dhd_bus *bus)
3232 {
3233     int bcmerror = -1;
3234 
3235     bool embed = FALSE; /* download embedded firmware */
3236     bool dlok = FALSE;  /* download firmware succeeded */
3237 
3238     /* Out immediately if no image to download */
3239     if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
3240         DHD_ERROR(("%s: no fimrware file\n", __FUNCTION__));
3241         return 0;
3242     }
3243     /* Adjust ram size */
3244     dhdpcie_ramsize_adj(bus);
3245 
3246     /* Keep arm in reset */
3247     if (dhdpcie_bus_download_state(bus, TRUE)) {
3248         DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__));
3249         goto err;
3250     }
3251 
3252     /* External image takes precedence if specified */
3253     if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
3254         if (dhdpcie_download_code_file(bus, bus->fw_path)) {
3255             DHD_ERROR(("%s:%d dongle image file download failed\n",
3256                        __FUNCTION__, __LINE__));
3257             goto err;
3258         } else {
3259             embed = FALSE;
3260             dlok = TRUE;
3261         }
3262     }
3263 
3264     BCM_REFERENCE(embed);
3265     if (!dlok) {
3266         DHD_ERROR(
3267             ("%s:%d dongle image download failed\n", __FUNCTION__, __LINE__));
3268         goto err;
3269     }
3270 
3271     /* EXAMPLE: nvram_array */
3272     /* If a valid nvram_arry is specified as above, it can be passed down to
3273      * dongle */
3274     /* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */
3275 
3276     /* External nvram takes precedence if specified */
3277     if (dhdpcie_download_nvram(bus)) {
3278         DHD_ERROR(("%s:%d dongle nvram file download failed\n", __FUNCTION__,
3279                    __LINE__));
3280         goto err;
3281     }
3282 
3283     /* Take arm out of reset */
3284     if (dhdpcie_bus_download_state(bus, FALSE)) {
3285         DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__));
3286         goto err;
3287     }
3288 
3289     bcmerror = 0;
3290 
3291 err:
3292     return bcmerror;
3293 } /* _dhdpcie_download_firmware */
3294 
3295 static int dhdpcie_bus_readconsole(dhd_bus_t *bus)
3296 {
3297     dhd_console_t *c = &bus->console;
3298     uint8 line[CONSOLE_LINE_MAX], ch;
3299     uint32 n, idx, addr;
3300     int rv;
3301     uint readlen = 0;
3302     uint i = 0;
3303 
3304     /* Don't do anything until FWREADY updates console address */
3305     if (bus->console_addr == 0) {
3306         return -1;
3307     }
3308 
3309     /* Read console log struct */
3310     addr = bus->console_addr + OFFSETOF(hnd_cons_t, log);
3311     if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&c->log,
3312                                    sizeof(c->log))) < 0) {
3313         return rv;
3314     }
3315     /* Allocate console buffer (one time only) */
3316     if (c->buf == NULL) {
3317         c->bufsize = ltoh32(c->log.buf_size);
3318         if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL) {
3319             return BCME_NOMEM;
3320         }
3321         DHD_INFO(("conlog: bufsize=0x%x\n", c->bufsize));
3322     }
3323     idx = ltoh32(c->log.idx);
3324     /* Protect against corrupt value */
3325     if (idx > c->bufsize) {
3326         return BCME_ERROR;
3327     }
3328     /* Skip reading the console buffer if the index pointer has not moved */
3329     if (idx == c->last) {
3330         return BCME_OK;
3331     }
3332 
3333     DHD_INFO(("conlog: addr=0x%x, idx=0x%x, last=0x%x \n", c->log.buf, idx,
3334               c->last));
3335 
3336     /* Read the console buffer data into a local buffer */
3337     /* Optimization: read only the portion of the buffer that is needed,
3338      * taking care to handle wrap-around.
3339      */
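    /* Wrap-around example (illustrative numbers): with bufsize = 0x100,
     * last = 0xF0 (host read pointer), and idx = 0x10 (dongle write
     * pointer), idx < last, so two reads are issued: 0x10 bytes from offset
     * 0xF0 to the end of the ring, then idx = 0x10 bytes from offset 0, for
     * a total readlen of 0x20.
     */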
3340     addr = ltoh32(c->log.buf);
3341 
3342     /* wrap around case - write ptr < read ptr */
3343     if (idx < c->last) {
3344         /* from read ptr to end of buffer */
3345         readlen = c->bufsize - c->last;
3346         if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr + c->last, c->buf,
3347                                        readlen)) < 0) {
3348             DHD_ERROR(("conlog: read error[1] ! \n"));
3349             return rv;
3350         }
3351         /* from beginning of buffer to write ptr */
3352         if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, c->buf + readlen,
3353                                        idx)) < 0) {
3354             DHD_ERROR(("conlog: read error[2] ! \n"));
3355             return rv;
3356         }
3357         readlen += idx;
3358     } else {
3359         /* non-wraparound case, write ptr > read ptr */
3360         readlen = (uint)idx - c->last;
3361         if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr + c->last, c->buf,
3362                                        readlen)) < 0) {
3363             DHD_ERROR(("conlog: read error[3] ! \n"));
3364             return rv;
3365         }
3366     }
3367     /* update read ptr */
3368     c->last = idx;
3369 
3370     /* now output the read data from the local buffer to the host console */
3371     while (i < readlen) {
3372         for (n = 0; n < CONSOLE_LINE_MAX - 0x2 && i < readlen; n++) {
3373             ch = c->buf[i];
3374             ++i;
3375             if (ch == '\n') {
3376                 break;
3377             }
3378             line[n] = ch;
3379         }
3380 
3381         if (n > 0) {
3382             if (line[n - 1] == '\r') {
3383                 n--;
3384             }
3385             line[n] = 0;
3386             printf("CONSOLE: %s\n", line);
3387         }
3388     }
3389 
3390     return BCME_OK;
3391 } /* dhdpcie_bus_readconsole */
3392 
3393 void dhd_bus_dump_console_buffer(dhd_bus_t *bus)
3394 {
3395     uint32 n, i;
3396     uint32 addr;
3397     char *console_buffer = NULL;
3398     uint32 console_ptr, console_size, console_index;
3399     uint8 line[CONSOLE_LINE_MAX], ch;
3400     int rv;
3401     DHD_ERROR(("%s: Dump Complete Console Buffer\n", __FUNCTION__));
3402 
3403     if (bus->is_linkdown) {
3404         DHD_ERROR(("%s: Skip dump Console Buffer due to PCIe link down\n",
3405                    __FUNCTION__));
3406         return;
3407     }
3408 
3409     addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log);
3410     if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&console_ptr,
3411                                    sizeof(console_ptr))) < 0) {
3412         goto exit;
3413     }
3414 
3415     addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.buf_size);
3416     if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&console_size,
3417                                    sizeof(console_size))) < 0) {
3418         goto exit;
3419     }
3420 
3421     addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.idx);
3422     if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&console_index,
3423                                    sizeof(console_index))) < 0) {
3424         goto exit;
3425     }
3426 
3427     console_ptr = ltoh32(console_ptr);
3428     console_size = ltoh32(console_size);
3429     console_index = ltoh32(console_index);
3430 
3431     if (console_size > CONSOLE_BUFFER_MAX ||
3432         !(console_buffer = MALLOC(bus->dhd->osh, console_size))) {
3433         goto exit;
3434     }
3435 
3436     if ((rv = dhdpcie_bus_membytes(bus, FALSE, console_ptr,
3437                                    (uint8 *)console_buffer, console_size)) <
3438         0) {
3439         goto exit;
3440     }
3441 
3442     for (i = 0, n = 0; i < console_size; i += n + 1) {
3443         for (n = 0; n < CONSOLE_LINE_MAX - 0x2; n++) {
3444             ch = console_buffer[(console_index + i + n) % console_size];
3445             if (ch == '\n') {
3446                 break;
3447             }
3448             line[n] = ch;
3449         }
3450 
3451         if (n > 0) {
3452             if (line[n - 1] == '\r') {
3453                 n--;
3454             }
3455             line[n] = 0;
3456             /* Don't use DHD_ERROR macro since we print
3457              * a lot of information quickly. The macro
3458              * will truncate a lot of the printfs
3459              */
3460 
3461             printf("CONSOLE: %s\n", line);
3462         }
3463     }
3464 
3465 exit:
3466     if (console_buffer) {
3467         MFREE(bus->dhd->osh, console_buffer, console_size);
3468     }
3469     return;
3470 }
3471 
3472 /**
3473  * Checks whether the dongle has trapped or asserted; if so, dumps the
3474  * trap/assert info and console buffer, and schedules memory dump
3475  * collection and a reset.
3476  * @return BCME_OK on success
3477  */
3478 static int dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size)
3479 {
3480     int bcmerror = 0;
3481     uint msize = 512;
3482     char *mbuffer = NULL;
3483     uint maxstrlen = 256;
3484     char *str = NULL;
3485     pciedev_shared_t *local_pciedev_shared = bus->pcie_sh;
3486     struct bcmstrbuf strbuf;
3487     unsigned long flags;
3488     bool dongle_trap_occured = FALSE;
3489 
3490     DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3491 
3492     if (DHD_NOCHECKDIED_ON()) {
3493         return 0;
3494     }
3495 
3496     if (data == NULL) {
3497         /*
3498          * Called after an rx ctrl timeout: "data" is NULL.
3499          * Allocate memory to trace the trap or assert.
3500          */
3501         size = msize;
3502         mbuffer = data = MALLOC(bus->dhd->osh, msize);
3503         if (mbuffer == NULL) {
3504             DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize));
3505             bcmerror = BCME_NOMEM;
3506             goto done2;
3507         }
3508     }
3509     if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) {
3510         DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen));
3511         bcmerror = BCME_NOMEM;
3512         goto done2;
3513     }
3514     DHD_GENERAL_LOCK(bus->dhd, flags);
3515     DHD_BUS_BUSY_SET_IN_CHECKDIED(bus->dhd);
3516     DHD_GENERAL_UNLOCK(bus->dhd, flags);
3517 
3518     if (MULTIBP_ENAB(bus->sih)) {
3519         dhd_bus_pcie_pwr_req(bus);
3520     }
3521     if ((bcmerror = dhdpcie_readshared(bus)) < 0) {
3522         goto done1;
3523     }
3524 
3525     bcm_binit(&strbuf, data, size);
3526 
3527     bcm_bprintf(&strbuf,
3528                 "msgtrace address : 0x%08X\nconsole address  : 0x%08X\n",
3529                 local_pciedev_shared->msgtrace_addr,
3530                 local_pciedev_shared->console_addr);
3531 
3532     if ((local_pciedev_shared->flags & PCIE_SHARED_ASSERT_BUILT) == 0) {
3533         /* NOTE: Misspelled assert is intentional - DO NOT FIX.
3534          * (Avoids conflict with real asserts for programmatic parsing of
3535          * output.)
3536          */
3537         bcm_bprintf(&strbuf, "Assrt not built in dongle\n");
3538     }
3539 
3540     if ((bus->pcie_sh->flags & (PCIE_SHARED_ASSERT | PCIE_SHARED_TRAP)) == 0) {
3541         /* NOTE: Misspelled assert is intentional - DO NOT FIX.
3542          * (Avoids conflict with real asserts for programmatic parsing of
3543          * output.)
3544          */
3545         bcm_bprintf(&strbuf, "No trap%s in dongle",
3546                     (bus->pcie_sh->flags & PCIE_SHARED_ASSERT_BUILT) ? "/assrt"
3547                                                                      : "");
3548     } else {
3549         if (bus->pcie_sh->flags & PCIE_SHARED_ASSERT) {
3550             /* Download assert */
3551             bcm_bprintf(&strbuf, "Dongle assert");
3552             if (bus->pcie_sh->assert_exp_addr != 0) {
3553                 str[0] = '\0';
3554                 if ((bcmerror = dhdpcie_bus_membytes(
3555                          bus, FALSE, bus->pcie_sh->assert_exp_addr,
3556                          (uint8 *)str, maxstrlen)) < 0) {
3557                     goto done1;
3558                 }
3559 
3560                 str[maxstrlen - 1] = '\0';
3561                 bcm_bprintf(&strbuf, " expr \"%s\"", str);
3562             }
3563 
3564             if (bus->pcie_sh->assert_file_addr != 0) {
3565                 str[0] = '\0';
3566                 if ((bcmerror = dhdpcie_bus_membytes(
3567                          bus, FALSE, bus->pcie_sh->assert_file_addr,
3568                          (uint8 *)str, maxstrlen)) < 0) {
3569                     goto done1;
3570                 }
3571 
3572                 str[maxstrlen - 1] = '\0';
3573                 bcm_bprintf(&strbuf, " file \"%s\"", str);
3574             }
3575 
3576             bcm_bprintf(&strbuf, " line %d ", bus->pcie_sh->assert_line);
3577         }
3578 
3579         if (bus->pcie_sh->flags & PCIE_SHARED_TRAP) {
3580             trap_t *tr = &bus->dhd->last_trap_info;
3581             dongle_trap_occured = TRUE;
3582             if ((bcmerror =
3583                      dhdpcie_bus_membytes(bus, FALSE, bus->pcie_sh->trap_addr,
3584                                           (uint8 *)tr, sizeof(trap_t))) < 0) {
3585                 bus->dhd->dongle_trap_occured = TRUE;
3586                 goto done1;
3587             }
3588             dhd_bus_dump_trap_info(bus, &strbuf);
3589         }
3590     }
3591 
3592     if (bus->pcie_sh->flags & (PCIE_SHARED_ASSERT | PCIE_SHARED_TRAP)) {
3593         printf("%s: %s\n", __FUNCTION__, strbuf.origbuf);
3594 
3595         dhd_bus_dump_console_buffer(bus);
3596         dhd_prot_debug_info_print(bus->dhd);
3597 
3598 #if defined(DHD_FW_COREDUMP)
3599         /* save core dump or write to a file */
3600         if (bus->dhd->memdump_enabled) {
3601 #ifdef DHD_SSSR_DUMP
3602             bus->dhd->collect_sssr = TRUE;
3603 #endif /* DHD_SSSR_DUMP */
3604             bus->dhd->memdump_type = DUMP_TYPE_DONGLE_TRAP;
3605             dhdpcie_mem_dump(bus);
3606         }
3607 #endif /* DHD_FW_COREDUMP */
3608 
3609         /* set the trap occurred flag only after all the memdump,
3610          * logdump and sssr dump collection has been scheduled
3611          */
3612         if (dongle_trap_occured) {
3613             bus->dhd->dongle_trap_occured = TRUE;
3614         }
3615 
3616         /* wake up IOCTL wait event */
3617         dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_TRAP);
3618 
3619         dhd_schedule_reset(bus->dhd);
3620     }
3621 
3622 done1:
3623     if (MULTIBP_ENAB(bus->sih)) {
3624         dhd_bus_pcie_pwr_req_clear(bus);
3625     }
3626 
3627     DHD_GENERAL_LOCK(bus->dhd, flags);
3628     DHD_BUS_BUSY_CLEAR_IN_CHECKDIED(bus->dhd);
3629     dhd_os_busbusy_wake(bus->dhd);
3630     DHD_GENERAL_UNLOCK(bus->dhd, flags);
3631 done2:
3632     if (mbuffer) {
3633         MFREE(bus->dhd->osh, mbuffer, msize);
3634     }
3635     if (str) {
3636         MFREE(bus->dhd->osh, str, maxstrlen);
3637     }
3638 
3639     return bcmerror;
3640 } /* dhdpcie_checkdied */
3641 
3642 /* Custom copy of dhdpcie_mem_dump() that can be called at interrupt level */
3643 void dhdpcie_mem_dump_bugcheck(dhd_bus_t *bus, uint8 *buf)
3644 {
3645     int ret = 0;
3646     int size;          /* Full mem size */
3647     int start;         /* Start address */
3648     int read_size = 0; /* Read size of each iteration */
3649     uint8 *databuf = buf;
3650 
3651     if (bus == NULL) {
3652         return;
3653     }
3654 
3655     start = bus->dongle_ram_base;
3656     read_size = 0x4;
3657     /* check for dead bus */
3658     {
3659         uint test_word = 0;
3660         ret = dhdpcie_bus_membytes(bus, FALSE, start, (uint8 *)&test_word,
3661                                    read_size);
3662         /* if read error or bus timeout */
3663         if (ret || (test_word == 0xFFFFFFFF)) {
3664             return;
3665         }
3666     }
3667 
3668     /* Get full mem size */
3669     size = bus->ramsize;
3670     /* Read mem content */
3671     while (size) {
3672         read_size = MIN(MEMBLOCK, size);
3673         if ((ret =
3674                  dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size))) {
3675             return;
3676         }
3677 
3678         /* Decrement size and increment start address */
3679         size -= read_size;
3680         start += read_size;
3681         databuf += read_size;
3682     }
3683     bus->dhd->soc_ram = buf;
3684     bus->dhd->soc_ram_length = bus->ramsize;
3685     return;
3686 }
3687 
3688 #if defined(DHD_FW_COREDUMP)
3689 static int dhdpcie_get_mem_dump(dhd_bus_t *bus)
3690 {
3691     int ret = BCME_OK;
3692     int size = 0;
3693     int start = 0;
3694     int read_size = 0; /* Read size of each iteration */
3695     uint8 *p_buf = NULL, *databuf = NULL;
3696 
3697     if (!bus) {
3698         DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
3699         return BCME_ERROR;
3700     }
3701 
3702     if (!bus->dhd) {
3703         DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
3704         return BCME_ERROR;
3705     }
3706 
3707     size = bus->ramsize;          /* Full mem size */
3708     start = bus->dongle_ram_base; /* Start address */
3709 
3710     /* Get full mem size */
3711     p_buf = dhd_get_fwdump_buf(bus->dhd, size);
3712     if (!p_buf) {
3713         DHD_ERROR(("%s: Out of memory (%d bytes)\n", __FUNCTION__, size));
3714         return BCME_ERROR;
3715     }
3716 
3717     /* Read mem content */
3718     DHD_TRACE_HW4(("Dump dongle memory\n"));
3719     databuf = p_buf;
3720     while (size > 0) {
3721         read_size = MIN(MEMBLOCK, size);
3722         ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size);
3723         if (ret) {
3724             DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__, ret));
3725 #ifdef DHD_DEBUG_UART
3726             bus->dhd->memdump_success = FALSE;
3727 #endif /* DHD_DEBUG_UART */
3728             break;
3729         }
3730         DHD_TRACE(("."));
3731 
3732         /* Decrement size and increment start address */
3733         size -= read_size;
3734         start += read_size;
3735         databuf += read_size;
3736     }
3737 
3738     return ret;
3739 }
3740 
3741 static int dhdpcie_mem_dump(dhd_bus_t *bus)
3742 {
3743     dhd_pub_t *dhdp;
3744     int ret;
3745 
3746 #ifdef EXYNOS_PCIE_DEBUG
3747     exynos_pcie_register_dump(1);
3748 #endif /* EXYNOS_PCIE_DEBUG */
3749 
3750     dhdp = bus->dhd;
3751     if (!dhdp) {
3752         DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
3753         return BCME_ERROR;
3754     }
3755 
3756     if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
3757         DHD_ERROR(
3758             ("%s: bus is down! can't collect mem dump. \n", __FUNCTION__));
3759         return BCME_ERROR;
3760     }
3761 
3762 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
3763     if (pm_runtime_get_sync(dhd_bus_to_dev(bus)) < 0) {
3764         return BCME_ERROR;
3765     }
3766 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
3767 
3768     ret = dhdpcie_get_mem_dump(bus);
3769     if (ret) {
3770         DHD_ERROR(("%s: failed to get mem dump, err=%d\n", __FUNCTION__, ret));
3771         return ret;
3772     }
3773 
3774     dhd_schedule_memdump(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
3775     /* buf, actually soc_ram free handled in dhd_{free,clear} */
3776 
3777 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
3778     pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
3779     pm_runtime_put_autosuspend(dhd_bus_to_dev(bus));
3780 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
3781 
3782     return ret;
3783 }
3784 
3785 int dhd_bus_get_mem_dump(dhd_pub_t *dhdp)
3786 {
3787     if (!dhdp) {
3788         DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
3789         return BCME_ERROR;
3790     }
3791 
3792     return dhdpcie_get_mem_dump(dhdp->bus);
3793 }
3794 
3795 int dhd_bus_mem_dump(dhd_pub_t *dhdp)
3796 {
3797     dhd_bus_t *bus = dhdp->bus;
3798     int ret = BCME_ERROR;
3799 
3800     if (dhdp->busstate == DHD_BUS_DOWN) {
3801         DHD_ERROR(("%s bus is down\n", __FUNCTION__));
3802         return BCME_ERROR;
3803     }
3804 
3805     /* Try to resume if already suspended or suspend in progress */
3806 
3807     /* Skip if still in suspended or suspend in progress */
3808     if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhdp)) {
3809         DHD_ERROR(
3810             ("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
3811              __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
3812         return BCME_ERROR;
3813     }
3814 
3815     DHD_OS_WAKE_LOCK(dhdp);
3816     ret = dhdpcie_mem_dump(bus);
3817     DHD_OS_WAKE_UNLOCK(dhdp);
3818     return ret;
3819 }
3820 #endif /* DHD_FW_COREDUMP */
3821 
3822 int dhd_socram_dump(dhd_bus_t *bus)
3823 {
3824 #if defined(DHD_FW_COREDUMP)
3825     DHD_OS_WAKE_LOCK(bus->dhd);
3826     dhd_bus_mem_dump(bus->dhd);
3827     DHD_OS_WAKE_UNLOCK(bus->dhd);
3828     return 0;
3829 #else
3830     return -1;
3831 #endif // endif
3832 }
3833 
3834 /**
3835  * Transfers bytes between host and dongle (direction selected by 'write')
3836  * using PIO mode. Parameter 'address' is a backplane address.
3837  */
3838 static int dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address,
3839                                 uint8 *data, uint size)
3840 {
3841     uint dsize;
3842     int detect_endian_flag = 0x01;
3843     bool little_endian;
3844 
3845     if (write && bus->is_linkdown) {
3846         DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
3847         return BCME_ERROR;
3848     }
3849 
3850     if (MULTIBP_ENAB(bus->sih)) {
3851         dhd_bus_pcie_pwr_req(bus);
3852     }
3853     /* Detect endianness. */
3854     little_endian = *(char *)&detect_endian_flag;
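    /* This is the classic run-time endianness probe: detect_endian_flag
     * holds 0x01, so reading its first byte through a char pointer yields 1
     * on a little-endian host and 0 on a big-endian one.
     */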
3855 
3856     /* In remap mode, adjust address beyond socram and redirect
3857      * to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize
3858      * is not backplane accessible
3859      */
3860 
3861     /* Determine initial transfer parameters */
3862 #ifdef DHD_SUPPORT_64BIT
3863     dsize = sizeof(uint64);
3864 #else  /* !DHD_SUPPORT_64BIT */
3865     dsize = sizeof(uint32);
3866 #endif /* DHD_SUPPORT_64BIT */
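    /* Transfer-width selection used by the loops below: when the host is
     * little-endian, at least one full word remains, and the backplane
     * address is word-aligned, a 4- or 8-byte TCM access is issued;
     * otherwise dsize drops to a single byte. For example, a 10-byte write
     * to an 8-byte-aligned address on a 64-bit build becomes one 8-byte
     * access followed by two 1-byte accesses.
     */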
3867 
3868     /* Do the transfer(s) */
3869     DHD_INFO(("%s: %s %d bytes in window 0x%08lx\n", __FUNCTION__,
3870               (write ? "write" : "read"), size, address));
3871     if (write) {
3872         while (size) {
3873 #ifdef DHD_SUPPORT_64BIT
3874             if (size >= sizeof(uint64) && little_endian && !(address % 0x8)) {
3875                 dhdpcie_bus_wtcm64(bus, address, *((uint64 *)data));
3876             }
3877 #else  /* !DHD_SUPPORT_64BIT */
3878             if (size >= sizeof(uint32) && little_endian && !(address % 4)) {
3879                 dhdpcie_bus_wtcm32(bus, address, *((uint32 *)data));
3880             }
3881 #endif /* DHD_SUPPORT_64BIT */
3882             else {
3883                 dsize = sizeof(uint8);
3884                 dhdpcie_bus_wtcm8(bus, address, *data);
3885             }
3886 
3887             /* Adjust for next transfer (if any) */
3888             if ((size -= dsize)) {
3889                 data += dsize;
3890                 address += dsize;
3891             }
3892         }
3893     } else {
3894         while (size) {
3895 #ifdef DHD_SUPPORT_64BIT
3896             if (size >= sizeof(uint64) && little_endian && !(address % 0x8)) {
3897                 *(uint64 *)data = dhdpcie_bus_rtcm64(bus, address);
3898             }
3899 #else  /* !DHD_SUPPORT_64BIT */
3900             if (size >= sizeof(uint32) && little_endian && !(address % 0x4)) {
3901                 *(uint32 *)data = dhdpcie_bus_rtcm32(bus, address);
3902             }
3903 #endif /* DHD_SUPPORT_64BIT */
3904             else {
3905                 dsize = sizeof(uint8);
3906                 *data = dhdpcie_bus_rtcm8(bus, address);
3907             }
3908 
3909             /* Adjust for next transfer (if any) */
3910             if ((size -= dsize) > 0) {
3911                 data += dsize;
3912                 address += dsize;
3913             }
3914         }
3915     }
3916     if (MULTIBP_ENAB(bus->sih)) {
3917         dhd_bus_pcie_pwr_req_clear(bus);
3918     }
3919     return BCME_OK;
3920 } /* dhdpcie_bus_membytes */
3921 
3922 /**
3923  * Transfers one transmit (ethernet) packet that was queued in the (flow
3924  * controlled) flow ring queue to the (non flow controlled) flow ring.
3925  */
3926 int BCMFASTPATH dhd_bus_schedule_queue(struct dhd_bus *bus, uint16 flow_id,
3927                                        bool txs)
3928 {
3929     flow_ring_node_t *flow_ring_node;
3930     int ret = BCME_OK;
3931 #ifdef DHD_LOSSLESS_ROAMING
3932     dhd_pub_t *dhdp = bus->dhd;
3933 #endif // endif
3934     DHD_INFO(("%s: flow_id is %d\n", __FUNCTION__, flow_id));
3935 
3936     /* ASSERT on flow_id */
3937     if (flow_id >= bus->max_submission_rings) {
3938         DHD_ERROR(("%s: flow_id is invalid %d, max %d\n", __FUNCTION__, flow_id,
3939                    bus->max_submission_rings));
3940         return 0;
3941     }
3942 
3943     flow_ring_node = DHD_FLOW_RING(bus->dhd, flow_id);
3944 
3945     if (flow_ring_node->prot_info == NULL) {
3946         DHD_ERROR((" %s : invalid flow_ring_node \n", __FUNCTION__));
3947         return BCME_NOTREADY;
3948     }
3949 
3950 #ifdef DHD_LOSSLESS_ROAMING
3951     if ((dhdp->dequeue_prec_map & (1 << flow_ring_node->flow_info.tid)) == 0) {
3952         DHD_INFO(("%s: tid %d is not in precedence map. block scheduling\n",
3953                   __FUNCTION__, flow_ring_node->flow_info.tid));
3954         return BCME_OK;
3955     }
3956 #endif /* DHD_LOSSLESS_ROAMING */
3957 
3958     {
3959         unsigned long flags;
3960         void *txp = NULL;
3961         flow_queue_t *queue;
3962 #ifdef DHD_LOSSLESS_ROAMING
3963         struct ether_header *eh;
3964         uint8 *pktdata;
3965 #endif /* DHD_LOSSLESS_ROAMING */
3966 #ifdef TPUT_MONITOR
3967         int pktlen;
3968 #endif
3969 
3970         queue = &flow_ring_node->queue; /* queue associated with flow ring */
3971 
3972         DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
3973 
3974         if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) {
3975             DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
3976             return BCME_NOTREADY;
3977         }
3978 
3979         while ((txp = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
3980             if (bus->dhd->conf->orphan_move <= 1) {
3981                 PKTORPHAN(txp, bus->dhd->conf->tsq);
3982             }
3983 
3984             /*
3985              * Modifying the packet length caused P2P cert failures.
3986              * Specifically, on test cases where a packet of size 52 bytes
3987              * was injected, the sniffer capture showed 62 bytes, which made
3988              * the cert tests fail. The change below is therefore made
3989              * router-specific only.
3990              */
3991 
3992 #ifdef DHDTCPACK_SUPPRESS
3993             if (bus->dhd->tcpack_sup_mode != TCPACK_SUP_HOLD) {
3994                 ret = dhd_tcpack_check_xmit(bus->dhd, txp);
3995                 if (ret != BCME_OK) {
3996                     DHD_ERROR(
3997                         ("%s: dhd_tcpack_check_xmit() error.\n", __FUNCTION__));
3998                 }
3999             }
4000 #endif /* DHDTCPACK_SUPPRESS */
4001 #ifdef DHD_LOSSLESS_ROAMING
4002             pktdata = (uint8 *)PKTDATA(OSH_NULL, txp);
4003             eh = (struct ether_header *)pktdata;
4004             if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) {
4005                 uint8 prio = (uint8)PKTPRIO(txp);
4006                 /* Restore to original priority for 802.1X packet */
4007                 if (prio == PRIO_8021D_NC) {
4008                     PKTSETPRIO(txp, dhdp->prio_8021x);
4009                 }
4010             }
4011 #endif      /* DHD_LOSSLESS_ROAMING */
4012             /* Attempt to transfer packet over flow ring */
4013 #ifdef TPUT_MONITOR
4014             pktlen = PKTLEN(OSH_NULL, txp);
4015             if ((bus->dhd->conf->data_drop_mode == TXPKT_DROP) &&
4016                 (pktlen > 500)) {
4017                 ret = BCME_OK;
4018             } else
4019 #endif
4020                 ret = dhd_prot_txdata(bus->dhd, txp,
4021                                       flow_ring_node->flow_info.ifindex);
4022             if (ret != BCME_OK) { /* may not have resources in flow ring */
4023                 DHD_INFO(("%s: Reinserrt %d\n", __FUNCTION__, ret));
4024                 dhd_prot_txdata_write_flush(bus->dhd, flow_id);
4025                 /* reinsert at head */
4026                 dhd_flow_queue_reinsert(bus->dhd, queue, txp);
4027                 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4028 
4029                 /* If we are able to requeue back, return success */
4030                 return BCME_OK;
4031             }
4032         }
4033 
4034 #ifdef DHD_HP2P
4035         if (!flow_ring_node->hp2p_ring) {
4036             dhd_prot_txdata_write_flush(bus->dhd, flow_id);
4037         }
4038 #else
4039         dhd_prot_txdata_write_flush(bus->dhd, flow_id);
4040 #endif // endif
4041         DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4042     }
4043 
4044     return ret;
4045 } /* dhd_bus_schedule_queue */
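/* Design note on the dequeue loop above: when dhd_prot_txdata() cannot get
 * flow ring resources, the packet is reinserted at the head of the flow
 * queue and BCME_OK is returned, so the caller does not free the packet; it
 * is simply retried the next time this queue is scheduled.
 */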
4046 
4047 /** Sends an (ethernet) data frame (in 'txp') to the dongle. Callee disposes of
4048  * txp. */
4049 int BCMFASTPATH dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx)
4050 {
4051     uint16 flowid;
4052 #ifdef IDLE_TX_FLOW_MGMT
4053     uint8 node_status;
4054 #endif /* IDLE_TX_FLOW_MGMT */
4055     flow_queue_t *queue;
4056     flow_ring_node_t *flow_ring_node;
4057     unsigned long flags;
4058     int ret = BCME_OK;
4059     void *txp_pend = NULL;
4060 
4061     if (!bus->dhd->flowid_allocator) {
4062         DHD_ERROR(("%s: Flow ring not intited yet  \n", __FUNCTION__));
4063         goto toss;
4064     }
4065 
4066     flowid = DHD_PKT_GET_FLOWID(txp);
4067 
4068     flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
4069 
4070     DHD_TRACE(("%s: pkt flowid %d, status %d active %d\n", __FUNCTION__, flowid,
4071                flow_ring_node->status, flow_ring_node->active));
4072 
4073     DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
4074     if ((flowid >= bus->dhd->num_flow_rings) ||
4075 #ifdef IDLE_TX_FLOW_MGMT
4076         (!flow_ring_node->active))
4077 #else
4078         (!flow_ring_node->active) ||
4079         (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) ||
4080         (flow_ring_node->status == FLOW_RING_STATUS_STA_FREEING))
4081 #endif /* IDLE_TX_FLOW_MGMT */
4082     {
4083         DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4084         DHD_INFO(("%s: Dropping pkt flowid %d, status %d active %d\n",
4085                   __FUNCTION__, flowid, flow_ring_node->status,
4086                   flow_ring_node->active));
4087         ret = BCME_ERROR;
4088         goto toss;
4089     }
4090 
4091 #ifdef IDLE_TX_FLOW_MGMT
4092     node_status = flow_ring_node->status;
4093 
4094     /* Handle the different status states here */
4095     switch (node_status) {
4096         case FLOW_RING_STATUS_OPEN:
4097 
4098             if (bus->enable_idle_flowring_mgmt) {
4099                 /* Move the node to the head of active list */
4100                 dhd_flow_ring_move_to_active_list_head(bus, flow_ring_node);
4101             }
4102             break;
4103 
4104         case FLOW_RING_STATUS_SUSPENDED:
4105             DHD_INFO(("Need to Initiate TX Flow resume\n"));
4106             /* Issue resume_ring request */
4107             dhd_bus_flow_ring_resume_request(bus, flow_ring_node);
4108             break;
4109 
4110         case FLOW_RING_STATUS_CREATE_PENDING:
4111         case FLOW_RING_STATUS_RESUME_PENDING:
4112             /* Don't do anything here */
4113             DHD_INFO(("Waiting for Flow create/resume! status is %u\n",
4114                       node_status));
4115             break;
4116 
4117         case FLOW_RING_STATUS_DELETE_PENDING:
4118         default:
4119             DHD_ERROR(("Dropping packet!! flowid %u status is %u\n", flowid,
4120                        node_status));
4121             /* error here!! */
4122             ret = BCME_ERROR;
4123             DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4124             goto toss;
4125     }
4126         /* Now queue the packet */
4127 #endif /* IDLE_TX_FLOW_MGMT */
4128 
4129     queue = &flow_ring_node->queue; /* queue associated with flow ring */
4130 
4131     if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK) {
4132         txp_pend = txp;
4133     }
4134 
4135     DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4136 
4137     if (flow_ring_node->status) {
4138         DHD_INFO(("%s: Enq pkt flowid %d, status %d active %d\n", __FUNCTION__,
4139                   flowid, flow_ring_node->status, flow_ring_node->active));
4140         if (txp_pend) {
4141             txp = txp_pend;
4142             goto toss;
4143         }
4144         return BCME_OK;
4145     }
4146     ret =
4147         dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */
4148 
4149     /* If we have anything pending, try to push into q */
4150     if (txp_pend) {
4151         DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
4152 
4153         if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp_pend)) !=
4154             BCME_OK) {
4155             DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4156             txp = txp_pend;
4157             goto toss;
4158         }
4159 
4160         DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4161     }
4162 
4163     return ret;
4164 
4165 toss:
4166     DHD_INFO(("%s: Toss %d\n", __FUNCTION__, ret));
4167     PKTCFREE(bus->dhd->osh, txp, TRUE);
4168     return ret;
4169 } /* dhd_bus_txdata */
4170 
4171 void dhd_bus_stop_queue(struct dhd_bus *bus)
4172 {
4173     dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
4174 }
4175 
4176 void dhd_bus_start_queue(struct dhd_bus *bus)
4177 {
4178     /*
4179      * The Tx queue was stopped either because of a resource shortage or
4180      * because the bus was not in a state to be turned on.
4181      *
4182      * Note that the network interface is restarted only when enough
4183      * resources are available, so the flag indicating that all resources
4184      * are available must be updated first.
4185      */
4186     if (dhd_prot_check_tx_resource(bus->dhd)) {
4187         DHD_ERROR(("%s: Interface NOT started, previously stopped "
4188                    "due to resource shortage\n",
4189                    __FUNCTION__));
4190         return;
4191     }
4192     dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
4193 }
4194 
4195 /* Device console input function */
4196 int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen)
4197 {
4198     dhd_bus_t *bus = dhd->bus;
4199     uint32 addr, val;
4200     int rv;
4201     /* Address could be zero if CONSOLE := 0 in dongle Makefile */
4202     if (bus->console_addr == 0) {
4203         return BCME_UNSUPPORTED;
4204     }
4205 
4206     /* Don't allow input if dongle is in reset */
4207     if (bus->dhd->dongle_reset) {
4208         return BCME_NOTREADY;
4209     }
4210 
4211     /* Zero cbuf_index */
4212     addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf_idx);
4213     val = htol32(0);
4214     if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val,
4215                                    sizeof(val))) < 0) {
4216         goto done;
4217     }
4218 
4219     /* Write message into cbuf */
4220     addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf);
4221     if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) <
4222         0) {
4223         goto done;
4224     }
4225 
4226     /* Write length into vcons_in */
4227     addr = bus->console_addr + OFFSETOF(hnd_cons_t, vcons_in);
4228     val = htol32(msglen);
4229     if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val,
4230                                    sizeof(val))) < 0) {
4231         goto done;
4232     }
4233 
4234     /* generate an interrupt to dongle to indicate that it needs to process cons
4235      * command */
4236     dhdpcie_send_mb_data(bus, H2D_HOST_CONS_INT);
4237 done:
4238     return rv;
4239 } /* dhd_bus_console_in */
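/* A minimal usage sketch of the console handshake implemented above. The
 * call site and the "mpc 0" command string are hypothetical and shown for
 * illustration only; the real caller is the driver's iovar path.
 */
#if 0 /* illustrative only, not compiled */
static void example_send_console_cmd(dhd_pub_t *dhd)
{
    /* dhd_bus_console_in() zeroes cbuf_idx, copies the command into cbuf,
     * writes the length to vcons_in, and rings H2D_HOST_CONS_INT so the
     * dongle processes the command.
     */
    uchar cmd[] = "mpc 0\n";
    (void)dhd_bus_console_in(dhd, cmd, sizeof(cmd) - 1);
}
#endif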
4240 
4241 /**
4242  * Called on frame reception, the frame was received from the dongle on
4243  * interface 'ifidx' and is contained in 'pkt'. Processes rx frame, forwards up
4244  * the layer to netif.
4245  */
4246 void BCMFASTPATH dhd_bus_rx_frame(struct dhd_bus *bus, void *pkt, int ifidx,
4247                                   uint pkt_count)
4248 {
4249     dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, 0);
4250 }
4251 
4252 void dhdpcie_setbar1win(dhd_bus_t *bus, uint32 addr)
4253 {
4254     dhdpcie_os_setbar1win(bus, addr);
4255 }
4256 
4257 /** 'offset' is a backplane address */
4258 void dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data)
4259 {
4260     if (bus->is_linkdown) {
4261         DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4262         return;
4263     } else {
4264         dhdpcie_os_wtcm8(bus, offset, data);
4265     }
4266 }
4267 
4268 uint8 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset)
4269 {
4270     volatile uint8 data;
4271     if (bus->is_linkdown) {
4272         DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4273         data = (uint8)-1;
4274     } else {
4275         data = dhdpcie_os_rtcm8(bus, offset);
4276     }
4277     return data;
4278 }
4279 
4280 void dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data)
4281 {
4282     if (bus->is_linkdown) {
4283         DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4284         return;
4285     } else {
4286         dhdpcie_os_wtcm32(bus, offset, data);
4287     }
4288 }
4289 void dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data)
4290 {
4291     if (bus->is_linkdown) {
4292         DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4293         return;
4294     } else {
4295         dhdpcie_os_wtcm16(bus, offset, data);
4296     }
4297 }
4298 #ifdef DHD_SUPPORT_64BIT
4299 void dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data)
4300 {
4301     if (bus->is_linkdown) {
4302         DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4303         return;
4304     } else {
4305         dhdpcie_os_wtcm64(bus, offset, data);
4306     }
4307 }
4308 #endif /* DHD_SUPPORT_64BIT */
4309 
4310 uint16 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset)
4311 {
4312     volatile uint16 data;
4313     if (bus->is_linkdown) {
4314         DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4315         data = (uint16)-1;
4316     } else {
4317         data = dhdpcie_os_rtcm16(bus, offset);
4318     }
4319     return data;
4320 }
4321 
4322 uint32 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset)
4323 {
4324     volatile uint32 data;
4325     if (bus->is_linkdown) {
4326         DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4327         data = (uint32)-1;
4328     } else {
4329         data = dhdpcie_os_rtcm32(bus, offset);
4330     }
4331     return data;
4332 }
4333 
4334 #ifdef DHD_SUPPORT_64BIT
4335 uint64 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset)
4336 {
4337     volatile uint64 data;
4338     if (bus->is_linkdown) {
4339         DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4340         data = (uint64)-1;
4341     } else {
4342         data = dhdpcie_os_rtcm64(bus, offset);
4343     }
4344     return data;
4345 }
4346 #endif /* DHD_SUPPORT_64BIT */
4347 
4348 /** A snippet of dongle memory is shared between host and dongle */
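/* As the switch below shows, every shared-memory member is converted to the
 * dongle's little-endian byte order before it is written: 16-bit ring state
 * goes through HTOL16 + dhdpcie_bus_wtcm16(), 32-bit members through HTOL32
 * + dhdpcie_bus_wtcm32(), and 64-bit host addresses through HTOL64 followed
 * by a dhdpcie_bus_membytes() write.
 */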
4349 void dhd_bus_cmn_writeshared(dhd_bus_t *bus, void *data, uint32 len, uint8 type,
4350                              uint16 ringid)
4351 {
4352     uint64 long_data;
4353     ulong addr; /* dongle address */
4354 
4355     DHD_INFO(
4356         ("%s: writing to dongle type %d len %d\n", __FUNCTION__, type, len));
4357 
4358     if (bus->is_linkdown) {
4359         DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
4360         return;
4361     }
4362 
4363     if (MULTIBP_ENAB(bus->sih)) {
4364         dhd_bus_pcie_pwr_req(bus);
4365     }
4366     switch (type) {
4367         case D2H_DMA_SCRATCH_BUF:
4368             addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_dma_scratch_buffer);
4369             long_data = HTOL64(*(uint64 *)data);
4370             dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&long_data, len);
4371             if (dhd_msg_level & DHD_INFO_VAL) {
4372                 prhex(__FUNCTION__, data, len);
4373             }
4374             break;
4375 
4376         case D2H_DMA_SCRATCH_BUF_LEN:
4377             addr =
4378                 DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_dma_scratch_buffer_len);
4379             dhdpcie_bus_wtcm32(bus, addr, (uint32)HTOL32(*(uint32 *)data));
4380             if (dhd_msg_level & DHD_INFO_VAL) {
4381                 prhex(__FUNCTION__, data, len);
4382             }
4383             break;
4384 
4385         case H2D_DMA_INDX_WR_BUF:
4386             long_data = HTOL64(*(uint64 *)data);
4387             addr = DHD_RING_INFO_MEMBER_ADDR(bus, h2d_w_idx_hostaddr);
4388             dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&long_data, len);
4389             if (dhd_msg_level & DHD_INFO_VAL) {
4390                 prhex(__FUNCTION__, data, len);
4391             }
4392             break;
4393 
4394         case H2D_DMA_INDX_RD_BUF:
4395             long_data = HTOL64(*(uint64 *)data);
4396             addr = DHD_RING_INFO_MEMBER_ADDR(bus, h2d_r_idx_hostaddr);
4397             dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&long_data, len);
4398             if (dhd_msg_level & DHD_INFO_VAL) {
4399                 prhex(__FUNCTION__, data, len);
4400             }
4401             break;
4402 
4403         case D2H_DMA_INDX_WR_BUF:
4404             long_data = HTOL64(*(uint64 *)data);
4405             addr = DHD_RING_INFO_MEMBER_ADDR(bus, d2h_w_idx_hostaddr);
4406             dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&long_data, len);
4407             if (dhd_msg_level & DHD_INFO_VAL) {
4408                 prhex(__FUNCTION__, data, len);
4409             }
4410             break;
4411 
4412         case D2H_DMA_INDX_RD_BUF:
4413             long_data = HTOL64(*(uint64 *)data);
4414             addr = DHD_RING_INFO_MEMBER_ADDR(bus, d2h_r_idx_hostaddr);
4415             dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&long_data, len);
4416             if (dhd_msg_level & DHD_INFO_VAL) {
4417                 prhex(__FUNCTION__, data, len);
4418             }
4419             break;
4420 
4421         case H2D_IFRM_INDX_WR_BUF:
4422             long_data = HTOL64(*(uint64 *)data);
4423             addr = DHD_RING_INFO_MEMBER_ADDR(bus, ifrm_w_idx_hostaddr);
4424             dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&long_data, len);
4425             if (dhd_msg_level & DHD_INFO_VAL) {
4426                 prhex(__FUNCTION__, data, len);
4427             }
4428             break;
4429 
4430         case RING_ITEM_LEN:
4431             addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, len_items);
4432             dhdpcie_bus_wtcm16(bus, addr, (uint16)HTOL16(*(uint16 *)data));
4433             break;
4434 
4435         case RING_MAX_ITEMS:
4436             addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, max_item);
4437             dhdpcie_bus_wtcm16(bus, addr, (uint16)HTOL16(*(uint16 *)data));
4438             break;
4439 
4440         case RING_BUF_ADDR:
4441             long_data = HTOL64(*(uint64 *)data);
4442             addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, base_addr);
4443             dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&long_data, len);
4444             if (dhd_msg_level & DHD_INFO_VAL) {
4445                 prhex(__FUNCTION__, data, len);
4446             }
4447             break;
4448 
4449         case RING_WR_UPD:
4450             addr = bus->ring_sh[ringid].ring_state_w;
4451             dhdpcie_bus_wtcm16(bus, addr, (uint16)HTOL16(*(uint16 *)data));
4452             break;
4453 
4454         case RING_RD_UPD:
4455             addr = bus->ring_sh[ringid].ring_state_r;
4456             dhdpcie_bus_wtcm16(bus, addr, (uint16)HTOL16(*(uint16 *)data));
4457             break;
4458 
4459         case D2H_MB_DATA:
4460             addr = bus->d2h_mb_data_ptr_addr;
4461             dhdpcie_bus_wtcm32(bus, addr, (uint32)HTOL32(*(uint32 *)data));
4462             break;
4463 
4464         case H2D_MB_DATA:
4465             addr = bus->h2d_mb_data_ptr_addr;
4466             dhdpcie_bus_wtcm32(bus, addr, (uint32)HTOL32(*(uint32 *)data));
4467             break;
4468 
4469         case HOST_API_VERSION:
4470             addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_cap);
4471             dhdpcie_bus_wtcm32(bus, addr, (uint32)HTOL32(*(uint32 *)data));
4472             break;
4473 
4474         case DNGL_TO_HOST_TRAP_ADDR:
4475             long_data = HTOL64(*(uint64 *)data);
4476             addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_trap_addr);
4477             dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&long_data, len);
4478             DHD_INFO(
4479                 ("Wrote trap addr:0x%x\n", (uint32)HTOL32(*(uint32 *)data)));
4480             break;
4481 
4482         case HOST_SCB_ADDR:
4483             addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_scb_addr);
4484 #ifdef DHD_SUPPORT_64BIT
4485             dhdpcie_bus_wtcm64(bus, addr, (uint64)HTOL64(*(uint64 *)data));
4486 #else  /* !DHD_SUPPORT_64BIT */
4487             dhdpcie_bus_wtcm32(bus, addr, *((uint32 *)data));
4488 #endif /* DHD_SUPPORT_64BIT */
4489             DHD_INFO(("Wrote host_scb_addr:0x%x\n",
4490                       (uint32)HTOL32(*(uint32 *)data)));
4491             break;
4492 
4493         default:
4494             break;
4495     }
4496     if (MULTIBP_ENAB(bus->sih)) {
4497         dhd_bus_pcie_pwr_req_clear(bus);
4498     }
4499 } /* dhd_bus_cmn_writeshared */
4500 
4501 /** A snippet of dongle memory is shared between host and dongle */
4502 void dhd_bus_cmn_readshared(dhd_bus_t *bus, void *data, uint8 type,
4503                             uint16 ringid)
4504 {
4505     ulong addr; /* dongle address */
4506 
4507     if (MULTIBP_ENAB(bus->sih)) {
4508         dhd_bus_pcie_pwr_req(bus);
4509     }
4510     switch (type) {
4511         case RING_WR_UPD:
4512             addr = bus->ring_sh[ringid].ring_state_w;
4513             *(uint16 *)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
4514             break;
4515 
4516         case RING_RD_UPD:
4517             addr = bus->ring_sh[ringid].ring_state_r;
4518             *(uint16 *)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
4519             break;
4520 
4521         case TOTAL_LFRAG_PACKET_CNT:
4522             addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, total_lfrag_pkt_cnt);
4523             *(uint16 *)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
4524             break;
4525 
4526         case H2D_MB_DATA:
4527             addr = bus->h2d_mb_data_ptr_addr;
4528             *(uint32 *)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr));
4529             break;
4530 
4531         case D2H_MB_DATA:
4532             addr = bus->d2h_mb_data_ptr_addr;
4533             *(uint32 *)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr));
4534             break;
4535 
4536         case MAX_HOST_RXBUFS:
4537             addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, max_host_rxbufs);
4538             *(uint16 *)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
4539             break;
4540 
4541         case HOST_SCB_ADDR:
4542             addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_scb_size);
4543             *(uint32 *)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr));
4544             break;
4545 
4546         default:
4547             break;
4548     }
4549     if (MULTIBP_ENAB(bus->sih)) {
4550         dhd_bus_pcie_pwr_req_clear(bus);
4551     }
4552 }
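/*
 * Illustrative sketch (not part of the driver): a host-side caller would
 * fetch the dongle's current write index for one ring like this, assuming
 * 'bus' is an attached bus and 'flowid' a valid ring id (both hypothetical
 * names here):
 *
 *     uint16 w_idx;
 *     dhd_bus_cmn_readshared(bus, &w_idx, RING_WR_UPD, flowid);
 *
 * The helper itself converts the value to host byte order via LTOH16().
 */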
4553 
4554 uint32 dhd_bus_get_sharedflags(dhd_bus_t *bus)
4555 {
4556     return ((pciedev_shared_t *)bus->pcie_sh)->flags;
4557 }
4558 
4559 void dhd_bus_clearcounts(dhd_pub_t *dhdp)
4560 {
4561 }
4562 
4563 /**
4564  * @param params    input buffer, NULL for 'set' operation.
4565  * @param plen      length of 'params' buffer, 0 for 'set' operation.
4566  * @param arg       output buffer
4567  */
4568 int dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name, void *params, int plen,
4569                      void *arg, int len, bool set)
4570 {
4571     dhd_bus_t *bus = dhdp->bus;
4572     const bcm_iovar_t *vi = NULL;
4573     int bcmerror = BCME_UNSUPPORTED;
4574     int val_size;
4575     uint32 actionid;
4576 
4577     DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4578 
4579     ASSERT(name);
4580     ASSERT(len >= 0);
4581     if (!name || len < 0) {
4582         return BCME_BADARG;
4583     }
4584 
4585     /* Get MUST have return space */
4586     ASSERT(set || (arg && len));
4587     if (!(set || (arg && len))) {
4588         return BCME_BADARG;
4589     }
4590 
4591     /* Set does NOT take qualifiers */
4592     ASSERT(!set || (!params && !plen));
4593     if (!(!set || (!params && !plen))) {
4594         return BCME_BADARG;
4595     }
4596 
4597     DHD_INFO(("%s: %s %s, len %d plen %d\n", __FUNCTION__, name,
4598               (set ? "set" : "get"), len, plen));
4599 
4600     /* Look up var locally; if not found pass to host driver */
4601     if ((vi = bcm_iovar_lookup(dhdpcie_iovars, name)) == NULL) {
4602         goto exit;
4603     }
4604 
4605     if (MULTIBP_ENAB(bus->sih)) {
4606         if (vi->flags & DHD_IOVF_PWRREQ_BYPASS) {
4607             DHD_ERROR(("%s: Bypass pwr request\n", __FUNCTION__));
4608         } else {
4609             dhd_bus_pcie_pwr_req(bus);
4610         }
4611     }
4612 
4613     /* set up 'params' pointer in case this is a set command so that
4614      * the convenience int and bool code can be common to set and get
4615      */
4616     if (params == NULL) {
4617         params = arg;
4618         plen = len;
4619     }
4620 
4621     if (vi->type == IOVT_VOID) {
4622         val_size = 0;
4623     } else if (vi->type == IOVT_BUFFER) {
4624         val_size = len;
4625     } else {
4626         /* all other types are integer sized */
4627         val_size = sizeof(int);
4628     }
4629 
4630     actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
4631     bcmerror = dhdpcie_bus_doiovar(bus, vi, actionid, name, params, plen, arg,
4632                                    len, val_size);
4633 
4634 exit:
4635     /* In DEVRESET_QUIESCE/DEVRESET_ON,
4636      * this includes dongle re-attach which initialize pwr_req_ref count to 0
4637      * and causes pwr_req_ref count miss-match in pwr req clear function and
4638      * hang. In this case, bypass pwr req clear.
4639      */
4640     if (bcmerror == BCME_DNGL_DEVRESET) {
4641         bcmerror = BCME_OK;
4642     } else {
4643         if (MULTIBP_ENAB(bus->sih)) {
4644             if (vi && (vi->flags & DHD_IOVF_PWRREQ_BYPASS)) {
4645                 DHD_ERROR(("%s: Bypass pwr request clear\n", __FUNCTION__));
4646             } else {
4647                 dhd_bus_pcie_pwr_req_clear(bus);
4648             }
4649         }
4650     }
4651     return bcmerror;
4652 } /* dhd_bus_iovar_op */
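/*
 * Illustrative sketch (not part of the driver): a 'get' through this entry
 * point, e.g. reading the dongle RAM size, which is served later by the
 * IOV_GVAL(IOV_MEMSIZE) case in dhdpcie_bus_doiovar(). The iovar name
 * "memsize" is assumed to be its registered name in dhdpcie_iovars:
 *
 *     int32 memsize = 0;
 *     int err = dhd_bus_iovar_op(dhdp, "memsize", NULL, 0,
 *                                &memsize, sizeof(memsize), FALSE);
 *
 * Per the contract asserted above, a 'get' must supply a return buffer
 * (arg/len), while a 'set' must not pass the params/plen qualifiers.
 */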
4653 
4654 #ifdef BCM_BUZZZ
4655 #include <bcm_buzzz.h>
4656 
4657 int dhd_buzzz_dump_cntrs(char *p, uint32 *core, uint32 *log,
4658                          const int num_counters)
4659 {
4660     int bytes = 0;
4661     uint32 ctr;
4662     uint32 curr[BCM_BUZZZ_COUNTERS_MAX], prev[BCM_BUZZZ_COUNTERS_MAX];
4663     uint32 delta[BCM_BUZZZ_COUNTERS_MAX];
4664 
4665     /* Compute elapsed counter values per counter event type */
4666     for (ctr = 0U; ctr < num_counters; ctr++) {
4667         prev[ctr] = core[ctr];
4668         curr[ctr] = *log++;
4669         core[ctr] = curr[ctr]; /* saved for next log */
4670 
4671         if (curr[ctr] < prev[ctr]) {
4672             delta[ctr] = curr[ctr] + (~0U - prev[ctr]);
4673         } else {
4674             delta[ctr] = (curr[ctr] - prev[ctr]);
4675         }
4676 
4677         bytes += sprintf(p + bytes, "%12u ", delta[ctr]);
4678     }
4679 
4680     return bytes;
4681 }
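/*
 * Note on the wrap handling above: in 32-bit modular arithmetic the delta
 * is simply 'curr - prev' (unsigned subtraction wraps correctly), and the
 * explicit form 'curr + (~0U - prev)' is one count short across a wrap.
 * Worked example: prev = 0xFFFFFFFF, curr = 1 gives a true delta of 2,
 * while the expression yields 1 + (0xFFFFFFFF - 0xFFFFFFFF) = 1.
 */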
4682 
4683 typedef union cm3_cnts { /* export this in bcm_buzzz.h */
4684     uint32 u32;
4685     uint8 u8[4];
4686     struct {
4687         uint8 cpicnt;
4688         uint8 exccnt;
4689         uint8 sleepcnt;
4690         uint8 lsucnt;
4691     };
4692 } cm3_cnts_t;
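/*
 * Packing example for the union above, assuming a little-endian host:
 * u32 = 0x04030201 decodes as cpicnt = 0x01, exccnt = 0x02,
 * sleepcnt = 0x03 and lsucnt = 0x04 (u8[0] is the lowest-order byte).
 */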
4693 
4694 int dhd_bcm_buzzz_dump_cntrs6(char *p, uint32 *core, uint32 *log)
4695 {
4696     int bytes = 0;
4697 
4698     uint32 cyccnt, instrcnt;
4699     cm3_cnts_t cm3_cnts;
4700     uint8 foldcnt;
4701 
4702     { /* 32bit cyccnt */
4703         uint32 curr, prev, delta;
4704         prev = core[0];
4705         curr = *log++;
4706         core[0] = curr;
4707         if (curr < prev) {
4708             delta = curr + (~0U - prev);
4709         } else {
4710             delta = (curr - prev);
4711         }
4712 
4713         bytes += sprintf(p + bytes, "%12u ", delta);
4714         cyccnt = delta;
4715     }
4716 
4717     { /* Extract the 4 cnts: cpi, exc, sleep and lsu */
4718         int i;
4719         uint8 max8 = ~0;
4720         cm3_cnts_t curr, prev, delta;
4721         prev.u32 = core[1];
4722         curr.u32 = *log++;
4723         core[1] = curr.u32;
4724         for (i = 0; i < 0x4; i++) {
4725             if (curr.u8[i] < prev.u8[i]) {
4726                 delta.u8[i] = curr.u8[i] + (max8 - prev.u8[i]);
4727             } else {
4728                 delta.u8[i] = (curr.u8[i] - prev.u8[i]);
4729             }
4730             bytes += sprintf(p + bytes, "%4u ", delta.u8[i]);
4731         }
4732         cm3_cnts.u32 = delta.u32;
4733     }
4734 
4735     { /* Extract the foldcnt from arg0 */
4736         uint8 curr, prev, delta, max8 = ~0;
4737         bcm_buzzz_arg0_t arg0;
4738         arg0.u32 = *log;
4739         prev = core[0x2];
4740         curr = arg0.klog.cnt;
4741         core[0x2] = curr;
4742         if (curr < prev) {
4743             delta = curr + (max8 - prev);
4744         } else {
4745             delta = (curr - prev);
4746         }
4747         bytes += sprintf(p + bytes, "%4u ", delta);
4748         foldcnt = delta;
4749     }
4750 
4751     instrcnt =
4752         cyccnt -
4753         (cm3_cnts.u8[0] + cm3_cnts.u8[1] + cm3_cnts.u8[0x2] + cm3_cnts.u8[0x3]) +
4754         foldcnt;
4755     if (instrcnt > 0xFFFFFF00) {
4756         bytes += sprintf(p + bytes, "[%10s] ", "~");
4757     } else {
4758         bytes += sprintf(p + bytes, "[%10u] ", instrcnt);
4759     }
4760     return bytes;
4761 }
4762 
4763 int dhd_buzzz_dump_log(char *p, uint32 *core, uint32 *log, bcm_buzzz_t *buzzz)
4764 {
4765     int bytes = 0;
4766     bcm_buzzz_arg0_t arg0;
4767     static uint8 *fmt[] = BCM_BUZZZ_FMT_STRINGS;
4768 
4769     if (buzzz->counters == 0x6) {
4770         bytes += dhd_bcm_buzzz_dump_cntrs6(p, core, log);
4771         log += 0x2; /* 32bit cyccnt + (4 x 8bit) CM3 */
4772     } else {
4773         bytes += dhd_buzzz_dump_cntrs(p, core, log, buzzz->counters);
4774         log += buzzz->counters; /* (N x 32bit) CR4=3, CA7=4 */
4775     }
4776 
4777     /* Dump the logged arguments using the registered formats */
4778     arg0.u32 = *log++;
4779 
4780     switch (arg0.klog.args) {
4781         case 0:
4782             bytes += sprintf(p + bytes, fmt[arg0.klog.id]);
4783             break;
4784         case 1: {
4785             uint32 arg1 = *log++;
4786             bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1);
4787             break;
4788         }
4789         case 0x2: {
4790             uint32 arg1, arg2;
4791             arg1 = *log++;
4792             arg2 = *log++;
4793             bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2);
4794             break;
4795         }
4796         case 0x3: {
4797             uint32 arg1, arg2, arg3;
4798             arg1 = *log++;
4799             arg2 = *log++;
4800             arg3 = *log++;
4801             bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3);
4802             break;
4803         }
4804         case 0x4: {
4805             uint32 arg1, arg2, arg3, arg4;
4806             arg1 = *log++;
4807             arg2 = *log++;
4808             arg3 = *log++;
4809             arg4 = *log++;
4810             bytes +=
4811                 sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3, arg4);
4812             break;
4813         }
4814         default:
4815             printf("%s: Maximum one argument supported\n", __FUNCTION__);
4816             break;
4817     }
4818 
4819     bytes += sprintf(p + bytes, "\n");
4820 
4821     return bytes;
4822 }
4823 
4824 void dhd_buzzz_dump(bcm_buzzz_t *buzzz_p, void *buffer_p, char *p)
4825 {
4826     int i;
4827     uint32 total, part1, part2, log_sz, core[BCM_BUZZZ_COUNTERS_MAX];
4828     void *log;
4829 
4830     for (i = 0; i < BCM_BUZZZ_COUNTERS_MAX; i++) {
4831         core[i] = 0;
4832     }
4833 
4834     log_sz = buzzz_p->log_sz;
4835 
4836     part1 = ((uint32)buzzz_p->cur - (uint32)buzzz_p->log) / log_sz;
4837 
4838     if (buzzz_p->wrap == TRUE) {
4839         part2 = ((uint32)buzzz_p->end - (uint32)buzzz_p->cur) / log_sz;
4840         total = (buzzz_p->buffer_sz - BCM_BUZZZ_LOGENTRY_MAXSZ) / log_sz;
4841     } else {
4842         part2 = 0U;
4843         total = buzzz_p->count;
4844     }
4845 
4846     if (total == 0U) {
4847         printf("%s: bcm_buzzz_dump total<%u> done\n", __FUNCTION__, total);
4848         return;
4849     } else {
4850         printf("%s: bcm_buzzz_dump total<%u> : part2<%u> + part1<%u>\n",
4851                __FUNCTION__, total, part2, part1);
4852     }
4853 
4854     if (part2) { /* with wrap */
4855         log = (void *)((size_t)buffer_p + (buzzz_p->cur - buzzz_p->log));
4856         while (part2--) { /* from cur to end : part2 */
4857             p[0] = '\0';
4858             dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
4859             printf("%s", p);
4860             log = (void *)((size_t)log + buzzz_p->log_sz);
4861         }
4862     }
4863 
4864     log = (void *)buffer_p;
4865     while (part1--) {
4866         p[0] = '\0';
4867         dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
4868         printf("%s", p);
4869         log = (void *)((size_t)log + buzzz_p->log_sz);
4870     }
4871 
4872     printf("%s: bcm_buzzz_dump done.\n", __FUNCTION__);
4873 }
4874 
4875 int dhd_buzzz_dump_dngl(dhd_bus_t *bus)
4876 {
4877     bcm_buzzz_t *buzzz_p = NULL;
4878     void *buffer_p = NULL;
4879     char *page_p = NULL;
4880     pciedev_shared_t *sh;
4881     int ret = 0;
4882 
4883     if (bus->dhd->busstate != DHD_BUS_DATA) {
4884         return BCME_UNSUPPORTED;
4885     }
4886     if ((page_p = (char *)MALLOC(bus->dhd->osh, 0x1000)) == NULL) {
4887         printf("%s: Page memory allocation failure\n", __FUNCTION__);
4888         goto done;
4889     }
4890     if ((buzzz_p = MALLOC(bus->dhd->osh, sizeof(bcm_buzzz_t))) == NULL) {
4891         printf("%s: BCM BUZZZ memory allocation failure\n", __FUNCTION__);
4892         goto done;
4893     }
4894 
4895     ret = dhdpcie_readshared(bus);
4896     if (ret < 0) {
4897         DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__));
4898         goto done;
4899     }
4900 
4901     sh = bus->pcie_sh;
4902 
4903     DHD_INFO(("%s buzzz:%08x\n", __FUNCTION__, sh->buzz_dbg_ptr));
4904 
4905     if (sh->buzz_dbg_ptr != 0U) { /* Fetch and display dongle BUZZZ Trace */
4906         dhdpcie_bus_membytes(bus, FALSE, (ulong)sh->buzz_dbg_ptr,
4907                              (uint8 *)buzzz_p, sizeof(bcm_buzzz_t));
4908         printf("BUZZZ[0x%08x]: log<0x%08x> cur<0x%08x> end<0x%08x> "
4909                "count<%u> status<%u> wrap<%u>\n"
4910                "cpu<0x%02X> counters<%u> group<%u> buffer_sz<%u> log_sz<%u>\n",
4911                (int)sh->buzz_dbg_ptr, (int)buzzz_p->log, (int)buzzz_p->cur,
4912                (int)buzzz_p->end, buzzz_p->count, buzzz_p->status,
4913                buzzz_p->wrap, buzzz_p->cpu_idcode, buzzz_p->counters,
4914                buzzz_p->group, buzzz_p->buffer_sz, buzzz_p->log_sz);
4915         if (buzzz_p->count == 0) {
4916             printf("%s: Empty dongle BUZZZ trace\n\n", __FUNCTION__);
4917             goto done;
4918         }
4919 
4920         /* Allocate memory for trace buffer and format strings */
4921         buffer_p = MALLOC(bus->dhd->osh, buzzz_p->buffer_sz);
4922         if (buffer_p == NULL) {
4923             printf("%s: Buffer memory allocation failure\n", __FUNCTION__);
4924             goto done;
4925         }
4926 
4927         /* Fetch the trace. format strings are exported via bcm_buzzz.h */
4928         dhdpcie_bus_membytes(bus, FALSE, (uint32)buzzz_p->log, /* Trace */
4929                              (uint8 *)buffer_p, buzzz_p->buffer_sz);
4930 
4931         /* Process and display the trace using formatted output */
4932 
4933         {
4934             int ctr;
4935             for (ctr = 0; ctr < buzzz_p->counters; ctr++) {
4936                 printf("<Evt[%02X]> ", buzzz_p->eventid[ctr]);
4937             }
4938             printf("<code execution point>\n");
4939         }
4940 
4941         dhd_buzzz_dump(buzzz_p, buffer_p, page_p);
4942 
4943         printf("%s: ----- End of dongle BCM BUZZZ Trace -----\n\n",
4944                __FUNCTION__);
4945 
4946         MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz);
4947         buffer_p = NULL;
4948     }
4949 
4950 done:
4951 
4952     if (buffer_p) { /* free before buzzz_p, which holds buffer_sz */
4953         MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz);
4954     }
4955     if (buzzz_p) {
4956         MFREE(bus->dhd->osh, buzzz_p, sizeof(bcm_buzzz_t));
4957     }
4958     if (page_p) {
4959         MFREE(bus->dhd->osh, page_p, 0x1000);
4960     }
4961 
4962     return BCME_OK;
4963 }
4964 #endif /* BCM_BUZZZ */
4965 
4966 #define PCIE_GEN2(sih)                                                         \
4967     ((BUSTYPE((sih)->bustype) == PCI_BUS) &&                                   \
4968      ((sih)->buscoretype == PCIE2_CORE_ID))
4969 
4970 #define PCIE_FLR_CAPAB_BIT 28
4971 #define PCIE_FUNCTION_LEVEL_RESET_BIT 15
4972 
4973 /* Change delays only for QT HW; FPGA and silicon use the same delay */
4974 #ifdef BCMQT_HW
4975 #define DHD_FUNCTION_LEVEL_RESET_DELAY 300000u
4976 #define DHD_SSRESET_STATUS_RETRY_DELAY 10000u
4977 #else
4978 #define DHD_FUNCTION_LEVEL_RESET_DELAY 70u /* 70 msec delay */
4979 #define DHD_SSRESET_STATUS_RETRY_DELAY 40u
4980 #endif /* BCMQT_HW */
4981 /*
4982  * Increase the SSReset de-assert wait time to 8 ms, since the reset can
4983  * take longer when a re-scan occurs on 4378B0.
4984  */
4985 #define DHD_SSRESET_STATUS_RETRIES 200u
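/*
 * Units differ between the two delays above as they are used below:
 * DHD_FUNCTION_LEVEL_RESET_DELAY is in msec (the caller multiplies it by
 * 1000 before OSL_DELAY()), whereas DHD_SSRESET_STATUS_RETRY_DELAY is
 * passed to OSL_DELAY() directly and is therefore in usec. The larger QT
 * HW values account for the much slower emulation clocks.
 */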
4986 
4987 static void dhdpcie_enum_reg_init(dhd_bus_t *bus)
4988 {
4989     /* initialize Function control register (clear bit 4) to HW init value */
4990     si_corereg(bus->sih, bus->sih->buscoreidx,
4991                OFFSETOF(sbpcieregs_t, ftn_ctrl.control), ~0,
4992                PCIE_CPLCA_ENABLE | PCIE_DLY_PERST_TO_COE);
4993 
4994     /* clear IntMask */
4995     si_corereg(bus->sih, bus->sih->buscoreidx,
4996                OFFSETOF(sbpcieregs_t, ftn_ctrl.intmask), ~0, 0);
4997     /* clear IntStatus */
4998     si_corereg(bus->sih, bus->sih->buscoreidx,
4999                OFFSETOF(sbpcieregs_t, ftn_ctrl.intstatus), ~0,
5000                si_corereg(bus->sih, bus->sih->buscoreidx,
5001                           OFFSETOF(sbpcieregs_t, ftn_ctrl.intstatus), 0, 0));
5002 
5003     /* clear MSIVector */
5004     si_corereg(bus->sih, bus->sih->buscoreidx,
5005                OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_vector), ~0, 0);
5006     /* clear MSIIntMask */
5007     si_corereg(bus->sih, bus->sih->buscoreidx,
5008                OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intmask), ~0, 0);
5009     /* clear MSIIntStatus */
5010     si_corereg(bus->sih, bus->sih->buscoreidx,
5011                OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intstatus), ~0,
5012                si_corereg(bus->sih, bus->sih->buscoreidx,
5013                           OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intstatus), 0,
5014                           0));
5015 
5016     /* clear PowerIntMask */
5017     si_corereg(bus->sih, bus->sih->buscoreidx,
5018                OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intmask), ~0, 0);
5019     /* clear PowerIntStatus */
5020     si_corereg(bus->sih, bus->sih->buscoreidx,
5021                OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intstatus), ~0,
5022                si_corereg(bus->sih, bus->sih->buscoreidx,
5023                           OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intstatus), 0,
5024                           0));
5025 
5026     /* clear MailboxIntMask */
5027     si_corereg(bus->sih, bus->sih->buscoreidx,
5028                OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intmask), ~0, 0);
5029     /* clear MailboxInt */
5030     si_corereg(bus->sih, bus->sih->buscoreidx,
5031                OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intstatus), ~0,
5032                si_corereg(bus->sih, bus->sih->buscoreidx,
5033                           OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intstatus), 0,
5034                           0));
5035 }
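/*
 * Note: the nested si_corereg() calls above follow a read-then-write-back
 * idiom for clearing write-1-to-clear status registers - the inner call
 * (mask 0) reads the register, and the outer call writes that value back
 * with mask ~0, clearing exactly the bits that were set. A minimal sketch
 * of the pattern, with 'sih', 'idx' and 'regoff' as stand-in names:
 *
 *     uint32 st = si_corereg(sih, idx, regoff, 0, 0);   // pure read
 *     si_corereg(sih, idx, regoff, ~0, st);             // write back: clear
 */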
5036 
5037 int dhd_bus_perform_flr(dhd_bus_t *bus, bool force_fail)
5038 {
5039     uint flr_capab;
5040     uint val;
5041     int retry = 0;
5042 
5043     DHD_ERROR(("******** Perform FLR ********\n"));
5044 
5045     if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) {
5046         if (bus->pcie_mailbox_mask != 0) {
5047             dhdpcie_bus_intr_disable(bus);
5048         }
5049         /* initialize F0 enum registers before FLR for rev66/67 */
5050         dhdpcie_enum_reg_init(bus);
5051     }
5052 
5053     /* Read PCIE_CFG_DEVICE_CAPABILITY bit 28 to check FLR capability */
5054     val =
5055         OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CAPABILITY, sizeof(val));
5056     flr_capab = val & (1 << PCIE_FLR_CAPAB_BIT);
5057     DHD_INFO(("Read Device Capability: reg=0x%x read val=0x%x flr_capab=0x%x\n",
5058               PCIE_CFG_DEVICE_CAPABILITY, val, flr_capab));
5059     if (!flr_capab) {
5060         DHD_ERROR(("Chip does not support FLR\n"));
5061         return BCME_UNSUPPORTED;
5062     }
5063 
5064     /* Save pcie config space */
5065     DHD_INFO(("Save Pcie Config Space\n"));
5066     DHD_PCIE_CONFIG_SAVE(bus);
5067 
5068     /* Set bit 15 of PCIE_CFG_DEVICE_CONTROL */
5069     DHD_INFO(("Set PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of "
5070               "PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
5071               PCIE_FUNCTION_LEVEL_RESET_BIT, PCIE_CFG_DEVICE_CONTROL));
5072     val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val));
5073     DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL,
5074               val));
5075     val = val | (1 << PCIE_FUNCTION_LEVEL_RESET_BIT);
5076     DHD_INFO(("write_config: reg=0x%x write val=0x%x\n",
5077               PCIE_CFG_DEVICE_CONTROL, val));
5078     OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val), val);
5079 
5080     /* wait for DHD_FUNCTION_LEVEL_RESET_DELAY msec */
5081     DHD_INFO(("Delay of %d msec\n", DHD_FUNCTION_LEVEL_RESET_DELAY));
5082     OSL_DELAY(DHD_FUNCTION_LEVEL_RESET_DELAY * 1000u);
5083 
5084     if (force_fail) {
5085         DHD_ERROR(("Set PCIE_SSRESET_DISABLE_BIT(%d) of "
5086                    "PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)\n",
5087                    PCIE_SSRESET_DISABLE_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
5088         val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL,
5089                                   sizeof(val));
5090         DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n",
5091                    PCIE_CFG_SUBSYSTEM_CONTROL, val));
5092         val = val | (1 << PCIE_SSRESET_DISABLE_BIT);
5093         DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n",
5094                    PCIE_CFG_SUBSYSTEM_CONTROL, val));
5095         OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val),
5096                              val);
5097 
5098         val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL,
5099                                   sizeof(val));
5100         DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n",
5101                    PCIE_CFG_SUBSYSTEM_CONTROL, val));
5102     }
5103 
5104     /* Clear bit 15 of PCIE_CFG_DEVICE_CONTROL */
5105     DHD_INFO(("Clear PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of "
5106               "PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
5107               PCIE_FUNCTION_LEVEL_RESET_BIT, PCIE_CFG_DEVICE_CONTROL));
5108     val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val));
5109     DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL,
5110               val));
5111     val = val & ~(1 << PCIE_FUNCTION_LEVEL_RESET_BIT);
5112     DHD_INFO(("write_config: reg=0x%x write val=0x%x\n",
5113               PCIE_CFG_DEVICE_CONTROL, val));
5114     OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val), val);
5115 
5116     /* Wait till bit 13 of PCIE_CFG_SUBSYSTEM_CONTROL is cleared */
5117     DHD_INFO(("Wait till PCIE_SSRESET_STATUS_BIT(%d) of "
5118               "PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)"
5119               "is cleared\n",
5120               PCIE_SSRESET_STATUS_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
5121     do {
5122         val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL,
5123                                   sizeof(val));
5124         DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n",
5125                    PCIE_CFG_SUBSYSTEM_CONTROL, val));
5126         val = val & (1 << PCIE_SSRESET_STATUS_BIT);
5127         OSL_DELAY(DHD_SSRESET_STATUS_RETRY_DELAY);
5128     } while (val && (retry++ < DHD_SSRESET_STATUS_RETRIES));
5129 
5130     if (val) {
5131         DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
5132                    PCIE_CFG_SUBSYSTEM_CONTROL, PCIE_SSRESET_STATUS_BIT));
5133         /* User has to fire the IOVAR again, if force_fail is needed */
5134         if (force_fail) {
5135             bus->flr_force_fail = FALSE;
5136             DHD_ERROR(("%s cleared flr_force_fail flag\n", __FUNCTION__));
5137         }
5138         return BCME_DONGLE_DOWN;
5139     }
5140 
5141     /* Restore pcie config space */
5142     DHD_INFO(("Restore Pcie Config Space\n"));
5143     DHD_PCIE_CONFIG_RESTORE(bus);
5144 
5145     DHD_ERROR(("******** FLR Succedeed ********\n"));
5146 
5147     return BCME_OK;
5148 }
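/*
 * Illustrative usage (not part of the driver): FLR is normally reached
 * through the IOV_DEVRESET handler further below, i.e.
 *
 *     case DHD_BUS_DEVRESET_FLR:
 *         bcmerror = dhd_bus_perform_flr(bus, bus->flr_force_fail);
 *
 * A caller exercising the failure path first sets bus->flr_force_fail via
 * DHD_BUS_DEVRESET_FLR_FORCE_FAIL; the flag is cleared above once the
 * forced failure has been observed, so it must be set again for each try.
 */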
5149 
5150 #ifdef DHD_USE_BP_RESET
5151 #define DHD_BP_RESET_ASPM_DISABLE_DELAY 500u /* usec */
5152 
5153 #define DHD_BP_RESET_STATUS_RETRY_DELAY 40u /* usec */
5154 #define DHD_BP_RESET_STATUS_RETRIES 50u
5155 
5156 #define PCIE_CFG_SPROM_CTRL_SB_RESET_BIT 10
5157 #define PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT 21
5158 int dhd_bus_perform_bp_reset(struct dhd_bus *bus)
5159 {
5160     uint val;
5161     int retry = 0;
5162     uint dar_clk_ctrl_status_reg = DAR_CLK_CTRL(bus->sih->buscorerev);
5163     int ret = BCME_OK;
5164     bool cond;
5165 
5166     DHD_ERROR(("******** Perform BP reset ********\n"));
5167 
5168     /* Disable ASPM */
5169     DHD_INFO(
5170         ("Disable ASPM: Clear bits(1-0) of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
5171          PCIECFGREG_LINK_STATUS_CTRL));
5172     val =
5173         OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val));
5174     DHD_INFO(("read_config: reg=0x%x read val=0x%x\n",
5175               PCIECFGREG_LINK_STATUS_CTRL, val));
5176     val = val & (~PCIE_ASPM_ENAB);
5177     DHD_INFO(("write_config: reg=0x%x write val=0x%x\n",
5178               PCIECFGREG_LINK_STATUS_CTRL, val));
5179     OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val),
5180                          val);
5181 
5182     /* wait DHD_BP_RESET_ASPM_DISABLE_DELAY usec */
5183     DHD_INFO(("Delay of %d usec\n", DHD_BP_RESET_ASPM_DISABLE_DELAY));
5184     OSL_DELAY(DHD_BP_RESET_ASPM_DISABLE_DELAY);
5185 
5186     /* Set bit 10 of PCIECFGREG_SPROM_CTRL */
5187     DHD_INFO(("Set PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of "
5188               "PCIECFGREG_SPROM_CTRL(0x%x)\n",
5189               PCIE_CFG_SPROM_CTRL_SB_RESET_BIT, PCIECFGREG_SPROM_CTRL));
5190     val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val));
5191     DHD_INFO(
5192         ("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
5193     val = val | (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT);
5194     DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_SPROM_CTRL,
5195               val));
5196     OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val), val);
5197 
5198     /* Wait till the backplane reset is asserted, i.e.
5199      * bit 10 of PCIECFGREG_SPROM_CTRL is cleared.
5200      * Only after that is polling bit 21 of the DAR reg (0xAE0) valid;
5201      * otherwise the DAR register reads back its previous, stale value.
5202      */
5203     DHD_INFO(("Wait till PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of "
5204               "PCIECFGREG_SPROM_CTRL(0x%x) is cleared\n",
5205               PCIE_CFG_SPROM_CTRL_SB_RESET_BIT, PCIECFGREG_SPROM_CTRL));
5206     do {
5207         val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val));
5208         DHD_INFO(("read_config: reg=0x%x read val=0x%x\n",
5209                   PCIECFGREG_SPROM_CTRL, val));
5210         cond = val & (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT);
5211         OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY);
5212     } while (cond && (retry++ < DHD_BP_RESET_STATUS_RETRIES));
5213 
5214     if (cond) {
5215         DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
5216                    PCIECFGREG_SPROM_CTRL, PCIE_CFG_SPROM_CTRL_SB_RESET_BIT));
5217         ret = BCME_ERROR;
5218         goto aspm_enab;
5219     }
5220 
5221     /* Wait till bit 21 of dar_clk_ctrl_status_reg is cleared */
5222     DHD_INFO(("Wait till PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT(%d) of "
5223               "dar_clk_ctrl_status_reg(0x%x) is cleared\n",
5224               PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT,
5225               dar_clk_ctrl_status_reg));
5226     do {
5227         val = si_corereg(bus->sih, bus->sih->buscoreidx,
5228                          dar_clk_ctrl_status_reg, 0, 0);
5229         DHD_INFO(("read_dar si_corereg: reg=0x%x read val=0x%x\n",
5230                   dar_clk_ctrl_status_reg, val));
5231         cond = val & (1 << PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT);
5232         OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY);
5233     } while (cond && (retry++ < DHD_BP_RESET_STATUS_RETRIES));
5234 
5235     if (cond) {
5236         DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
5237                    dar_clk_ctrl_status_reg,
5238                    PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT));
5239         ret = BCME_ERROR;
5240     }
5241 
5242 aspm_enab:
5243     /* Enable ASPM */
5244     DHD_INFO(("Enable ASPM: set bit 1 of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
5245               PCIECFGREG_LINK_STATUS_CTRL));
5246     val =
5247         OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val));
5248     DHD_INFO(("read_config: reg=0x%x read val=0x%x\n",
5249               PCIECFGREG_LINK_STATUS_CTRL, val));
5250     val = val | (PCIE_ASPM_L1_ENAB);
5251     DHD_INFO(("write_config: reg=0x%x write val=0x%x\n",
5252               PCIECFGREG_LINK_STATUS_CTRL, val));
5253     OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val),
5254                          val);
5255 
5256     DHD_ERROR(("******** BP reset Succedeed ********\n"));
5257 
5258     return ret;
5259 }
5260 #endif /* DHD_USE_BP_RESET */
5261 
5262 int dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
5263 {
5264     dhd_bus_t *bus = dhdp->bus;
5265     int bcmerror = 0;
5266     unsigned long flags;
5267     unsigned long flags_bus;
5268 #ifdef CONFIG_ARCH_MSM
5269     int retry = POWERUP_MAX_RETRY;
5270 #endif /* CONFIG_ARCH_MSM */
5271 
5272     if (flag == TRUE) { /* Turn off WLAN */
5273         /* Removing Power */
5274         DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__));
5275         DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__));
5276         bus->dhd->up = FALSE;
5277 
5278         /* wait for other contexts to finish -- if required, a 1 s
5279          * OSL_DELAY call can be added here to give other contexts
5280          * a chance to finish
5281          */
5282         dhdpcie_advertise_bus_cleanup(bus->dhd);
5283 
5284         if (bus->dhd->busstate != DHD_BUS_DOWN) {
5285 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5286             atomic_set(&bus->dhd->block_bus, TRUE);
5287             dhd_flush_rx_tx_wq(bus->dhd);
5288 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5289 
5290 #ifdef BCMPCIE_OOB_HOST_WAKE
5291             /* Clean up any pending host wake IRQ */
5292             dhd_bus_oob_intr_set(bus->dhd, FALSE);
5293             dhd_bus_oob_intr_unregister(bus->dhd);
5294 #endif /* BCMPCIE_OOB_HOST_WAKE */
5295             dhd_os_wd_timer(dhdp, 0);
5296             dhd_bus_stop(bus, TRUE);
5297             if (bus->intr) {
5298                 DHD_BUS_LOCK(bus->bus_lock, flags_bus);
5299                 dhdpcie_bus_intr_disable(bus);
5300                 DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
5301                 dhdpcie_free_irq(bus);
5302             }
5303             dhd_deinit_bus_lock(bus);
5304             dhd_deinit_backplane_access_lock(bus);
5305             dhd_bus_release_dongle(bus);
5306             dhdpcie_bus_free_resource(bus);
5307             bcmerror = dhdpcie_bus_disable_device(bus);
5308             if (bcmerror) {
5309                 DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n", __FUNCTION__,
5310                            bcmerror));
5311 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5312                 atomic_set(&bus->dhd->block_bus, FALSE);
5313 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5314             }
5315             /* Clean up protocol data after the Bus Master Enable bit is
5316              * cleared, so that the host can safely unmap DMA and remove the
5317              * allocated buffers from the PKTID MAP. Some application
5318              * processors with a System MMU trigger a kernel panic when they
5319              * detect an attempted access to DMA-unmapped memory from a
5320              * device behind the System MMU. Such a panic can therefore
5321              * occur here, since the dongle may still access DMA-unmapped
5322              * memory after dhd_prot_reset() has been called.
5323              * For this reason, the dhd_prot_reset() and dhd_clear() calls
5324              * must come after the call to dhdpcie_bus_disable_device().
5325              */
5326             dhd_prot_reset(dhdp);
5327             dhd_clear(dhdp);
5328 #ifdef CONFIG_ARCH_MSM
5329             bcmerror = dhdpcie_bus_clock_stop(bus);
5330             if (bcmerror) {
5331                 DHD_ERROR(("%s: host clock stop failed: %d\n", __FUNCTION__,
5332                            bcmerror));
5333 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5334                 atomic_set(&bus->dhd->block_bus, FALSE);
5335 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5336                 goto done;
5337             }
5338 #endif /* CONFIG_ARCH_MSM */
5339             DHD_GENERAL_LOCK(bus->dhd, flags);
5340             DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
5341             bus->dhd->busstate = DHD_BUS_DOWN;
5342             DHD_GENERAL_UNLOCK(bus->dhd, flags);
5343 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5344             atomic_set(&bus->dhd->block_bus, FALSE);
5345 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5346         } else {
5347             if (bus->intr) {
5348                 dhdpcie_free_irq(bus);
5349             }
5350 #ifdef BCMPCIE_OOB_HOST_WAKE
5351             /* Clean up any pending host wake IRQ */
5352             dhd_bus_oob_intr_set(bus->dhd, FALSE);
5353             dhd_bus_oob_intr_unregister(bus->dhd);
5354 #endif /* BCMPCIE_OOB_HOST_WAKE */
5355             dhd_dpc_kill(bus->dhd);
5356             if (!bus->no_bus_init) {
5357                 dhd_bus_release_dongle(bus);
5358                 dhdpcie_bus_free_resource(bus);
5359                 bcmerror = dhdpcie_bus_disable_device(bus);
5360                 if (bcmerror) {
5361                     DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
5362                                __FUNCTION__, bcmerror));
5363                 }
5364 
5365                 /* Clean up protocol data after the Bus Master Enable bit
5366                  * is cleared, so that the host can safely unmap DMA and
5367                  * remove the allocated buffers from the PKTID MAP.
5368                  * Some application processors with a System MMU trigger a
5369                  * kernel panic when they detect an attempted access to
5370                  * DMA-unmapped memory from a device behind the System MMU.
5371                  * Such a panic can therefore occur here, since the dongle
5372                  * may still access DMA-unmapped memory after
5373                  * dhd_prot_reset() has been called.
5374                  * For this reason, the dhd_prot_reset() and dhd_clear()
5375                  * calls must come after the call to
5376                  * dhdpcie_bus_disable_device().
5377                  */
5378                 dhd_prot_reset(dhdp);
5379                 dhd_clear(dhdp);
5380             } else {
5381                 bus->no_bus_init = FALSE;
5382             }
5383 #ifdef CONFIG_ARCH_MSM
5384             bcmerror = dhdpcie_bus_clock_stop(bus);
5385             if (bcmerror) {
5386                 DHD_ERROR(("%s: host clock stop failed: %d\n", __FUNCTION__,
5387                            bcmerror));
5388                 goto done;
5389             }
5390 #endif /* CONFIG_ARCH_MSM */
5391         }
5392         bus->dhd->dongle_reset = TRUE;
5393         DHD_ERROR(("%s:  WLAN OFF Done\n", __FUNCTION__));
5394     } else { /* Turn on WLAN */
5395         if (bus->dhd->busstate == DHD_BUS_DOWN) {
5396             /* Powering On */
5397             DHD_ERROR(("%s: == Power ON ==\n", __FUNCTION__));
5398 #ifdef CONFIG_ARCH_MSM
5399             while (--retry) {
5400                 bcmerror = dhdpcie_bus_clock_start(bus);
5401                 if (!bcmerror) {
5402                     DHD_ERROR(
5403                         ("%s: dhdpcie_bus_clock_start OK\n", __FUNCTION__));
5404                     break;
5405                 } else {
5406                     OSL_SLEEP(0xA);
5407                 }
5408             }
5409             if (bcmerror && !retry) {
5410                 DHD_ERROR(("%s: host pcie clock enable failed: %d\n",
5411                            __FUNCTION__, bcmerror));
5412                 goto done;
5413             }
5414 #if defined(DHD_CONTROL_PCIE_ASPM_WIFI_TURNON)
5415             dhd_bus_aspm_enable_rc_ep(bus, FALSE);
5416 #endif /* DHD_CONTROL_PCIE_ASPM_WIFI_TURNON */
5417 #endif /* CONFIG_ARCH_MSM */
5418             bus->is_linkdown = 0;
5419             bus->cto_triggered = 0;
5420             bcmerror = dhdpcie_bus_enable_device(bus);
5421             if (bcmerror) {
5422                 DHD_ERROR(("%s: host configuration restore failed: %d\n",
5423                            __FUNCTION__, bcmerror));
5424                 goto done;
5425             }
5426 
5427             bcmerror = dhdpcie_bus_alloc_resource(bus);
5428             if (bcmerror) {
5429                 DHD_ERROR(("%s: dhdpcie_bus_resource_alloc failed: %d\n",
5430                            __FUNCTION__, bcmerror));
5431                 goto done;
5432             }
5433 
5434             bcmerror = dhdpcie_bus_dongle_attach(bus);
5435             if (bcmerror) {
5436                 DHD_ERROR(("%s: dhdpcie_bus_dongle_attach failed: %d\n",
5437                            __FUNCTION__, bcmerror));
5438                 goto done;
5439             }
5440 
5441             bcmerror = dhd_bus_request_irq(bus);
5442             if (bcmerror) {
5443                 DHD_ERROR(("%s: dhd_bus_request_irq failed: %d\n", __FUNCTION__,
5444                            bcmerror));
5445                 goto done;
5446             }
5447 
5448             bus->dhd->dongle_reset = FALSE;
5449 
5450 #if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
5451             dhd_irq_set_affinity(bus->dhd, cpumask_of(1));
5452 #endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
5453 
5454             bcmerror = dhd_bus_start(dhdp);
5455             if (bcmerror) {
5456                 DHD_ERROR(("%s: dhd_bus_start: %d\n", __FUNCTION__, bcmerror));
5457                 goto done;
5458             }
5459 
5460             bus->dhd->up = TRUE;
5461             /* Re-enable the watchdog, which was disabled in
5462              * dhdpcie_advertise_bus_cleanup */
5463             if (bus->dhd->dhd_watchdog_ms_backup) {
5464                 DHD_ERROR(
5465                     ("%s: Enabling wdtick after dhd init\n", __FUNCTION__));
5466                 dhd_os_wd_timer(bus->dhd, bus->dhd->dhd_watchdog_ms_backup);
5467             }
5468             DHD_ERROR(("%s: WLAN Power On Done\n", __FUNCTION__));
5469         } else {
5470             DHD_ERROR(("%s: what should we do here\n", __FUNCTION__));
5471             goto done;
5472         }
5473     }
5474 
5475 done:
5476     if (bcmerror) {
5477         DHD_GENERAL_LOCK(bus->dhd, flags);
5478         DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
5479         bus->dhd->busstate = DHD_BUS_DOWN;
5480         DHD_GENERAL_UNLOCK(bus->dhd, flags);
5481     }
5482     return bcmerror;
5483 }
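/*
 * Illustrative usage (not part of the driver): the two power transitions
 * map onto the flag argument, e.g.
 *
 *     dhd_bus_devreset(dhdp, TRUE);   // WLAN off: stop bus, free IRQ/core
 *     dhd_bus_devreset(dhdp, FALSE);  // WLAN on: re-enumerate and restart
 *
 * The IOV_DEVRESET handler below passes the iovar value straight through
 * as this flag for the DHD_BUS_DEVRESET_ON/OFF cases.
 */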
5484 
5485 /* si_backplane_access() manages a shared resource - the BAR0 mapping -
5486  * hence its calls must be serialized. This wrapper provides that
5487  * serialization and shall be used everywhere instead of calling
5488  * si_backplane_access() directly.
5489  *
5490  * The Linux DHD driver calls si_backplane_access() from three contexts:
5491  * tasklet (which may call dhdpcie_sssr_dump()), iovar ("sbreg",
5492  * "membytes", etc.) and procfs (used by the GDB proxy). To avoid race
5493  * conditions, calls to si_backplane_access() must be serialized. The
5494  * presence of a tasklet context implies spinlock-based serialization.
5495  * Hence the Linux implementation of dhd_pcie_backplane_access_[un]lock()
5496  * is spinlock-based.
5497  * Other platforms may add their own implementations of
5498  * dhd_pcie_backplane_access_[un]lock() as needed (e.g. if serialization
5499  * is not needed, the implementation may be empty).
5500  */
5501 static uint serialized_backplane_access(dhd_bus_t *bus, uint addr, uint size,
5502                                         uint *val, bool read)
5503 {
5504     uint ret;
5505     unsigned long flags;
5506     DHD_BACKPLANE_ACCESS_LOCK(bus->backplane_access_lock, flags);
5507     ret = si_backplane_access(bus->sih, addr, size, val, read);
5508     DHD_BACKPLANE_ACCESS_UNLOCK(bus->backplane_access_lock, flags);
5509     return ret;
5510 }
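/*
 * Illustrative sketch (not part of the driver): reading one 32-bit word
 * from the dongle backplane through the serialized wrapper, assuming
 * si_backplane_access() follows the usual BCME_OK-on-success convention:
 *
 *     uint val = 0;
 *     if (serialized_backplane_access(bus, addr, sizeof(val),
 *                                     &val, TRUE) != BCME_OK) {
 *         DHD_ERROR(("backplane read failed\n"));
 *     }
 */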
5511 
5512 static int dhdpcie_get_dma_ring_indices(dhd_pub_t *dhd)
5513 {
5514     int h2d_support, d2h_support;
5515 
5516     d2h_support = dhd->dma_d2h_ring_upd_support ? 1 : 0;
5517     h2d_support = dhd->dma_h2d_ring_upd_support ? 1 : 0;
5518     return (d2h_support | (h2d_support << 1));
5519 }
5520 int dhdpcie_set_dma_ring_indices(dhd_pub_t *dhd, int32 int_val)
5521 {
5522     int bcmerror = 0;
5523     /* Can change it only during initialization/FW download */
5524     if (dhd->busstate == DHD_BUS_DOWN) {
5525         if ((int_val > 0x3) || (int_val < 0)) {
5526             DHD_ERROR(("Bad argument. Possible values: 0, 1, 2 & 3\n"));
5527             bcmerror = BCME_BADARG;
5528         } else {
5529             dhd->dma_d2h_ring_upd_support = (int_val & 1) ? TRUE : FALSE;
5530             dhd->dma_h2d_ring_upd_support = (int_val & 0x2) ? TRUE : FALSE;
5531             dhd->dma_ring_upd_overwrite = TRUE;
5532         }
5533     } else {
5534         DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
5535                    __FUNCTION__));
5536         bcmerror = BCME_NOTDOWN;
5537     }
5538 
5539     return bcmerror;
5540 }
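/*
 * Encoding worked example: the iovar value packs the two DMA-index options
 * into bits 0 and 1. int_val = 3 (0b11) enables both
 * dma_d2h_ring_upd_support (bit 0) and dma_h2d_ring_upd_support (bit 1),
 * int_val = 2 enables only the H2D side, and 0 disables both;
 * dhdpcie_get_dma_ring_indices() returns the same encoding.
 */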
5541 
5542 /**
5543  * IOVAR handler of the DHD bus layer (in this case, the PCIe bus).
5544  *
5545  * @param actionid  e.g. IOV_SVAL(IOV_PCIEREG)
5546  * @param params    input buffer
5547  * @param plen      length in [bytes] of input buffer 'params'
5548  * @param arg       output buffer
5549  * @param len       length in [bytes] of output buffer 'arg'
5550  */
5551 static int dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi,
5552                                uint32 actionid, const char *name, void *params,
5553                                int plen, void *arg, int len, int val_size)
5554 {
5555     int bcmerror = 0;
5556     int32 int_val = 0;
5557     int32 int_val2 = 0;
5558     int32 int_val3 = 0;
5559     bool bool_val = 0;
5560 
5561     DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d "
5562                "val_size %d\n",
5563                __FUNCTION__, actionid, name, params, plen, arg, len, val_size));
5564 
5565     if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) !=
5566         0) {
5567         goto exit;
5568     }
5569 
5570     if (plen >= (int)sizeof(int_val)) {
5571         bcopy(params, &int_val, sizeof(int_val));
5572     }
5573 
5574     if (plen >= (int)sizeof(int_val) * 0x2) {
5575         bcopy((void *)((uintptr)params + sizeof(int_val)), &int_val2,
5576               sizeof(int_val2));
5577     }
5578 
5579     if (plen >= (int)sizeof(int_val) * 0x3) {
5580         bcopy((void *)((uintptr)params + 0x2 * sizeof(int_val)), &int_val3,
5581               sizeof(int_val3));
5582     }
5583 
5584     bool_val = (int_val != 0) ? TRUE : FALSE;
5585 
5586     /* Check if dongle is in reset. If so, only allow DEVRESET iovars */
5587     if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) ||
5588                                     actionid == IOV_GVAL(IOV_DEVRESET))) {
5589         bcmerror = BCME_NOTREADY;
5590         goto exit;
5591     }
5592     switch (actionid) {
5593         case IOV_SVAL(IOV_VARS):
5594             bcmerror = dhdpcie_downloadvars(bus, arg, len);
5595             break;
5596         case IOV_SVAL(IOV_PCIE_LPBK):
5597             bcmerror = dhdpcie_bus_lpback_req(bus, int_val);
5598             break;
5599         case IOV_SVAL(IOV_PCIE_DMAXFER): {
5600             dma_xfer_info_t *dmaxfer = (dma_xfer_info_t *)arg;
5601             if (!dmaxfer) {
5602                 return BCME_BADARG;
5603             }
5604             if (dmaxfer->version != DHD_DMAXFER_VERSION) {
5605                 return BCME_VERSION;
5606             }
5607             if (dmaxfer->length != sizeof(dma_xfer_info_t)) {
5608                 return BCME_BADLEN;
5609             }
5610 
5611             bcmerror = dhdpcie_bus_dmaxfer_req(
5612                 bus, dmaxfer->num_bytes, dmaxfer->src_delay,
5613                 dmaxfer->dest_delay, dmaxfer->type, dmaxfer->core_num,
5614                 dmaxfer->should_wait);
5615             if (dmaxfer->should_wait && bcmerror >= 0) {
5616                 bcmerror = dhdmsgbuf_dmaxfer_status(bus->dhd, dmaxfer);
5617             }
5618             break;
5619         }
5620         case IOV_GVAL(IOV_PCIE_DMAXFER): {
5621             dma_xfer_info_t *dmaxfer = (dma_xfer_info_t *)params;
5622             if (!dmaxfer) {
5623                 return BCME_BADARG;
5624             }
5625             if (dmaxfer->version != DHD_DMAXFER_VERSION) {
5626                 return BCME_VERSION;
5627             }
5628             if (dmaxfer->length != sizeof(dma_xfer_info_t)) {
5629                 return BCME_BADLEN;
5630             }
5631             bcmerror = dhdmsgbuf_dmaxfer_status(bus->dhd, dmaxfer);
5632             break;
5633         }
5634 
5635         case IOV_GVAL(IOV_PCIE_SUSPEND):
5636             int_val = (bus->dhd->busstate == DHD_BUS_SUSPEND) ? 1 : 0;
5637             bcopy(&int_val, arg, val_size);
5638             break;
5639 
5640         case IOV_SVAL(IOV_PCIE_SUSPEND):
5641             if (bool_val) { /* Suspend */
5642                 int ret;
5643                 unsigned long flags;
5644 
5645                 /*
5646                  * If some other context is busy, wait until they are done,
5647                  * before starting suspend
5648                  */
5649                 ret = dhd_os_busbusy_wait_condition(
5650                     bus->dhd, &bus->dhd->dhd_bus_busy_state,
5651                     DHD_BUS_BUSY_IN_DHD_IOVAR);
5652                 if (ret == 0) {
5653                     DHD_ERROR(("%s:Wait Timedout, dhd_bus_busy_state = 0x%x\n",
5654                                __FUNCTION__, bus->dhd->dhd_bus_busy_state));
5655                     return BCME_BUSY;
5656                 }
5657 
5658                 DHD_GENERAL_LOCK(bus->dhd, flags);
5659                 DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd);
5660                 DHD_GENERAL_UNLOCK(bus->dhd, flags);
5661 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5662                 dhdpcie_bus_suspend(bus, TRUE, TRUE);
5663 #else
5664                 dhdpcie_bus_suspend(bus, TRUE);
5665 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5666 
5667                 DHD_GENERAL_LOCK(bus->dhd, flags);
5668                 DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
5669                 dhd_os_busbusy_wake(bus->dhd);
5670                 DHD_GENERAL_UNLOCK(bus->dhd, flags);
5671             } else { /* Resume */
5672                 unsigned long flags;
5673                 DHD_GENERAL_LOCK(bus->dhd, flags);
5674                 DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd);
5675                 DHD_GENERAL_UNLOCK(bus->dhd, flags);
5676 
5677                 dhdpcie_bus_suspend(bus, FALSE);
5678 
5679                 DHD_GENERAL_LOCK(bus->dhd, flags);
5680                 DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd);
5681                 dhd_os_busbusy_wake(bus->dhd);
5682                 DHD_GENERAL_UNLOCK(bus->dhd, flags);
5683             }
5684             break;
5685 
5686         case IOV_GVAL(IOV_MEMSIZE):
5687             int_val = (int32)bus->ramsize;
5688             bcopy(&int_val, arg, val_size);
5689             break;
5690 
5691         /* Debug related. Dumps core registers or one of the dongle memory */
5692         case IOV_GVAL(IOV_DUMP_DONGLE): {
5693             dump_dongle_in_t ddi = *(dump_dongle_in_t *)params;
5694             dump_dongle_out_t *ddo = (dump_dongle_out_t *)arg;
5695             uint32 *p = ddo->val;
5696             const uint max_offset =
5697                 4096 - 1; /* one core contains max 4096/4 registers */
5698 
5699             if (plen < sizeof(ddi) || len < sizeof(ddo)) {
5700                 bcmerror = BCME_BADARG;
5701                 break;
5702             }
5703 
5704             switch (ddi.type) {
5705                 case DUMP_DONGLE_COREREG:
5706                     ddo->n_bytes = 0;
5707 
5708                     if (si_setcoreidx(bus->sih, ddi.index) == NULL) {
5709                         break; // beyond last core: core enumeration ended
5710                     }
5711 
5712                     ddo->address = si_addrspace(bus->sih, CORE_SLAVE_PORT_0,
5713                                                 CORE_BASE_ADDR_0);
5714                     ddo->address +=
5715                         ddi.offset; // BP address at which this dump starts
5716 
5717                     ddo->id = si_coreid(bus->sih);
5718                     ddo->rev = si_corerev(bus->sih);
5719 
5720                     while (ddi.offset < max_offset &&
5721                            sizeof(dump_dongle_out_t) + ddo->n_bytes < (uint)len) {
5722                         *p++ =
5723                             si_corereg(bus->sih, ddi.index, ddi.offset, 0, 0);
5724                         ddi.offset += sizeof(uint32);
5725                         ddo->n_bytes += sizeof(uint32);
5726                     }
5727                     break;
5728                 default:
5729                     // TODO: implement d11 SHM/TPL dumping
5730                     bcmerror = BCME_BADARG;
5731                     break;
5732             }
5733             break;
5734         }
5735 
5736         /* Debug related. Returns a string with dongle capabilities */
5737         case IOV_GVAL(IOV_DNGL_CAPS): {
5738             strncpy(arg, bus->dhd->fw_capabilities,
5739                     MIN(strlen(bus->dhd->fw_capabilities), (size_t)len));
5740             ((char *)arg)[len - 1] = '\0';
5741             break;
5742         }
5743 
5744 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
5745         case IOV_SVAL(IOV_GDB_SERVER):
5746             /* debugger_*() functions may sleep, so cannot hold spinlock */
5747             DHD_PERIM_UNLOCK(bus->dhd);
5748             if (int_val > 0) {
5749                 debugger_init((void *)bus, &bus_ops, int_val,
5750                               SI_ENUM_BASE(bus->sih));
5751             } else {
5752                 debugger_close();
5753             }
5754             DHD_PERIM_LOCK(bus->dhd);
5755             break;
5756 #endif /* DEBUGGER || DHD_DSCOPE */
5757 
5758 #ifdef BCM_BUZZZ
5759         /* Dump dongle side buzzz trace to console */
5760         case IOV_GVAL(IOV_BUZZZ_DUMP):
5761             bcmerror = dhd_buzzz_dump_dngl(bus);
5762             break;
5763 #endif /* BCM_BUZZZ */
5764 
5765         case IOV_SVAL(IOV_SET_DOWNLOAD_STATE):
5766             bcmerror = dhdpcie_bus_download_state(bus, bool_val);
5767             break;
5768 
5769         case IOV_GVAL(IOV_RAMSIZE):
5770             int_val = (int32)bus->ramsize;
5771             bcopy(&int_val, arg, val_size);
5772             break;
5773 
5774         case IOV_SVAL(IOV_RAMSIZE):
5775             bus->ramsize = int_val;
5776             bus->orig_ramsize = int_val;
5777             break;
5778 
5779         case IOV_GVAL(IOV_RAMSTART):
5780             int_val = (int32)bus->dongle_ram_base;
5781             bcopy(&int_val, arg, val_size);
5782             break;
5783 
5784         case IOV_GVAL(IOV_CC_NVMSHADOW): {
5785             struct bcmstrbuf dump_b;
5786 
5787             bcm_binit(&dump_b, arg, len);
5788             bcmerror = dhdpcie_cc_nvmshadow(bus, &dump_b);
5789             break;
5790         }
5791 
5792         case IOV_GVAL(IOV_SLEEP_ALLOWED):
5793             bool_val = bus->sleep_allowed;
5794             bcopy(&bool_val, arg, val_size);
5795             break;
5796 
5797         case IOV_SVAL(IOV_SLEEP_ALLOWED):
5798             bus->sleep_allowed = bool_val;
5799             break;
5800 
5801         case IOV_GVAL(IOV_DONGLEISOLATION):
5802             int_val = bus->dhd->dongle_isolation;
5803             bcopy(&int_val, arg, val_size);
5804             break;
5805 
5806         case IOV_SVAL(IOV_DONGLEISOLATION):
5807             bus->dhd->dongle_isolation = bool_val;
5808             break;
5809 
5810         case IOV_GVAL(IOV_LTRSLEEPON_UNLOOAD):
5811             int_val = bus->ltrsleep_on_unload;
5812             bcopy(&int_val, arg, val_size);
5813             break;
5814 
5815         case IOV_SVAL(IOV_LTRSLEEPON_UNLOOAD):
5816             bus->ltrsleep_on_unload = bool_val;
5817             break;
5818 
5819         case IOV_GVAL(IOV_DUMP_RINGUPD_BLOCK): {
5820             struct bcmstrbuf dump_b;
5821             bcm_binit(&dump_b, arg, len);
5822             bcmerror = dhd_prot_ringupd_dump(bus->dhd, &dump_b);
5823             break;
5824         }
5825         case IOV_GVAL(IOV_DMA_RINGINDICES): {
5826             int_val = dhdpcie_get_dma_ring_indices(bus->dhd);
5827             bcopy(&int_val, arg, sizeof(int_val));
5828             break;
5829         }
5830         case IOV_SVAL(IOV_DMA_RINGINDICES):
5831             bcmerror = dhdpcie_set_dma_ring_indices(bus->dhd, int_val);
5832             break;
5833 
5834         case IOV_GVAL(IOV_METADATA_DBG):
5835             int_val = dhd_prot_metadata_dbg_get(bus->dhd);
5836             bcopy(&int_val, arg, val_size);
5837             break;
5838         case IOV_SVAL(IOV_METADATA_DBG):
5839             dhd_prot_metadata_dbg_set(bus->dhd, (int_val != 0));
5840             break;
5841 
5842         case IOV_GVAL(IOV_RX_METADATALEN):
5843             int_val = dhd_prot_metadatalen_get(bus->dhd, TRUE);
5844             bcopy(&int_val, arg, val_size);
5845             break;
5846 
5847         case IOV_SVAL(IOV_RX_METADATALEN):
5848             if (int_val > 0x40) {
5849                 bcmerror = BCME_BUFTOOLONG;
5850                 break;
5851             }
5852             dhd_prot_metadatalen_set(bus->dhd, int_val, TRUE);
5853             break;
5854 
5855         case IOV_SVAL(IOV_TXP_THRESHOLD):
5856             dhd_prot_txp_threshold(bus->dhd, TRUE, int_val);
5857             break;
5858 
5859         case IOV_GVAL(IOV_TXP_THRESHOLD):
5860             int_val = dhd_prot_txp_threshold(bus->dhd, FALSE, int_val);
5861             bcopy(&int_val, arg, val_size);
5862             break;
5863 
5864         case IOV_SVAL(IOV_DB1_FOR_MB):
5865             if (int_val) {
5866                 bus->db1_for_mb = TRUE;
5867             } else {
5868                 bus->db1_for_mb = FALSE;
5869             }
5870             break;
5871 
5872         case IOV_GVAL(IOV_DB1_FOR_MB):
5873             if (bus->db1_for_mb) {
5874                 int_val = 1;
5875             } else {
5876                 int_val = 0;
5877             }
5878             bcopy(&int_val, arg, val_size);
5879             break;
5880 
5881         case IOV_GVAL(IOV_TX_METADATALEN):
5882             int_val = dhd_prot_metadatalen_get(bus->dhd, FALSE);
5883             bcopy(&int_val, arg, val_size);
5884             break;
5885 
5886         case IOV_SVAL(IOV_TX_METADATALEN):
5887             if (int_val > 0x40) {
5888                 bcmerror = BCME_BUFTOOLONG;
5889                 break;
5890             }
5891             dhd_prot_metadatalen_set(bus->dhd, int_val, FALSE);
5892             break;
5893 
5894         case IOV_SVAL(IOV_DEVRESET):
5895             switch (int_val) {
5896                 case DHD_BUS_DEVRESET_ON:
5897                     bcmerror = dhd_bus_devreset(bus->dhd, (uint8)int_val);
5898                     break;
5899                 case DHD_BUS_DEVRESET_OFF:
5900                     bcmerror = dhd_bus_devreset(bus->dhd, (uint8)int_val);
5901                     break;
5902                 case DHD_BUS_DEVRESET_FLR:
5903                     bcmerror = dhd_bus_perform_flr(bus, bus->flr_force_fail);
5904                     break;
5905                 case DHD_BUS_DEVRESET_FLR_FORCE_FAIL:
5906                     bus->flr_force_fail = TRUE;
5907                     break;
5908                 default:
5909                     DHD_ERROR(
5910                         ("%s: invalid argument for devreset\n", __FUNCTION__));
5911                     break;
5912             }
5913             break;
5914         case IOV_SVAL(IOV_FORCE_FW_TRAP):
5915             if (bus->dhd->busstate == DHD_BUS_DATA) {
5916                 dhdpcie_fw_trap(bus);
5917             } else {
5918                 DHD_ERROR(("%s: Bus is NOT up\n", __FUNCTION__));
5919                 bcmerror = BCME_NOTUP;
5920             }
5921             break;
5922         case IOV_GVAL(IOV_FLOW_PRIO_MAP):
5923             int_val = bus->dhd->flow_prio_map_type;
5924             bcopy(&int_val, arg, val_size);
5925             break;
5926 
5927         case IOV_SVAL(IOV_FLOW_PRIO_MAP):
5928             int_val = (int32)dhd_update_flow_prio_map(bus->dhd, (uint8)int_val);
5929             bcopy(&int_val, arg, val_size);
5930             break;
5931 
5932         case IOV_GVAL(IOV_TXBOUND):
5933             int_val = (int32)dhd_txbound;
5934             bcopy(&int_val, arg, val_size);
5935             break;
5936 
5937         case IOV_SVAL(IOV_TXBOUND):
5938             dhd_txbound = (uint)int_val;
5939             break;
5940 
5941         case IOV_SVAL(IOV_H2D_MAILBOXDATA):
5942             dhdpcie_send_mb_data(bus, (uint)int_val);
5943             break;
5944 
5945         case IOV_SVAL(IOV_INFORINGS):
5946             dhd_prot_init_info_rings(bus->dhd);
5947             break;
5948 
5949         case IOV_SVAL(IOV_H2D_PHASE):
5950             if (bus->dhd->busstate != DHD_BUS_DOWN) {
5951                 DHD_ERROR(
5952                     ("%s: Can change only when the bus is down (before FW download)\n",
5953                      __FUNCTION__));
5954                 bcmerror = BCME_NOTDOWN;
5955                 break;
5956             }
5957             if (int_val) {
5958                 bus->dhd->h2d_phase_supported = TRUE;
5959             } else {
5960                 bus->dhd->h2d_phase_supported = FALSE;
5961             }
5962             break;
5963 
5964         case IOV_GVAL(IOV_H2D_PHASE):
5965             int_val = (int32)bus->dhd->h2d_phase_supported;
5966             bcopy(&int_val, arg, val_size);
5967             break;
5968 
5969         case IOV_SVAL(IOV_H2D_ENABLE_TRAP_BADPHASE):
5970             if (bus->dhd->busstate != DHD_BUS_DOWN) {
5971                 DHD_ERROR(
5972                     ("%s: Can change only when the bus is down (before FW download)\n",
5973                      __FUNCTION__));
5974                 bcmerror = BCME_NOTDOWN;
5975                 break;
5976             }
5977             if (int_val) {
5978                 bus->dhd->force_dongletrap_on_bad_h2d_phase = TRUE;
5979             } else {
5980                 bus->dhd->force_dongletrap_on_bad_h2d_phase = FALSE;
5981             }
5982             break;
5983 
5984         case IOV_GVAL(IOV_H2D_ENABLE_TRAP_BADPHASE):
5985             int_val = (int32)bus->dhd->force_dongletrap_on_bad_h2d_phase;
5986             bcopy(&int_val, arg, val_size);
5987             break;
5988 
5989         case IOV_SVAL(IOV_H2D_TXPOST_MAX_ITEM):
5990             if (bus->dhd->busstate != DHD_BUS_DOWN) {
5991                 DHD_ERROR(
5992                     ("%s: Can change only when the bus is down (before FW download)\n",
5993                      __FUNCTION__));
5994                 bcmerror = BCME_NOTDOWN;
5995                 break;
5996             }
5997             dhd_prot_set_h2d_max_txpost(bus->dhd, (uint16)int_val);
5998             break;
5999 
6000         case IOV_GVAL(IOV_H2D_TXPOST_MAX_ITEM):
6001             int_val = dhd_prot_get_h2d_max_txpost(bus->dhd);
6002             bcopy(&int_val, arg, val_size);
6003             break;
6004 
6005         case IOV_GVAL(IOV_RXBOUND):
6006             int_val = (int32)dhd_rxbound;
6007             bcopy(&int_val, arg, val_size);
6008             break;
6009 
6010         case IOV_SVAL(IOV_RXBOUND):
6011             dhd_rxbound = (uint)int_val;
6012             break;
6013 
6014         case IOV_GVAL(IOV_TRAPDATA): {
6015             struct bcmstrbuf dump_b;
6016             bcm_binit(&dump_b, arg, len);
6017             bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, FALSE);
6018             break;
6019         }
6020 
6021         case IOV_GVAL(IOV_TRAPDATA_RAW): {
6022             struct bcmstrbuf dump_b;
6023             bcm_binit(&dump_b, arg, len);
6024             bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, TRUE);
6025             break;
6026         }
6027         case IOV_SVAL(IOV_HANGREPORT):
6028             bus->dhd->hang_report = bool_val;
6029             DHD_ERROR(("%s: Set hang_report as %d\n", __FUNCTION__,
6030                        bus->dhd->hang_report));
6031             break;
6032 
6033         case IOV_GVAL(IOV_HANGREPORT):
6034             int_val = (int32)bus->dhd->hang_report;
6035             bcopy(&int_val, arg, val_size);
6036             break;
6037 
6038         case IOV_SVAL(IOV_CTO_PREVENTION):
6039             bcmerror = dhdpcie_cto_init(bus, bool_val);
6040             break;
6041 
6042         case IOV_GVAL(IOV_CTO_PREVENTION):
6043             if (bus->sih->buscorerev < 0x13) {
6044                 bcmerror = BCME_UNSUPPORTED;
6045                 break;
6046             }
6047             int_val = (int32)bus->cto_enable;
6048             bcopy(&int_val, arg, val_size);
6049             break;
6050 
6051         case IOV_SVAL(IOV_CTO_THRESHOLD): {
6052             if (bus->sih->buscorerev < 0x13) {
6053                 bcmerror = BCME_UNSUPPORTED;
6054                 break;
6055             }
6056             bus->cto_threshold = (uint32)int_val;
6057             break;
6058         }
6059 
6060         case IOV_GVAL(IOV_CTO_THRESHOLD):
6061             if (bus->sih->buscorerev < 0x13) {
6062                 bcmerror = BCME_UNSUPPORTED;
6063                 break;
6064             }
6065             if (bus->cto_threshold) {
6066                 int_val = (int32)bus->cto_threshold;
6067             } else {
6068                 int_val = (int32)PCIE_CTO_TO_THRESH_DEFAULT;
6069             }
6070 
6071             bcopy(&int_val, arg, val_size);
6072             break;
6073 
6074         case IOV_SVAL(IOV_PCIE_WD_RESET):
6075             if (bool_val) {
6076                 /* Legacy chipcommon watchdog reset */
6077                 dhdpcie_cc_watchdog_reset(bus);
6078             }
6079             break;
6080 
6081         case IOV_GVAL(IOV_HWA_ENAB_BMAP):
6082             int_val = bus->hwa_enab_bmap;
6083             bcopy(&int_val, arg, val_size);
6084             break;
6085         case IOV_SVAL(IOV_HWA_ENAB_BMAP):
6086             bus->hwa_enab_bmap = (uint8)int_val;
6087             break;
6088         case IOV_GVAL(IOV_IDMA_ENABLE):
6089             int_val = bus->idma_enabled;
6090             bcopy(&int_val, arg, val_size);
6091             break;
6092         case IOV_SVAL(IOV_IDMA_ENABLE):
6093             bus->idma_enabled = (bool)int_val;
6094             break;
6095         case IOV_GVAL(IOV_IFRM_ENABLE):
6096             int_val = bus->ifrm_enabled;
6097             bcopy(&int_val, arg, val_size);
6098             break;
6099         case IOV_SVAL(IOV_IFRM_ENABLE):
6100             bus->ifrm_enabled = (bool)int_val;
6101             break;
6102         case IOV_GVAL(IOV_CLEAR_RING):
6103             bcopy(&int_val, arg, val_size);
6104             dhd_flow_rings_flush(bus->dhd, 0);
6105             break;
6106         case IOV_GVAL(IOV_DAR_ENABLE):
6107             int_val = bus->dar_enabled;
6108             bcopy(&int_val, arg, val_size);
6109             break;
6110         case IOV_SVAL(IOV_DAR_ENABLE):
6111             bus->dar_enabled = (bool)int_val;
6112             break;
6113         case IOV_GVAL(IOV_HSCBSIZE):
6114             bcmerror = dhd_get_hscb_info(bus->dhd, NULL, (uint32 *)arg);
6115             break;
6116 
6117 #ifdef DHD_HP2P
6118         case IOV_SVAL(IOV_HP2P_ENABLE):
6119             dhd_prot_hp2p_enable(bus->dhd, TRUE, int_val);
6120             break;
6121 
6122         case IOV_GVAL(IOV_HP2P_ENABLE):
6123             int_val = dhd_prot_hp2p_enable(bus->dhd, FALSE, int_val);
6124             bcopy(&int_val, arg, val_size);
6125             break;
6126 
6127         case IOV_SVAL(IOV_HP2P_PKT_THRESHOLD):
6128             dhd_prot_pkt_threshold(bus->dhd, TRUE, int_val);
6129             break;
6130 
6131         case IOV_GVAL(IOV_HP2P_PKT_THRESHOLD):
6132             int_val = dhd_prot_pkt_threshold(bus->dhd, FALSE, int_val);
6133             bcopy(&int_val, arg, val_size);
6134             break;
6135 
6136         case IOV_SVAL(IOV_HP2P_TIME_THRESHOLD):
6137             dhd_prot_time_threshold(bus->dhd, TRUE, int_val);
6138             break;
6139 
6140         case IOV_GVAL(IOV_HP2P_TIME_THRESHOLD):
6141             int_val = dhd_prot_time_threshold(bus->dhd, FALSE, int_val);
6142             bcopy(&int_val, arg, val_size);
6143             break;
6144 
6145         case IOV_SVAL(IOV_HP2P_PKT_EXPIRY):
6146             dhd_prot_pkt_expiry(bus->dhd, TRUE, int_val);
6147             break;
6148 
6149         case IOV_GVAL(IOV_HP2P_PKT_EXPIRY):
6150             int_val = dhd_prot_pkt_expiry(bus->dhd, FALSE, int_val);
6151             bcopy(&int_val, arg, val_size);
6152             break;
6153         case IOV_SVAL(IOV_HP2P_TXCPL_MAXITEMS):
6154             if (bus->dhd->busstate != DHD_BUS_DOWN) {
6155                 return BCME_NOTDOWN;
6156             }
6157             dhd_bus_set_hp2p_ring_max_size(bus, TRUE, int_val);
6158             break;
6159 
6160         case IOV_GVAL(IOV_HP2P_TXCPL_MAXITEMS):
6161             int_val = dhd_bus_get_hp2p_ring_max_size(bus, TRUE);
6162             bcopy(&int_val, arg, val_size);
6163             break;
6164         case IOV_SVAL(IOV_HP2P_RXCPL_MAXITEMS):
6165             if (bus->dhd->busstate != DHD_BUS_DOWN) {
6166                 return BCME_NOTDOWN;
6167             }
6168             dhd_bus_set_hp2p_ring_max_size(bus, FALSE, int_val);
6169             break;
6170 
6171         case IOV_GVAL(IOV_HP2P_RXCPL_MAXITEMS):
6172             int_val = dhd_bus_get_hp2p_ring_max_size(bus, FALSE);
6173             bcopy(&int_val, arg, val_size);
6174             break;
6175 #endif /* DHD_HP2P */
6176         case IOV_SVAL(IOV_EXTDTXS_IN_TXCPL):
6177             if (bus->dhd->busstate != DHD_BUS_DOWN) {
6178                 return BCME_NOTDOWN;
6179             }
6180             if (int_val) {
6181                 bus->dhd->extdtxs_in_txcpl = TRUE;
6182             } else {
6183                 bus->dhd->extdtxs_in_txcpl = FALSE;
6184             }
6185             break;
6186 
6187         case IOV_GVAL(IOV_EXTDTXS_IN_TXCPL):
6188             int_val = bus->dhd->extdtxs_in_txcpl;
6189             bcopy(&int_val, arg, val_size);
6190             break;
6191 
6192         case IOV_SVAL(IOV_HOSTRDY_AFTER_INIT):
6193             if (bus->dhd->busstate != DHD_BUS_DOWN) {
6194                 return BCME_NOTDOWN;
6195             }
6196             if (int_val) {
6197                 bus->dhd->hostrdy_after_init = TRUE;
6198             } else {
6199                 bus->dhd->hostrdy_after_init = FALSE;
6200             }
6201             break;
6202 
6203         case IOV_GVAL(IOV_HOSTRDY_AFTER_INIT):
6204             int_val = bus->dhd->hostrdy_after_init;
6205             bcopy(&int_val, arg, val_size);
6206             break;
6207 
6208         default:
6209             bcmerror = BCME_UNSUPPORTED;
6210             break;
6211     }
6212 
6213 exit:
6214     return bcmerror;
6215 } /* dhdpcie_bus_doiovar */
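
/*
 * Editor's illustrative sketch (compiled out, not driver code): every
 * IOV_GVAL case above loads its result into int_val and bcopy()s it to
 * the caller's buffer, while every IOV_SVAL case consumes the int_val the
 * dispatcher decoded from that buffer. A minimal stand-alone version of
 * that get/set contract, with hypothetical names (needs <string.h>):
 */
#if 0
static int example_iov_get(int current_value, void *arg, unsigned int val_size)
{
    int int_val = current_value;
    memcpy(arg, &int_val, val_size);   /* mirrors bcopy(&int_val, arg, ...) */
    return 0;                          /* BCME_OK */
}

static int example_iov_set(int int_val, int *stored_value)
{
    *stored_value = int_val;           /* mirrors e.g. bus->ramsize = int_val */
    return 0;
}
#endif /* illustrative sketch */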
6216 
6217 /** Transfers bytes from host to dongle using pio mode */
6218 static int dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 len)
6219 {
6220     if (bus->dhd == NULL) {
6221         DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
6222         return 0;
6223     }
6224     if (bus->dhd->prot == NULL) {
6225         DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
6226         return 0;
6227     }
6228     if (bus->dhd->busstate != DHD_BUS_DATA) {
6229         DHD_ERROR(
6230             ("%s: not in a ready state for LPBK\n", __FUNCTION__));
6231         return 0;
6232     }
6233     dhdmsgbuf_lpbk_req(bus->dhd, len);
6234     return 0;
6235 }
6236 
6237 void dhd_bus_dump_dar_registers(struct dhd_bus *bus)
6238 {
6239     uint32 dar_clk_ctrl_val, dar_pwr_ctrl_val, dar_intstat_val, dar_errlog_val,
6240         dar_erraddr_val, dar_pcie_mbint_val;
6241     uint32 dar_clk_ctrl_reg, dar_pwr_ctrl_reg, dar_intstat_reg, dar_errlog_reg,
6242         dar_erraddr_reg, dar_pcie_mbint_reg;
6243 
6244     if (bus->is_linkdown && !bus->cto_triggered) {
6245         DHD_ERROR(("%s: link is down\n", __FUNCTION__));
6246         return;
6247     }
6248 
6249     dar_clk_ctrl_reg = (uint32)DAR_CLK_CTRL(bus->sih->buscorerev);
6250     dar_pwr_ctrl_reg = (uint32)DAR_PCIE_PWR_CTRL(bus->sih->buscorerev);
6251     dar_intstat_reg = (uint32)DAR_INTSTAT(bus->sih->buscorerev);
6252     dar_errlog_reg = (uint32)DAR_ERRLOG(bus->sih->buscorerev);
6253     dar_erraddr_reg = (uint32)DAR_ERRADDR(bus->sih->buscorerev);
6254     dar_pcie_mbint_reg = (uint32)DAR_PCIMailBoxInt(bus->sih->buscorerev);
6255 
6256     if (bus->sih->buscorerev < 0x18) {
6257         DHD_ERROR(("%s: DAR not supported for corerev(%d) < 24\n", __FUNCTION__,
6258                    bus->sih->buscorerev));
6259         return;
6260     }
6261 
6262     dar_clk_ctrl_val =
6263         si_corereg(bus->sih, bus->sih->buscoreidx, dar_clk_ctrl_reg, 0, 0);
6264     dar_pwr_ctrl_val =
6265         si_corereg(bus->sih, bus->sih->buscoreidx, dar_pwr_ctrl_reg, 0, 0);
6266     dar_intstat_val =
6267         si_corereg(bus->sih, bus->sih->buscoreidx, dar_intstat_reg, 0, 0);
6268     dar_errlog_val =
6269         si_corereg(bus->sih, bus->sih->buscoreidx, dar_errlog_reg, 0, 0);
6270     dar_erraddr_val =
6271         si_corereg(bus->sih, bus->sih->buscoreidx, dar_erraddr_reg, 0, 0);
6272     dar_pcie_mbint_val =
6273         si_corereg(bus->sih, bus->sih->buscoreidx, dar_pcie_mbint_reg, 0, 0);
6274 
6275     DHD_ERROR(("%s: dar_clk_ctrl(0x%x:0x%x) dar_pwr_ctrl(0x%x:0x%x) "
6276                "dar_intstat(0x%x:0x%x)\n",
6277                __FUNCTION__, dar_clk_ctrl_reg, dar_clk_ctrl_val,
6278                dar_pwr_ctrl_reg, dar_pwr_ctrl_val, dar_intstat_reg,
6279                dar_intstat_val));
6280 
6281     DHD_ERROR(("%s: dar_errlog(0x%x:0x%x) dar_erraddr(0x%x:0x%x) "
6282                "dar_pcie_mbint(0x%x:0x%x)\n",
6283                __FUNCTION__, dar_errlog_reg, dar_errlog_val, dar_erraddr_reg,
6284                dar_erraddr_val, dar_pcie_mbint_reg, dar_pcie_mbint_val));
6285 }
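
/*
 * Editor's illustrative sketch (compiled out, not driver code): the dump
 * above resolves each DAR register offset through a per-buscorerev macro
 * (DAR_CLK_CTRL() etc.) and then reads it with si_corereg(..., 0, 0),
 * where a zero mask means "read only, no write". A stand-alone model of
 * that revision-keyed lookup, with hypothetical offset values:
 */
#if 0
static unsigned int example_dar_offset(unsigned int buscorerev)
{
    /* hypothetical offsets; the real values live in the DAR_* macros */
    return (buscorerev >= 64) ? 0xa08u : 0xae0u;
}
#endif /* illustrative sketch */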
6286 
6287 /* Ring DoorBell1 to indicate Hostready i.e. D3 Exit */
6288 void dhd_bus_hostready(struct dhd_bus *bus)
6289 {
6290     if (!bus->dhd->d2h_hostrdy_supported) {
6291         return;
6292     }
6293 
6294     if (bus->is_linkdown) {
6295         DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
6296         return;
6297     }
6298 
6299     DHD_ERROR(("%s : Read PCICMD Reg: 0x%08X\n", __FUNCTION__,
6300                dhd_pcie_config_read(bus->osh, PCI_CFG_CMD, sizeof(uint32))));
6301 
6302     if (DAR_PWRREQ(bus)) {
6303         dhd_bus_pcie_pwr_req(bus);
6304     }
6305 
6306     dhd_bus_dump_dar_registers(bus);
6307 
6308     si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db1_addr_get(bus), ~0,
6309                0x12345678);
6310     bus->hostready_count++;
6311     DHD_ERROR(("%s: Ring Hostready:%d\n", __FUNCTION__, bus->hostready_count));
6312 }
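
/*
 * Editor's illustrative sketch (compiled out): "hostready" is simply a
 * doorbell -- a single 32-bit write of a magic value (0x12345678 above)
 * to the DB1 register obtained from dhd_bus_db1_addr_get(), which raises
 * an interrupt on the dongle. Generic form, with a hypothetical mapped
 * register pointer:
 */
#if 0
static void example_ring_doorbell1(volatile unsigned int *db1_reg)
{
    *db1_reg = 0x12345678u;   /* same magic value the driver writes above */
}
#endif /* illustrative sketch */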
6313 
6314 /* Clear INTSTATUS */
6315 void dhdpcie_bus_clear_intstatus(struct dhd_bus *bus)
6316 {
6317     uint32 intstatus = 0;
6318     if ((bus->sih->buscorerev == 0x6) || (bus->sih->buscorerev == 0x4) ||
6319         (bus->sih->buscorerev == 0x2)) {
6320         intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 0x4);
6321         dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 0x4, intstatus);
6322     } else {
6323         /* this is a PCIE core register..not a config register... */
6324         intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
6325                                bus->pcie_mailbox_int, 0, 0);
6326         si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int,
6327                    bus->def_intmask, intstatus);
6328     }
6329 }
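
/*
 * Editor's illustrative sketch (compiled out): on the non-legacy path the
 * function above reads the mailbox interrupt status from the PCIe core
 * and writes the pending bits back (masked with def_intmask) -- the usual
 * write-1-to-clear (W1C) idiom. Stand-alone form:
 */
#if 0
static void example_clear_w1c_status(volatile unsigned int *intstatus_reg,
                                     unsigned int intmask)
{
    unsigned int pending = *intstatus_reg;   /* read the pending bits      */
    *intstatus_reg = pending & intmask;      /* writing 1s clears each bit */
}
#endif /* illustrative sketch */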
6330 
6331 int
6332 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
6333 dhdpcie_bus_suspend(struct dhd_bus *bus, bool state, bool byint)
6334 #else
6335 dhdpcie_bus_suspend(struct dhd_bus *bus, bool state)
6336 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
6337 {
6338     int timeleft;
6339     int rc = 0;
6340     unsigned long flags, flags_bus;
6341 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
6342     int d3_read_retry = 0;
6343     uint32 d2h_mb_data = 0;
6344     uint32 zero = 0;
6345 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
6346 
6347     printf("%s: state=%d\n", __FUNCTION__, state);
6348     if (bus->dhd == NULL) {
6349         DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
6350         return BCME_ERROR;
6351     }
6352     if (bus->dhd->prot == NULL) {
6353         DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
6354         return BCME_ERROR;
6355     }
6356 
6357     if (dhd_query_bus_erros(bus->dhd)) {
6358         return BCME_ERROR;
6359     }
6360 
6361     DHD_GENERAL_LOCK(bus->dhd, flags);
6362     if (!(bus->dhd->busstate == DHD_BUS_DATA ||
6363           bus->dhd->busstate == DHD_BUS_SUSPEND)) {
6364         DHD_ERROR(("%s: not in a ready state\n", __FUNCTION__));
6365         DHD_GENERAL_UNLOCK(bus->dhd, flags);
6366         return BCME_ERROR;
6367     }
6368     DHD_GENERAL_UNLOCK(bus->dhd, flags);
6369     if (bus->dhd->dongle_reset) {
6370         DHD_ERROR(("Dongle is in reset state.\n"));
6371         return -EIO;
6372     }
6373 
6374     /* Check whether we are already in the requested state.
6375      * state=TRUE means Suspend
6376      * state=FALSE means Resume
6377      */
6378     if (state == TRUE && bus->dhd->busstate == DHD_BUS_SUSPEND) {
6379         DHD_ERROR(("Bus is already in SUSPEND state.\n"));
6380         return BCME_OK;
6381     } else if (state == FALSE && bus->dhd->busstate == DHD_BUS_DATA) {
6382         DHD_ERROR(("Bus is already in RESUME state.\n"));
6383         return BCME_OK;
6384     }
6385 
6386     if (state) {
6387         int idle_retry = 0;
6388         int active;
6389 
6390         if (bus->is_linkdown) {
6391             DHD_ERROR(
6392                 ("%s: PCIe link was down, state=%d\n", __FUNCTION__, state));
6393             return BCME_ERROR;
6394         }
6395 
6396         /* Suspend */
6397         DHD_ERROR(("%s: Entering suspend state\n", __FUNCTION__));
6398 
6399         bus->dhd->dhd_watchdog_ms_backup = dhd_watchdog_ms;
6400         if (bus->dhd->dhd_watchdog_ms_backup) {
6401             DHD_ERROR(("%s: Disabling wdtick before going to suspend\n",
6402                        __FUNCTION__));
6403             dhd_os_wd_timer(bus->dhd, 0);
6404         }
6405 
6406         DHD_GENERAL_LOCK(bus->dhd, flags);
6407         if (DHD_BUS_BUSY_CHECK_IN_TX(bus->dhd)) {
6408             DHD_ERROR(("Tx Request is not ended\n"));
6409             bus->dhd->busstate = DHD_BUS_DATA;
6410             DHD_GENERAL_UNLOCK(bus->dhd, flags);
6411             return -EBUSY;
6412         }
6413 
6414         bus->last_suspend_start_time = OSL_LOCALTIME_NS();
6415 
6416         /* stop all interface network queue. */
6417         dhd_bus_stop_queue(bus);
6418         DHD_GENERAL_UNLOCK(bus->dhd, flags);
6419 
6420 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
6421         if (byint) {
6422             DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
6423             /* Clear wait_for_d3_ack before sending D3_INFORM */
6424             bus->wait_for_d3_ack = 0;
6425             dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
6426 
6427             timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
6428             DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
6429         } else {
6430             /* Clear wait_for_d3_ack before sending D3_INFORM */
6431             bus->wait_for_d3_ack = 0;
6432             dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM | H2D_HOST_ACK_NOINT);
6433             while (!bus->wait_for_d3_ack &&
6434                    d3_read_retry < MAX_D3_ACK_TIMEOUT) {
6435                 dhdpcie_handle_mb_data(bus);
6436                 usleep_range(0x3E8, 0x5DC);
6437                 d3_read_retry++;
6438             }
6439         }
6440 #else
6441         DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
6442         /* Clear wait_for_d3_ack before sending D3_INFORM */
6443         bus->wait_for_d3_ack = 0;
6444         /*
6445          * Send H2D_HOST_D3_INFORM to dongle and set bus->bus_low_power_state
6446          * to DHD_BUS_D3_INFORM_SENT in dhd_prot_ring_write_complete_mbdata
6447          * inside atomic context, so that no more DBs will be
6448          * rung after sending D3_INFORM
6449          */
6450         dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
6451 
6452         /* Wait for D3 ACK for D3_ACK_RESP_TIMEOUT seconds */
6453 
6454         timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
6455 
6456 #ifdef DHD_RECOVER_TIMEOUT
6457         if (bus->wait_for_d3_ack == 0) {
6458             /* If wait_for_d3_ack was not updated because D2H MB was not
6459              * received */
6460             uint32 intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
6461                                           bus->pcie_mailbox_int, 0, 0);
6462             int host_irq_disabled = dhdpcie_irq_disabled(bus);
6463             if ((intstatus) && (intstatus != (uint32)-1) && (timeleft == 0) &&
6464                 (!dhd_query_bus_erros(bus->dhd))) {
6465                 DHD_ERROR(("%s: D3 ACK trying again intstatus=%x"
6466                            " host_irq_disabled=%d\n",
6467                            __FUNCTION__, intstatus, host_irq_disabled));
6468                 dhd_pcie_intr_count_dump(bus->dhd);
6469                 dhd_print_tasklet_status(bus->dhd);
6470                 if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6 &&
6471                     !bus->use_mailbox) {
6472                     dhd_prot_process_ctrlbuf(bus->dhd);
6473                 } else {
6474                     dhdpcie_handle_mb_data(bus);
6475                 }
6476                 timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
6477                 /* Clear Interrupts */
6478                 dhdpcie_bus_clear_intstatus(bus);
6479             }
6480         } /* bus->wait_for_d3_ack was 0 */
6481 #endif /* DHD_RECOVER_TIMEOUT */
6482 
6483         DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
6484 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
6485 
6486         /* To allow threads that got pre-empted to complete.
6487          */
6488         while ((active = dhd_os_check_wakelock_all(bus->dhd)) &&
6489                (idle_retry < MAX_WKLK_IDLE_CHECK)) {
6490             OSL_SLEEP(1);
6491             idle_retry++;
6492         }
6493 
6494         if (bus->wait_for_d3_ack) {
6495             DHD_ERROR(("%s: Got D3 Ack \n", __FUNCTION__));
6496             /* Got D3 Ack. Suspend the bus */
6497             if (active) {
6498                 DHD_ERROR(("%s(): Suspend failed because of wakelock; "
6499                            "restoring Dongle to D0\n",
6500                            __FUNCTION__));
6501 
6502                 if (bus->dhd->dhd_watchdog_ms_backup) {
6503                     DHD_ERROR(("%s: Enabling wdtick due to wakelock active\n",
6504                                __FUNCTION__));
6505                     dhd_os_wd_timer(bus->dhd, bus->dhd->dhd_watchdog_ms_backup);
6506                 }
6507 
6508                 /*
6509                  * Dongle still thinks that it has to be in D3 state until
6510                  * it gets a D0 Inform, but we are backing off from suspend.
6511                  * Ensure that Dongle is brought back to D0.
6512                  *
6513                  * Bringing back Dongle from D3 Ack state to D0 state is a
6514                  * 2-step process: the D0 Inform is sent as a MB interrupt
6515                  * to bring it out of D3 Ack state, and the hostready
6516                  * doorbell follows. So we have to send both.
6517                  */
6518 
6519                 /* Clear wait_for_d3_ack to send D0_INFORM or host_ready */
6520                 bus->wait_for_d3_ack = 0;
6521 
6522                 DHD_BUS_LOCK(bus->bus_lock, flags_bus);
6523                 bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
6524                 /* Enable back the intmask which was cleared in DPC
6525                  * after getting D3_ACK.
6526                  */
6527                 bus->resume_intr_enable_count++;
6528 
6529                 /* For Linux, MacOS etc. (other than NDIS), enable back the
6530                  * dongle interrupts using intmask and the host interrupts,
6531                  * which were disabled in dhdpcie_bus_isr()->
6532                  * dhd_bus_handle_d3_ack().
6533                  */
6534                 /* Enable back interrupt using Intmask!! */
6535                 dhdpcie_bus_intr_enable(bus);
6536                 /* Enable back interrupt from Host side!! */
6537                 dhdpcie_enable_irq(bus);
6538 
6539                 DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
6540 
6541                 if (bus->use_d0_inform) {
6542                     DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
6543                     dhdpcie_send_mb_data(
6544                         bus, (H2D_HOST_D0_INFORM_IN_USE | H2D_HOST_D0_INFORM));
6545                     DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
6546                 }
6547                 /* ring doorbell 1 (hostready) */
6548                 dhd_bus_hostready(bus);
6549 
6550                 DHD_GENERAL_LOCK(bus->dhd, flags);
6551                 bus->dhd->busstate = DHD_BUS_DATA;
6552                 /* resume all interface network queue. */
6553                 dhd_bus_start_queue(bus);
6554                 DHD_GENERAL_UNLOCK(bus->dhd, flags);
6555                 rc = BCME_ERROR;
6556             } else {
6557                 /* Actual Suspend after no wakelock */
6558                 /* At this time bus->bus_low_power_state will have been
6559                  * set to DHD_BUS_D3_ACK_RECIEVED after receiving D3_ACK
6560                  * in dhd_bus_handle_d3_ack()
6561                  */
6562                 if (bus->use_d0_inform &&
6563                     (bus->api.fw_rev < PCIE_SHARED_VERSION_6)) {
6564                     DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
6565                     dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM_IN_USE));
6566                     DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
6567                 }
6568 
6569 #if defined(BCMPCIE_OOB_HOST_WAKE)
6570                 if (bus->dhd->dhd_induce_error == DHD_INDUCE_DROP_OOB_IRQ) {
6571                     DHD_ERROR(("%s: Inducing DROP OOB IRQ\n", __FUNCTION__));
6572                 } else {
6573                     dhdpcie_oob_intr_set(bus, TRUE);
6574                 }
6575 #endif /* BCMPCIE_OOB_HOST_WAKE */
6576 
6577                 DHD_GENERAL_LOCK(bus->dhd, flags);
6578                 /* The Host cannot process interrupts now so disable the same.
6579                  * No need to disable the dongle INTR using intmask, as we are
6580                  * already calling disabling INTRs from DPC context after
6581                  * getting D3_ACK in dhd_bus_handle_d3_ack.
6582                  * Code may not look symmetric between Suspend and
6583                  * Resume paths but this is done to close down the timing window
6584                  * between DPC and suspend context and bus->bus_low_power_state
6585                  * will be set to DHD_BUS_D3_ACK_RECIEVED in DPC.
6586                  */
6587                 bus->dhd->d3ackcnt_timeout = 0;
6588                 bus->dhd->busstate = DHD_BUS_SUSPEND;
6589                 DHD_GENERAL_UNLOCK(bus->dhd, flags);
6590                 dhdpcie_dump_resource(bus);
6591                 /* Handle Host Suspend */
6592                 rc = dhdpcie_pci_suspend_resume(bus, state);
6593                 if (!rc) {
6594                     bus->last_suspend_end_time = OSL_LOCALTIME_NS();
6595                 }
6596             }
6597         } else if (timeleft == 0) { /* D3 ACK Timeout */
6598 #ifdef DHD_FW_COREDUMP
6599             uint32 cur_memdump_mode = bus->dhd->memdump_enabled;
6600 #endif /* DHD_FW_COREDUMP */
6601 
6602             /* check if the D3 ACK timeout is due to a scheduling issue */
6603             bus->dhd->is_sched_error =
6604                 !dhd_query_bus_erros(bus->dhd) &&
6605                 bus->isr_entry_time > bus->last_d3_inform_time &&
6606                 dhd_bus_query_dpc_sched_errors(bus->dhd);
6607             bus->dhd->d3ack_timeout_occured = TRUE;
6608             /* The D3 Ack has timed out */
6609             bus->dhd->d3ackcnt_timeout++;
6610             DHD_ERROR(
6611                 ("%s: resumed on timeout for D3 ACK%s d3_inform_cnt %d\n",
6612                  __FUNCTION__,
6613                  bus->dhd->is_sched_error ? " due to scheduling problem" : "",
6614                  bus->dhd->d3ackcnt_timeout));
6615 #if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
6616             if (bus->dhd->is_sched_error &&
6617                 cur_memdump_mode == DUMP_MEMFILE_BUGON) {
6618                 /* change g_assert_type to trigger Kernel panic */
6619                 g_assert_type = 0x2;
6620                 /* use ASSERT() to trigger panic */
6621                 ASSERT(0);
6622             }
6623 #endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */
6624             DHD_BUS_LOCK(bus->bus_lock, flags_bus);
6625             bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
6626             DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
6627             DHD_GENERAL_LOCK(bus->dhd, flags);
6628             bus->dhd->busstate = DHD_BUS_DATA;
6629             /* resume all interface network queue. */
6630             dhd_bus_start_queue(bus);
6631             DHD_GENERAL_UNLOCK(bus->dhd, flags);
6632             if (!bus->dhd->dongle_trap_occured && !bus->is_linkdown &&
6633                 !bus->cto_triggered) {
6634                 uint32 intstatus = 0;
6635 
6636                 /* Check if PCIe bus status is valid */
6637                 intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
6638                                        bus->pcie_mailbox_int, 0, 0);
6639                 if (intstatus == (uint32)-1) {
6640                     /* Invalidate PCIe bus status */
6641                     bus->is_linkdown = 1;
6642                 }
6643 
6644                 dhd_bus_dump_console_buffer(bus);
6645                 dhd_prot_debug_info_print(bus->dhd);
6646 #ifdef DHD_FW_COREDUMP
6647                 if (cur_memdump_mode) {
6648                     /* write core dump to file */
6649                     bus->dhd->memdump_type = DUMP_TYPE_D3_ACK_TIMEOUT;
6650                     dhdpcie_mem_dump(bus);
6651                 }
6652 #endif /* DHD_FW_COREDUMP */
6653 
6654                 DHD_ERROR(("%s: Event HANG send up due to D3_ACK timeout\n",
6655                            __FUNCTION__));
6656                 dhd_os_check_hang(bus->dhd, 0, -ETIMEDOUT);
6657             }
6658 #if defined(DHD_ERPOM)
6659             dhd_schedule_reset(bus->dhd);
6660 #endif // endif
6661             rc = -ETIMEDOUT;
6662         }
6663     } else {
6664         /* Resume */
6665         DHD_ERROR(("%s: Entering resume state\n", __FUNCTION__));
6666         bus->last_resume_start_time = OSL_LOCALTIME_NS();
6667 
6668         /**
6669          * PCIE2_BAR0_CORE2_WIN gets reset after D3 cold.
6670          * si_backplane_access() (the function to read/write the backplane)
6671          * updates the window (PCIE2_BAR0_CORE2_WIN) only if the
6672          * window being accessed is different from the window
6673          * pointed to by second_bar0win.
6674          * Since PCIE2_BAR0_CORE2_WIN is already reset because of D3 cold,
6675          * invalidating second_bar0win after resume updates
6676          * PCIE2_BAR0_CORE2_WIN with the right window.
6677          */
6678         si_invalidate_second_bar0win(bus->sih);
6679 #if defined(BCMPCIE_OOB_HOST_WAKE)
6680         DHD_OS_OOB_IRQ_WAKE_UNLOCK(bus->dhd);
6681 #endif /* BCMPCIE_OOB_HOST_WAKE */
6682         rc = dhdpcie_pci_suspend_resume(bus, state);
6683         dhdpcie_dump_resource(bus);
6684 
6685         DHD_BUS_LOCK(bus->bus_lock, flags_bus);
6686         /* set bus_low_power_state to DHD_BUS_NO_LOW_POWER_STATE */
6687         bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
6688         DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
6689 
6690         if (!rc && bus->dhd->busstate == DHD_BUS_SUSPEND) {
6691             if (bus->use_d0_inform) {
6692                 DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
6693                 dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM));
6694                 DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
6695             }
6696             /* ring doorbell 1 (hostready) */
6697             dhd_bus_hostready(bus);
6698         }
6699         DHD_GENERAL_LOCK(bus->dhd, flags);
6700         bus->dhd->busstate = DHD_BUS_DATA;
6701         /* resume all interface network queue. */
6702         dhd_bus_start_queue(bus);
6703 
6704         /* for NDIS also we need to use enable_irq in the future */
6705         bus->resume_intr_enable_count++;
6706 
6707         /* For Linux, MacOS etc. (other than NDIS), enable back the dongle
6708          * interrupts using intmask and the host interrupts which were
6709          * disabled in dhdpcie_bus_isr()->dhd_bus_handle_d3_ack().
6710          */
6711         dhdpcie_bus_intr_enable(
6712             bus);                /* Enable back interrupt using Intmask!! */
6713         dhdpcie_enable_irq(bus); /* Enable back interrupt from Host side!! */
6714 
6715         DHD_GENERAL_UNLOCK(bus->dhd, flags);
6716 
6717         if (bus->dhd->dhd_watchdog_ms_backup) {
6718             DHD_ERROR(("%s: Enabling wdtick after resume\n", __FUNCTION__));
6719             dhd_os_wd_timer(bus->dhd, bus->dhd->dhd_watchdog_ms_backup);
6720         }
6721 
6722         bus->last_resume_end_time = OSL_LOCALTIME_NS();
6723         /* Update TCM rd index for EDL ring */
6724         DHD_EDL_RING_TCM_RD_UPDATE(bus->dhd);
6725     }
6726     return rc;
6727 }
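
/*
 * Editor's illustrative sketch (compiled out, not driver code): the
 * suspend path above is a handshake -- send H2D_HOST_D3_INFORM, then wait
 * for the dongle's D3 ACK (wait_for_d3_ack) with a timeout; on timeout
 * the bus is put back into DHD_BUS_DATA and the suspend fails. A condensed
 * model of that flow, with hypothetical helpers:
 */
#if 0
extern void example_send_mb_data(unsigned int mb_data);   /* hypothetical */
extern int  example_d3_ack_received(void);                /* hypothetical */
extern void example_sleep_ms(unsigned int ms);            /* hypothetical */

static int example_d3_handshake(void)
{
    int retries = 0;

    example_send_mb_data(0x1u);          /* stand-in for H2D_HOST_D3_INFORM */
    while (!example_d3_ack_received() && retries++ < 100) {
        example_sleep_ms(1);             /* bounded wait, like dhd_os_d3ack_wait */
    }
    return example_d3_ack_received() ? 0 : -1;   /* -1: timeout, abort suspend */
}
#endif /* illustrative sketch */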
6728 
6729 uint32 dhdpcie_force_alp(struct dhd_bus *bus, bool enable)
6730 {
6731     ASSERT(bus && bus->sih);
6732     if (enable) {
6733         si_corereg(bus->sih, bus->sih->buscoreidx,
6734                    OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP,
6735                    CCS_FORCEALP);
6736     } else {
6737         si_corereg(bus->sih, bus->sih->buscoreidx,
6738                    OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP, 0);
6739     }
6740     return 0;
6741 }
6742 
6743 /* set pcie l1 entry time: dhd pciereg 0x1004[22:16] */
6744 uint32 dhdpcie_set_l1_entry_time(struct dhd_bus *bus, int l1_entry_time)
6745 {
6746     uint reg_val;
6747 
6748     ASSERT(bus && bus->sih);
6749 
6750     si_corereg(bus->sih, bus->sih->buscoreidx,
6751                OFFSETOF(sbpcieregs_t, configaddr), ~0, 0x1004);
6752     reg_val = si_corereg(bus->sih, bus->sih->buscoreidx,
6753                          OFFSETOF(sbpcieregs_t, configdata), 0, 0);
6754     reg_val = (reg_val & ~(0x7f << 0x10)) | ((l1_entry_time & 0x7f) << 0x10);
6755     si_corereg(bus->sih, bus->sih->buscoreidx,
6756                OFFSETOF(sbpcieregs_t, configdata), ~0, reg_val);
6757 
6758     return 0;
6759 }
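
/*
 * Editor's illustrative sketch (compiled out): the function above is a
 * read-modify-write of the 7-bit L1 entry time field at bits [22:16] of
 * config offset 0x1004 (accessed indirectly via configaddr/configdata).
 * The field update in isolation:
 */
#if 0
static unsigned int example_set_l1_entry_field(unsigned int reg_val,
                                               unsigned int l1_entry_time)
{
    reg_val &= ~(0x7fu << 16);                  /* clear bits [22:16]     */
    reg_val |= (l1_entry_time & 0x7fu) << 16;   /* insert the 7-bit value */
    return reg_val;
}
#endif /* illustrative sketch */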
6760 
6761 static uint32 dhd_apply_d11_war_length(struct dhd_bus *bus, uint32 len,
6762                                        uint32 d11_lpbk)
6763 {
6764     uint16 chipid = si_chipid(bus->sih);
6765     if ((chipid == BCM4375_CHIP_ID || chipid == BCM4362_CHIP_ID ||
6766          chipid == BCM43751_CHIP_ID || chipid == BCM43752_CHIP_ID ||
6767          chipid == BCM4377_CHIP_ID) &&
6768         (d11_lpbk != M2M_DMA_LPBK && d11_lpbk != M2M_NON_DMA_LPBK)) {
6769         len += 0x8;
6770     }
6771     DHD_ERROR(("%s: len %d\n", __FUNCTION__, len));
6772     return len;
6773 }
6774 
6775 /** Transfers bytes from host to dongle and to host again using DMA */
6776 static int dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus, uint32 len,
6777                                    uint32 srcdelay, uint32 destdelay,
6778                                    uint32 d11_lpbk, uint32 core_num,
6779                                    uint32 wait)
6780 {
6781     int ret = 0;
6782 
6783     if (bus->dhd == NULL) {
6784         DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
6785         return BCME_ERROR;
6786     }
6787     if (bus->dhd->prot == NULL) {
6788         DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
6789         return BCME_ERROR;
6790     }
6791     if (bus->dhd->busstate != DHD_BUS_DATA) {
6792         DHD_ERROR(
6793             ("%s: not in a ready state for LPBK\n", __FUNCTION__));
6794         return BCME_ERROR;
6795     }
6796 
6797     if (len < 0x5 || len > 0x3FFFF8) {
6798         DHD_ERROR(("%s: len is too small or too large\n", __FUNCTION__));
6799         return BCME_ERROR;
6800     }
6801 
6802     len = dhd_apply_d11_war_length(bus, len, d11_lpbk);
6803 
6804     bus->dmaxfer_complete = FALSE;
6805     ret = dhdmsgbuf_dmaxfer_req(bus->dhd, len, srcdelay, destdelay, d11_lpbk,
6806                                 core_num);
6807     if (ret != BCME_OK || !wait) {
6808         DHD_INFO(("%s: dmaxfer req returns status %u; wait = %u\n",
6809                   __FUNCTION__, ret, wait));
6810     } else {
6811         ret = dhd_os_dmaxfer_wait(bus->dhd, &bus->dmaxfer_complete);
6812         if (ret < 0) {
6813             ret = BCME_NOTREADY;
6814         }
6815     }
6816 
6817     return ret;
6818 }
6819 
6820 bool dhd_bus_is_multibp_capable(struct dhd_bus *bus)
6821 {
6822     return MULTIBP_CAP(bus->sih);
6823 }
6824 
6825 #define PCIE_REV_FOR_4378A0                                                    \
6826     66 /* dhd_bus_perform_flr_with_quiesce() causes problems */
6827 #define PCIE_REV_FOR_4378B0 68
6828 
6829 static int dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter)
6830 {
6831     int bcmerror = 0;
6832     volatile uint32 *cr4_regs;
6833     bool do_flr;
6834 
6835     if (!bus->sih) {
6836         DHD_ERROR(("%s: NULL sih!!\n", __FUNCTION__));
6837         return BCME_ERROR;
6838     }
6839 
6840     do_flr = ((bus->sih->buscorerev != PCIE_REV_FOR_4378A0) &&
6841               (bus->sih->buscorerev != PCIE_REV_FOR_4378B0));
6842 
6843     if (MULTIBP_ENAB(bus->sih) && !do_flr) {
6844         dhd_bus_pcie_pwr_req(bus);
6845     }
6846 
6847     /* To enter download state, disable ARM and reset SOCRAM.
6848      * To exit download state, simply reset ARM (default is RAM boot).
6849      */
6850     if (enter) {
6851         /* Make sure BAR1 maps to backplane address 0 */
6852         dhdpcie_setbar1win(bus, 0x00000000);
6853         bus->alp_only = TRUE;
6854 
6855         /* some chips (e.g. 43602) have two ARM cores; the CR4 receives the
6856          * firmware. */
6857         cr4_regs = si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
6858         if (cr4_regs == NULL && !(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
6859             !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) &&
6860             !(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
6861             DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
6862             bcmerror = BCME_ERROR;
6863             goto fail;
6864         }
6865         if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
6866             /* Halt ARM & remove reset */
6867             si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
6868             if (!(si_setcore(bus->sih, SYSMEM_CORE_ID, 0))) {
6869                 DHD_ERROR(("%s: Failed to find SYSMEM core!\n", __FUNCTION__));
6870                 bcmerror = BCME_ERROR;
6871                 goto fail;
6872             }
6873             si_core_reset(bus->sih, 0, 0);
6874             /* reset last 4 bytes of RAM; to be used for the shared area */
6875             dhdpcie_init_shared_addr(bus);
6876         } else if (cr4_regs == NULL) { /* no CR4 present on chip */
6877             si_core_disable(bus->sih, 0);
6878 
6879             if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
6880                 DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
6881                 bcmerror = BCME_ERROR;
6882                 goto fail;
6883             }
6884 
6885             si_core_reset(bus->sih, 0, 0);
6886 
6887             /* Clear the top bit of memory */
6888             if (bus->ramsize) {
6889                 uint32 zeros = 0;
6890                 if (dhdpcie_bus_membytes(bus, TRUE, bus->ramsize - 0x4,
6891                                          (uint8 *)&zeros, 0x4) < 0) {
6892                     bcmerror = BCME_ERROR;
6893                     goto fail;
6894                 }
6895             }
6896         } else {
6897             /* For CR4,
6898              * Halt ARM
6899              * Remove ARM reset
6900              * Read RAM base address [0x18_0000]
6901              * [next] Download firmware
6902              * [done at else] Populate the reset vector
6903              * [done at else] Remove ARM halt
6904              */
6905             /* Halt ARM & remove reset */
6906             si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
6907             if (BCM43602_CHIP(bus->sih->chip)) {
6908                 W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 0x5);
6909                 W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
6910                 W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 0x7);
6911                 W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
6912             }
6913             /* reset last 4 bytes of RAM; to be used for the shared area */
6914             dhdpcie_init_shared_addr(bus);
6915         }
6916     } else {
6917         if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
6918             /* write vars */
6919             if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
6920                 DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
6921                 goto fail;
6922             }
6923             /* write random numbers to sysmem for the purpose of
6924              * randomizing heap address space.
6925              */
6926             if ((bcmerror = dhdpcie_wrt_rnd(bus)) != BCME_OK) {
6927                 DHD_ERROR(("%s: Failed to get random seed to write to TCM!\n",
6928                            __FUNCTION__));
6929                 goto fail;
6930             }
6931             /* switch back to arm core again */
6932             if (!(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
6933                 DHD_ERROR(("%s: Failed to find ARM CA7 core!\n", __FUNCTION__));
6934                 bcmerror = BCME_ERROR;
6935                 goto fail;
6936             }
6937             /* write address 0 with reset instruction */
6938             bcmerror =
6939                 dhdpcie_bus_membytes(bus, TRUE, 0, (uint8 *)&bus->resetinstr,
6940                                      sizeof(bus->resetinstr));
6941             /* now remove reset and halt and continue to run CA7 */
6942         } else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
6943             if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
6944                 DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
6945                 bcmerror = BCME_ERROR;
6946                 goto fail;
6947             }
6948 
6949             if (!si_iscoreup(bus->sih)) {
6950                 DHD_ERROR(
6951                     ("%s: SOCRAM core is down after reset?\n", __FUNCTION__));
6952                 bcmerror = BCME_ERROR;
6953                 goto fail;
6954             }
6955 
6956             /* Enable remap before ARM reset but after vars.
6957              * No backplane access in remap mode
6958              */
6959             if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
6960                 !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
6961                 DHD_ERROR(
6962                     ("%s: Can't change back to SDIO core?\n", __FUNCTION__));
6963                 bcmerror = BCME_ERROR;
6964                 goto fail;
6965             }
6966 
6967             if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
6968                 !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
6969                 DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
6970                 bcmerror = BCME_ERROR;
6971                 goto fail;
6972             }
6973         } else {
6974             if (BCM43602_CHIP(bus->sih->chip)) {
6975                 /* Firmware crashes on SOCSRAM access when core is in reset */
6976                 if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
6977                     DHD_ERROR(
6978                         ("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
6979                     bcmerror = BCME_ERROR;
6980                     goto fail;
6981                 }
6982                 si_core_reset(bus->sih, 0, 0);
6983                 si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
6984             }
6985 
6986             /* write vars */
6987             if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
6988                 DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
6989                 goto fail;
6990             }
6991 
6992             /* write a random number to TCM for the purpose of
6993              * randomizing heap address space.
6994              */
6995             if ((bcmerror = dhdpcie_wrt_rnd(bus)) != BCME_OK) {
6996                 DHD_ERROR(("%s: Failed to get random seed to write to TCM!\n",
6997                            __FUNCTION__));
6998                 goto fail;
6999             }
7000 
7001             /* switch back to arm core again */
7002             if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
7003                 DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__));
7004                 bcmerror = BCME_ERROR;
7005                 goto fail;
7006             }
7007 
7008             /* write address 0 with reset instruction */
7009             bcmerror =
7010                 dhdpcie_bus_membytes(bus, TRUE, 0, (uint8 *)&bus->resetinstr,
7011                                      sizeof(bus->resetinstr));
7012             if (bcmerror == BCME_OK) {
7013                 uint32 tmp;
7014                 bcmerror = dhdpcie_bus_membytes(bus, FALSE, 0, (uint8 *)&tmp,
7015                                                 sizeof(tmp));
7016                 if (bcmerror == BCME_OK && tmp != bus->resetinstr) {
7017                     DHD_ERROR(("%s: Failed to write 0x%08x to addr 0\n",
7018                                __FUNCTION__, bus->resetinstr));
7019                     DHD_ERROR(("%s: contents of addr 0 is 0x%08x\n",
7020                                __FUNCTION__, tmp));
7021                     bcmerror = BCME_ERROR;
7022                     goto fail;
7023                 }
7024             }
7025             /* now remove reset and halt and continue to run CR4 */
7026         }
7027 
7028         si_core_reset(bus->sih, 0, 0);
7029 
7030         /* Allow HT Clock now that the ARM is running. */
7031         bus->alp_only = FALSE;
7032         bus->dhd->busstate = DHD_BUS_LOAD;
7033     }
7034 
7035 fail:
7036     /* Always return to PCIE core */
7037     si_setcore(bus->sih, PCIE2_CORE_ID, 0);
7038 
7039     if (MULTIBP_ENAB(bus->sih) && !do_flr) {
7040         dhd_bus_pcie_pwr_req_clear(bus);
7041     }
7042 
7043     return bcmerror;
7044 } /* dhdpcie_bus_download_state */
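
/*
 * Editor's illustrative sketch (compiled out, not driver code): on
 * download exit the code above writes the reset vector (bus->resetinstr)
 * to address 0, reads the word back, and treats a mismatch as a fatal
 * download error. The verify step in stand-alone form, with hypothetical
 * read/write helpers:
 */
#if 0
extern int example_mem_write32(unsigned int addr, unsigned int val);  /* hypothetical */
extern int example_mem_read32(unsigned int addr, unsigned int *val);  /* hypothetical */

static int example_write_verify_reset_vector(unsigned int resetinstr)
{
    unsigned int readback = 0;

    if (example_mem_write32(0, resetinstr) != 0) {
        return -1;
    }
    if (example_mem_read32(0, &readback) != 0 || readback != resetinstr) {
        return -1;   /* mismatch: the download is considered failed */
    }
    return 0;
}
#endif /* illustrative sketch */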
7045 
7046 static int dhdpcie_bus_write_vars(dhd_bus_t *bus)
7047 {
7048     int bcmerror = 0;
7049     uint32 varsize, phys_size;
7050     uint32 varaddr;
7051     uint8 *vbuffer;
7052     uint32 varsizew;
7053 #ifdef DHD_DEBUG
7054     uint8 *nvram_ularray;
7055 #endif /* DHD_DEBUG */
7056 
7057     /* Even if there are no vars to be written, we still need to set the
7058      * ramsize. */
7059     varsize = bus->varsz ? ROUNDUP(bus->varsz, 0x4) : 0;
7060     varaddr = (bus->ramsize - 0x4) - varsize;
7061 
7062     varaddr += bus->dongle_ram_base;
7063 
7064     if (bus->vars) {
7065         vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize);
7066         if (!vbuffer) {
7067             return BCME_NOMEM;
7068         }
7069 
7070         bzero(vbuffer, varsize);
7071         bcopy(bus->vars, vbuffer, bus->varsz);
7072         /* Write the vars list */
7073         bcmerror = dhdpcie_bus_membytes(bus, TRUE, varaddr, vbuffer, varsize);
7074 
7075         /* Implement read back and verify later */
7076 #ifdef DHD_DEBUG
7077         /* Verify NVRAM bytes */
7078         DHD_INFO(
7079             ("%s: Compare NVRAM dl & ul; varsize=%d\n", __FUNCTION__, varsize));
7080         nvram_ularray = (uint8 *)MALLOC(bus->dhd->osh, varsize);
7081         if (!nvram_ularray) {
7082             MFREE(bus->dhd->osh, vbuffer, varsize);
7083             return BCME_NOMEM;
7084         }
7085 
7086         /* Upload image to verify downloaded contents. */
7087         memset(nvram_ularray, 0xaa, varsize);
7088 
7089         /* Read the vars list to temp buffer for comparison */
7090         bcmerror =
7091             dhdpcie_bus_membytes(bus, FALSE, varaddr, nvram_ularray, varsize);
7092         if (bcmerror) {
7093             DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n",
7094                        __FUNCTION__, bcmerror, varsize, varaddr));
7095         }
7096 
7097         /* Compare the org NVRAM with the one read from RAM */
7098         if (memcmp(vbuffer, nvram_ularray, varsize)) {
7099             DHD_ERROR(
7100                 ("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__));
7101         } else {
7102             DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n",
7103                        __FUNCTION__));
7104         }
7105 
7106         MFREE(bus->dhd->osh, nvram_ularray, varsize);
7107 #endif /* DHD_DEBUG */
7108 
7109         MFREE(bus->dhd->osh, vbuffer, varsize);
7110     }
7111 
7112     phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize;
7113 
7114     phys_size += bus->dongle_ram_base;
7115 
7116     /* adjust to the user specified RAM */
7117     DHD_INFO(("%s: Physical memory size: %d, usable memory size: %d\n",
7118               __FUNCTION__, phys_size, bus->ramsize));
7119     DHD_INFO(("%s: Vars are at %d, orig varsize is %d\n", __FUNCTION__, varaddr,
7120               varsize));
7121     varsize = ((phys_size - 0x4) - varaddr);
7122 
7123     /*
7124      * Determine the length token:
7125      * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits.
7126      */
7127     if (bcmerror) {
7128         varsizew = 0;
7129         bus->nvram_csm = varsizew;
7130     } else {
7131         varsizew = varsize / 0x4;
7132         varsizew = (~varsizew << 0x10) | (varsizew & 0x0000FFFF);
7133         bus->nvram_csm = varsizew;
7134         varsizew = htol32(varsizew);
7135     }
7136 
7137     DHD_INFO(("%s: New varsize is %d, length token=0x%08x\n", __FUNCTION__,
7138               varsize, varsizew));
7139 
7140     /* Write the length token to the last word */
7141     bcmerror =
7142         dhdpcie_bus_membytes(bus, TRUE, (phys_size - 0x4), (uint8 *)&varsizew, 0x4);
7143 
7144     return bcmerror;
7145 } /* dhdpcie_bus_write_vars */
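
/*
 * Editor's illustrative sketch (compiled out): the length token written
 * to the last word of RAM packs varsize-in-words into the low 16 bits and
 * its one's complement into the high 16 bits, which lets the dongle
 * validate it. Worked example: varsize = 0x400 bytes -> 0x100 words ->
 * token = 0xfeff0100.
 */
#if 0
static unsigned int example_nvram_length_token(unsigned int varsize_bytes)
{
    unsigned int w = varsize_bytes / 4;      /* bytes -> 32-bit words */
    return (~w << 16) | (w & 0x0000ffffu);   /* ~words << 16 | words  */
}
#endif /* illustrative sketch */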
7146 
7147 int dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len)
7148 {
7149     int bcmerror = BCME_OK;
7150 
7151     DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7152 
7153     /* Basic sanity checks */
7154     if (bus->dhd->up) {
7155         bcmerror = BCME_NOTDOWN;
7156         goto err;
7157     }
7158     if (!len) {
7159         bcmerror = BCME_BUFTOOSHORT;
7160         goto err;
7161     }
7162 
7163     /* Free the old ones and replace with passed variables */
7164     if (bus->vars) {
7165         MFREE(bus->dhd->osh, bus->vars, bus->varsz);
7166     }
7167 
7168     bus->vars = MALLOC(bus->dhd->osh, len);
7169     bus->varsz = bus->vars ? len : 0;
7170     if (bus->vars == NULL) {
7171         bcmerror = BCME_NOMEM;
7172         goto err;
7173     }
7174 
7175     /* Copy the passed variables, which should include the terminating
7176      * double-null */
7177     bcopy(arg, bus->vars, bus->varsz);
7178 
7179 #ifdef DHD_USE_SINGLE_NVRAM_FILE
7180     if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
7181         char *sp = NULL;
7182         char *ep = NULL;
7183         int i;
7184         char tag[0x2][0x8] = {"ccode=", "regrev="};
7185 
7186         /* Find ccode and regrev info */
7187         for (i = 0; i < 0x2; i++) {
7188             sp = strnstr(bus->vars, tag[i], bus->varsz);
7189             if (!sp) {
7190                 DHD_ERROR(("%s: Could not find ccode info from the nvram %s\n",
7191                            __FUNCTION__, bus->nv_path));
7192                 bcmerror = BCME_ERROR;
7193                 goto err;
7194             }
7195             sp = strchr(sp, '=');
7196             ep = strchr(sp, '\0');
7197             /* We assume that the string lengths of both ccode and
7198              * regrev values do not exceed WLC_CNTRY_BUF_SZ
7199              */
7200             if (ep && ((ep - sp) <= WLC_CNTRY_BUF_SZ)) {
7201                 sp++;
7202                 while (*sp != '\0') {
7203                     DHD_INFO(("%s: parse '%s', current sp = '%c'\n",
7204                               __FUNCTION__, tag[i], *sp));
7205                     *sp++ = '0';
7206                 }
7207             } else {
7208                 DHD_ERROR(("%s: Invalid parameter format when parsing for %s\n",
7209                            __FUNCTION__, tag[i]));
7210                 bcmerror = BCME_ERROR;
7211                 goto err;
7212             }
7213         }
7214     }
7215 #endif /* DHD_USE_SINGLE_NVRAM_FILE */
7216 
7217 err:
7218     return bcmerror;
7219 }
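/*
 * The variables image handled above is a packed list of "name=value"
 * strings, each NUL-terminated, with an extra NUL (the terminating
 * double-null) marking the end. A minimal sketch of walking such a buffer
 * (hypothetical helper name, for illustration only):
 */
static void dhd_vars_walk_sketch(const char *vars, uint varsz)
{
    const char *p = vars;

    while (p < vars + varsz && *p != '\0') {
        DHD_INFO(("var: %s\n", p));
        p += strlen(p) + 1; /* advance past "name=value" and its NUL */
    }
}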
7220 
7221 /* loop through the capability list and see if the pcie capability exists */
7222 uint8 dhdpcie_find_pci_capability(osl_t *osh, uint8 req_cap_id)
7223 {
7224     uint8 cap_id;
7225     uint8 cap_ptr = 0;
7226     uint8 byte_val;
7227 
7228     /* check for Header type 0 */
7229     byte_val = read_pci_cfg_byte(PCI_CFG_HDR);
7230     if ((byte_val & 0x7f) != PCI_HEADER_NORMAL) {
7231         DHD_ERROR(("%s : PCI config header not normal.\n", __FUNCTION__));
7232         goto end;
7233     }
7234 
7235     /* check if the capability pointer field exists */
7236     byte_val = read_pci_cfg_byte(PCI_CFG_STAT);
7237     if (!(byte_val & PCI_CAPPTR_PRESENT)) {
7238         DHD_ERROR(("%s : PCI CAP pointer not present.\n", __FUNCTION__));
7239         goto end;
7240     }
7241 
7242     cap_ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR);
7243     /* check if the capability pointer is 0x00 */
7244     if (cap_ptr == 0x00) {
7245         DHD_ERROR(("%s : PCI CAP pointer is 0x00.\n", __FUNCTION__));
7246         goto end;
7247     }
7248 
7249     /* loop through the capability list and see if the pcie capability exists */
7250 
7251     cap_id = read_pci_cfg_byte(cap_ptr);
7252 
7253     while (cap_id != req_cap_id) {
7254         cap_ptr = read_pci_cfg_byte((cap_ptr + 1));
7255         if (cap_ptr == 0x00) {
7256             break;
7257         }
7258         cap_id = read_pci_cfg_byte(cap_ptr);
7259     }
7260 
7261 end:
7262     return cap_ptr;
7263 }
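/*
 * Each capability node in config space is laid out as { cap_id byte,
 * next_ptr byte, ... }, which is why the loop above reads the ID at cap_ptr
 * and the next pointer at cap_ptr + 1 until it matches req_cap_id or hits a
 * 0x00 pointer. Usage sketch, as done by the PM helpers below:
 *
 *     uint8 pm_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);
 *     if (pm_cap) {
 *         pme_csr = OSL_PCI_READ_CONFIG(osh, pm_cap + PME_CSR_OFFSET,
 *                                       sizeof(uint32));
 *     }
 */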
7264 
7265 void dhdpcie_pme_active(osl_t *osh, bool enable)
7266 {
7267     uint8 cap_ptr;
7268     uint32 pme_csr;
7269 
7270     cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);
7271     if (!cap_ptr) {
7272         DHD_ERROR(
7273             ("%s : Power Management Capability not present\n", __FUNCTION__));
7274         return;
7275     }
7276 
7277     pme_csr =
7278         OSL_PCI_READ_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32));
7279     DHD_ERROR(("%s : pme_sts_ctrl 0x%x\n", __FUNCTION__, pme_csr));
7280 
7281     pme_csr |= PME_CSR_PME_STAT;
7282     if (enable) {
7283         pme_csr |= PME_CSR_PME_EN;
7284     } else {
7285         pme_csr &= ~PME_CSR_PME_EN;
7286     }
7287     OSL_PCI_WRITE_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32),
7288                          pme_csr);
7289 }
7290 
7291 bool dhdpcie_pme_cap(osl_t *osh)
7292 {
7293     uint8 cap_ptr;
7294     uint32 pme_cap;
7295 
7296     cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);
7297     if (!cap_ptr) {
7298         DHD_ERROR(
7299             ("%s : Power Management Capability not present\n", __FUNCTION__));
7300         return FALSE;
7301     }
7302 
7303     pme_cap = OSL_PCI_READ_CONFIG(osh, cap_ptr, sizeof(uint32));
7304 
7305     DHD_ERROR(("%s : pme_cap 0x%x\n", __FUNCTION__, pme_cap));
7306 
7307     return ((pme_cap & PME_CAP_PM_STATES) != 0);
7308 }
7309 
7310 uint32 dhdpcie_lcreg(osl_t *osh, uint32 mask, uint32 val)
7311 {
7312     uint8 pcie_cap;
7313     uint8 lcreg_offset; /* PCIE capability LCreg offset in the config space */
7314     uint32 reg_val;
7315 
7316     pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID);
7317     if (!pcie_cap) {
7318         DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__));
7319         return 0;
7320     }
7321 
7322     lcreg_offset = pcie_cap + PCIE_CAP_LINKCTRL_OFFSET;
7323 
7324     /* set operation */
7325     if (mask) {
7326         /* read */
7327         reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
7328         /* modify */
7329         reg_val &= ~mask;
7330         reg_val |= (mask & val);
7331         /* write */
7332         OSL_PCI_WRITE_CONFIG(osh, lcreg_offset, sizeof(uint32), reg_val);
7333     }
7334     return OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
7335 }
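/*
 * dhdpcie_lcreg() is a read-modify-write accessor for the PCIe Link Control
 * register: mask == 0 makes it a pure read, otherwise only the bits in mask
 * are replaced with the corresponding bits of val, and the final register
 * value is returned. Usage sketch (the ASPM bit values are illustrative):
 *
 *     uint32 lc = dhdpcie_lcreg(osh, 0, 0);   // read Link Control
 *     (void)dhdpcie_lcreg(osh, 0x3, 0x0);     // e.g. clear ASPM L0s/L1 bits
 */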
7336 
7337 uint8 dhdpcie_clkreq(osl_t *osh, uint32 mask, uint32 val)
7338 {
7339     uint8 pcie_cap;
7340     uint32 reg_val;
7341     uint8 lcreg_offset; /* PCIE capability LCreg offset in the config space */
7342 
7343     pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID);
7344     if (!pcie_cap) {
7345         DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__));
7346         return 0;
7347     }
7348 
7349     lcreg_offset = pcie_cap + PCIE_CAP_LINKCTRL_OFFSET;
7350 
7351     reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
7352     /* set operation */
7353     if (mask) {
7354         if (val) {
7355             reg_val |= PCIE_CLKREQ_ENAB;
7356         } else {
7357             reg_val &= ~PCIE_CLKREQ_ENAB;
7358         }
7359         OSL_PCI_WRITE_CONFIG(osh, lcreg_offset, sizeof(uint32), reg_val);
7360         reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
7361     }
7362     if (reg_val & PCIE_CLKREQ_ENAB) {
7363         return 1;
7364     } else {
7365         return 0;
7366     }
7367 }
7368 
7369 void dhd_dump_intr_counters(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
7370 {
7371     dhd_bus_t *bus;
7372     uint64 current_time = OSL_LOCALTIME_NS();
7373 
7374     if (!dhd) {
7375         DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
7376         return;
7377     }
7378 
7379     bus = dhd->bus;
7380     if (!bus) {
7381         DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
7382         return;
7383     }
7384 
7385     bcm_bprintf(strbuf,
7386                 "\n ------- DUMPING INTR enable/disable counters-------\n");
7387     bcm_bprintf(strbuf,
7388                 "resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n"
7389                 "isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n"
7390                 "dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n",
7391                 bus->resume_intr_enable_count, bus->dpc_intr_enable_count,
7392                 bus->isr_intr_disable_count, bus->suspend_intr_disable_count,
7393                 bus->dpc_return_busdown_count, bus->non_ours_irq_count);
7394 #ifdef BCMPCIE_OOB_HOST_WAKE
7395     bcm_bprintf(strbuf,
7396                 "oob_intr_count=%lu oob_intr_enable_count=%lu"
7397                 " oob_intr_disable_count=%lu\noob_irq_num=%d "
7398                 "last_oob_irq_time=" SEC_USEC_FMT
7399                 " last_oob_irq_enable_time=" SEC_USEC_FMT
7400                 "\nlast_oob_irq_disable_time=" SEC_USEC_FMT
7401                 " oob_irq_enabled=%d oob_gpio_level=%d\n",
7402                 bus->oob_intr_count, bus->oob_intr_enable_count,
7403                 bus->oob_intr_disable_count, dhdpcie_get_oob_irq_num(bus),
7404                 GET_SEC_USEC(bus->last_oob_irq_time),
7405                 GET_SEC_USEC(bus->last_oob_irq_enable_time),
7406                 GET_SEC_USEC(bus->last_oob_irq_disable_time),
7407                 dhdpcie_get_oob_irq_status(bus), dhdpcie_get_oob_irq_level());
7408 #endif /* BCMPCIE_OOB_HOST_WAKE */
7409     bcm_bprintf(
7410         strbuf,
7411         "\ncurrent_time=" SEC_USEC_FMT " isr_entry_time=" SEC_USEC_FMT
7412         " isr_exit_time=" SEC_USEC_FMT "\ndpc_sched_time=" SEC_USEC_FMT
7413         " last_non_ours_irq_time=" SEC_USEC_FMT " dpc_entry_time=" SEC_USEC_FMT
7414         "\n"
7415         "last_process_ctrlbuf_time=" SEC_USEC_FMT
7416         " last_process_flowring_time=" SEC_USEC_FMT
7417         " last_process_txcpl_time=" SEC_USEC_FMT
7418         "\nlast_process_rxcpl_time=" SEC_USEC_FMT
7419         " last_process_infocpl_time=" SEC_USEC_FMT
7420         " last_process_edl_time=" SEC_USEC_FMT "\ndpc_exit_time=" SEC_USEC_FMT
7421         " resched_dpc_time=" SEC_USEC_FMT "\n"
7422         "last_d3_inform_time=" SEC_USEC_FMT "\n",
7423         GET_SEC_USEC(current_time), GET_SEC_USEC(bus->isr_entry_time),
7424         GET_SEC_USEC(bus->isr_exit_time), GET_SEC_USEC(bus->dpc_sched_time),
7425         GET_SEC_USEC(bus->last_non_ours_irq_time),
7426         GET_SEC_USEC(bus->dpc_entry_time),
7427         GET_SEC_USEC(bus->last_process_ctrlbuf_time),
7428         GET_SEC_USEC(bus->last_process_flowring_time),
7429         GET_SEC_USEC(bus->last_process_txcpl_time),
7430         GET_SEC_USEC(bus->last_process_rxcpl_time),
7431         GET_SEC_USEC(bus->last_process_infocpl_time),
7432         GET_SEC_USEC(bus->last_process_edl_time),
7433         GET_SEC_USEC(bus->dpc_exit_time), GET_SEC_USEC(bus->resched_dpc_time),
7434         GET_SEC_USEC(bus->last_d3_inform_time));
7435 
7436     bcm_bprintf(strbuf,
7437                 "\nlast_suspend_start_time=" SEC_USEC_FMT
7438                 " last_suspend_end_time=" SEC_USEC_FMT
7439                 " last_resume_start_time=" SEC_USEC_FMT
7440                 " last_resume_end_time=" SEC_USEC_FMT "\n",
7441                 GET_SEC_USEC(bus->last_suspend_start_time),
7442                 GET_SEC_USEC(bus->last_suspend_end_time),
7443                 GET_SEC_USEC(bus->last_resume_start_time),
7444                 GET_SEC_USEC(bus->last_resume_end_time));
7445 
7446 #if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE)
7447     bcm_bprintf(strbuf,
7448                 "logtrace_thread_entry_time=" SEC_USEC_FMT
7449                 " logtrace_thread_sem_down_time=" SEC_USEC_FMT
7450                 "\nlogtrace_thread_flush_time=" SEC_USEC_FMT
7451                 " logtrace_thread_unexpected_break_time=" SEC_USEC_FMT
7452                 "\nlogtrace_thread_complete_time=" SEC_USEC_FMT "\n",
7453                 GET_SEC_USEC(dhd->logtrace_thr_ts.entry_time),
7454                 GET_SEC_USEC(dhd->logtrace_thr_ts.sem_down_time),
7455                 GET_SEC_USEC(dhd->logtrace_thr_ts.flush_time),
7456                 GET_SEC_USEC(dhd->logtrace_thr_ts.unexpected_break_time),
7457                 GET_SEC_USEC(dhd->logtrace_thr_ts.complete_time));
7458 #endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */
7459 }
7460 
7461 void dhd_dump_intr_registers(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
7462 {
7463     uint32 intstatus = 0;
7464     uint32 intmask = 0;
7465     uint32 d2h_db0 = 0;
7466     uint32 d2h_mb_data = 0;
7467 
7468     intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
7469                            dhd->bus->pcie_mailbox_int, 0, 0);
7470     intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
7471                          dhd->bus->pcie_mailbox_mask, 0, 0);
7472     d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
7473                          PCID2H_MailBox, 0, 0);
7474     dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
7475 
7476     bcm_bprintf(strbuf, "intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n", intstatus,
7477                 intmask, d2h_db0);
7478     bcm_bprintf(strbuf, "d2h_mb_data=0x%x def_intmask=0x%x\n", d2h_mb_data,
7479                 dhd->bus->def_intmask);
7480 }
7481 /** Add bus dump output to a buffer */
7482 void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
7483 {
7484     uint16 flowid;
7485     int ix = 0;
7486     flow_ring_node_t *flow_ring_node;
7487     flow_info_t *flow_info;
7488 #ifdef TX_STATUS_LATENCY_STATS
7489     uint8 ifindex;
7490     if_flow_lkup_t *if_flow_lkup;
7491     dhd_if_tx_status_latency_t if_tx_status_latency[DHD_MAX_IFS];
7492 #endif /* TX_STATUS_LATENCY_STATS */
7493 
7494     if (dhdp->busstate != DHD_BUS_DATA) {
7495         return;
7496     }
7497 
7498 #ifdef TX_STATUS_LATENCY_STATS
7499     memset(if_tx_status_latency, 0, sizeof(if_tx_status_latency));
7500 #endif /* TX_STATUS_LATENCY_STATS */
7501 #ifdef DHD_WAKE_STATUS
7502     bcm_bprintf(strbuf, "wake %u rxwake %u readctrlwake %u\n",
7503                 bcmpcie_get_total_wake(dhdp->bus),
7504                 dhdp->bus->wake_counts.rxwake, dhdp->bus->wake_counts.rcwake);
7505 #ifdef DHD_WAKE_RX_STATUS
7506     bcm_bprintf(strbuf, " unicast %u multicast %u broadcast %u arp %u\n",
7507                 dhdp->bus->wake_counts.rx_ucast,
7508                 dhdp->bus->wake_counts.rx_mcast,
7509                 dhdp->bus->wake_counts.rx_bcast, dhdp->bus->wake_counts.rx_arp);
7510     bcm_bprintf(strbuf, " multi4 %u multi6 %u icmp6 %u multiother %u\n",
7511                 dhdp->bus->wake_counts.rx_multi_ipv4,
7512                 dhdp->bus->wake_counts.rx_multi_ipv6,
7513                 dhdp->bus->wake_counts.rx_icmpv6,
7514                 dhdp->bus->wake_counts.rx_multi_other);
7515     bcm_bprintf(strbuf, " icmp6_ra %u, icmp6_na %u, icmp6_ns %u\n",
7516                 dhdp->bus->wake_counts.rx_icmpv6_ra,
7517                 dhdp->bus->wake_counts.rx_icmpv6_na,
7518                 dhdp->bus->wake_counts.rx_icmpv6_ns);
7519 #endif /* DHD_WAKE_RX_STATUS */
7520 #ifdef DHD_WAKE_EVENT_STATUS
7521     for (flowid = 0; flowid < WLC_E_LAST; flowid++) {
7522         if (dhdp->bus->wake_counts.rc_event[flowid] != 0) {
7523             bcm_bprintf(strbuf, " %s = %u\n", bcmevent_get_name(flowid),
7524                         dhdp->bus->wake_counts.rc_event[flowid]);
7525         }
7526     }
7527     bcm_bprintf(strbuf, "\n");
7528 #endif /* DHD_WAKE_EVENT_STATUS */
7529 #endif /* DHD_WAKE_STATUS */
7530 
7531     dhd_prot_print_info(dhdp, strbuf);
7532     dhd_dump_intr_registers(dhdp, strbuf);
7533     dhd_dump_intr_counters(dhdp, strbuf);
7534     bcm_bprintf(
7535         strbuf, "h2d_mb_data_ptr_addr 0x%x, d2h_mb_data_ptr_addr 0x%x\n",
7536         dhdp->bus->h2d_mb_data_ptr_addr, dhdp->bus->d2h_mb_data_ptr_addr);
7537     bcm_bprintf(strbuf, "dhd cumm_ctr %d\n",
7538                 DHD_CUMM_CTR_READ(&dhdp->cumm_ctr));
7539 #ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
7540     bcm_bprintf(
7541         strbuf, "multi_client_flow_rings:%d max_multi_client_flow_rings:%d\n",
7542         dhdp->multi_client_flow_rings, dhdp->max_multi_client_flow_rings);
7543 #endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */
7544     bcm_bprintf(strbuf, "%4s %4s %2s %4s %17s %4s %4s %6s %10s %4s %4s ",
7545                 "Num:", "Flow", "If", "Prio", ":Dest_MacAddress:", "Qlen",
7546                 "CLen", "L2CLen", " Overflows", "  RD", "  WR");
7547 
7548 #ifdef TX_STATUS_LATENCY_STATS
7549     /* Average Tx status/completion latency in microseconds */
7550     bcm_bprintf(strbuf, "%16s %16s ", "       NumTxPkts", "    AvgTxCmpL_Us");
7551 #endif /* TX_STATUS_LATENCY_STATS */
7552 
7553     bcm_bprintf(strbuf, "\n");
7554 
7555     for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) {
7556         flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
7557         if (!flow_ring_node->active) {
7558             continue;
7559         }
7560 
7561         flow_info = &flow_ring_node->flow_info;
7562         bcm_bprintf(
7563             strbuf, "%4d %4d %2d %4d " MACDBG " %4d %4d %6d %10u ", ix++,
7564             flow_ring_node->flowid, flow_info->ifindex, flow_info->tid,
7565             MAC2STRDBG(flow_info->da),
7566             DHD_FLOW_QUEUE_LEN(&flow_ring_node->queue),
7567             DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_CLEN_PTR(&flow_ring_node->queue)),
7568             DHD_CUMM_CTR_READ(
7569                 DHD_FLOW_QUEUE_L2CLEN_PTR(&flow_ring_node->queue)),
7570             DHD_FLOW_QUEUE_FAILURES(&flow_ring_node->queue));
7571         dhd_prot_print_flow_ring(dhdp, flow_ring_node->prot_info, strbuf,
7572                                  "%4d %4d ");
7573 
7574 #ifdef TX_STATUS_LATENCY_STATS
7575         bcm_bprintf(strbuf, "%16d %16d ", flow_info->num_tx_pkts,
7576                     flow_info->num_tx_status
7577                         ? DIV_U64_BY_U64(flow_info->cum_tx_status_latency,
7578                                          flow_info->num_tx_status)
7579                         : 0);
7580 
7581         ifindex = flow_info->ifindex;
7582         ASSERT(ifindex < DHD_MAX_IFS);
7583         if (ifindex < DHD_MAX_IFS) {
7584             if_tx_status_latency[ifindex].num_tx_status +=
7585                 flow_info->num_tx_status;
7586             if_tx_status_latency[ifindex].cum_tx_status_latency +=
7587                 flow_info->cum_tx_status_latency;
7588         } else {
7589             DHD_ERROR(("%s: Bad IF index: %d associated with flowid: %d\n",
7590                        __FUNCTION__, ifindex, flowid));
7591         }
7592 #endif /* TX_STATUS_LATENCY_STATS */
7593         bcm_bprintf(strbuf, "\n");
7594     }
7595 
7596 #ifdef TX_STATUS_LATENCY_STATS
7597     bcm_bprintf(strbuf, "\n%s  %16s  %16s\n", "If", "AvgTxCmpL_Us",
7598                 "NumTxStatus");
7599     if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
7600     for (ix = 0; ix < DHD_MAX_IFS; ix++) {
7601         if (!if_flow_lkup[ix].status) {
7602             continue;
7603         }
7604         bcm_bprintf(
7605             strbuf, "%2d  %16d  %16d\n", ix,
7606             if_tx_status_latency[ix].num_tx_status
7607                 ? DIV_U64_BY_U64(if_tx_status_latency[ix].cum_tx_status_latency,
7608                                  if_tx_status_latency[ix].num_tx_status)
7609                 : 0,
7610             if_tx_status_latency[ix].num_tx_status);
7611     }
7612 #endif /* TX_STATUS_LATENCY_STATS */
7613 
7614 #ifdef DHD_HP2P
7615     if (dhdp->hp2p_capable) {
7616         bcm_bprintf(strbuf, "\n%s  %16s  %16s", "Flowid", "Tx_t0", "Tx_t1");
7617 
7618         for (flowid = 0; flowid < MAX_HP2P_FLOWS; flowid++) {
7619             hp2p_info_t *hp2p_info;
7620             int bin;
7621 
7622             hp2p_info = &dhdp->hp2p_info[flowid];
7623             if (hp2p_info->num_timer_start == 0) {
7624                 continue;
7625             }
7626 
7627             bcm_bprintf(strbuf, "\n%d", hp2p_info->flowid);
7628             bcm_bprintf(strbuf, "\n%s", "Bin");
7629 
7630             for (bin = 0; bin < MAX_TX_HIST_BIN; bin++) {
7631                 bcm_bprintf(strbuf, "\n%2d %20d  %16d", bin,
7632                             hp2p_info->tx_t0[bin], hp2p_info->tx_t1[bin]);
7633             }
7634 
7635             bcm_bprintf(strbuf, "\n%s  %16s", "Flowid", "Rx_t0");
7636             bcm_bprintf(strbuf, "\n%d", hp2p_info->flowid);
7637             bcm_bprintf(strbuf, "\n%s", "Bin");
7638 
7639             for (bin = 0; bin < MAX_RX_HIST_BIN; bin++) {
7640                 bcm_bprintf(strbuf, "\n%d %20d", bin, hp2p_info->rx_t0[bin]);
7641             }
7642 
7643             bcm_bprintf(strbuf, "\n%s  %16s  %16s", "Packet limit",
7644                         "Timer limit", "Timer start");
7645             bcm_bprintf(strbuf, "\n%d %24d %16d", hp2p_info->num_pkt_limit,
7646                         hp2p_info->num_timer_limit, hp2p_info->num_timer_start);
7647         }
7648 
7649         bcm_bprintf(strbuf, "\n");
7650     }
7651 #endif /* DHD_HP2P */
7652 
7653     bcm_bprintf(strbuf, "D3 inform cnt %d\n", dhdp->bus->d3_inform_cnt);
7654     bcm_bprintf(strbuf, "D0 inform cnt %d\n", dhdp->bus->d0_inform_cnt);
7655     bcm_bprintf(strbuf, "D0 inform in use cnt %d\n",
7656                 dhdp->bus->d0_inform_in_use_cnt);
7657     if (dhdp->d2h_hostrdy_supported) {
7658         bcm_bprintf(strbuf, "hostready count:%d\n", dhdp->bus->hostready_count);
7659     }
7660     bcm_bprintf(strbuf, "d2h_intr_method -> %s\n",
7661                 dhdp->bus->d2h_intr_method ? "PCIE_MSI" : "PCIE_INTX");
7662 }
7663 
7664 #ifdef DNGL_AXI_ERROR_LOGGING
7665 bool dhd_axi_sig_match(dhd_pub_t *dhdp)
7666 {
7667     uint32 axi_tcm_addr =
7668         dhdpcie_bus_rtcm32(dhdp->bus, dhdp->axierror_logbuf_addr);
7669 
7670     if (dhdp->dhd_induce_error == DHD_INDUCE_DROP_AXI_SIG) {
7671         DHD_ERROR(("%s: Induce AXI signature drop\n", __FUNCTION__));
7672         return FALSE;
7673     }
7674 
7675     DHD_ERROR(("%s: axi_tcm_addr: 0x%x, tcm range: 0x%x ~ 0x%x\n", __FUNCTION__,
7676                axi_tcm_addr, dhdp->bus->dongle_ram_base,
7677                dhdp->bus->dongle_ram_base + dhdp->bus->ramsize));
7678     if (axi_tcm_addr >= dhdp->bus->dongle_ram_base &&
7679         axi_tcm_addr < dhdp->bus->dongle_ram_base + dhdp->bus->ramsize) {
7680         uint32 axi_signature = dhdpcie_bus_rtcm32(
7681             dhdp->bus,
7682             (axi_tcm_addr + OFFSETOF(hnd_ext_trap_axi_error_v1_t, signature)));
7683         if (axi_signature == HND_EXT_TRAP_AXIERROR_SIGNATURE) {
7684             return TRUE;
7685         } else {
7686             DHD_ERROR(
7687                 ("%s: No AXI signature: 0x%x\n", __FUNCTION__, axi_signature));
7688             return FALSE;
7689         }
7690     } else {
7691         DHD_ERROR(
7692             ("%s: No AXI shared tcm address debug info.\n", __FUNCTION__));
7693         return FALSE;
7694     }
7695 }
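/*
 * The signature check above reads one 32-bit struct field directly out of
 * dongle TCM by adding the field's offset to the structure's TCM address. A
 * minimal sketch of the same pattern for another field of the AXI error
 * record (hypothetical helper name, for illustration only):
 */
static uint32 dhd_axi_read_version_sketch(dhd_pub_t *dhdp, uint32 rec_addr)
{
    /* e.g. the 'version' field that sits next to 'signature' */
    return dhdpcie_bus_rtcm32(
        dhdp->bus, rec_addr + OFFSETOF(hnd_ext_trap_axi_error_v1_t, version));
}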
7696 
7697 void dhd_axi_error(dhd_pub_t *dhdp)
7698 {
7699     dhd_axi_error_dump_t *axi_err_dump;
7700     uint8 *axi_err_buf = NULL;
7701     uint8 *p_axi_err = NULL;
7702     uint32 axi_logbuf_addr;
7703     uint32 axi_tcm_addr;
7704     int err, size;
7705 
7706     OSL_DELAY(0x124F8);
7707 
7708     axi_logbuf_addr = dhdp->axierror_logbuf_addr;
7709     if (!axi_logbuf_addr) {
7710         DHD_ERROR(("%s: No AXI TCM address debug info.\n", __FUNCTION__));
7711         goto sched_axi;
7712     }
7713 
7714     axi_err_dump = dhdp->axi_err_dump;
7715     if (!axi_err_dump) {
7716         goto sched_axi;
7717     }
7718 
7719     if (!dhd_axi_sig_match(dhdp)) {
7720         goto sched_axi;
7721     }
7722 
7723     /* Reading AXI error data for SMMU fault */
7724     DHD_ERROR(("%s: Read AXI data from TCM address\n", __FUNCTION__));
7725     axi_tcm_addr = dhdpcie_bus_rtcm32(dhdp->bus, axi_logbuf_addr);
7726     size = sizeof(hnd_ext_trap_axi_error_v1_t);
7727     axi_err_buf = MALLOCZ(dhdp->osh, size);
7728     if (axi_err_buf == NULL) {
7729         DHD_ERROR(("%s: out of memory !\n", __FUNCTION__));
7730         goto sched_axi;
7731     }
7732 
7733     p_axi_err = axi_err_buf;
7734     err = dhdpcie_bus_membytes(dhdp->bus, FALSE, axi_tcm_addr, p_axi_err, size);
7735     if (err) {
7736         DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
7737                    __FUNCTION__, err, size, axi_tcm_addr));
7738         goto sched_axi;
7739     }
7740 
7741     /* Dump data to Dmesg */
7742     dhd_log_dump_axi_error(axi_err_buf);
7743     err = memcpy_s(&axi_err_dump->etd_axi_error_v1, size, axi_err_buf, size);
7744     if (err) {
7745         DHD_ERROR(("%s: failed to copy etd axi error info, err=%d\n",
7746                    __FUNCTION__, err));
7747     }
7748 
7749 sched_axi:
7750     if (axi_err_buf) {
7751         MFREE(dhdp->osh, axi_err_buf, size);
7752     }
7753     dhd_schedule_axi_error_dump(dhdp, NULL);
7754 }
7755 
7756 static void dhd_log_dump_axi_error(uint8 *axi_err)
7757 {
7758     dma_dentry_v1_t dma_dentry;
7759     dma_fifo_v1_t dma_fifo;
7760     int i = 0, j = 0;
7761 
7762     if (*(uint8 *)axi_err == HND_EXT_TRAP_AXIERROR_VERSION_1) {
7763         hnd_ext_trap_axi_error_v1_t *axi_err_v1 =
7764             (hnd_ext_trap_axi_error_v1_t *)axi_err;
7765         DHD_ERROR(
7766             ("%s: signature : 0x%x\n", __FUNCTION__, axi_err_v1->signature));
7767         DHD_ERROR(("%s: version : 0x%x\n", __FUNCTION__, axi_err_v1->version));
7768         DHD_ERROR(("%s: length : 0x%x\n", __FUNCTION__, axi_err_v1->length));
7769         DHD_ERROR(("%s: dma_fifo_valid_count : 0x%x\n", __FUNCTION__,
7770                    axi_err_v1->dma_fifo_valid_count));
7771         DHD_ERROR(("%s: axi_errorlog_status : 0x%x\n", __FUNCTION__,
7772                    axi_err_v1->axi_errorlog_status));
7773         DHD_ERROR(("%s: axi_errorlog_core : 0x%x\n", __FUNCTION__,
7774                    axi_err_v1->axi_errorlog_core));
7775         DHD_ERROR(("%s: axi_errorlog_hi : 0x%x\n", __FUNCTION__,
7776                    axi_err_v1->axi_errorlog_hi));
7777         DHD_ERROR(("%s: axi_errorlog_lo : 0x%x\n", __FUNCTION__,
7778                    axi_err_v1->axi_errorlog_lo));
7779         DHD_ERROR(("%s: axi_errorlog_id : 0x%x\n", __FUNCTION__,
7780                    axi_err_v1->axi_errorlog_id));
7781 
7782         for (i = 0; i < MAX_DMAFIFO_ENTRIES_V1; i++) {
7783             dma_fifo = axi_err_v1->dma_fifo[i];
7784             DHD_ERROR(
7785                 ("%s: valid:%d : 0x%x\n", __FUNCTION__, i, dma_fifo.valid));
7786             DHD_ERROR(("%s: direction:%d : 0x%x\n", __FUNCTION__, i,
7787                        dma_fifo.direction));
7788             DHD_ERROR(
7789                 ("%s: index:%d : 0x%x\n", __FUNCTION__, i, dma_fifo.index));
7790             DHD_ERROR(("%s: dpa:%d : 0x%x\n", __FUNCTION__, i, dma_fifo.dpa));
7791             DHD_ERROR(
7792                 ("%s: desc_lo:%d : 0x%x\n", __FUNCTION__, i, dma_fifo.desc_lo));
7793             DHD_ERROR(
7794                 ("%s: desc_hi:%d : 0x%x\n", __FUNCTION__, i, dma_fifo.desc_hi));
7795             DHD_ERROR(("%s: din:%d : 0x%x\n", __FUNCTION__, i, dma_fifo.din));
7796             DHD_ERROR(("%s: dout:%d : 0x%x\n", __FUNCTION__, i, dma_fifo.dout));
7797             for (j = 0; j < MAX_DMAFIFO_DESC_ENTRIES_V1; j++) {
7798                 dma_dentry = axi_err_v1->dma_fifo[i].dentry[j];
7799                 DHD_ERROR(("%s: ctrl1:%d : 0x%x\n", __FUNCTION__, j,
7800                            dma_dentry.ctrl1));
7801                 DHD_ERROR(("%s: ctrl2:%d : 0x%x\n", __FUNCTION__, j,
7802                            dma_dentry.ctrl2));
7803                 DHD_ERROR(("%s: addrlo:%d : 0x%x\n", __FUNCTION__, j,
7804                            dma_dentry.addrlo));
7805                 DHD_ERROR(("%s: addrhi:%d : 0x%x\n", __FUNCTION__, j,
7806                            dma_dentry.addrhi));
7807             }
7808         }
7809     } else {
7810         DHD_ERROR(("%s: Invalid AXI version: 0x%x\n", __FUNCTION__,
7811                    (*(uint8 *)axi_err)));
7812     }
7813 }
7814 #endif /* DNGL_AXI_ERROR_LOGGING */
7815 
7816 /**
7817  * Brings transmit packets on all flow rings closer to the dongle, by moving (a
7818  * subset) from their flow queue to their flow ring.
7819  */
7820 static void dhd_update_txflowrings(dhd_pub_t *dhd)
7821 {
7822     unsigned long flags;
7823     dll_t *item, *next;
7824     flow_ring_node_t *flow_ring_node;
7825     struct dhd_bus *bus = dhd->bus;
7826 
7827     if (dhd_query_bus_erros(dhd)) {
7828         return;
7829     }
7830 
7831     /* Hold flowring_list_lock to ensure no race condition while accessing
7832      * the list */
7833     DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
7834     for (item = dll_head_p(&bus->flowring_active_list);
7835          (!dhd_is_device_removed(dhd) &&
7836           !dll_end(&bus->flowring_active_list, item));
7837          item = next) {
7838         if (dhd->hang_was_sent) {
7839             break;
7840         }
7841 
7842         next = dll_next_p(item);
7843         flow_ring_node = dhd_constlist_to_flowring(item);
7844 
7845         /* Ensure that flow_ring_node in the list is Not Null */
7846         ASSERT(flow_ring_node != NULL);
7847 
7848         /* Ensure that the flowring node has valid contents */
7849         ASSERT(flow_ring_node->prot_info != NULL);
7850 
7851         dhd_prot_update_txflowring(dhd, flow_ring_node->flowid,
7852                                    flow_ring_node->prot_info);
7853     }
7854     DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
7855 }
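/*
 * Note the list-walk idiom above: the successor is captured in 'next' before
 * the current node is processed, so the walk stays safe even if the node is
 * unlinked from the active list while being handled. The same pattern over
 * any dll_t list (sketch):
 *
 *     for (item = dll_head_p(&list); !dll_end(&list, item); item = next) {
 *         next = dll_next_p(item);   // capture successor first
 *         process(item);             // may unlink 'item' from the list
 *     }
 */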
7856 
7857 /** Mailbox ringbell Function */
7858 static void dhd_bus_gen_devmb_intr(struct dhd_bus *bus)
7859 {
7860     if ((bus->sih->buscorerev == 0x2) || (bus->sih->buscorerev == 0x6) ||
7861         (bus->sih->buscorerev == 0x4)) {
7862         DHD_ERROR(("%s: mailbox communication not supported\n", __FUNCTION__));
7863         return;
7864     }
7865     if (bus->db1_for_mb) {
7866         /* this is a pcie core register, not the config register */
7867         DHD_INFO(("%s: writing a mail box interrupt to the device, through "
7868                   "doorbell 1\n",
7869                   __FUNCTION__));
7870         if (DAR_PWRREQ(bus)) {
7871             dhd_bus_pcie_pwr_req(bus);
7872         }
7873         si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db1_addr_get(bus),
7874                    ~0, 0x12345678);
7875     } else {
7876         DHD_INFO(("%s: writing a mail box interrupt to the device, through "
7877                   "config space\n",
7878                   __FUNCTION__));
7879         dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 0x4, (1 << 0));
7880         dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 0x4, (1 << 0));
7881     }
7882 }
7883 
7884 /* Upon receiving a mailbox interrupt,
7885  * if the H2D_FW_TRAP bit is set in the mailbox location,
7886  * the device traps
7887  */
7888 static void dhdpcie_fw_trap(dhd_bus_t *bus)
7889 {
7890     /* Send the mailbox data and generate mailbox intr. */
7891     dhdpcie_send_mb_data(bus, H2D_FW_TRAP);
7892     /* For FWs that cannot interpret H2D_FW_TRAP */
7893     (void)dhd_wl_ioctl_set_intiovar(bus->dhd, "bus:disconnect", 0x63, WLC_SET_VAR,
7894                                     TRUE, 0);
7895 }
7896 
7897 /** mailbox doorbell ring function */
7898 void dhd_bus_ringbell(struct dhd_bus *bus, uint32 value)
7899 {
7900     /* Skip after sending D3_INFORM */
7901     if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
7902         DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
7903                    __FUNCTION__, bus->bus_low_power_state));
7904         return;
7905     }
7906 
7907     /* Skip in the case of link down */
7908     if (bus->is_linkdown) {
7909         DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
7910         return;
7911     }
7912 
7913     if ((bus->sih->buscorerev == 0x2) || (bus->sih->buscorerev == 0x6) ||
7914         (bus->sih->buscorerev == 0x4)) {
7915         si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int,
7916                    PCIE_INTB, PCIE_INTB);
7917     } else {
7918         /* this is a pcie core register, not the config register */
7919         DHD_INFO(("%s: writing a door bell to the device\n", __FUNCTION__));
7920         if (IDMA_ACTIVE(bus->dhd)) {
7921             if (DAR_PWRREQ(bus)) {
7922                 dhd_bus_pcie_pwr_req(bus);
7923             }
7924             si_corereg(bus->sih, bus->sih->buscoreidx,
7925                        dhd_bus_db0_addr_2_get(bus), ~0, value);
7926         } else {
7927             if (DAR_PWRREQ(bus)) {
7928                 dhd_bus_pcie_pwr_req(bus);
7929             }
7930             si_corereg(bus->sih, bus->sih->buscoreidx,
7931                        dhd_bus_db0_addr_get(bus), ~0, 0x12345678);
7932         }
7933     }
7934 }
7935 
7936 /** mailbox doorbell ring function for IDMA/IFRM using dma channel2 */
7937 void dhd_bus_ringbell_2(struct dhd_bus *bus, uint32 value, bool devwake)
7938 {
7939     /* this is a pcie core register, not the config register */
7940     /* Skip after sending D3_INFORM */
7941     if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
7942         DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
7943                    __FUNCTION__, bus->bus_low_power_state));
7944         return;
7945     }
7946 
7947     /* Skip in the case of link down */
7948     if (bus->is_linkdown) {
7949         DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
7950         return;
7951     }
7952 
7953     DHD_INFO(("writing a door bell 2 to the device\n"));
7954     if (DAR_PWRREQ(bus)) {
7955         dhd_bus_pcie_pwr_req(bus);
7956     }
7957     si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db0_addr_2_get(bus), ~0,
7958                value);
7959 }
7960 
7961 void dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value)
7962 {
7963     /* Skip after sending D3_INFORM */
7964     if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
7965         DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
7966                    __FUNCTION__, bus->bus_low_power_state));
7967         return;
7968     }
7969 
7970     /* Skip in the case of link down */
7971     if (bus->is_linkdown) {
7972         DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
7973         return;
7974     }
7975 
7976     if (DAR_PWRREQ(bus)) {
7977         dhd_bus_pcie_pwr_req(bus);
7978     }
7979 
7980 #ifdef DHD_DB0TS
7981     if (bus->dhd->db0ts_capable) {
7982         uint64 ts;
7983 
7984         ts = local_clock();
7985         do_div(ts, 0x3E8);
7986 
7987         value = htol32(ts & 0xFFFFFFFF);
7988         DHD_INFO(("%s: usec timer = 0x%x\n", __FUNCTION__, value));
7989     }
7990 #endif /* DHD_DB0TS */
7991     W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, value);
7992 }
7993 
7994 void dhdpcie_bus_ringbell_2_fast(struct dhd_bus *bus, uint32 value,
7995                                  bool devwake)
7996 {
7997     /* Skip after sending D3_INFORM */
7998     if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
7999         DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
8000                    __FUNCTION__, bus->bus_low_power_state));
8001         return;
8002     }
8003 
8004     /* Skip in the case of link down */
8005     if (bus->is_linkdown) {
8006         DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
8007         return;
8008     }
8009 
8010     if (DAR_PWRREQ(bus)) {
8011         dhd_bus_pcie_pwr_req(bus);
8012     }
8013     W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_2_addr, value);
8014 }
8015 
8016 static void dhd_bus_ringbell_oldpcie(struct dhd_bus *bus, uint32 value)
8017 {
8018     uint32 w;
8019     /* Skip after sending D3_INFORM */
8020     if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
8021         DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
8022                    __FUNCTION__, bus->bus_low_power_state));
8023         return;
8024     }
8025 
8026     /* Skip in the case of link down */
8027     if (bus->is_linkdown) {
8028         DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
8029         return;
8030     }
8031 
8032     w = (R_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr) & ~PCIE_INTB) |
8033         PCIE_INTB;
8034     W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, w);
8035 }
8036 
8037 dhd_mb_ring_t dhd_bus_get_mbintr_fn(struct dhd_bus *bus)
8038 {
8039     if ((bus->sih->buscorerev == 0x2) || (bus->sih->buscorerev == 0x6) ||
8040         (bus->sih->buscorerev == 0x4)) {
8041         bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
8042                                                  bus->pcie_mailbox_int);
8043         if (bus->pcie_mb_intr_addr) {
8044             bus->pcie_mb_intr_osh = si_osh(bus->sih);
8045             return dhd_bus_ringbell_oldpcie;
8046         }
8047     } else {
8048         bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
8049                                                  dhd_bus_db0_addr_get(bus));
8050         if (bus->pcie_mb_intr_addr) {
8051             bus->pcie_mb_intr_osh = si_osh(bus->sih);
8052             return dhdpcie_bus_ringbell_fast;
8053         }
8054     }
8055     return dhd_bus_ringbell;
8056 }
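/*
 * The selector above returns the fastest doorbell routine the hardware
 * supports: a direct W_REG() to the cached register address when
 * si_corereg_addr() yields one, or the generic si_corereg() path
 * (dhd_bus_ringbell) otherwise. Typical usage sketch:
 *
 *     dhd_mb_ring_t ring_fn = dhd_bus_get_mbintr_fn(bus);
 *     ring_fn(bus, 0x12345678);   // ring doorbell 0
 */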
8057 
8058 dhd_mb_ring_2_t dhd_bus_get_mbintr_2_fn(struct dhd_bus *bus)
8059 {
8060     bus->pcie_mb_intr_2_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
8061                                                dhd_bus_db0_addr_2_get(bus));
8062     if (bus->pcie_mb_intr_2_addr) {
8063         bus->pcie_mb_intr_osh = si_osh(bus->sih);
8064         return dhdpcie_bus_ringbell_2_fast;
8065     }
8066     return dhd_bus_ringbell_2;
8067 }
8068 
8069 bool BCMFASTPATH dhd_bus_dpc(struct dhd_bus *bus)
8070 {
8071     bool resched = FALSE; /* Flag indicating resched wanted */
8072     unsigned long flags;
8073 
8074     DHD_TRACE(("%s: Enter\n", __FUNCTION__));
8075 
8076     bus->dpc_entry_time = OSL_LOCALTIME_NS();
8077 
8078     DHD_GENERAL_LOCK(bus->dhd, flags);
8079     /* Check for only DHD_BUS_DOWN and not for DHD_BUS_DOWN_IN_PROGRESS
8080      * to avoid IOCTL Resumed On timeout when ioctl is waiting for response
8081      * and rmmod is fired in parallel, which will make DHD_BUS_DOWN_IN_PROGRESS
8082      * and if we return from here, then IOCTL response will never be handled
8083      */
8084     if (bus->dhd->busstate == DHD_BUS_DOWN) {
8085         DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__));
8086         bus->intstatus = 0;
8087         DHD_GENERAL_UNLOCK(bus->dhd, flags);
8088         bus->dpc_return_busdown_count++;
8089         return 0;
8090     }
8091     DHD_BUS_BUSY_SET_IN_DPC(bus->dhd);
8092     DHD_GENERAL_UNLOCK(bus->dhd, flags);
8093 
8094 #ifdef DHD_READ_INTSTATUS_IN_DPC
8095     if (bus->ipend) {
8096         bus->ipend = FALSE;
8097         bus->intstatus = dhdpcie_bus_intstatus(bus);
8098         /* Check if the interrupt is ours or not */
8099         if (bus->intstatus == 0) {
8100             goto INTR_ON;
8101         }
8102         bus->intrcount++;
8103     }
8104 #endif /* DHD_READ_INTSTATUS_IN_DPC */
8105 
8106     resched = dhdpcie_bus_process_mailbox_intr(bus, bus->intstatus);
8107     if (!resched) {
8108         bus->intstatus = 0;
8109 #ifdef DHD_READ_INTSTATUS_IN_DPC
8110     INTR_ON:
8111 #endif /* DHD_READ_INTSTATUS_IN_DPC */
8112         bus->dpc_intr_enable_count++;
8113 #ifdef CHIP_INTR_CONTROL
8114         dhdpcie_bus_intr_enable(
8115             bus); /* Enable back interrupt using Intmask!! */
8116 #else
8117         /* For Linux, macOS etc. (other than NDIS), re-enable the host
8118          * interrupts that were disabled in dhdpcie_bus_isr()
8119          */
8120         dhdpcie_enable_irq(bus); /* Enable back interrupt!! */
8121 #endif /* CHIP_INTR_CONTROL */
8122         bus->dpc_exit_time = OSL_LOCALTIME_NS();
8123     } else {
8124         bus->resched_dpc_time = OSL_LOCALTIME_NS();
8125     }
8126 
8127     bus->dpc_sched = resched;
8128 
8129     DHD_GENERAL_LOCK(bus->dhd, flags);
8130     DHD_BUS_BUSY_CLEAR_IN_DPC(bus->dhd);
8131     dhd_os_busbusy_wake(bus->dhd);
8132     DHD_GENERAL_UNLOCK(bus->dhd, flags);
8133 
8134     return resched;
8135 }
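/*
 * Contract sketch for the DPC above: the ISR disables device interrupts and
 * schedules the DPC; the DPC re-enables them only when no more ring work is
 * pending (resched == FALSE). A hypothetical caller loop, for illustration
 * only:
 */
static void dhd_dpc_drain_sketch(struct dhd_bus *bus)
{
    while (dhd_bus_dpc(bus)) {
        ; /* resched requested: more ring work, interrupts still disabled */
    }
}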
8136 
8137 int dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data)
8138 {
8139     uint32 cur_h2d_mb_data = 0;
8140 
8141     DHD_INFO_HW4(("%s: H2D_MB_DATA: 0x%08X\n", __FUNCTION__, h2d_mb_data));
8142 
8143     if (bus->is_linkdown) {
8144         DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
8145         return BCME_ERROR;
8146     }
8147 
8148     if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !bus->use_mailbox) {
8149         DHD_INFO(("API rev >= 6, sending mb data as H2D Ctrl message to "
8150                   "dongle, 0x%04x\n",
8151                   h2d_mb_data));
8152         /* Prevent asserting device_wake during doorbell ring for mb data to
8153          * avoid loop. */
8154         {
8155             if (dhd_prot_h2d_mbdata_send_ctrlmsg(bus->dhd, h2d_mb_data)) {
8156                 DHD_ERROR(("failure sending the H2D Mailbox message "
8157                            "to firmware\n"));
8158                 goto fail;
8159             }
8160         }
8161         goto done;
8162     }
8163 
8164     dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0);
8165 
8166     if (cur_h2d_mb_data != 0) {
8167         uint32 i = 0;
8168         DHD_INFO(("%s: GRRRRRRR: MB transaction is already pending 0x%04x\n",
8169                   __FUNCTION__, cur_h2d_mb_data));
8170         while ((i++ < 0x64) && cur_h2d_mb_data) {
8171             OSL_DELAY(0xA);
8172             dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0);
8173         }
8174         if (i >= 0x64) {
8175             DHD_ERROR(("%s : waited 1ms for the dngl "
8176                        "to ack the previous mb transaction\n",
8177                        __FUNCTION__));
8178             DHD_ERROR(("%s : MB transaction is still pending 0x%04x\n",
8179                        __FUNCTION__, cur_h2d_mb_data));
8180         }
8181     }
8182 
8183     dhd_bus_cmn_writeshared(bus, &h2d_mb_data, sizeof(uint32), H2D_MB_DATA, 0);
8184     dhd_bus_gen_devmb_intr(bus);
8185 
8186 done:
8187     if (h2d_mb_data == H2D_HOST_D3_INFORM) {
8188         DHD_INFO_HW4(("%s: send H2D_HOST_D3_INFORM to dongle\n", __FUNCTION__));
8189         bus->last_d3_inform_time = OSL_LOCALTIME_NS();
8190         bus->d3_inform_cnt++;
8191     }
8192     if (h2d_mb_data == H2D_HOST_D0_INFORM_IN_USE) {
8193         DHD_INFO_HW4(
8194             ("%s: send H2D_HOST_D0_INFORM_IN_USE to dongle\n", __FUNCTION__));
8195         bus->d0_inform_in_use_cnt++;
8196     }
8197     if (h2d_mb_data == H2D_HOST_D0_INFORM) {
8198         DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM to dongle\n", __FUNCTION__));
8199         bus->d0_inform_cnt++;
8200     }
8201     return BCME_OK;
8202 fail:
8203     return BCME_ERROR;
8204 }
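/*
 * Timing note for the wait loop above: at most 0x64 (100) polls separated by
 * OSL_DELAY(0xA) (10 us) give the dongle about 100 * 10 us = 1 ms to ack the
 * previous mailbox transaction, which is the 1 ms quoted in the error path.
 */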
8205 
8206 static void dhd_bus_handle_d3_ack(dhd_bus_t *bus)
8207 {
8208     unsigned long flags_bus;
8209     DHD_BUS_LOCK(bus->bus_lock, flags_bus);
8210     bus->suspend_intr_disable_count++;
8211     /* Disable dongle Interrupts Immediately after D3 */
8212 
8213     /* For Linux, macOS etc. (other than NDIS), along with disabling the
8214      * dongle interrupt by clearing the IntMask, also disable the interrupt
8215      * from the host side. Also clear the intstatus if it is set, to avoid
8216      * unnecessary interrupts after the D3 ACK.
8217      */
8218     dhdpcie_bus_intr_disable(bus); /* Disable interrupt using IntMask!! */
8219     dhdpcie_bus_clear_intstatus(bus);
8220     dhdpcie_disable_irq_nosync(bus); /* Disable host interrupt!! */
8221 
8222     if (bus->dhd->dhd_induce_error != DHD_INDUCE_D3_ACK_TIMEOUT) {
8223         /* Set bus_low_power_state to DHD_BUS_D3_ACK_RECIEVED */
8224         bus->bus_low_power_state = DHD_BUS_D3_ACK_RECIEVED;
8225         DHD_ERROR(("%s: D3_ACK Received\n", __FUNCTION__));
8226     }
8227     DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
8228     /* Check for D3 ACK induce flag, which is set by firing dhd iovar to induce
8229      * D3 Ack timeout. If flag is set, D3 wake is skipped, which results in to
8230      * D3 Ack timeout.
8231      */
8232     if (bus->dhd->dhd_induce_error != DHD_INDUCE_D3_ACK_TIMEOUT) {
8233         bus->wait_for_d3_ack = 1;
8234         dhd_os_d3ack_wake(bus->dhd);
8235     } else {
8236         DHD_ERROR(("%s: Inducing D3 ACK timeout\n", __FUNCTION__));
8237     }
8238 }
8239 void dhd_bus_handle_mb_data(dhd_bus_t *bus, uint32 d2h_mb_data)
8240 {
8241     if (MULTIBP_ENAB(bus->sih)) {
8242         dhd_bus_pcie_pwr_req(bus);
8243     }
8244 
8245     DHD_INFO(("D2H_MB_DATA: 0x%04x\n", d2h_mb_data));
8246 
8247     if (d2h_mb_data & D2H_DEV_FWHALT) {
8248         DHD_ERROR(("FW trap has happened\n"));
8249         dhdpcie_checkdied(bus, NULL, 0);
8250         dhd_os_check_hang(bus->dhd, 0, -EREMOTEIO);
8251         goto exit;
8252     }
8253     if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ) {
8254         bool ds_acked = FALSE;
8255         BCM_REFERENCE(ds_acked);
8256         if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
8257             DHD_ERROR(("DS-ENTRY AFTER D3-ACK!!!!! QUITTING\n"));
8258             DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
8259             bus->dhd->busstate = DHD_BUS_DOWN;
8260             goto exit;
8261         }
8262         /* what should we do */
8263         DHD_INFO(("D2H_MB_DATA: DEEP SLEEP REQ\n"));
8264         {
8265             dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
8266             DHD_INFO(("D2H_MB_DATA: sent DEEP SLEEP ACK\n"));
8267         }
8268     }
8269     if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE) {
8270         /* what should we do */
8271         DHD_INFO(("D2H_MB_DATA: DEEP SLEEP EXIT\n"));
8272     }
8273     if (d2h_mb_data & D2HMB_DS_HOST_SLEEP_EXIT_ACK) {
8274         /* what should we do */
8275         DHD_INFO(("D2H_MB_DATA: D0 ACK\n"));
8276     }
8277     if (d2h_mb_data & D2H_DEV_D3_ACK) {
8278         /* what should we do */
8279         DHD_INFO_HW4(("D2H_MB_DATA: D3 ACK\n"));
8280         if (!bus->wait_for_d3_ack) {
8281             dhd_bus_handle_d3_ack(bus);
8282         }
8283     }
8284 
8285 exit:
8286     if (MULTIBP_ENAB(bus->sih)) {
8287         dhd_bus_pcie_pwr_req_clear(bus);
8288     }
8289 }
8290 
8291 static void dhdpcie_handle_mb_data(dhd_bus_t *bus)
8292 {
8293     uint32 d2h_mb_data = 0;
8294     uint32 zero = 0;
8295 
8296     if (MULTIBP_ENAB(bus->sih)) {
8297         dhd_bus_pcie_pwr_req(bus);
8298     }
8299 
8300     dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0);
8301     if (D2H_DEV_MB_INVALIDATED(d2h_mb_data)) {
8302         DHD_ERROR(
8303             ("%s: Invalid D2H_MB_DATA: 0x%08x\n", __FUNCTION__, d2h_mb_data));
8304         goto exit;
8305     }
8306 
8307     dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0);
8308 
8309     DHD_INFO_HW4(("%s: D2H_MB_DATA: 0x%04x\n", __FUNCTION__, d2h_mb_data));
8310     if (d2h_mb_data & D2H_DEV_FWHALT) {
8311         DHD_ERROR(("FW trap has happened\n"));
8312         dhdpcie_checkdied(bus, NULL, 0);
8313         /* not ready yet dhd_os_ind_firmware_stall(bus->dhd); */
8314         goto exit;
8315     }
8316     if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ) {
8317         /* what should we do */
8318         DHD_INFO(("%s: D2H_MB_DATA: DEEP SLEEP REQ\n", __FUNCTION__));
8319         dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
8320         DHD_INFO(("%s: D2H_MB_DATA: sent DEEP SLEEP ACK\n", __FUNCTION__));
8321     }
8322     if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE) {
8323         /* what should we do */
8324         DHD_INFO(("%s: D2H_MB_DATA: DEEP SLEEP EXIT\n", __FUNCTION__));
8325     }
8326     if (d2h_mb_data & D2H_DEV_D3_ACK) {
8327         /* what should we do */
8328         DHD_INFO_HW4(("%s: D2H_MB_DATA: D3 ACK\n", __FUNCTION__));
8329         if (!bus->wait_for_d3_ack) {
8330             dhd_bus_handle_d3_ack(bus);
8331         }
8332     }
8333 
8334 exit:
8335     if (MULTIBP_ENAB(bus->sih)) {
8336         dhd_bus_pcie_pwr_req_clear(bus);
8337     }
8338 }
8339 
8340 static void dhdpcie_read_handle_mb_data(dhd_bus_t *bus)
8341 {
8342     uint32 d2h_mb_data = 0;
8343     uint32 zero = 0;
8344 
8345     if (bus->is_linkdown) {
8346         DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
8347         return;
8348     }
8349 
8350     if (MULTIBP_ENAB(bus->sih)) {
8351         dhd_bus_pcie_pwr_req(bus);
8352     }
8353 
8354     dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0);
8355     if (!d2h_mb_data) {
8356         goto exit;
8357     }
8358 
8359     dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0);
8360 
8361     dhd_bus_handle_mb_data(bus, d2h_mb_data);
8362 
8363 exit:
8364     if (MULTIBP_ENAB(bus->sih)) {
8365         dhd_bus_pcie_pwr_req_clear(bus);
8366     }
8367 }
8368 
8369 static bool dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus)
8370 {
8371     bool resched = FALSE;
8372     unsigned long flags_bus;
8373 
8374     if (MULTIBP_ENAB(bus->sih)) {
8375         dhd_bus_pcie_pwr_req(bus);
8376     }
8377     if ((bus->sih->buscorerev == 0x2) || (bus->sih->buscorerev == 0x6) ||
8378         (bus->sih->buscorerev == 0x4)) {
8379         /* Msg stream interrupt */
8380         if (intstatus & I_BIT1) {
8381             resched = dhdpci_bus_read_frames(bus);
8382         } else if (intstatus & I_BIT0) {
8383             /* do nothing for Now */
8384         }
8385     } else {
8386         if (intstatus & (PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1)) {
8387             bus->api.handle_mb_data(bus);
8388         }
8389 
8390         /* Do not process any rings after receiving D3_ACK */
8391         DHD_BUS_LOCK(bus->bus_lock, flags_bus);
8392         if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
8393             DHD_ERROR(("%s: D3 Ack Received. "
8394                        "Skip processing rest of ring buffers.\n",
8395                        __FUNCTION__));
8396             DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
8397             goto exit;
8398         }
8399         DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
8400 
8401         /* Validate intstatus only for INTX case */
8402         if ((bus->d2h_intr_method == PCIE_MSI) ||
8403             ((bus->d2h_intr_method == PCIE_INTX) &&
8404              (intstatus & bus->d2h_mb_mask))) {
8405 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
8406             if (pm_runtime_get(dhd_bus_to_dev(bus)) >= 0) {
8407                 resched = dhdpci_bus_read_frames(bus);
8408                 pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
8409                 pm_runtime_put_autosuspend(dhd_bus_to_dev(bus));
8410             }
8411 #else
8412             resched = dhdpci_bus_read_frames(bus);
8413 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
8414         }
8415     }
8416 
8417 exit:
8418     if (MULTIBP_ENAB(bus->sih)) {
8419         dhd_bus_pcie_pwr_req_clear(bus);
8420     }
8421     return resched;
8422 }
8423 
8424 #if defined(DHD_H2D_LOG_TIME_SYNC)
8425 static void dhdpci_bus_rte_log_time_sync_poll(dhd_bus_t *bus)
8426 {
8427     unsigned long time_elapsed;
8428 
8429     /* Poll for timeout value periodically */
8430     if ((bus->dhd->busstate == DHD_BUS_DATA) &&
8431         (bus->dhd->dhd_rte_time_sync_ms != 0) &&
8432         (bus->bus_low_power_state == DHD_BUS_NO_LOW_POWER_STATE)) {
8433         time_elapsed = OSL_SYSUPTIME_US() - bus->dhd_rte_time_sync_count;
8434         /* Comparison is in milliseconds */
8435         if ((time_elapsed / 0x3E8) >= bus->dhd->dhd_rte_time_sync_ms) {
8436             /*
8437              * It's fine if it has crossed the timeout value; no need to
8438              * adjust the elapsed time
8439              */
8440             bus->dhd_rte_time_sync_count += time_elapsed;
8441 
8442             /* Schedule deferred work. Work function will send IOVAR. */
8443             dhd_h2d_log_time_sync_deferred_wq_schedule(bus->dhd);
8444         }
8445     }
8446 }
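/*
 * Unit note for the comparison above: time_elapsed comes from
 * OSL_SYSUPTIME_US() and is in microseconds, so dividing by 0x3E8 (1000)
 * converts it to milliseconds before comparing against the
 * dhd_rte_time_sync_ms threshold.
 */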
8447 #endif /* DHD_H2D_LOG_TIME_SYNC */
8448 
8449 static bool dhdpci_bus_read_frames(dhd_bus_t *bus)
8450 {
8451     bool more = FALSE;
8452     unsigned long flags_bus;
8453 
8454     /* First check if there is a FW trap */
8455     if ((bus->api.fw_rev >= PCIE_SHARED_VERSION_6) &&
8456         (bus->dhd->dongle_trap_data = dhd_prot_process_trapbuf(bus->dhd))) {
8457 #ifdef DNGL_AXI_ERROR_LOGGING
8458         if (bus->dhd->axi_error) {
8459             DHD_ERROR(("AXI Error happened\n"));
8460             return FALSE;
8461         }
8462 #endif /* DNGL_AXI_ERROR_LOGGING */
8463         dhd_bus_handle_mb_data(bus, D2H_DEV_FWHALT);
8464         return FALSE;
8465     }
8466 
8467     /* There may be frames in both ctrl buf and data buf; check ctrl buf first
8468      */
8469     DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
8470 
8471     dhd_prot_process_ctrlbuf(bus->dhd);
8472     bus->last_process_ctrlbuf_time = OSL_LOCALTIME_NS();
8473     /* Unlock to give chance for resp to be handled */
8474     DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
8475 
8476     /* Do not process rest of ring buf once bus enters low power state
8477      * (D3_INFORM/D3_ACK) */
8478     DHD_BUS_LOCK(bus->bus_lock, flags_bus);
8479     if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
8480         DHD_ERROR(("%s: Bus is in power save state (%d). "
8481                    "Skip processing rest of ring buffers.\n",
8482                    __FUNCTION__, bus->bus_low_power_state));
8483         DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
8484         return FALSE;
8485     }
8486     DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
8487 
8488     DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
8489     /* update the flow ring cpls */
8490     dhd_update_txflowrings(bus->dhd);
8491     bus->last_process_flowring_time = OSL_LOCALTIME_NS();
8492 
8493     /* With heavy TX traffic, we could get a lot of TxStatus
8494      * so add bound
8495      */
8496 #ifdef DHD_HP2P
8497     more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound, DHD_HP2P_RING);
8498 #endif /* DHD_HP2P */
8499     more |=
8500         dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound, DHD_REGULAR_RING);
8501     bus->last_process_txcpl_time = OSL_LOCALTIME_NS();
8502 
8503     /* With heavy RX traffic, this routine potentially could spend some time
8504      * processing RX frames without RX bound
8505      */
8506 #ifdef DHD_HP2P
8507     more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound, DHD_HP2P_RING);
8508 #endif /* DHD_HP2P */
8509     more |=
8510         dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound, DHD_REGULAR_RING);
8511     bus->last_process_rxcpl_time = OSL_LOCALTIME_NS();
8512 
8513     /* Process info ring completion messages */
8514 #ifdef EWP_EDL
8515     if (!bus->dhd->dongle_edl_support)
8516 #endif // endif
8517     {
8518         more |= dhd_prot_process_msgbuf_infocpl(bus->dhd, DHD_INFORING_BOUND);
8519         bus->last_process_infocpl_time = OSL_LOCALTIME_NS();
8520     }
8521 #ifdef EWP_EDL
8522     else {
8523         more |= dhd_prot_process_msgbuf_edl(bus->dhd);
8524         bus->last_process_edl_time = OSL_LOCALTIME_NS();
8525     }
8526 #endif /* EWP_EDL */
8527 
8528 #ifdef IDLE_TX_FLOW_MGMT
8529     if (bus->enable_idle_flowring_mgmt) {
8530         /* Look for idle flow rings */
8531         dhd_bus_check_idle_scan(bus);
8532     }
8533 #endif /* IDLE_TX_FLOW_MGMT */
8534 
8535     /* don't talk to the dongle if fw is about to be reloaded */
8536     if (bus->dhd->hang_was_sent) {
8537         more = FALSE;
8538     }
8539     DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
8540 
8541 #if defined(DHD_H2D_LOG_TIME_SYNC)
8542     dhdpci_bus_rte_log_time_sync_poll(bus);
8543 #endif /* DHD_H2D_LOG_TIME_SYNC */
8544     return more;
8545 }
8546 
8547 bool dhdpcie_tcm_valid(dhd_bus_t *bus)
8548 {
8549     uint32 addr = 0;
8550     int rv;
8551     uint32 shaddr = 0;
8552     pciedev_shared_t sh;
8553 
8554     shaddr = bus->dongle_ram_base + bus->ramsize - 0x4;
8555 
8556     /* Read last word in memory to determine address of pciedev_shared structure
8557      */
8558     addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
8559     if ((addr == 0) || (addr == bus->nvram_csm) ||
8560         (addr < bus->dongle_ram_base) || (addr > shaddr)) {
8561         DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid addr\n",
8562                    __FUNCTION__, addr));
8563         return FALSE;
8564     }
8565 
8566     /* Read hndrte_shared structure */
8567     if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&sh,
8568                                    sizeof(pciedev_shared_t))) < 0) {
8569         DHD_ERROR(("Failed to read PCIe shared struct with %d\n", rv));
8570         return FALSE;
8571     }
8572 
8573     /* Compare any field in pciedev_shared_t */
8574     if (sh.console_addr != bus->pcie_sh->console_addr) {
8575         DHD_ERROR(
8576             ("Contents of pciedev_shared_t structure are not matching.\n"));
8577         return FALSE;
8578     }
8579     return TRUE;
8580 }
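/*
 * Layout assumed by the check above: the dongle publishes the address of its
 * pciedev_shared_t in the last 32-bit word of RAM (dongle_ram_base + ramsize
 * - 4). The value is rejected while it is still 0 or still equal to the
 * NVRAM length token written at download time (bus->nvram_csm), and must
 * point inside [dongle_ram_base, shaddr] to be considered valid.
 */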
8581 
8582 static void dhdpcie_update_bus_api_revisions(uint32 firmware_api_version,
8583                                              uint32 host_api_version)
8584 {
8585     snprintf(bus_api_revision, BUS_API_REV_STR_LEN,
8586              "\nBus API revisions:(FW rev%d)(DHD rev%d)", firmware_api_version,
8587              host_api_version);
8588     return;
8589 }
8590 
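/**
 * Decide whether the host can talk to this firmware: shared-structure
 * versions 5, 6 and 7 are supported explicitly; any other revision is
 * accepted only if it is not newer than the host's PCIE_SHARED_VERSION.
 */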
8591 static bool dhdpcie_check_firmware_compatible(uint32 firmware_api_version,
8592                                               uint32 host_api_version)
8593 {
8594     bool retcode = FALSE;
8595 
8596     DHD_INFO(("firmware api revision %d, host api revision %d\n",
8597               firmware_api_version, host_api_version));
8598 
8599     switch (firmware_api_version) {
8600         case PCIE_SHARED_VERSION_7:
8601         case PCIE_SHARED_VERSION_6:
8602         case PCIE_SHARED_VERSION_5:
8603             retcode = TRUE;
8604             break;
8605         default:
8606             if (firmware_api_version <= host_api_version) {
8607                 retcode = TRUE;
8608             }
8609     }
8610     return retcode;
8611 }
8612 
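/**
 * Locate and parse the pciedev_shared_t structure that firmware publishes
 * at the top of dongle RAM: poll for a valid pointer, copy the structure
 * to the host, check IPC revision compatibility, then pick up ring info
 * and capability flags (DMA'd indices, iDMA/IFRM, HP2P, EDL, etc.).
 */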
8613 static int dhdpcie_readshared(dhd_bus_t *bus)
8614 {
8615     uint32 addr = 0;
8616     int rv, dma_indx_wr_buf, dma_indx_rd_buf;
8617     uint32 shaddr = 0;
8618     pciedev_shared_t *sh = bus->pcie_sh;
8619     dhd_timeout_t tmo;
8620     bool idma_en = FALSE;
8621 
8622     if (MULTIBP_ENAB(bus->sih)) {
8623         dhd_bus_pcie_pwr_req(bus);
8624     }
8625 
8626     shaddr = bus->dongle_ram_base + bus->ramsize - 0x4;
8627     /* start a timer for 5 seconds */
8628     dhd_timeout_start(&tmo, MAX_READ_TIMEOUT);
8629 
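    /* Until firmware publishes the shared-area pointer, this word still
     * holds either 0 or the NVRAM checksum word the host wrote during
     * download (cached in bus->nvram_csm), so keep polling while one of
     * those sentinels is seen.
     */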
8630     while (((addr == 0) || (addr == bus->nvram_csm)) &&
8631            !dhd_timeout_expired(&tmo)) {
8632         /* Read last word in memory to determine address of pciedev_shared
8633          * structure */
8634         addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
8635     }
8636 
8637     if (addr == (uint32)-1) {
8638         DHD_ERROR(("%s: PCIe link might be down\n", __FUNCTION__));
8639         bus->is_linkdown = 1;
8640         return BCME_ERROR;
8641     }
8642 
8643     if ((addr == 0) || (addr == bus->nvram_csm) ||
8644         (addr < bus->dongle_ram_base) || (addr > shaddr)) {
8645         DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid\n",
8646                    __FUNCTION__, addr));
8647         DHD_ERROR(("%s: Waited %u usec, dongle is not ready\n", __FUNCTION__,
8648                    tmo.elapsed));
8649 #ifdef DEBUG_DNGL_INIT_FAIL
8650         /* Skip the mem dump if reads returned all 1s (PCIe link down) */
8651         if (addr != (uint32)-1) {
8652             if (bus->dhd->memdump_enabled) {
8653                 bus->dhd->memdump_type = DUMP_TYPE_DONGLE_INIT_FAILURE;
8654                 dhdpcie_mem_dump(bus);
8655             }
8656         }
8657 #endif /* DEBUG_DNGL_INIT_FAIL */
8658         return BCME_ERROR;
8659     } else {
8660         bus->shared_addr = (ulong)addr;
8661         DHD_ERROR(("%s: PCIe shared addr (0x%08x) read took %u usec "
8662                    "before dongle is ready\n",
8663                    __FUNCTION__, addr, tmo.elapsed));
8664     }
8665 
8666     /* Read hndrte_shared structure */
8667     if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)sh,
8668                                    sizeof(pciedev_shared_t))) < 0) {
8669         DHD_ERROR(("%s: Failed to read PCIe shared struct with %d\n",
8670                    __FUNCTION__, rv));
8671         return rv;
8672     }
8673 
8674     /* Endianness */
8675     sh->flags = ltoh32(sh->flags);
8676     sh->trap_addr = ltoh32(sh->trap_addr);
8677     sh->assert_exp_addr = ltoh32(sh->assert_exp_addr);
8678     sh->assert_file_addr = ltoh32(sh->assert_file_addr);
8679     sh->assert_line = ltoh32(sh->assert_line);
8680     sh->console_addr = ltoh32(sh->console_addr);
8681     sh->msgtrace_addr = ltoh32(sh->msgtrace_addr);
8682     sh->dma_rxoffset = ltoh32(sh->dma_rxoffset);
8683     sh->rings_info_ptr = ltoh32(sh->rings_info_ptr);
8684     sh->flags2 = ltoh32(sh->flags2);
8685 
8686     /* load bus console address */
8687     bus->console_addr = sh->console_addr;
8688 
8689     /* Read the dma rx offset */
8690     bus->dma_rxoffset = bus->pcie_sh->dma_rxoffset;
8691     dhd_prot_rx_dataoffset(bus->dhd, bus->dma_rxoffset);
8692 
8693     DHD_INFO(("%s: DMA RX offset from shared Area %d\n", __FUNCTION__,
8694               bus->dma_rxoffset));
8695 
8696     bus->api.fw_rev = sh->flags & PCIE_SHARED_VERSION_MASK;
8697     if (!(dhdpcie_check_firmware_compatible(bus->api.fw_rev,
8698                                             PCIE_SHARED_VERSION))) {
8699         DHD_ERROR(("%s: pcie_shared version %d in dhd "
8700                    "is older than pciedev_shared version %d in dongle\n",
8701                    __FUNCTION__, PCIE_SHARED_VERSION, bus->api.fw_rev));
8702         return BCME_ERROR;
8703     }
8704     dhdpcie_update_bus_api_revisions(bus->api.fw_rev, PCIE_SHARED_VERSION);
8705 
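    /* Ring read/write indices are exchanged as either 2- or 4-byte values;
     * the dongle advertises 2-byte indices via PCIE_SHARED_2BYTE_INDICES,
     * which halves the size of the index arrays exchanged with the host.
     */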
8706     bus->rw_index_sz = (sh->flags & PCIE_SHARED_2BYTE_INDICES) ? sizeof(uint16)
8707                                                                : sizeof(uint32);
8708     DHD_INFO(("%s: Dongle advertizes %d size indices\n", __FUNCTION__,
8709               bus->rw_index_sz));
8710 
8711 #ifdef IDLE_TX_FLOW_MGMT
8712     if (sh->flags & PCIE_SHARED_IDLE_FLOW_RING) {
8713         DHD_ERROR(("%s: FW Supports IdleFlow ring managment!\n", __FUNCTION__));
8714         bus->enable_idle_flowring_mgmt = TRUE;
8715     }
8716 #endif /* IDLE_TX_FLOW_MGMT */
8717 
8718     if (IDMA_CAPABLE(bus)) {
8719         /* iDMA is not used on buscorerev 0x17 */
8720         if (bus->sih->buscorerev != 0x17) {
8721             idma_en = TRUE;
8722         }
8723     }
8724 
8725     /* This needs to be selected based on IPC instead of at compile time */
8726     bus->dhd->hwa_enable = TRUE;
8727 
8728     if (idma_en) {
8729         bus->dhd->idma_enable = (sh->flags & PCIE_SHARED_IDMA) ? TRUE : FALSE;
8730         bus->dhd->ifrm_enable = (sh->flags & PCIE_SHARED_IFRM) ? TRUE : FALSE;
8731     }
8732 
8733     bus->dhd->d2h_sync_mode = sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK;
8734 
8735     bus->dhd->dar_enable = (sh->flags & PCIE_SHARED_DAR) ? TRUE : FALSE;
8736 
8737     /* Does the FW support DMA'ing r/w indices */
8738     if (sh->flags & PCIE_SHARED_DMA_INDEX) {
8739         if (!bus->dhd->dma_ring_upd_overwrite) {
8740             if (!IFRM_ENAB(bus->dhd)) {
8741                 bus->dhd->dma_h2d_ring_upd_support = TRUE;
8742             }
8743             bus->dhd->dma_d2h_ring_upd_support = TRUE;
8744         }
8747 
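        /* When D2H ring updates are DMA'd into host memory, the per-item
         * D2H sync mechanism (seqnum/checksum) is unnecessary, so clear
         * d2h_sync_mode here.
         */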
8748         if (bus->dhd->dma_d2h_ring_upd_support) {
8749             bus->dhd->d2h_sync_mode = 0;
8750         }
8751 
8752         DHD_INFO(("%s: Host support DMAing indices: H2D:%d - D2H:%d. FW "
8753                   "supports it\n",
8754                   __FUNCTION__, (bus->dhd->dma_h2d_ring_upd_support ? 1 : 0),
8755                   (bus->dhd->dma_d2h_ring_upd_support ? 1 : 0)));
8756     } else if (!(sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK)) {
8757         DHD_ERROR(("%s FW has to support either dma indices or d2h sync\n",
8758                    __FUNCTION__));
8759         return BCME_UNSUPPORTED;
8760     } else {
8761         bus->dhd->dma_h2d_ring_upd_support = FALSE;
8762         bus->dhd->dma_d2h_ring_upd_support = FALSE;
8763     }
8764 
8765     /* Does the firmware support fast delete ring? */
8766     if (sh->flags2 & PCIE_SHARED2_FAST_DELETE_RING) {
8767         DHD_INFO(("%s: Firmware supports fast delete ring\n", __FUNCTION__));
8768         bus->dhd->fast_delete_ring_support = TRUE;
8769     } else {
8770         DHD_INFO(
8771             ("%s: Firmware does not support fast delete ring\n", __FUNCTION__));
8772         bus->dhd->fast_delete_ring_support = FALSE;
8773     }
8774 
8775     /* get ring_info, ring_state and mb data ptrs and store the addresses in bus
8776      * structure */
8777     {
8778         ring_info_t ring_info;
8779 
8780         /* boundary check */
8781         if (sh->rings_info_ptr > shaddr) {
8782             DHD_ERROR(
8783                 ("%s: rings_info_ptr is invalid 0x%x, skip reading ring info\n",
8784                  __FUNCTION__, sh->rings_info_ptr));
8785             return BCME_ERROR;
8786         }
8787 
8788         if ((rv = dhdpcie_bus_membytes(bus, FALSE, sh->rings_info_ptr,
8789                                        (uint8 *)&ring_info,
8790                                        sizeof(ring_info_t))) < 0) {
8791             return rv;
8792         }
8793 
8794         bus->h2d_mb_data_ptr_addr = ltoh32(sh->h2d_mb_data_ptr);
8795         bus->d2h_mb_data_ptr_addr = ltoh32(sh->d2h_mb_data_ptr);
8796 
8797         if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
8798             bus->max_tx_flowrings = ltoh16(ring_info.max_tx_flowrings);
8799             bus->max_submission_rings = ltoh16(ring_info.max_submission_queues);
8800             bus->max_completion_rings = ltoh16(ring_info.max_completion_rings);
8801             bus->max_cmn_rings =
8802                 bus->max_submission_rings - bus->max_tx_flowrings;
8803             bus->api.handle_mb_data = dhdpcie_read_handle_mb_data;
8804             bus->use_mailbox = sh->flags & PCIE_SHARED_USE_MAILBOX;
8805         } else {
8806             bus->max_tx_flowrings = ltoh16(ring_info.max_tx_flowrings);
8807             bus->max_submission_rings = bus->max_tx_flowrings;
8808             bus->max_completion_rings = BCMPCIE_D2H_COMMON_MSGRINGS;
8809             bus->max_cmn_rings = BCMPCIE_H2D_COMMON_MSGRINGS;
8810             bus->api.handle_mb_data = dhdpcie_handle_mb_data;
8811             bus->use_mailbox = TRUE;
8812         }
8813         if (bus->max_completion_rings == 0) {
8814             DHD_ERROR(("dongle completion rings are invalid %d\n",
8815                        bus->max_completion_rings));
8816             return BCME_ERROR;
8817         }
8818         if (bus->max_submission_rings == 0) {
8819             DHD_ERROR(("dongle submission rings are invalid %d\n",
8820                        bus->max_submission_rings));
8821             return BCME_ERROR;
8822         }
8823         if (bus->max_tx_flowrings == 0) {
8824             DHD_ERROR(("dongle txflow rings are invalid %d\n",
8825                        bus->max_tx_flowrings));
8826             return BCME_ERROR;
8827         }
8828 
8829         /* If both FW and host support DMA'ing indices, allocate memory and
8830          * notify FW. The max_sub_queues value is read from the
8831          * FW-initialized ring_info.
8832          */
8832         if (bus->dhd->dma_h2d_ring_upd_support || IDMA_ENAB(bus->dhd)) {
8833             dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
8834                                                      H2D_DMA_INDX_WR_BUF,
8835                                                      bus->max_submission_rings);
8836             dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
8837                                                      D2H_DMA_INDX_RD_BUF,
8838                                                      bus->max_completion_rings);
8839             if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) {
8840                 DHD_ERROR(
8841                     ("%s: Failed to allocate memory for dma'ing h2d indices. "
8842                      "Host will use w/r indices in TCM\n",
8843                      __FUNCTION__));
8844                 bus->dhd->dma_h2d_ring_upd_support = FALSE;
8845                 bus->dhd->idma_enable = FALSE;
8846             }
8847         }
8848 
8849         if (bus->dhd->dma_d2h_ring_upd_support) {
8850             dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
8851                                                      D2H_DMA_INDX_WR_BUF,
8852                                                      bus->max_completion_rings);
8853             dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
8854                                                      H2D_DMA_INDX_RD_BUF,
8855                                                      bus->max_submission_rings);
8856             if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) {
8857                 DHD_ERROR(
8858                     ("%s: Failed to allocate memory for dma'ing d2h indices. "
8859                      "Host will use w/r indices in TCM\n",
8860                      __FUNCTION__));
8861                 bus->dhd->dma_d2h_ring_upd_support = FALSE;
8862             }
8863         }
8864         if (IFRM_ENAB(bus->dhd)) {
8865             dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
8866                                                      H2D_IFRM_INDX_WR_BUF,
8867                                                      bus->max_tx_flowrings);
8868             if (dma_indx_wr_buf != BCME_OK) {
8869                 DHD_ERROR(("%s: Failed to alloc memory for Implicit DMA\n",
8870                            __FUNCTION__));
8871                 bus->dhd->ifrm_enable = FALSE;
8872             }
8873         }
8874         /* read ringmem and ringstate ptrs from shared area and store in host
8875          * variables */
8876         dhd_fillup_ring_sharedptr_info(bus, &ring_info);
8877         if (dhd_msg_level & DHD_INFO_VAL) {
8878             bcm_print_bytes("ring_info_raw", (uchar *)&ring_info,
8879                             sizeof(ring_info_t));
8880         }
8881         DHD_INFO(("%s: ring_info\n", __FUNCTION__));
8882 
8883         DHD_ERROR(("%s: max H2D queues %d\n", __FUNCTION__,
8884                    ltoh16(ring_info.max_tx_flowrings)));
8885 
8886         DHD_INFO(("mail box address\n"));
8887         DHD_INFO(("%s: h2d_mb_data_ptr_addr 0x%04x\n", __FUNCTION__,
8888                   bus->h2d_mb_data_ptr_addr));
8889         DHD_INFO(("%s: d2h_mb_data_ptr_addr 0x%04x\n", __FUNCTION__,
8890                   bus->d2h_mb_data_ptr_addr));
8891     }
8892 
8893     DHD_INFO(
8894         ("%s: d2h_sync_mode 0x%08x\n", __FUNCTION__, bus->dhd->d2h_sync_mode));
8895 
8896     bus->dhd->d2h_hostrdy_supported =
8897         ((sh->flags & PCIE_SHARED_HOSTRDY_SUPPORT) ==
8898          PCIE_SHARED_HOSTRDY_SUPPORT);
8899 
8900     bus->dhd->ext_trap_data_supported =
8901         ((sh->flags2 & PCIE_SHARED2_EXTENDED_TRAP_DATA) ==
8902          PCIE_SHARED2_EXTENDED_TRAP_DATA);
8903 
8904     if ((sh->flags2 & PCIE_SHARED2_TXSTATUS_METADATA) == 0) {
8905         bus->dhd->pcie_txs_metadata_enable = 0;
8906     }
8907 
8908     bus->dhd->hscb_enable =
8909         (sh->flags2 & PCIE_SHARED2_HSCB) == PCIE_SHARED2_HSCB;
8910 
8911 #ifdef EWP_EDL
8912     if (host_edl_support) {
8913         bus->dhd->dongle_edl_support =
8914             (sh->flags2 & PCIE_SHARED2_EDL_RING) ? TRUE : FALSE;
8915         DHD_ERROR(("Dongle EDL support: %u\n", bus->dhd->dongle_edl_support));
8916     }
8917 #endif /* EWP_EDL */
8918 
8919     bus->dhd->debug_buf_dest_support =
8920         (sh->flags2 & PCIE_SHARED2_DEBUG_BUF_DEST) ? TRUE : FALSE;
8921     DHD_ERROR(("FW supports debug buf dest ? %s \n",
8922                bus->dhd->debug_buf_dest_support ? "Y" : "N"));
8923 
8924 #ifdef DHD_HP2P
8925     if (bus->dhd->hp2p_enable) {
8926         bus->dhd->hp2p_ts_capable = (sh->flags2 & PCIE_SHARED2_PKT_TIMESTAMP) ==
8927                                     PCIE_SHARED2_PKT_TIMESTAMP;
8928         bus->dhd->hp2p_capable =
8929             (sh->flags2 & PCIE_SHARED2_HP2P) == PCIE_SHARED2_HP2P;
8930         bus->dhd->hp2p_capable &= bus->dhd->hp2p_ts_capable;
8931 
8932         DHD_ERROR(
8933             ("FW supports HP2P ? %s \n", bus->dhd->hp2p_capable ? "Y" : "N"));
8934 
8935         if (bus->dhd->hp2p_capable) {
8936             bus->dhd->pkt_thresh = HP2P_PKT_THRESH;
8937             bus->dhd->pkt_expiry = HP2P_PKT_EXPIRY;
8938             bus->dhd->time_thresh = HP2P_TIME_THRESH;
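            /* One hrtimer per HP2P flow, with dhd_hp2p_write as callback;
             * presumably used to flush coalesced completions once
             * time_thresh expires. Timers are armed lazily, hence
             * hrtimer_init = FALSE here.
             */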
8939             for (addr = 0; addr < MAX_HP2P_FLOWS; addr++) {
8940                 hp2p_info_t *hp2p_info = &bus->dhd->hp2p_info[addr];
8941 
8942                 hp2p_info->hrtimer_init = FALSE;
8943                 tasklet_hrtimer_init(&hp2p_info->timer, dhd_hp2p_write,
8944                                      CLOCK_MONOTONIC, HRTIMER_MODE_REL);
8945             }
8946         }
8947     }
8948 #endif /* DHD_HP2P */
8949 
8950 #ifdef DHD_DB0TS
8951     bus->dhd->db0ts_capable =
8952         (sh->flags & PCIE_SHARED_TIMESTAMP_DB0) == PCIE_SHARED_TIMESTAMP_DB0;
8953 #endif /* DHD_DB0TS */
8954 
8955     if (MULTIBP_ENAB(bus->sih)) {
8956         dhd_bus_pcie_pwr_req_clear(bus);
8957 
8958         /*
8959          * WAR to fix ARM cold boot;
8960          * De-assert WL domain in DAR
8961          */
8962         if (bus->sih->buscorerev >= 0x44) {
8963             dhd_bus_pcie_pwr_req_wl_domain(
8964                 bus, DAR_PCIE_PWR_CTRL((bus->sih)->buscorerev), FALSE);
8965         }
8966     }
8967     return BCME_OK;
8968 } /* dhdpcie_readshared */
8969 
8970 /** Read ring mem and ring state ptr info from shared memory area in device
8971  * memory */
8972 static void dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus,
8973                                            ring_info_t *ring_info)
8974 {
8975     uint16 i = 0;
8976     uint16 j = 0;
8977     uint32 tcm_memloc;
8978     uint32 d2h_w_idx_ptr, d2h_r_idx_ptr, h2d_w_idx_ptr, h2d_r_idx_ptr;
8979     uint16 max_tx_flowrings = bus->max_tx_flowrings;
8980 
8981     /* Ring mem ptr info */
8982     /* Allocated in the order
8983         H2D_MSGRING_CONTROL_SUBMIT              0
8984         H2D_MSGRING_RXPOST_SUBMIT               1
8985         D2H_MSGRING_CONTROL_COMPLETE            2
8986         D2H_MSGRING_TX_COMPLETE                 3
8987         D2H_MSGRING_RX_COMPLETE                 4
8988     */
8989 
8990     {
8991         /* ringmemptr holds start of the mem block address space */
8992         tcm_memloc = ltoh32(ring_info->ringmem_ptr);
8993 
8994         /* Find out the ringmem ptr for each common ring */
8995         for (i = 0; i <= BCMPCIE_COMMON_MSGRING_MAX_ID; i++) {
8996             bus->ring_sh[i].ring_mem_addr = tcm_memloc;
8997             /* Update mem block */
8998             tcm_memloc = tcm_memloc + sizeof(ring_mem_t);
8999             DHD_INFO(("%s: ring id %d ring mem addr 0x%04x \n", __FUNCTION__, i,
9000                       bus->ring_sh[i].ring_mem_addr));
9001         }
9002     }
9003 
9004     /* Ring state mem ptr info */
9005     {
9006         d2h_w_idx_ptr = ltoh32(ring_info->d2h_w_idx_ptr);
9007         d2h_r_idx_ptr = ltoh32(ring_info->d2h_r_idx_ptr);
9008         h2d_w_idx_ptr = ltoh32(ring_info->h2d_w_idx_ptr);
9009         h2d_r_idx_ptr = ltoh32(ring_info->h2d_r_idx_ptr);
9010 
9011         /* Store h2d common ring write/read pointers */
9012         for (i = 0; i < BCMPCIE_H2D_COMMON_MSGRINGS; i++) {
9013             bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
9014             bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
9015 
9016             /* update mem block */
9017             h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz;
9018             h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz;
9019 
9020             DHD_INFO(("%s: h2d w/r : idx %d write %x read %x \n", __FUNCTION__,
9021                       i, bus->ring_sh[i].ring_state_w,
9022                       bus->ring_sh[i].ring_state_r));
9023         }
9024 
9025         /* Store d2h common ring write/read pointers */
9026         for (j = 0; j < BCMPCIE_D2H_COMMON_MSGRINGS; j++, i++) {
9027             bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
9028             bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;
9029 
9030             /* update mem block */
9031             d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz;
9032             d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz;
9033 
9034             DHD_INFO(("%s: d2h w/r : idx %d write %x read %x \n", __FUNCTION__,
9035                       i, bus->ring_sh[i].ring_state_w,
9036                       bus->ring_sh[i].ring_state_r));
9037         }
9038 
9039         /* Store txflow ring write/read pointers */
9040         if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) {
9041             max_tx_flowrings -= BCMPCIE_H2D_COMMON_MSGRINGS;
9042         } else {
9043             /* Account for Debug info h2d ring located after the last tx flow
9044              * ring */
9045             max_tx_flowrings = max_tx_flowrings + 1;
9046         }
9047         for (j = 0; j < max_tx_flowrings; i++, j++) {
9048             bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
9049             bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
9050 
9051             /* update mem block */
9052             h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz;
9053             h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz;
9054 
9055             DHD_INFO(("%s: FLOW Rings h2d w/r : idx %d write %x read %x \n",
9056                       __FUNCTION__, i, bus->ring_sh[i].ring_state_w,
9057                       bus->ring_sh[i].ring_state_r));
9058         }
9059         /* store wr/rd pointers for the debug info completion ring */
9060         bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
9061         bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;
9062         d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz;
9063         d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz;
9064         DHD_INFO(("d2h w/r : idx %d write %x read %x \n", i,
9065                   bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
9066     }
9067 } /* dhd_fillup_ring_sharedptr_info */
9068 
9069 /**
9070  * Initialize bus module: prepare for communication with the dongle. Called
9071  * after downloading firmware into the dongle.
9072  */
9073 int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
9074 {
9075     dhd_bus_t *bus = dhdp->bus;
9076     int ret = 0;
9077 
9078     DHD_TRACE(("%s: Enter\n", __FUNCTION__));
9079 
9080     ASSERT(bus->dhd);
9081     if (!bus->dhd) {
9082         return 0;
9083     }
9084 
9085     if (PCIE_RELOAD_WAR_ENAB(bus->sih->buscorerev)) {
9086         dhd_bus_pcie_pwr_req_clear_reload_war(bus);
9087     }
9088 
9089     if (MULTIBP_ENAB(bus->sih)) {
9090         dhd_bus_pcie_pwr_req(bus);
9091     }
9092 
9093     /* Configure AER registers to log the TLP header */
9094     dhd_bus_aer_config(bus);
9095 
9096     /* Make sure we're talking to the core. */
9097     bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
9098     ASSERT(bus->reg != NULL);
9099 
9100     /* before opening up bus for data transfer, check if shared area is intact */
9101     ret = dhdpcie_readshared(bus);
9102     if (ret < 0) {
9103         DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__));
9104         goto exit;
9105     }
9106 
9107     /* Make sure we're talking to the core. */
9108     bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
9109     ASSERT(bus->reg != NULL);
9110 
9111     dhd_init_bus_lock(bus);
9112 
9113     dhd_init_backplane_access_lock(bus);
9114 
9115     /* Set bus state according to enable result */
9116     dhdp->busstate = DHD_BUS_DATA;
9117     bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
9118     dhdp->dhd_bus_busy_state = 0;
9119 
9120     /* D11 status via PCIe completion header */
9121     if ((ret = dhdpcie_init_d11status(bus)) < 0) {
9122         goto exit;
9123     }
9124 
9125     if (!dhd_download_fw_on_driverload) {
9126         dhd_dpc_enable(bus->dhd);
9127     }
9128     /* Enable the interrupt after device is up */
9129     dhdpcie_bus_intr_enable(bus);
9130 
9131     bus->intr_enabled = TRUE;
9132 
9133     bus->idletime = 0;
9134 
9135     /* Make use_d0_inform TRUE for revisions below 6 for backward compatibility */
9136     if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) {
9137         bus->use_d0_inform = TRUE;
9138     } else {
9139         bus->use_d0_inform = FALSE;
9140     }
9141 
9142 exit:
9143     if (MULTIBP_ENAB(bus->sih)) {
9144         dhd_bus_pcie_pwr_req_clear(bus);
9145     }
9146     return ret;
9147 }
9148 
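/**
 * Clear the last word of dongle RAM (where firmware later publishes the
 * pciedev_shared_t pointer) so that a stale value from a previous run is
 * not mistaken for a freshly written one.
 */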
9149 static void dhdpcie_init_shared_addr(dhd_bus_t *bus)
9150 {
9151     uint32 addr = 0;
9152     uint32 val = 0;
9153 
9154     addr = bus->dongle_ram_base + bus->ramsize - 0x4;
9155     dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val));
9156 }
9157 
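/** Return 0 if (vendor, device) identifies a supported dongle, -ENODEV otherwise */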
9158 int dhdpcie_chipmatch(uint16 vendor, uint16 device)
9159 {
9160     if (vendor != PCI_VENDOR_ID_BROADCOM) {
9161         DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__,
9162                    vendor, device));
9163         return (-ENODEV);
9164     }
9165 
9166     if ((device == BCM4350_D11AC_ID) || (device == BCM4350_D11AC2G_ID) ||
9167         (device == BCM4350_D11AC5G_ID) || (device == BCM4350_CHIP_ID) ||
9168         (device == BCM43569_CHIP_ID)) {
9169         return 0;
9170     }
9171 
9172     if ((device == BCM4354_D11AC_ID) || (device == BCM4354_D11AC2G_ID) ||
9173         (device == BCM4354_D11AC5G_ID) || (device == BCM4354_CHIP_ID)) {
9174         return 0;
9175     }
9176 
9177     if ((device == BCM4356_D11AC_ID) || (device == BCM4356_D11AC2G_ID) ||
9178         (device == BCM4356_D11AC5G_ID) || (device == BCM4356_CHIP_ID)) {
9179         return 0;
9180     }
9181 
9182     if ((device == BCM4371_D11AC_ID) || (device == BCM4371_D11AC2G_ID) ||
9183         (device == BCM4371_D11AC5G_ID) || (device == BCM4371_CHIP_ID)) {
9184         return 0;
9185     }
9186 
9187     if ((device == BCM4345_D11AC_ID) || (device == BCM4345_D11AC2G_ID) ||
9188         (device == BCM4345_D11AC5G_ID) || BCM4345_CHIP(device)) {
9189         return 0;
9190     }
9191 
9192     if ((device == BCM43452_D11AC_ID) || (device == BCM43452_D11AC2G_ID) ||
9193         (device == BCM43452_D11AC5G_ID)) {
9194         return 0;
9195     }
9196 
9197     if ((device == BCM4335_D11AC_ID) || (device == BCM4335_D11AC2G_ID) ||
9198         (device == BCM4335_D11AC5G_ID) || (device == BCM4335_CHIP_ID)) {
9199         return 0;
9200     }
9201 
9202     if ((device == BCM43602_D11AC_ID) || (device == BCM43602_D11AC2G_ID) ||
9203         (device == BCM43602_D11AC5G_ID) || (device == BCM43602_CHIP_ID)) {
9204         return 0;
9205     }
9206 
9207     if ((device == BCM43569_D11AC_ID) || (device == BCM43569_D11AC2G_ID) ||
9208         (device == BCM43569_D11AC5G_ID) || (device == BCM43569_CHIP_ID)) {
9209         return 0;
9210     }
9211 
9212     if ((device == BCM4358_D11AC_ID) || (device == BCM4358_D11AC2G_ID) ||
9213         (device == BCM4358_D11AC5G_ID)) {
9214         return 0;
9215     }
9216 
9217     if ((device == BCM4349_D11AC_ID) || (device == BCM4349_D11AC2G_ID) ||
9218         (device == BCM4349_D11AC5G_ID) || (device == BCM4349_CHIP_ID)) {
9219         return 0;
9220     }
9221 
9222     if ((device == BCM4355_D11AC_ID) || (device == BCM4355_D11AC2G_ID) ||
9223         (device == BCM4355_D11AC5G_ID) || (device == BCM4355_CHIP_ID)) {
9224         return 0;
9225     }
9226 
9227     if ((device == BCM4359_D11AC_ID) || (device == BCM4359_D11AC2G_ID) ||
9228         (device == BCM4359_D11AC5G_ID)) {
9229         return 0;
9230     }
9231 
9232     if ((device == BCM43596_D11AC_ID) || (device == BCM43596_D11AC2G_ID) ||
9233         (device == BCM43596_D11AC5G_ID)) {
9234         return 0;
9235     }
9236 
9237     if ((device == BCM43597_D11AC_ID) || (device == BCM43597_D11AC2G_ID) ||
9238         (device == BCM43597_D11AC5G_ID)) {
9239         return 0;
9240     }
9241 
9242     if ((device == BCM4364_D11AC_ID) || (device == BCM4364_D11AC2G_ID) ||
9243         (device == BCM4364_D11AC5G_ID) || (device == BCM4364_CHIP_ID)) {
9244         return 0;
9245     }
9246 
9247     if ((device == BCM4361_D11AC_ID) || (device == BCM4361_D11AC2G_ID) ||
9248         (device == BCM4361_D11AC5G_ID) || (device == BCM4361_CHIP_ID)) {
9249         return 0;
9250     }
9251     if ((device == BCM4362_D11AX_ID) || (device == BCM4362_D11AX2G_ID) ||
9252         (device == BCM4362_D11AX5G_ID) || (device == BCM4362_CHIP_ID)) {
9253         return 0;
9254     }
9255     if ((device == BCM43751_D11AX_ID) || (device == BCM43751_D11AX2G_ID) ||
9256         (device == BCM43751_D11AX5G_ID) || (device == BCM43751_CHIP_ID)) {
9257         return 0;
9258     }
9259     if ((device == BCM43752_D11AX_ID) || (device == BCM43752_D11AX2G_ID) ||
9260         (device == BCM43752_D11AX5G_ID) || (device == BCM43752_CHIP_ID)) {
9261         return 0;
9262     }
9263     if ((device == BCM4347_D11AC_ID) || (device == BCM4347_D11AC2G_ID) ||
9264         (device == BCM4347_D11AC5G_ID) || (device == BCM4347_CHIP_ID)) {
9265         return 0;
9266     }
9267 
9268     if ((device == BCM4365_D11AC_ID) || (device == BCM4365_D11AC2G_ID) ||
9269         (device == BCM4365_D11AC5G_ID) || (device == BCM4365_CHIP_ID)) {
9270         return 0;
9271     }
9272 
9273     if ((device == BCM4366_D11AC_ID) || (device == BCM4366_D11AC2G_ID) ||
9274         (device == BCM4366_D11AC5G_ID) || (device == BCM4366_CHIP_ID) ||
9275         (device == BCM43664_CHIP_ID) || (device == BCM43666_CHIP_ID)) {
9276         return 0;
9277     }
9278 
9279     if ((device == BCM4369_D11AX_ID) || (device == BCM4369_D11AX2G_ID) ||
9280         (device == BCM4369_D11AX5G_ID) || (device == BCM4369_CHIP_ID)) {
9281         return 0;
9282     }
9283 
9284     if ((device == BCM4375_D11AX_ID) || (device == BCM4375_D11AX2G_ID) ||
9285         (device == BCM4375_D11AX5G_ID) || (device == BCM4375_CHIP_ID)) {
9286         return 0;
9287     }
9288 
9289     DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__, vendor,
9290                device));
9291     return (-ENODEV);
9292 } /* dhdpcie_chipmatch */
9293 
9294 /**
9295  * Name:  dhdpcie_cc_nvmshadow
9296  *
9297  * Description:
9298  * A shadow of OTP/SPROM exists in ChipCommon Region
9299  * between 0x800 and 0xBFF (Backplane Addr. 0x1800_0800 and 0x1800_0BFF).
9300  * Strapping option (SPROM vs. OTP), presence of OTP/SPROM and its size
9301  * can also be read from ChipCommon Registers.
9302  */
9303 static int dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b)
9304 {
9305     uint16 dump_offset = 0;
9306     uint32 dump_size = 0, otp_size = 0, sprom_size = 0;
9307 
9308     /* Table for 65nm OTP Size (in bits) */
9309     int otp_size_65nm[8] = {0, 2048, 4096, 8192, 4096, 6144, 512, 1024};
9310 
9311     volatile uint16 *nvm_shadow;
9312 
9313     uint cur_coreid;
9314     uint chipc_corerev;
9315     chipcregs_t *chipcregs;
9316 
9317     /* Save the current core */
9318     cur_coreid = si_coreid(bus->sih);
9319     /* Switch to ChipC */
9320     chipcregs = (chipcregs_t *)si_setcore(bus->sih, CC_CORE_ID, 0);
9321     ASSERT(chipcregs != NULL);
9322 
9323     chipc_corerev = si_corerev(bus->sih);
9324     /* Check ChipcommonCore Rev */
9325     if (chipc_corerev < 0x2C) {
9326         DHD_ERROR(
9327             ("%s: ChipcommonCore Rev %d < 44\n", __FUNCTION__, chipc_corerev));
9328         return BCME_UNSUPPORTED;
9329     }
9330     /* Check ChipID */
9331     if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) &&
9332         !BCM4345_CHIP((uint16)bus->sih->chip) &&
9333         ((uint16)bus->sih->chip != BCM4355_CHIP_ID) &&
9334         ((uint16)bus->sih->chip != BCM4364_CHIP_ID)) {
9335         DHD_ERROR(("%s: cc_nvmdump cmd. supported for Olympic chips"
9336                    "4350/4345/4355/4364 only\n",
9337                    __FUNCTION__));
9338         return BCME_UNSUPPORTED;
9339     }
9340 
9341     /* Check if SRC_PRESENT in SpromCtrl(0x190 in ChipCommon Regs) is set */
9342     if (chipcregs->sromcontrol & SRC_PRESENT) {
9343         /* SPROM Size: 1Kbits (0x0), 4Kbits (0x1), 16Kbits(0x2) */
9344         sprom_size = (1 << (0x2 * ((chipcregs->sromcontrol & SRC_SIZE_MASK) >>
9345                                  SRC_SIZE_SHIFT))) *
9346                      0x400;
9347         bcm_bprintf(b, "\nSPROM Present (Size %d bits)\n", sprom_size);
9348     }
9349 
9350     if (chipcregs->sromcontrol & SRC_OTPPRESENT) {
9351         bcm_bprintf(b, "\nOTP Present");
9352 
9353         if (((chipcregs->otplayout & OTPL_WRAP_TYPE_MASK) >>
9354              OTPL_WRAP_TYPE_SHIFT) == OTPL_WRAP_TYPE_40NM) {
9355             /* 40nm OTP: Size = (OtpSize + 1) * 1024 bits */
9356             /* Chipcommon rev51 is a variation on rev45 and does not support
9357              * the latest OTP configuration.
9358              */
9359             if (chipc_corerev != 0x33 && chipc_corerev >= 0x31) {
9360                 otp_size = (((chipcregs->otplayout & OTPL_ROW_SIZE_MASK) >>
9361                              OTPL_ROW_SIZE_SHIFT) +
9362                             1) *
9363                            0x400;
9364                 bcm_bprintf(b, "(Size %d bits)\n", otp_size);
9365             } else {
9366                 otp_size = (((chipcregs->capabilities & CC_CAP_OTPSIZE) >>
9367                              CC_CAP_OTPSIZE_SHIFT) +
9368                             1) *
9369                            0x400;
9370                 bcm_bprintf(b, "(Size %d bits)\n", otp_size);
9371             }
9372         } else {
9373             /* This part is untested since newer chips have 40nm OTP */
9374             /* Chipcommon rev51 is a variation on rev45 and does not support
9375              * the latest OTP configuration.
9376              */
9377             if (chipc_corerev != 0x33 && chipc_corerev >= 0x31) {
9378                 otp_size =
9379                     otp_size_65nm[(chipcregs->otplayout & OTPL_ROW_SIZE_MASK) >>
9380                                   OTPL_ROW_SIZE_SHIFT];
9381                 bcm_bprintf(b, "(Size %d bits)\n", otp_size);
9382             } else {
9383                 otp_size =
9384                     otp_size_65nm[(chipcregs->capabilities & CC_CAP_OTPSIZE) >>
9385                                   CC_CAP_OTPSIZE_SHIFT];
9386                 bcm_bprintf(b, "(Size %d bits)\n", otp_size);
9387                 DHD_INFO(
9388                     ("%s: 65nm/130nm OTP Size not tested. \n", __FUNCTION__));
9389             }
9390         }
9391     }
9392 
9393     /* Chipcommon rev51 is a variation on rev45 and does not support
9394      * the latest OTP configuration.
9395      */
9396     if (chipc_corerev != 0x33 && chipc_corerev >= 0x31) {
9397         if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
9398             ((chipcregs->otplayout & OTPL_ROW_SIZE_MASK) == 0)) {
9399             DHD_ERROR(("%s: SPROM and OTP could not be found "
9400                        "sromcontrol = %x, otplayout = %x \n",
9401                        __FUNCTION__, chipcregs->sromcontrol,
9402                        chipcregs->otplayout));
9403             return BCME_NOTFOUND;
9404         }
9405     } else {
9406         if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
9407             ((chipcregs->capabilities & CC_CAP_OTPSIZE) == 0)) {
9408             DHD_ERROR(("%s: SPROM and OTP could not be found "
9409                        "sromcontrol = %x, capablities = %x \n",
9410                        __FUNCTION__, chipcregs->sromcontrol,
9411                        chipcregs->capabilities));
9412             return BCME_NOTFOUND;
9413         }
9414     }
9415 
9416     /* Check the strapping option in SpromCtrl: Set = OTP otherwise SPROM */
9417     if ((!(chipcregs->sromcontrol & SRC_PRESENT) ||
9418          (chipcregs->sromcontrol & SRC_OTPSEL)) &&
9419         (chipcregs->sromcontrol & SRC_OTPPRESENT)) {
9420         bcm_bprintf(b, "OTP Strap selected.\n"
9421                        "\nOTP Shadow in ChipCommon:\n");
9422 
9423         dump_size = otp_size / 16; /* 16bit words */
9424     } else if (((chipcregs->sromcontrol & SRC_OTPSEL) == 0) &&
9425                (chipcregs->sromcontrol & SRC_PRESENT)) {
9426         bcm_bprintf(b, "SPROM Strap selected\n"
9427                        "\nSPROM Shadow in ChipCommon:\n");
9428         /* If SPROM > 8K only 8Kbits is mapped to ChipCommon (0x800 - 0xBFF) */
9429         /* dump_size in 16bit words */
9430         dump_size = sprom_size > (0x8 * 0x400) ? (0x8 * 0x400) / 0x10 : sprom_size / 0x10;
9431     } else {
9432         DHD_ERROR(
9433             ("%s: NVM Shadow does not exist in ChipCommon\n", __FUNCTION__));
9434         return BCME_NOTFOUND;
9435     }
9436     if (bus->regs == NULL) {
9437         DHD_ERROR(("ChipCommon Regs. not initialized\n"));
9438         return BCME_NOTREADY;
9439     } else {
9440         bcm_bprintf(b, "\n OffSet:");
9441 
9442         /* Chipcommon rev51 is a variation on rev45 and does not support
9443          * the latest OTP configuration.
9444          */
9445         if (chipc_corerev != 0x33 && chipc_corerev >= 0x31) {
9446             /* Chip common can read only 8kbits,
9447              * for ccrev >= 49 otp size is around 12 kbits so use GCI core
9448              */
9449             nvm_shadow =
9450                 (volatile uint16 *)si_setcore(bus->sih, GCI_CORE_ID, 0);
9451         } else {
9452             /* Point to the SPROM/OTP shadow in ChipCommon */
9453             nvm_shadow = chipcregs->sromotp;
9454         }
9455 
9456         if (nvm_shadow == NULL) {
9457             DHD_ERROR(("%s: NVM Shadow is not intialized\n", __FUNCTION__));
9458             return BCME_NOTFOUND;
9459         }
9460 
9461         /*
9462          * Read 16 bits / iteration.
9463          * dump_size & dump_offset in 16-bit words
9464          */
9465         while (dump_offset < dump_size) {
9466             if (dump_offset % 0x2 == 0) {
9467                 /* Print the offset in the shadow space in Bytes */
9468                 bcm_bprintf(b, "\n 0x%04x", dump_offset * 0x2);
9469             }
9470 
9471             bcm_bprintf(b, "\t0x%04x", *(nvm_shadow + dump_offset));
9472             dump_offset += 0x1;
9473         }
9474     }
9475 
9476     /* Switch back to the original core */
9477     si_setcore(bus->sih, cur_coreid, 0);
9478 
9479     return BCME_OK;
9480 } /* dhdpcie_cc_nvmshadow */
9481 
9482 /** Flow rings are dynamically created and destroyed */
9483 void dhd_bus_clean_flow_ring(dhd_bus_t *bus, void *node)
9484 {
9485     void *pkt;
9486     flow_queue_t *queue;
9487     flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)node;
9488     unsigned long flags;
9489 
9490     queue = &flow_ring_node->queue;
9491 
9492 #ifdef DHDTCPACK_SUPPRESS
9493     /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
9494      * when there is a newly coming packet from network stack.
9495      */
9496     dhd_tcpack_info_tbl_clean(bus->dhd);
9497 #endif /* DHDTCPACK_SUPPRESS */
9498 
9499 #ifdef DHD_HP2P
9500     if (flow_ring_node->hp2p_ring) {
9501         bus->dhd->hp2p_ring_active = FALSE;
9502         flow_ring_node->hp2p_ring = FALSE;
9503     }
9504 #endif /* DHD_HP2P */
9505 
9506     /* clean up BUS level info */
9507     DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
9508 
9509     /* Flush all pending packets in the queue, if any */
9510     while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
9511         PKTFREE(bus->dhd->osh, pkt, TRUE);
9512     }
9513     ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
9514 
9515     /* Reinitialise flowring's queue */
9516     dhd_flow_queue_reinit(bus->dhd, queue,
9517                           bus->dhd->conf->flow_ring_queue_threshold);
9518     flow_ring_node->status = FLOW_RING_STATUS_CLOSED;
9519     flow_ring_node->active = FALSE;
9520 
9521     DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
9522 
9523     /* Hold flowring_list_lock to ensure no race condition while accessing the
9524      * List */
9525     DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
9526     dll_delete(&flow_ring_node->list);
9527     DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
9528 
9529     /* Release the flowring object back into the pool */
9530     dhd_prot_flowrings_pool_release(bus->dhd, flow_ring_node->flowid,
9531                                     flow_ring_node->prot_info);
9532 
9533     /* Free the flowid back to the flowid allocator */
9534     dhd_flowid_free(bus->dhd, flow_ring_node->flow_info.ifindex,
9535                     flow_ring_node->flowid);
9536 }
9537 
9538 /**
9539  * Allocate a Flow ring buffer,
9540  * Init Ring buffer, send Msg to device about flow ring creation
9541  */
9542 int dhd_bus_flow_ring_create_request(dhd_bus_t *bus, void *arg)
9543 {
9544     flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg;
9545 
9546     DHD_INFO(("%s :Flow create\n", __FUNCTION__));
9547 
9548     /* Send Msg to device about flow ring creation */
9549     if (dhd_prot_flow_ring_create(bus->dhd, flow_ring_node) != BCME_OK) {
9550         return BCME_NOMEM;
9551     }
9552 
9553     return BCME_OK;
9554 }
9555 
9556 /** Handle response from dongle on a 'flow ring create' request */
9557 void dhd_bus_flow_ring_create_response(dhd_bus_t *bus, uint16 flowid,
9558                                        int32 status)
9559 {
9560     flow_ring_node_t *flow_ring_node;
9561     unsigned long flags;
9562 
9563     DHD_INFO(("%s :Flow Response %d \n", __FUNCTION__, flowid));
9564 
9565     /* Boundary check of the flowid */
9566     if (flowid >= bus->dhd->num_flow_rings) {
9567         DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__, flowid,
9568                    bus->dhd->num_flow_rings));
9569         return;
9570     }
9571 
9572     flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
9573     if (!flow_ring_node) {
9574         DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
9575         return;
9576     }
9577 
9578     ASSERT(flow_ring_node->flowid == flowid);
9579     if (flow_ring_node->flowid != flowid) {
9580         DHD_ERROR(("%s: flowid %d is different from the flowid "
9581                    "of the flow_ring_node %d\n",
9582                    __FUNCTION__, flowid, flow_ring_node->flowid));
9583         return;
9584     }
9585 
9586     if (status != BCME_OK) {
9587         DHD_ERROR(("%s Flow create Response failure error status = %d \n",
9588                    __FUNCTION__, status));
9589         /* Call Flow clean up */
9590         dhd_bus_clean_flow_ring(bus, flow_ring_node);
9591         return;
9592     }
9593 
9594     DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
9595     flow_ring_node->status = FLOW_RING_STATUS_OPEN;
9596     DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
9597 
9598     /* Now add the flow ring node to the active list.
9599      * This code used to live in dhd_flowid_lookup, where the node was
9600      * added to the active list right after allocation, while its
9601      * contents were still being filled in by dhd_prot_flow_ring_create.
9602      * If a D2H interrupt arrived in that window, the bottom half would
9603      * call dhd_update_txflowrings, which walks the active list and
9604      * operates on each node; with dhd_prot_flow_ring_create not yet
9605      * finished, the contents of flow_ring_node could still be NULL,
9606      * leading to crashes. Hence the node is added to the active list
9607      * only after it is truly created, i.e. after the create response
9608      * message is received from the dongle.
9609      */
9614     DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
9615     dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
9616     DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
9617 
9618     dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */
9619 
9620     return;
9621 }
9622 
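/**
 * Tear down a flow ring on request: mark it DELETE_PENDING, free any packets
 * still queued on the host side, and send the delete request to the dongle;
 * final cleanup happens in dhd_bus_flow_ring_delete_response.
 */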
9623 int dhd_bus_flow_ring_delete_request(dhd_bus_t *bus, void *arg)
9624 {
9625     void *pkt;
9626     flow_queue_t *queue;
9627     flow_ring_node_t *flow_ring_node;
9628     unsigned long flags;
9629 
9630     DHD_INFO(("%s :Flow Delete\n", __FUNCTION__));
9631 
9632     flow_ring_node = (flow_ring_node_t *)arg;
9633 
9634 #ifdef DHDTCPACK_SUPPRESS
9635     /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
9636      * when there is a newly coming packet from network stack.
9637      */
9638     dhd_tcpack_info_tbl_clean(bus->dhd);
9639 #endif /* DHDTCPACK_SUPPRESS */
9640     DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
9641     if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) {
9642         DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
9643         DHD_ERROR(("%s :Delete Pending flowid %u\n", __FUNCTION__,
9644                    flow_ring_node->flowid));
9645         return BCME_ERROR;
9646     }
9647     flow_ring_node->status = FLOW_RING_STATUS_DELETE_PENDING;
9648 
9649     queue = &flow_ring_node->queue; /* queue associated with flow ring */
9650 
9651     /* Flush all pending packets in the queue, if any */
9652     while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
9653         PKTFREE(bus->dhd->osh, pkt, TRUE);
9654     }
9655     ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
9656 
9657     DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
9658 
9659     /* Send Msg to device about flow ring deletion */
9660     dhd_prot_flow_ring_delete(bus->dhd, flow_ring_node);
9661 
9662     return BCME_OK;
9663 }
9664 
9665 void dhd_bus_flow_ring_delete_response(dhd_bus_t *bus, uint16 flowid,
9666                                        uint32 status)
9667 {
9668     flow_ring_node_t *flow_ring_node;
9669 
9670     DHD_INFO(("%s :Flow Delete Response %d \n", __FUNCTION__, flowid));
9671 
9672     /* Boundary check of the flowid */
9673     if (flowid >= bus->dhd->num_flow_rings) {
9674         DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__, flowid,
9675                    bus->dhd->num_flow_rings));
9676         return;
9677     }
9678 
9679     flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
9680     if (!flow_ring_node) {
9681         DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
9682         return;
9683     }
9684 
9685     ASSERT(flow_ring_node->flowid == flowid);
9686     if (flow_ring_node->flowid != flowid) {
9687         DHD_ERROR(("%s: flowid %d is different from the flowid "
9688                    "of the flow_ring_node %d\n",
9689                    __FUNCTION__, flowid, flow_ring_node->flowid));
9690         return;
9691     }
9692 
9693     if (status != BCME_OK) {
9694         DHD_ERROR(("%s Flow Delete Response failure error status = %d \n",
9695                    __FUNCTION__, status));
9696         return;
9697     }
9698     /* Call Flow clean up */
9699     dhd_bus_clean_flow_ring(bus, flow_ring_node);
9700 
9701     return;
9702 }
9703 
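/**
 * Flush a flow ring without deleting it: free host-side queued packets and
 * send a flush request; the ring is set back to OPEN when the response
 * arrives in dhd_bus_flow_ring_flush_response.
 */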
9704 int dhd_bus_flow_ring_flush_request(dhd_bus_t *bus, void *arg)
9705 {
9706     void *pkt;
9707     flow_queue_t *queue;
9708     flow_ring_node_t *flow_ring_node;
9709     unsigned long flags;
9710 
9711     DHD_INFO(("%s :Flow Flush\n", __FUNCTION__));
9712 
9713     flow_ring_node = (flow_ring_node_t *)arg;
9714 
9715     DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
9716     queue = &flow_ring_node->queue; /* queue associated with flow ring */
9717     /* Flow ring status will be set back to FLOW_RING_STATUS_OPEN
9718      * once flow ring flush response is received for this flowring node.
9719      */
9720     flow_ring_node->status = FLOW_RING_STATUS_FLUSH_PENDING;
9721 
9722 #ifdef DHDTCPACK_SUPPRESS
9723     /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
9724      * when there is a newly coming packet from network stack.
9725      */
9726     dhd_tcpack_info_tbl_clean(bus->dhd);
9727 #endif /* DHDTCPACK_SUPPRESS */
9728 
9729     /* Flush all pending packets in the queue, if any */
9730     while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
9731         PKTFREE(bus->dhd->osh, pkt, TRUE);
9732     }
9733     ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
9734 
9735     DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
9736 
9737     /* Send Msg to device about flow ring flush */
9738     dhd_prot_flow_ring_flush(bus->dhd, flow_ring_node);
9739 
9740     return BCME_OK;
9741 }
9742 
9743 void dhd_bus_flow_ring_flush_response(dhd_bus_t *bus, uint16 flowid,
9744                                       uint32 status)
9745 {
9746     flow_ring_node_t *flow_ring_node;
9747 
9748     if (status != BCME_OK) {
9749         DHD_ERROR(("%s Flow flush Response failure error status = %d \n",
9750                    __FUNCTION__, status));
9751         return;
9752     }
9753 
9754     /* Boundary check of the flowid */
9755     if (flowid >= bus->dhd->num_flow_rings) {
9756         DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__, flowid,
9757                    bus->dhd->num_flow_rings));
9758         return;
9759     }
9760 
9761     flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
9762     if (!flow_ring_node) {
9763         DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
9764         return;
9765     }
9766 
9767     ASSERT(flow_ring_node->flowid == flowid);
9768     if (flow_ring_node->flowid != flowid) {
9769         DHD_ERROR(("%s: flowid %d is different from the flowid "
9770                    "of the flow_ring_node %d\n",
9771                    __FUNCTION__, flowid, flow_ring_node->flowid));
9772         return;
9773     }
9774 
9775     flow_ring_node->status = FLOW_RING_STATUS_OPEN;
9776     return;
9777 }
9778 
9779 uint32 dhd_bus_max_h2d_queues(struct dhd_bus *bus)
9780 {
9781     return bus->max_submission_rings;
9782 }
9783 
9784 /* To be symmetric with SDIO */
9785 void dhd_bus_pktq_flush(dhd_pub_t *dhdp)
9786 {
9787     return;
9788 }
9789 
9790 void dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val)
9791 {
9792     dhdp->bus->is_linkdown = val;
9793 }
9794 
9795 int dhd_bus_get_linkdown(dhd_pub_t *dhdp)
9796 {
9797     return dhdp->bus->is_linkdown;
9798 }
9799 
9800 int dhd_bus_get_cto(dhd_pub_t *dhdp)
9801 {
9802     return dhdp->bus->cto_triggered;
9803 }
9804 
9805 #ifdef IDLE_TX_FLOW_MGMT
9806 /* resume request */
9807 int dhd_bus_flow_ring_resume_request(dhd_bus_t *bus, void *arg)
9808 {
9809     flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg;
9810 
9811     DHD_ERROR(("%s :Flow Resume Request flow id %u\n", __FUNCTION__,
9812                flow_ring_node->flowid));
9813 
9814     flow_ring_node->status = FLOW_RING_STATUS_RESUME_PENDING;
9815 
9816     /* Send Msg to device about flow ring resume */
9817     dhd_prot_flow_ring_resume(bus->dhd, flow_ring_node);
9818 
9819     return BCME_OK;
9820 }
9821 
9822 /* add the node back to active flowring */
9823 void dhd_bus_flow_ring_resume_response(dhd_bus_t *bus, uint16 flowid,
9824                                        int32 status)
9825 {
9826     flow_ring_node_t *flow_ring_node;
9827 
9828     DHD_TRACE(("%s :flowid %d \n", __FUNCTION__, flowid));
9829 
9830     flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
9831     ASSERT(flow_ring_node->flowid == flowid);
9832 
9833     if (status != BCME_OK) {
9834         DHD_ERROR(("%s Error Status = %d \n", __FUNCTION__, status));
9835         return;
9836     }
9837     DHD_TRACE(("%s :Number of pkts queued in FlowId:%d is -> %u!!\n",
9838                __FUNCTION__, flow_ring_node->flowid,
9839                flow_ring_node->queue.len));
9840 
9841     flow_ring_node->status = FLOW_RING_STATUS_OPEN;
9842 
9843     dhd_bus_schedule_queue(bus, flowid, FALSE);
9844     return;
9845 }
9846 
9847 /* scan the flow rings in active list for idle time out */
9848 void dhd_bus_check_idle_scan(dhd_bus_t *bus)
9849 {
9850     uint64 time_stamp; /* in millisec */
9851     uint64 diff;
9852 
9853     time_stamp = OSL_SYSUPTIME();
9854     diff = time_stamp - bus->active_list_last_process_ts;
9855 
9856     if (diff > IDLE_FLOW_LIST_TIMEOUT) {
9857         dhd_bus_idle_scan(bus);
9858         bus->active_list_last_process_ts = OSL_SYSUPTIME();
9859     }
9860 
9861     return;
9862 }
9863 
9864 /* scan the nodes in active list till it finds a non idle node */
9865 void dhd_bus_idle_scan(dhd_bus_t *bus)
9866 {
9867     dll_t *item, *prev;
9868     flow_ring_node_t *flow_ring_node;
9869     uint64 time_stamp, diff;
9870     unsigned long flags;
9871     uint16 ringid[MAX_SUSPEND_REQ];
9872     uint16 count = 0;
9873 
9874     time_stamp = OSL_SYSUPTIME();
9875     DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
9876 
9877     for (item = dll_tail_p(&bus->flowring_active_list);
9878          !dll_end(&bus->flowring_active_list, item); item = prev) {
9879         prev = dll_prev_p(item);
9880 
9881         flow_ring_node = dhd_constlist_to_flowring(item);
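        /* Skip the last submission ring; it is reserved (most likely the
         * debug/info ring that sits after the tx flow rings) and must not
         * be idle-suspended.
         */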
9882         if (flow_ring_node->flowid == (bus->max_submission_rings - 1)) {
9883             continue;
9884         }
9885         if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) {
9886             /* Takes care of deleting zombie rings */
9887             /* delete from the active list */
9888             DHD_INFO(("deleting flow id %u from active list\n",
9889                       flow_ring_node->flowid));
9890             __dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
9891             continue;
9892         }
9893 
9894         diff = time_stamp - flow_ring_node->last_active_ts;
9895 
9896         if ((diff > IDLE_FLOW_RING_TIMEOUT) && !(flow_ring_node->queue.len)) {
9897             DHD_ERROR(("\nSuspending flowid %d\n", flow_ring_node->flowid));
9898             /* delete from the active list */
9899             __dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
9900             flow_ring_node->status = FLOW_RING_STATUS_SUSPENDED;
9901             ringid[count] = flow_ring_node->flowid;
9902             count++;
9903             if (count == MAX_SUSPEND_REQ) {
9904                 /* create a batch message now!! */
9905                 dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid,
9906                                                          count);
9907                 count = 0;
9908             }
9909         } else {
9910             /* No more scanning, break from here! */
9911             break;
9912         }
9913     }
9914 
9915     if (count) {
9916         dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count);
9917     }
9918 
9919     DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
9920 
9921     return;
9922 }
9923 
9924 void dhd_flow_ring_move_to_active_list_head(struct dhd_bus *bus,
9925                                             flow_ring_node_t *flow_ring_node)
9926 {
9927     unsigned long flags;
9928     dll_t *list;
9929 
9930     DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
9931     /* check if the node is already at head, otherwise delete it and prepend */
9932     list = dll_head_p(&bus->flowring_active_list);
9933     if (&flow_ring_node->list != list) {
9934         dll_delete(&flow_ring_node->list);
9935         dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
9936     }
9937 
9938     /* update flow ring timestamp */
9939     flow_ring_node->last_active_ts = OSL_SYSUPTIME();
9940 
9941     DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
9942 
9943     return;
9944 }
9945 
9946 void dhd_flow_ring_add_to_active_list(struct dhd_bus *bus,
9947                                       flow_ring_node_t *flow_ring_node)
9948 {
9949     unsigned long flags;
9950 
9951     DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
9952 
9953     dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
9954     /* update flow ring timestamp */
9955     flow_ring_node->last_active_ts = OSL_SYSUPTIME();
9956 
9957     DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
9958 
9959     return;
9960 }
__dhd_flow_ring_delete_from_active_list(struct dhd_bus * bus,flow_ring_node_t * flow_ring_node)9961 void __dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus,
9962                                              flow_ring_node_t *flow_ring_node)
9963 {
9964     dll_delete(&flow_ring_node->list);
9965 }
9966 
dhd_flow_ring_delete_from_active_list(struct dhd_bus * bus,flow_ring_node_t * flow_ring_node)9967 void dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus,
9968                                            flow_ring_node_t *flow_ring_node)
9969 {
9970     unsigned long flags;
9971 
9972     DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
9973 
9974     __dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
9975 
9976     DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
9977 
9978     return;
9979 }
9980 #endif /* IDLE_TX_FLOW_MGMT */

int dhdpcie_bus_clock_start(struct dhd_bus *bus)
{
    return dhdpcie_start_host_pcieclock(bus);
}

int dhdpcie_bus_clock_stop(struct dhd_bus *bus)
{
    return dhdpcie_stop_host_pcieclock(bus);
}

int dhdpcie_bus_disable_device(struct dhd_bus *bus)
{
    return dhdpcie_disable_device(bus);
}

int dhdpcie_bus_enable_device(struct dhd_bus *bus)
{
    return dhdpcie_enable_device(bus);
}

int dhdpcie_bus_alloc_resource(struct dhd_bus *bus)
{
    return dhdpcie_alloc_resource(bus);
}

void dhdpcie_bus_free_resource(struct dhd_bus *bus)
{
    dhdpcie_free_resource(bus);
}

int dhd_bus_request_irq(struct dhd_bus *bus)
{
    return dhdpcie_bus_request_irq(bus);
}

bool dhdpcie_bus_dongle_attach(struct dhd_bus *bus)
{
    return dhdpcie_dongle_attach(bus);
}

int dhd_bus_release_dongle(struct dhd_bus *bus)
{
    bool dongle_isolation;
    osl_t *osh;

    DHD_TRACE(("%s: Enter\n", __FUNCTION__));

    if (bus) {
        osh = bus->osh;
        ASSERT(osh);

        if (bus->dhd) {
#if defined(DEBUGGER) || defined(DHD_DSCOPE)
            debugger_close();
#endif /* DEBUGGER || DHD_DSCOPE */

            dongle_isolation = bus->dhd->dongle_isolation;
            dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
        }
    }

    return 0;
}

int dhdpcie_cto_cfg_init(struct dhd_bus *bus, bool enable)
{
    uint32 val;
    if (enable) {
        dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 0x4,
                                    PCI_CTO_INT_MASK | PCI_SBIM_MASK_SERR);
        val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 0x4);
        dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 0x4,
                                    val | SPROM_BACKPLANE_EN);
    } else {
        dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 0x4, 0);
        val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 0x4);
        dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 0x4,
                                    val & ~SPROM_BACKPLANE_EN);
    }
    return 0;
}

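/*
 * CTO (completion timeout) prevention: program the PCIe core so that a
 * backplane completion timeout is reported and recovered from instead of
 * hanging the link. Supported only on bus core rev >= 19; on rev 19 it is
 * additionally unsupported when the link runs at Gen1 speed.
 */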
int dhdpcie_cto_init(struct dhd_bus *bus, bool enable)
{
    if (bus->sih->buscorerev < 19) {
        DHD_INFO(("%s: Unsupported CTO, buscorerev=%d\n", __FUNCTION__,
                  bus->sih->buscorerev));
        return BCME_UNSUPPORTED;
    }

    if (bus->sih->buscorerev == 19) {
        uint32 pcie_lnkst;
        si_corereg(bus->sih, bus->sih->buscoreidx,
                   OFFSETOF(sbpcieregs_t, configaddr), ~0, PCI_LINK_STATUS);

        pcie_lnkst = si_corereg(bus->sih, bus->sih->buscoreidx,
                                OFFSETOF(sbpcieregs_t, configdata), 0, 0);
        if (((pcie_lnkst >> PCI_LINK_SPEED_SHIFT) & PCI_LINK_SPEED_MASK) ==
            PCIE_LNK_SPEED_GEN1) {
            return BCME_UNSUPPORTED;
        }
    }

    bus->cto_enable = enable;

    dhdpcie_cto_cfg_init(bus, enable);

    if (enable) {
        if (bus->cto_threshold == 0) {
            bus->cto_threshold = PCIE_CTO_TO_THRESH_DEFAULT;
        }
        si_corereg(bus->sih, bus->sih->buscoreidx,
                   OFFSETOF(sbpcieregs_t, ctoctrl), ~0,
                   ((bus->cto_threshold << PCIE_CTO_TO_THRESHOLD_SHIFT) &
                    PCIE_CTO_TO_THRESHHOLD_MASK) |
                       ((PCIE_CTO_CLKCHKCNT_VAL << PCIE_CTO_CLKCHKCNT_SHIFT) &
                        PCIE_CTO_CLKCHKCNT_MASK) |
                       PCIE_CTO_ENAB_MASK);
    } else {
        si_corereg(bus->sih, bus->sih->buscoreidx,
                   OFFSETOF(sbpcieregs_t, ctoctrl), ~0, 0);
    }

    DHD_ERROR(("%s: set CTO prevention and recovery enable/disable %d\n",
               __FUNCTION__, bus->cto_enable));

    return 0;
}

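/*
 * CTO error recovery, step by step: mask further CTO interrupts, dump the
 * DAR registers (which stay accessible after a timeout), assert the
 * backplane reset via SPROM_CFG_TO_SB_RST, poll the DAR error log until the
 * CTO error bit clears (bounded by CTO_TO_CLEAR_WAIT_MAX_CNT), then ack the
 * interrupt status and release the backplane reset.
 */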
static int dhdpcie_cto_error_recovery(struct dhd_bus *bus)
{
    uint32 pci_intmask, err_status;
    uint8 i = 0;
    uint32 val;

    pci_intmask = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_MASK, 0x4);
    dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 0x4,
                                pci_intmask & ~PCI_CTO_INT_MASK);

    DHD_OS_WAKE_LOCK(bus->dhd);

    DHD_ERROR(("--- CTO Triggered --- %d\n", bus->pwr_req_ref));

    /*
     * DAR is still accessible
     */
    dhd_bus_dump_dar_registers(bus);

    /* reset backplane */
    val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 0x4);
    dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 0x4,
                                val | SPROM_CFG_TO_SB_RST);

    /* clear timeout error */
    while (1) {
        err_status = si_corereg(bus->sih, bus->sih->buscoreidx,
                                DAR_ERRLOG(bus->sih->buscorerev), 0, 0);
        if (err_status & PCIE_CTO_ERR_MASK) {
            si_corereg(bus->sih, bus->sih->buscoreidx,
                       DAR_ERRLOG(bus->sih->buscorerev), ~0, PCIE_CTO_ERR_MASK);
        } else {
            break;
        }
        OSL_DELAY(CTO_TO_CLEAR_WAIT_MS * 1000); /* ms -> us */
        i++;
        if (i > CTO_TO_CLEAR_WAIT_MAX_CNT) {
            DHD_ERROR(("cto recovery fail\n"));

            DHD_OS_WAKE_UNLOCK(bus->dhd);
            return BCME_ERROR;
        }
    }

    /* clear interrupt status */
    dhdpcie_bus_cfg_write_dword(bus, PCI_INT_STATUS, 0x4, PCI_CTO_INT_MASK);

    /* Halt ARM & remove reset */
    /* TODO: an ARM halt could be added here if needed */

    /* clear SPROM_CFG_TO_SB_RST */
    val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 0x4);

    DHD_ERROR(("cto recovery reset 0x%x:SPROM_CFG_TO_SB_RST(0x%x) 0x%x\n",
               PCI_SPROM_CONTROL, SPROM_CFG_TO_SB_RST, val));
    dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 0x4,
                                val & ~SPROM_CFG_TO_SB_RST);

    val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 0x4);
    DHD_ERROR(("cto recovery success 0x%x:SPROM_CFG_TO_SB_RST(0x%x) 0x%x\n",
               PCI_SPROM_CONTROL, SPROM_CFG_TO_SB_RST, val));

    DHD_OS_WAKE_UNLOCK(bus->dhd);

    return BCME_OK;
}

void dhdpcie_ssreset_dis_enum_rst(struct dhd_bus *bus)
{
    uint32 val;

    val = dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 0x4);
    dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 0x4,
                                val | (0x1 << PCIE_SSRESET_DIS_ENUM_RST_BIT));
}

#if defined(DBG_PKT_MON)
static int dhdpcie_init_d11status(struct dhd_bus *bus)
{
    uint32 addr;
    uint32 flags2;
    int ret = 0;

    if (bus->pcie_sh->flags2 & PCIE_SHARED2_D2H_D11_TX_STATUS) {
        flags2 = bus->pcie_sh->flags2;
        addr = bus->shared_addr + OFFSETOF(pciedev_shared_t, flags2);
        flags2 |= PCIE_SHARED2_H2D_D11_TX_STATUS;
        ret = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&flags2,
                                   sizeof(flags2));
        if (ret < 0) {
            DHD_ERROR(("%s: update flag bit (H2D_D11_TX_STATUS) failed\n",
                       __FUNCTION__));
            return ret;
        }
        bus->pcie_sh->flags2 = flags2;
        bus->dhd->d11_tx_status = TRUE;
    }
    return ret;
}

#else
static int dhdpcie_init_d11status(struct dhd_bus *bus)
{
    return 0;
}
#endif /* DBG_PKT_MON */

#ifdef BCMPCIE_OOB_HOST_WAKE
int dhd_bus_oob_intr_register(dhd_pub_t *dhdp)
{
    return dhdpcie_oob_intr_register(dhdp->bus);
}

void dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp)
{
    dhdpcie_oob_intr_unregister(dhdp->bus);
}

void dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable)
{
    dhdpcie_oob_intr_set(dhdp->bus, enable);
}
#endif /* BCMPCIE_OOB_HOST_WAKE */

bool dhdpcie_bus_get_pcie_hostready_supported(dhd_bus_t *bus)
{
    return bus->dhd->d2h_hostrdy_supported;
}

void dhd_pcie_dump_core_regs(dhd_pub_t *pub, uint32 index, uint32 first_addr,
                             uint32 last_addr)
{
    dhd_bus_t *bus = pub->bus;
    uint32 coreoffset = index << 12; /* each core has a 4KB register window */
    uint32 core_addr = SI_ENUM_BASE(bus->sih) + coreoffset;
    uint32 value;

    while (first_addr <= last_addr) {
        core_addr = SI_ENUM_BASE(bus->sih) + coreoffset + first_addr;
        if (serialized_backplane_access(bus, core_addr, 0x4, &value, TRUE) !=
            BCME_OK) {
            DHD_ERROR(("Invalid size/addr combination\n"));
        }
        DHD_ERROR(("[0x%08x]: 0x%08x\n", core_addr, value));
        first_addr = first_addr + 0x4;
    }
}

bool dhdpcie_bus_get_pcie_hwa_supported(dhd_bus_t *bus)
{
    if (!bus->dhd) {
        return FALSE;
    } else if (bus->hwa_enab_bmap) {
        return bus->dhd->hwa_enable;
    } else {
        return FALSE;
    }
}

bool dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t *bus)
{
    if (!bus->dhd) {
        return FALSE;
    } else if (bus->idma_enabled) {
        return bus->dhd->idma_enable;
    } else {
        return FALSE;
    }
}

bool dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t *bus)
{
    if (!bus->dhd) {
        return FALSE;
    } else if (bus->ifrm_enabled) {
        return bus->dhd->ifrm_enable;
    } else {
        return FALSE;
    }
}

bool dhdpcie_bus_get_pcie_dar_supported(dhd_bus_t *bus)
{
    if (!bus->dhd) {
        return FALSE;
    } else if (bus->dar_enabled) {
        return bus->dhd->dar_enable;
    } else {
        return FALSE;
    }
}

void dhdpcie_bus_enab_pcie_dw(dhd_bus_t *bus, uint8 dw_option)
{
    DHD_ERROR(("ENABLING DW:%d\n", dw_option));
    bus->dw_option = dw_option;
}

void dhd_bus_dump_trap_info(dhd_bus_t *bus, struct bcmstrbuf *strbuf)
{
    trap_t *tr = &bus->dhd->last_trap_info;
    bcm_bprintf(strbuf,
                "\nTRAP type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
                " lp 0x%x, rpc 0x%x"
                "\nTrap offset 0x%x, r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, "
                "r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x, r8 0x%x, r9 0x%x, "
                "r10 0x%x, r11 0x%x, r12 0x%x\n\n",
                ltoh32(tr->type), ltoh32(tr->epc), ltoh32(tr->cpsr),
                ltoh32(tr->spsr), ltoh32(tr->r13), ltoh32(tr->r14),
                ltoh32(tr->pc), ltoh32(bus->pcie_sh->trap_addr), ltoh32(tr->r0),
                ltoh32(tr->r1), ltoh32(tr->r2), ltoh32(tr->r3), ltoh32(tr->r4),
                ltoh32(tr->r5), ltoh32(tr->r6), ltoh32(tr->r7), ltoh32(tr->r8),
                ltoh32(tr->r9), ltoh32(tr->r10), ltoh32(tr->r11),
                ltoh32(tr->r12));
}

int dhd_bus_readwrite_bp_addr(dhd_pub_t *dhdp, uint addr, uint size, uint *data,
                              bool read)
{
    int bcmerror = 0;
    struct dhd_bus *bus = dhdp->bus;

    if (serialized_backplane_access(bus, addr, size, data, read) != BCME_OK) {
        DHD_ERROR(("Invalid size/addr combination\n"));
        bcmerror = BCME_ERROR;
    }

    return bcmerror;
}

int dhd_get_idletime(dhd_pub_t *dhd)
{
    return dhd->bus->idletime;
}

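/*
 * Helper for the SSSR/dump code below: one serialized backplane register
 * access (read when 'read' is TRUE, write otherwise) with a short settling
 * delay. Every access is logged, so the dump output doubles as an access
 * trace.
 */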
static INLINE void dhd_sbreg_op(dhd_pub_t *dhd, uint addr, uint *val, bool read)
{
    OSL_DELAY(1);
    if (serialized_backplane_access(dhd->bus, addr, sizeof(uint), val, read) !=
        BCME_OK) {
        DHD_ERROR(("sbreg: Invalid uint addr: 0x%x\n", addr));
    } else {
        DHD_ERROR(("sbreg: addr:0x%x val:0x%x read:%d\n", addr, *val, read));
    }
    return;
}

#ifdef DHD_SSSR_DUMP
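/*
 * SSSR FIFO dump: the save/restore FIFO is accessed through an address/data
 * register pair. Writing 0 to addr_reg rewinds the FIFO read pointer; each
 * subsequent read of data_reg then yields the next 32-bit word, which is why
 * the loop below reads the same data_reg fifo_size/4 times.
 */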
static int dhdpcie_get_sssr_fifo_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size,
                                      uint addr_reg, uint data_reg)
{
    uint addr;
    uint val = 0;
    int i;

    DHD_ERROR(("%s\n", __FUNCTION__));

    if (!buf) {
        DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__));
        return BCME_ERROR;
    }

    if (!fifo_size) {
        DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__));
        return BCME_ERROR;
    }

    /* Set the base address offset to 0 */
    addr = addr_reg;
    val = 0;
    dhd_sbreg_op(dhd, addr, &val, FALSE);

    addr = data_reg;
    /* Read 4 bytes at once and loop for fifo_size / 4 */
    for (i = 0; i < fifo_size / 4; i++) {
        if (serialized_backplane_access(dhd->bus, addr, sizeof(uint), &val,
                                        TRUE) != BCME_OK) {
            DHD_ERROR(
                ("%s: error in serialized_backplane_access\n", __FUNCTION__));
            return BCME_ERROR;
        }
        buf[i] = val;
        OSL_DELAY(1);
    }
    return BCME_OK;
}

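/*
 * Digital-core save/restore dump. Three layouts are handled below:
 * (1) newer chips advertise a dig_mem_info region that is read directly via
 *     the membytes interface,
 * (2) chips with a VASIP core expose the SR memory behind the VASIP wrapper
 *     (whose clock may first have to be enabled via ioctrl),
 * (3) with no addr_reg at all, chipcommon rev 64/65 parts are read through
 *     the sr_memrw_addr/sr_memrw_data register pair.
 */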
static int dhdpcie_get_sssr_dig_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size,
                                     uint addr_reg)
{
    uint addr;
    uint val = 0;
    int i;
    si_t *sih = dhd->bus->sih;

    DHD_ERROR(("%s\n", __FUNCTION__));

    if (!buf) {
        DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__));
        return BCME_ERROR;
    }

    if (!fifo_size) {
        DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__));
        return BCME_ERROR;
    }

    if (addr_reg) {
        if ((!dhd->sssr_reg_info.vasip_regs.vasip_sr_size) &&
            dhd->sssr_reg_info.length >
                OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) {
            int err = dhdpcie_bus_membytes(dhd->bus, FALSE, addr_reg,
                                           (uint8 *)buf, fifo_size);
            if (err != BCME_OK) {
                DHD_ERROR(("%s: Error reading dig dump from dongle!\n",
                           __FUNCTION__));
            }
        } else {
            /* Check if the vasip clock is disabled; if so, enable it */
            addr = dhd->sssr_reg_info.vasip_regs.wrapper_regs.ioctrl;
            dhd_sbreg_op(dhd, addr, &val, TRUE);
            if (!val) {
                val = 1;
                dhd_sbreg_op(dhd, addr, &val, FALSE);
            }

            addr = addr_reg;
            /* Read 4 bytes at once and loop for fifo_size / 4 */
            for (i = 0; i < fifo_size / 4; i++, addr += 4) {
                if (serialized_backplane_access(dhd->bus, addr, sizeof(uint),
                                                &val, TRUE) != BCME_OK) {
                    DHD_ERROR(
                        ("%s: Invalid uint addr: 0x%x\n", __FUNCTION__, addr));
                    return BCME_ERROR;
                }
                buf[i] = val;
                OSL_DELAY(1);
            }
        }
    } else {
        uint cur_coreid;
        uint chipc_corerev;
        chipcregs_t *chipcregs;

        /* Save the current core */
        cur_coreid = si_coreid(sih);

        /* Switch to ChipC */
        chipcregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);

        chipc_corerev = si_corerev(sih);
        if ((chipc_corerev == 64) || (chipc_corerev == 65)) {
            W_REG(si_osh(sih), &chipcregs->sr_memrw_addr, 0);

            /* Read 4 bytes at once and loop for fifo_size / 4 */
            for (i = 0; i < fifo_size / 4; i++) {
                buf[i] = R_REG(si_osh(sih), &chipcregs->sr_memrw_data);
                OSL_DELAY(1);
            }
        }
        /* Switch back to the original core */
        si_setcore(sih, cur_coreid, 0);
    }

    return BCME_OK;
}

#if defined(EWP_ETD_PRSRV_LOGS)
void dhdpcie_get_etd_preserve_logs(dhd_pub_t *dhd, uint8 *ext_trap_data,
                                   void *event_decode_data)
{
    hnd_ext_trap_hdr_t *hdr = NULL;
    bcm_tlv_t *tlv;
    eventlog_trapdata_info_t *etd_evtlog = NULL;
    eventlog_trap_buf_info_t *evtlog_buf_arr = NULL;
    uint arr_size = 0;
    int i = 0;
    int err = 0;
    uint32 seqnum = 0;

    if (!ext_trap_data || !event_decode_data || !dhd) {
        return;
    }

    if (!dhd->concise_dbg_buf) {
        return;
    }

    /* The first word is the original trap_data; skip it */
    ext_trap_data += sizeof(uint32);

    hdr = (hnd_ext_trap_hdr_t *)ext_trap_data;
    tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_LOG_DATA);
    if (tlv) {
        uint32 baseaddr = 0;
        uint32 endaddr = dhd->bus->dongle_ram_base + dhd->bus->ramsize - 4;

        etd_evtlog = (eventlog_trapdata_info_t *)tlv->data;
        DHD_ERROR(("%s: etd_evtlog tlv found, num_elements=%x; "
                   "seq_num=%x; log_arr_addr=%x\n",
                   __FUNCTION__, (etd_evtlog->num_elements),
                   ntoh32(etd_evtlog->seq_num), (etd_evtlog->log_arr_addr)));
        arr_size = (uint32)sizeof(*evtlog_buf_arr) * (etd_evtlog->num_elements);
        if (!arr_size) {
            DHD_ERROR(("%s: num event logs is zero!\n", __FUNCTION__));
            return;
        }
        evtlog_buf_arr = MALLOCZ(dhd->osh, arr_size);
        if (!evtlog_buf_arr) {
            DHD_ERROR(("%s: out of memory!\n", __FUNCTION__));
            return;
        }

        /* boundary check */
        baseaddr = etd_evtlog->log_arr_addr;
        if ((baseaddr < dhd->bus->dongle_ram_base) ||
            ((baseaddr + arr_size) > endaddr)) {
            DHD_ERROR(("%s: Error reading invalid address\n", __FUNCTION__));
            goto err;
        }

        /* read the eventlog_trap_buf_info_t array from dongle memory */
        err = dhdpcie_bus_membytes(dhd->bus, FALSE,
                                   (ulong)(etd_evtlog->log_arr_addr),
                                   (uint8 *)evtlog_buf_arr, arr_size);
        if (err != BCME_OK) {
            DHD_ERROR(("%s: Error reading event log array from dongle!\n",
                       __FUNCTION__));
            goto err;
        }
        /* ntoh is required only for seq_num: event logs from the info ring
         * are sent by the dongle in network byte order, and for ETD the
         * dongle follows the same convention
         */
        seqnum = ntoh32(etd_evtlog->seq_num);
        memset(dhd->concise_dbg_buf, 0, CONCISE_DUMP_BUFLEN);
        for (i = 0; i < (etd_evtlog->num_elements); ++i) {
            /* boundary check */
            baseaddr = evtlog_buf_arr[i].buf_addr;
            if ((baseaddr < dhd->bus->dongle_ram_base) ||
                ((baseaddr + evtlog_buf_arr[i].len) > endaddr)) {
                DHD_ERROR(
                    ("%s: Error reading invalid address\n", __FUNCTION__));
                goto err;
            }
            /* read each individual event log buf from dongle memory */
            err = dhdpcie_bus_membytes(
                dhd->bus, FALSE, ((ulong)evtlog_buf_arr[i].buf_addr),
                dhd->concise_dbg_buf, (evtlog_buf_arr[i].len));
            if (err != BCME_OK) {
                DHD_ERROR(("%s: Error reading event log buffer from dongle!\n",
                           __FUNCTION__));
                goto err;
            }
            dhd_dbg_msgtrace_log_parser(
                dhd, dhd->concise_dbg_buf, event_decode_data,
                (evtlog_buf_arr[i].len), FALSE, hton32(seqnum));
            ++seqnum;
        }
    err:
        MFREE(dhd->osh, evtlog_buf_arr, arr_size);
    } else {
        DHD_ERROR(("%s: Error getting trap log data in ETD!\n", __FUNCTION__));
    }
}
#endif /* EWP_ETD_PRSRV_LOGS */

static uint32 dhdpcie_resume_chipcommon_powerctrl(dhd_pub_t *dhd,
                                                  uint32 reg_val)
{
    uint addr;
    uint val = 0;

    DHD_ERROR(("%s\n", __FUNCTION__));

    /* conditionally restore bits [11:8] of PowerCtrl */
    addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
    dhd_sbreg_op(dhd, addr, &val, TRUE);
    if (!(val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask)) {
        addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
        dhd_sbreg_op(dhd, addr, &reg_val, FALSE);
    }
    return BCME_OK;
}

static uint32 dhdpcie_suspend_chipcommon_powerctrl(dhd_pub_t *dhd)
{
    uint addr;
    uint val = 0, reg_val = 0;

    DHD_ERROR(("%s\n", __FUNCTION__));

    /* conditionally clear bits [11:8] of PowerCtrl, returning the original
     * value so it can be restored on resume
     */
    addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
    dhd_sbreg_op(dhd, addr, &reg_val, TRUE);
    if (reg_val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask) {
        addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
        val = 0;
        dhd_sbreg_op(dhd, addr, &val, FALSE);
    }
    return reg_val;
}

static int dhdpcie_clear_intmask_and_timer(dhd_pub_t *dhd)
{
    uint addr;
    uint val;

    DHD_ERROR(("%s\n", __FUNCTION__));

    /* clear chipcommon intmask */
    addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.intmask;
    val = 0x0;
    dhd_sbreg_op(dhd, addr, &val, FALSE);

    /* clear PMUIntMask0 */
    addr = dhd->sssr_reg_info.pmu_regs.base_regs.pmuintmask0;
    val = 0x0;
    dhd_sbreg_op(dhd, addr, &val, FALSE);

    /* clear PMUIntMask1 */
    addr = dhd->sssr_reg_info.pmu_regs.base_regs.pmuintmask1;
    val = 0x0;
    dhd_sbreg_op(dhd, addr, &val, FALSE);

    /* clear res_req_timer */
    addr = dhd->sssr_reg_info.pmu_regs.base_regs.resreqtimer;
    val = 0x0;
    dhd_sbreg_op(dhd, addr, &val, FALSE);

    /* clear macresreqtimer */
    addr = dhd->sssr_reg_info.pmu_regs.base_regs.macresreqtimer;
    val = 0x0;
    dhd_sbreg_op(dhd, addr, &val, FALSE);

    /* clear macresreqtimer1 */
    addr = dhd->sssr_reg_info.pmu_regs.base_regs.macresreqtimer1;
    val = 0x0;
    dhd_sbreg_op(dhd, addr, &val, FALSE);

    /* clear VasipClkEn */
    if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
        addr = dhd->sssr_reg_info.vasip_regs.wrapper_regs.ioctrl;
        val = 0x0;
        dhd_sbreg_op(dhd, addr, &val, FALSE);
    }

    return BCME_OK;
}

static void dhdpcie_update_d11_status_from_trapdata(dhd_pub_t *dhd)
{
#define TRAP_DATA_MAIN_CORE_BIT_MASK (1 << 1)
#define TRAP_DATA_AUX_CORE_BIT_MASK (1 << 4)
    uint trap_data_mask[MAX_NUM_D11CORES] = {TRAP_DATA_MAIN_CORE_BIT_MASK,
                                             TRAP_DATA_AUX_CORE_BIT_MASK};
    int i;
    /* Applies only to the 4375 chip */
    if (dhd_bus_chip_id(dhd) == BCM4375_CHIP_ID) {
        for (i = 0; i < MAX_NUM_D11CORES; i++) {
            if (dhd->sssr_d11_outofreset[i] &&
                (dhd->dongle_trap_data & trap_data_mask[i])) {
                dhd->sssr_d11_outofreset[i] = TRUE;
            } else {
                dhd->sssr_d11_outofreset[i] = FALSE;
            }
            DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d after AND with "
                       "trap_data:0x%x-0x%x\n",
                       __FUNCTION__, i, dhd->sssr_d11_outofreset[i],
                       dhd->dongle_trap_data, trap_data_mask[i]));
        }
    }
}

static int dhdpcie_d11_check_outofreset(dhd_pub_t *dhd)
{
    int i;
    uint addr;
    uint val = 0;

    DHD_ERROR(("%s\n", __FUNCTION__));

    for (i = 0; i < MAX_NUM_D11CORES; i++) {
        /* Check if bit 0 of resetctrl is cleared */
        addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl;
        if (!addr) {
            DHD_ERROR(("%s: skipping core[%d] as its resetctrl addr is 0\n",
                       __FUNCTION__, i));
            continue;
        }
        dhd_sbreg_op(dhd, addr, &val, TRUE);
        if (!(val & 1)) {
            dhd->sssr_d11_outofreset[i] = TRUE;
        } else {
            dhd->sssr_d11_outofreset[i] = FALSE;
        }
        DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d\n", __FUNCTION__, i,
                   dhd->sssr_d11_outofreset[i]));
    }
    dhdpcie_update_d11_status_from_trapdata(dhd);

    return BCME_OK;
}

static int dhdpcie_d11_clear_clk_req(dhd_pub_t *dhd)
{
    int i;
    uint addr;
    uint val = 0;

    DHD_ERROR(("%s\n", __FUNCTION__));

    for (i = 0; i < MAX_NUM_D11CORES; i++) {
        if (dhd->sssr_d11_outofreset[i]) {
            /* clear the clock request only if itopoobb is non-zero */
            addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.itopoobb;
            dhd_sbreg_op(dhd, addr, &val, TRUE);
            if (val != 0) {
                /* clear clockcontrolstatus */
                addr =
                    dhd->sssr_reg_info.mac_regs[i].base_regs.clockcontrolstatus;
                val = dhd->sssr_reg_info.mac_regs[i]
                          .base_regs.clockcontrolstatus_val;
                dhd_sbreg_op(dhd, addr, &val, FALSE);
            }
        }
    }
    return BCME_OK;
}

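/*
 * ARM variant of the clk-req clear: besides acking the clock request, on
 * multi-backplane chips the CR4 is halted (SICF_CPUHALT via the wrapper
 * ioctrl register) so it cannot re-request the clock while the SR state is
 * being dumped; dhdpcie_arm_resume_clk_req() below undoes the halt.
 */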
static int dhdpcie_arm_clear_clk_req(dhd_pub_t *dhd)
{
    uint addr;
    uint val = 0;

    DHD_ERROR(("%s\n", __FUNCTION__));

    /* Check if bit 0 of resetctrl is cleared */
    addr = dhd->sssr_reg_info.arm_regs.wrapper_regs.resetctrl;
    dhd_sbreg_op(dhd, addr, &val, TRUE);
    if (!(val & 1)) {
        /* clear the clock request only if itopoobb is non-zero */
        addr = dhd->sssr_reg_info.arm_regs.wrapper_regs.itopoobb;
        dhd_sbreg_op(dhd, addr, &val, TRUE);
        if (val != 0) {
            /* clear clockcontrolstatus */
            addr = dhd->sssr_reg_info.arm_regs.base_regs.clockcontrolstatus;
            val = dhd->sssr_reg_info.arm_regs.base_regs.clockcontrolstatus_val;
            dhd_sbreg_op(dhd, addr, &val, FALSE);
        }

        if (MULTIBP_ENAB(dhd->bus->sih)) {
            uint32 resetctrl =
                dhd->sssr_reg_info.arm_regs.wrapper_regs.resetctrl;

            /* Just halt ARM but do not reset the core */
            resetctrl &= ~(SI_CORE_SIZE - 1);
            resetctrl += OFFSETOF(aidmp_t, ioctrl);

            dhd_sbreg_op(dhd, resetctrl, &val, TRUE);
            val |= SICF_CPUHALT;
            dhd_sbreg_op(dhd, resetctrl, &val, FALSE);
        }
    }
    return BCME_OK;
}

static int dhdpcie_arm_resume_clk_req(dhd_pub_t *dhd)
{
    uint addr;
    uint val = 0;

    DHD_ERROR(("%s\n", __FUNCTION__));

    /* Check if bit 0 of resetctrl is cleared */
    addr = dhd->sssr_reg_info.arm_regs.wrapper_regs.resetctrl;
    dhd_sbreg_op(dhd, addr, &val, TRUE);
    if (!(val & 1)) {
        if (MULTIBP_ENAB(dhd->bus->sih)) {
            uint32 resetctrl =
                dhd->sssr_reg_info.arm_regs.wrapper_regs.resetctrl;

            /* Take ARM out of halt but do not reset the core */
            resetctrl &= ~(SI_CORE_SIZE - 1);
            resetctrl += OFFSETOF(aidmp_t, ioctrl);

            dhd_sbreg_op(dhd, resetctrl, &val, TRUE);
            val &= ~SICF_CPUHALT;
            dhd_sbreg_op(dhd, resetctrl, &val, FALSE);
            dhd_sbreg_op(dhd, resetctrl, &val, TRUE);
        }
    }

    return BCME_OK;
}

static int dhdpcie_pcie_clear_clk_req(dhd_pub_t *dhd)
{
    uint addr;
    uint val = 0;

    DHD_ERROR(("%s\n", __FUNCTION__));

    /* clear the clock request only if itopoobb is non-zero */
    addr = dhd->sssr_reg_info.pcie_regs.wrapper_regs.itopoobb;
    dhd_sbreg_op(dhd, addr, &val, TRUE);
    if (val) {
        /* clear clockcontrolstatus */
        addr = dhd->sssr_reg_info.pcie_regs.base_regs.clockcontrolstatus;
        val = dhd->sssr_reg_info.pcie_regs.base_regs.clockcontrolstatus_val;
        dhd_sbreg_op(dhd, addr, &val, FALSE);
    }
    return BCME_OK;
}

static int dhdpcie_pcie_send_ltrsleep(dhd_pub_t *dhd)
{
    uint addr;
    uint val = 0;

    DHD_ERROR(("%s\n", __FUNCTION__));

    /* transition the LTR state from ACTIVE to SLEEP */
    addr = dhd->sssr_reg_info.pcie_regs.base_regs.ltrstate;
    val = LTR_ACTIVE;
    dhd_sbreg_op(dhd, addr, &val, FALSE);

    val = LTR_SLEEP;
    dhd_sbreg_op(dhd, addr, &val, FALSE);

    return BCME_OK;
}

static int dhdpcie_clear_clk_req(dhd_pub_t *dhd)
{
    DHD_ERROR(("%s\n", __FUNCTION__));

    dhdpcie_arm_clear_clk_req(dhd);

    dhdpcie_d11_clear_clk_req(dhd);

    dhdpcie_pcie_clear_clk_req(dhd);

    return BCME_OK;
}

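/*
 * Re-enable the D11 cores that were out of reset when the dump started.
 * Per-core reset sequence: assert reset (resetctrl bit 0), apply the first
 * two ioctrl_resetseq values, deassert reset, then apply the remaining
 * three. Note that the sequence values are always taken from mac_regs[0].
 */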
static int dhdpcie_bring_d11_outofreset(dhd_pub_t *dhd)
{
    int i;
    uint addr;
    uint val = 0;

    DHD_ERROR(("%s\n", __FUNCTION__));

    for (i = 0; i < MAX_NUM_D11CORES; i++) {
        if (dhd->sssr_d11_outofreset[i]) {
            /* disable core by setting bit 0 */
            addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl;
            val = 1;
            dhd_sbreg_op(dhd, addr, &val, FALSE);
            OSL_DELAY(6000);

            addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.ioctrl;
            val = dhd->sssr_reg_info.mac_regs[0]
                      .wrapper_regs.ioctrl_resetseq_val[0];
            dhd_sbreg_op(dhd, addr, &val, FALSE);

            val = dhd->sssr_reg_info.mac_regs[0]
                      .wrapper_regs.ioctrl_resetseq_val[1];
            dhd_sbreg_op(dhd, addr, &val, FALSE);

            /* enable core by clearing bit 0 */
            addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl;
            val = 0;
            dhd_sbreg_op(dhd, addr, &val, FALSE);

            addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.ioctrl;
            val = dhd->sssr_reg_info.mac_regs[0]
                      .wrapper_regs.ioctrl_resetseq_val[2];
            dhd_sbreg_op(dhd, addr, &val, FALSE);

            val = dhd->sssr_reg_info.mac_regs[0]
                      .wrapper_regs.ioctrl_resetseq_val[3];
            dhd_sbreg_op(dhd, addr, &val, FALSE);

            val = dhd->sssr_reg_info.mac_regs[0]
                      .wrapper_regs.ioctrl_resetseq_val[4];
            dhd_sbreg_op(dhd, addr, &val, FALSE);
        }
    }
    return BCME_OK;
}

static int dhdpcie_sssr_dump_get_before_sr(dhd_pub_t *dhd)
{
    int i;

    DHD_ERROR(("%s\n", __FUNCTION__));

    for (i = 0; i < MAX_NUM_D11CORES; i++) {
        if (dhd->sssr_d11_outofreset[i]) {
            dhdpcie_get_sssr_fifo_dump(
                dhd, dhd->sssr_d11_before[i],
                dhd->sssr_reg_info.mac_regs[i].sr_size,
                dhd->sssr_reg_info.mac_regs[i].base_regs.xmtaddress,
                dhd->sssr_reg_info.mac_regs[i].base_regs.xmtdata);
        }
    }

    if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
        dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_before,
                                  dhd->sssr_reg_info.vasip_regs.vasip_sr_size,
                                  dhd->sssr_reg_info.vasip_regs.vasip_sr_addr);
    } else if ((dhd->sssr_reg_info.length >
                OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
               dhd->sssr_reg_info.dig_mem_info.dig_sr_addr) {
        dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_before,
                                  dhd->sssr_reg_info.dig_mem_info.dig_sr_size,
                                  dhd->sssr_reg_info.dig_mem_info.dig_sr_addr);
    }

    return BCME_OK;
}

static int dhdpcie_sssr_dump_get_after_sr(dhd_pub_t *dhd)
{
    int i;

    DHD_ERROR(("%s\n", __FUNCTION__));

    for (i = 0; i < MAX_NUM_D11CORES; i++) {
        if (dhd->sssr_d11_outofreset[i]) {
            dhdpcie_get_sssr_fifo_dump(
                dhd, dhd->sssr_d11_after[i],
                dhd->sssr_reg_info.mac_regs[i].sr_size,
                dhd->sssr_reg_info.mac_regs[i].base_regs.xmtaddress,
                dhd->sssr_reg_info.mac_regs[i].base_regs.xmtdata);
        }
    }

    if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
        dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_after,
                                  dhd->sssr_reg_info.vasip_regs.vasip_sr_size,
                                  dhd->sssr_reg_info.vasip_regs.vasip_sr_addr);
    } else if ((dhd->sssr_reg_info.length >
                OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
               dhd->sssr_reg_info.dig_mem_info.dig_sr_addr) {
        dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_after,
                                  dhd->sssr_reg_info.dig_mem_info.dig_sr_size,
                                  dhd->sssr_reg_info.dig_mem_info.dig_sr_addr);
    }

    return BCME_OK;
}

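/*
 * SSSR (silicon save/restore) dump sequence: capture the D11 SR memories
 * before SR, then force a WL power transition (mask interrupts and timers,
 * save and clear chipcommon PowerCtrl, drop all clock requests, send LTR
 * sleep, and on multi-backplane chips drop the WL power request), wait,
 * power the domain back up, and capture the same memories again after SR.
 */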
int dhdpcie_sssr_dump(dhd_pub_t *dhd)
{
    uint32 powerctrl_val;

    if (!dhd->sssr_inited) {
        DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
        return BCME_ERROR;
    }

    if (dhd->bus->is_linkdown) {
        DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
        return BCME_ERROR;
    }

    DHD_ERROR(
        ("%s: Before WL down (powerctl: pcie:0x%x chipc:0x%x) "
         "PMU rctl:0x%x res_state:0x%x\n",
         __FUNCTION__,
         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
                    OFFSETOF(chipcregs_t, powerctl), 0, 0),
         si_corereg(dhd->bus->sih, 0, OFFSETOF(chipcregs_t, powerctl), 0, 0),
         PMU_REG(dhd->bus->sih, retention_ctl, 0, 0),
         PMU_REG(dhd->bus->sih, res_state, 0, 0)));

    dhdpcie_d11_check_outofreset(dhd);

    DHD_ERROR(("%s: Collecting Dump before SR\n", __FUNCTION__));
    if (dhdpcie_sssr_dump_get_before_sr(dhd) != BCME_OK) {
        DHD_ERROR(
            ("%s: dhdpcie_sssr_dump_get_before_sr failed\n", __FUNCTION__));
        return BCME_ERROR;
    }

    dhdpcie_clear_intmask_and_timer(dhd);
    powerctrl_val = dhdpcie_suspend_chipcommon_powerctrl(dhd);
    dhdpcie_clear_clk_req(dhd);
    dhdpcie_pcie_send_ltrsleep(dhd);

    if (MULTIBP_ENAB(dhd->bus->sih)) {
        dhd_bus_pcie_pwr_req_wl_domain(dhd->bus,
                                       OFFSETOF(chipcregs_t, powerctl), FALSE);
    }

    /* Wait for some time before Restore */
    OSL_DELAY(6000);

    DHD_ERROR(
        ("%s: After WL down (powerctl: pcie:0x%x chipc:0x%x) "
         "PMU rctl:0x%x res_state:0x%x\n",
         __FUNCTION__,
         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
                    OFFSETOF(chipcregs_t, powerctl), 0, 0),
         si_corereg(dhd->bus->sih, 0, OFFSETOF(chipcregs_t, powerctl), 0, 0),
         PMU_REG(dhd->bus->sih, retention_ctl, 0, 0),
         PMU_REG(dhd->bus->sih, res_state, 0, 0)));

    if (MULTIBP_ENAB(dhd->bus->sih)) {
        dhd_bus_pcie_pwr_req_wl_domain(dhd->bus,
                                       OFFSETOF(chipcregs_t, powerctl), TRUE);
        /* Add delay for the WL domain to power up */
        OSL_DELAY(15000);

        DHD_ERROR((
            "%s: After WL up again (powerctl: pcie:0x%x chipc:0x%x) "
            "PMU rctl:0x%x res_state:0x%x\n",
            __FUNCTION__,
            si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
                       OFFSETOF(chipcregs_t, powerctl), 0, 0),
            si_corereg(dhd->bus->sih, 0, OFFSETOF(chipcregs_t, powerctl), 0, 0),
            PMU_REG(dhd->bus->sih, retention_ctl, 0, 0),
            PMU_REG(dhd->bus->sih, res_state, 0, 0)));
    }

    dhdpcie_arm_resume_clk_req(dhd);
    dhdpcie_resume_chipcommon_powerctrl(dhd, powerctrl_val);
    dhdpcie_bring_d11_outofreset(dhd);

    DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__));
    if (dhdpcie_sssr_dump_get_after_sr(dhd) != BCME_OK) {
        DHD_ERROR(
            ("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__));
        return BCME_ERROR;
    }
    dhd->sssr_dump_collected = TRUE;
    dhd_write_sssr_dump(dhd, SSSR_DUMP_MODE_SSSR);

    return BCME_OK;
}

static int dhdpcie_fis_trigger(dhd_pub_t *dhd)
{
    if (!dhd->sssr_inited) {
        DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
        return BCME_ERROR;
    }

    if (dhd->bus->is_linkdown) {
        DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
        return BCME_ERROR;
    }

    /* Trigger FIS */
    si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
               DAR_FIS_CTRL(dhd->bus->sih->buscorerev), ~0, DAR_FIS_START_MASK);
    OSL_DELAY(100 * 1000); /* 100 ms */

    return BCME_OK;
}

int dhd_bus_fis_trigger(dhd_pub_t *dhd)
{
    return dhdpcie_fis_trigger(dhd);
}

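/*
 * FIS dump: once dhd_bus_fis_trigger() has started a capture through the
 * DAR FIS control register, bring all PMU resources up, force every D11
 * core out of reset, clear the FIS-done status in the PMU, and then collect
 * the post-SR SSSR buffers.
 */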
static int dhdpcie_fis_dump(dhd_pub_t *dhd)
{
    int i;

    if (!dhd->sssr_inited) {
        DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
        return BCME_ERROR;
    }

    if (dhd->bus->is_linkdown) {
        DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
        return BCME_ERROR;
    }

    /* bring up all pmu resources */
    PMU_REG(dhd->bus->sih, min_res_mask, ~0,
            PMU_REG(dhd->bus->sih, max_res_mask, 0, 0));
    OSL_DELAY(10 * 1000); /* 10 ms */

    for (i = 0; i < MAX_NUM_D11CORES; i++) {
        dhd->sssr_d11_outofreset[i] = TRUE;
    }

    dhdpcie_bring_d11_outofreset(dhd);
    OSL_DELAY(6000);

    /* clear FIS Done */
    PMU_REG(dhd->bus->sih, fis_ctrl_status, PMU_CLEAR_FIS_DONE_MASK,
            PMU_CLEAR_FIS_DONE_MASK);

    dhdpcie_d11_check_outofreset(dhd);

    DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__));
    if (dhdpcie_sssr_dump_get_after_sr(dhd) != BCME_OK) {
        DHD_ERROR(
            ("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__));
        return BCME_ERROR;
    }

    dhd_write_sssr_dump(dhd, SSSR_DUMP_MODE_FIS);

    return BCME_OK;
}

int dhd_bus_fis_dump(dhd_pub_t *dhd)
{
    return dhdpcie_fis_dump(dhd);
}
#endif /* DHD_SSSR_DUMP */

#ifdef DHD_WAKE_STATUS
wake_counts_t *dhd_bus_get_wakecount(dhd_pub_t *dhd)
{
    return &dhd->bus->wake_counts;
}

int dhd_bus_get_bus_wake(dhd_pub_t *dhd)
{
    return bcmpcie_set_get_wake(dhd->bus, 0);
}
#endif /* DHD_WAKE_STATUS */

/* Write random number(s) into the TCM. On initialization the firmware reads
 * this location to fetch the random seed, and uses it to randomize its heap
 * address-space layout.
 */
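/* Rough TCM layout implied by the address arithmetic below, growing down
 * from the top of RAM (a sketch, not an interface definition):
 *
 *   dongle_ram_base + ramsize - BCM_NVRAM_OFFSET_TCM
 *     - (nvram_csm & 0xffff) * BCM_NVRAM_IMG_COMPRS_FACTOR   <- NVRAM image
 *     - sizeof(bcm_rand_metadata_t)                          <- rnd_data
 *     - count                                                <- random bytes
 */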
static int dhdpcie_wrt_rnd(struct dhd_bus *bus)
{
    bcm_rand_metadata_t rnd_data;
    uint8 rand_buf[BCM_ENTROPY_HOST_NBYTES];
    uint32 count = BCM_ENTROPY_HOST_NBYTES;
    int ret = 0;
    uint32 addr = bus->dongle_ram_base + (bus->ramsize - BCM_NVRAM_OFFSET_TCM) -
                  ((bus->nvram_csm & 0xffff) * BCM_NVRAM_IMG_COMPRS_FACTOR +
                   sizeof(rnd_data));

    memset(rand_buf, 0, BCM_ENTROPY_HOST_NBYTES);
    rnd_data.signature = htol32(BCM_NVRAM_RNG_SIGNATURE);
    rnd_data.count = htol32(count);
    /* write the metadata about the random number */
    dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&rnd_data, sizeof(rnd_data));
    /* step back by the number of random bytes */
    addr -= count;

    /* Now get & write the random number(s) */
    ret = dhd_get_random_bytes(rand_buf, count);
    if (ret != BCME_OK) {
        return ret;
    }
    dhdpcie_bus_membytes(bus, TRUE, addr, rand_buf, count);

    return BCME_OK;
}

void dhd_pcie_intr_count_dump(dhd_pub_t *dhd)
{
    struct dhd_bus *bus = dhd->bus;
    uint64 current_time;

    DHD_ERROR(
        ("\n ------- DUMPING INTR enable/disable counters  ------- \r\n"));
    DHD_ERROR(("resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n",
               bus->resume_intr_enable_count, bus->dpc_intr_enable_count));
    DHD_ERROR(("isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n",
               bus->isr_intr_disable_count, bus->suspend_intr_disable_count));
#ifdef BCMPCIE_OOB_HOST_WAKE
    DHD_ERROR(("oob_intr_count=%lu oob_intr_enable_count=%lu "
               "oob_intr_disable_count=%lu\n",
               bus->oob_intr_count, bus->oob_intr_enable_count,
               bus->oob_intr_disable_count));
    DHD_ERROR(("oob_irq_num=%d last_oob_irq_time=" SEC_USEC_FMT "\n",
               dhdpcie_get_oob_irq_num(bus),
               GET_SEC_USEC(bus->last_oob_irq_time)));
    DHD_ERROR(("last_oob_irq_enable_time=" SEC_USEC_FMT
               " last_oob_irq_disable_time=" SEC_USEC_FMT "\n",
               GET_SEC_USEC(bus->last_oob_irq_enable_time),
               GET_SEC_USEC(bus->last_oob_irq_disable_time)));
    DHD_ERROR(("oob_irq_enabled=%d oob_gpio_level=%d\n",
               dhdpcie_get_oob_irq_status(bus), dhdpcie_get_oob_irq_level()));
#endif /* BCMPCIE_OOB_HOST_WAKE */
    DHD_ERROR(("dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n",
               bus->dpc_return_busdown_count, bus->non_ours_irq_count));

    current_time = OSL_LOCALTIME_NS();
    DHD_ERROR(
        ("\ncurrent_time=" SEC_USEC_FMT "\n", GET_SEC_USEC(current_time)));
    DHD_ERROR(
        ("isr_entry_time=" SEC_USEC_FMT " isr_exit_time=" SEC_USEC_FMT "\n",
         GET_SEC_USEC(bus->isr_entry_time), GET_SEC_USEC(bus->isr_exit_time)));
    DHD_ERROR(("dpc_sched_time=" SEC_USEC_FMT
               " last_non_ours_irq_time=" SEC_USEC_FMT "\n",
               GET_SEC_USEC(bus->dpc_sched_time),
               GET_SEC_USEC(bus->last_non_ours_irq_time)));
    DHD_ERROR(("dpc_entry_time=" SEC_USEC_FMT
               " last_process_ctrlbuf_time=" SEC_USEC_FMT "\n",
               GET_SEC_USEC(bus->dpc_entry_time),
               GET_SEC_USEC(bus->last_process_ctrlbuf_time)));
    DHD_ERROR(("last_process_flowring_time=" SEC_USEC_FMT
               " last_process_txcpl_time=" SEC_USEC_FMT "\n",
               GET_SEC_USEC(bus->last_process_flowring_time),
               GET_SEC_USEC(bus->last_process_txcpl_time)));
    DHD_ERROR(("last_process_rxcpl_time=" SEC_USEC_FMT
               " last_process_infocpl_time=" SEC_USEC_FMT
               " last_process_edl_time=" SEC_USEC_FMT "\n",
               GET_SEC_USEC(bus->last_process_rxcpl_time),
               GET_SEC_USEC(bus->last_process_infocpl_time),
               GET_SEC_USEC(bus->last_process_edl_time)));
    DHD_ERROR((
        "dpc_exit_time=" SEC_USEC_FMT " resched_dpc_time=" SEC_USEC_FMT "\n",
        GET_SEC_USEC(bus->dpc_exit_time), GET_SEC_USEC(bus->resched_dpc_time)));
    DHD_ERROR(("last_d3_inform_time=" SEC_USEC_FMT "\n",
               GET_SEC_USEC(bus->last_d3_inform_time)));

    DHD_ERROR(("\nlast_suspend_start_time=" SEC_USEC_FMT
               " last_suspend_end_time=" SEC_USEC_FMT "\n",
               GET_SEC_USEC(bus->last_suspend_start_time),
               GET_SEC_USEC(bus->last_suspend_end_time)));
    DHD_ERROR(("last_resume_start_time=" SEC_USEC_FMT
               " last_resume_end_time=" SEC_USEC_FMT "\n",
               GET_SEC_USEC(bus->last_resume_start_time),
               GET_SEC_USEC(bus->last_resume_end_time)));

#if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE)
    DHD_ERROR(("logtrace_thread_entry_time=" SEC_USEC_FMT
               " logtrace_thread_sem_down_time=" SEC_USEC_FMT
               "\nlogtrace_thread_flush_time=" SEC_USEC_FMT
               " logtrace_thread_unexpected_break_time=" SEC_USEC_FMT
               "\nlogtrace_thread_complete_time=" SEC_USEC_FMT "\n",
               GET_SEC_USEC(dhd->logtrace_thr_ts.entry_time),
               GET_SEC_USEC(dhd->logtrace_thr_ts.sem_down_time),
               GET_SEC_USEC(dhd->logtrace_thr_ts.flush_time),
               GET_SEC_USEC(dhd->logtrace_thr_ts.unexpected_break_time),
               GET_SEC_USEC(dhd->logtrace_thr_ts.complete_time)));
#endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */
}

void dhd_bus_intr_count_dump(dhd_pub_t *dhd)
{
    dhd_pcie_intr_count_dump(dhd);
}

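/*
 * Dump the AXI wrapper (AI_*) registers of the PCIe master and ARM cores,
 * the ARM CR4/CA7 core registers, and the OOB router status registers;
 * useful when triaging hangs where only register reads still work.
 */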
int dhd_pcie_dump_wrapper_regs(dhd_pub_t *dhd)
{
    uint32 save_idx, val;
    si_t *sih = dhd->bus->sih;
    uint32 oob_base, oob_base1;
    uint32 wrapper_dump_list[] = {
        AI_OOBSELOUTA30, AI_OOBSELOUTA74, AI_OOBSELOUTB30, AI_OOBSELOUTB74,
        AI_OOBSELOUTC30, AI_OOBSELOUTC74, AI_OOBSELOUTD30, AI_OOBSELOUTD74,
        AI_RESETSTATUS,  AI_RESETCTRL,    AI_ITIPOOBA,     AI_ITIPOOBB,
        AI_ITIPOOBC,     AI_ITIPOOBD,     AI_ITIPOOBAOUT,  AI_ITIPOOBBOUT,
        AI_ITIPOOBCOUT,  AI_ITIPOOBDOUT};
    uint32 i;
    hndoobr_reg_t *reg;
    cr4regs_t *cr4regs;
    ca7regs_t *ca7regs;

    save_idx = si_coreidx(sih);

    DHD_ERROR(("%s: Master wrapper Reg\n", __FUNCTION__));

    if (si_setcore(sih, PCIE2_CORE_ID, 0) != NULL) {
        for (i = 0; i < sizeof(wrapper_dump_list) / sizeof(uint32); i++) {
            val = si_wrapperreg(sih, wrapper_dump_list[i], 0, 0);
            DHD_ERROR(
                ("sbreg: addr:0x%x val:0x%x\n", wrapper_dump_list[i], val));
        }
    }

    if ((cr4regs = si_setcore(sih, ARMCR4_CORE_ID, 0)) != NULL) {
        DHD_ERROR(("%s: ARM CR4 wrapper Reg\n", __FUNCTION__));
        for (i = 0; i < sizeof(wrapper_dump_list) / sizeof(uint32); i++) {
            val = si_wrapperreg(sih, wrapper_dump_list[i], 0, 0);
            DHD_ERROR(
                ("sbreg: addr:0x%x val:0x%x\n", wrapper_dump_list[i], val));
        }
        DHD_ERROR(("%s: ARM CR4 core Reg\n", __FUNCTION__));
        val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corecontrol));
        DHD_ERROR(("reg:0x%x val:0x%x\n",
                   (uint)OFFSETOF(cr4regs_t, corecontrol), val));
        val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corecapabilities));
        DHD_ERROR(("reg:0x%x val:0x%x\n",
                   (uint)OFFSETOF(cr4regs_t, corecapabilities), val));
        val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corestatus));
        DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, corestatus),
                   val));
        val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, nmiisrst));
        DHD_ERROR(
            ("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, nmiisrst), val));
        val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, nmimask));
        DHD_ERROR(
            ("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, nmimask), val));
        val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, isrmask));
        DHD_ERROR(
            ("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, isrmask), val));
        val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, swintreg));
        DHD_ERROR(
            ("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, swintreg), val));
        val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, intstatus));
        DHD_ERROR(
            ("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, intstatus), val));
        val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, cyclecnt));
        DHD_ERROR(
            ("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, cyclecnt), val));
        val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, inttimer));
        DHD_ERROR(
            ("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, inttimer), val));
        val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, clk_ctl_st));
        DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, clk_ctl_st),
                   val));
        val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, powerctl));
        DHD_ERROR(
            ("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, powerctl), val));
    }

    if ((ca7regs = si_setcore(sih, ARMCA7_CORE_ID, 0)) != NULL) {
        DHD_ERROR(("%s: ARM CA7 core Reg\n", __FUNCTION__));
        val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corecontrol));
        DHD_ERROR(("reg:0x%x val:0x%x\n",
                   (uint)OFFSETOF(ca7regs_t, corecontrol), val));
        val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corecapabilities));
        DHD_ERROR(("reg:0x%x val:0x%x\n",
                   (uint)OFFSETOF(ca7regs_t, corecapabilities), val));
        val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corestatus));
        DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, corestatus),
                   val));
        val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, tracecontrol));
        DHD_ERROR(("reg:0x%x val:0x%x\n",
                   (uint)OFFSETOF(ca7regs_t, tracecontrol), val));
        val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, clk_ctl_st));
        DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, clk_ctl_st),
                   val));
        val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, powerctl));
        DHD_ERROR(
            ("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, powerctl), val));
    }

    DHD_ERROR(("%s: OOBR Reg\n", __FUNCTION__));

    oob_base = si_oobr_baseaddr(sih, FALSE);
    oob_base1 = si_oobr_baseaddr(sih, TRUE);
    if (oob_base) {
        dhd_sbreg_op(dhd, oob_base + OOB_STATUSA, &val, TRUE);
        dhd_sbreg_op(dhd, oob_base + OOB_STATUSB, &val, TRUE);
        dhd_sbreg_op(dhd, oob_base + OOB_STATUSC, &val, TRUE);
        dhd_sbreg_op(dhd, oob_base + OOB_STATUSD, &val, TRUE);
    } else if ((reg = si_setcore(sih, HND_OOBR_CORE_ID, 0)) != NULL) {
        /* Print the address of each intstatus word actually read, not the
         * core base, so the four lines are distinguishable in the log.
         */
        val = R_REG(dhd->osh, &reg->intstatus[0]);
        DHD_ERROR(("reg: addr:%p val:0x%x\n", &reg->intstatus[0], val));
        val = R_REG(dhd->osh, &reg->intstatus[1]);
        DHD_ERROR(("reg: addr:%p val:0x%x\n", &reg->intstatus[1], val));
        val = R_REG(dhd->osh, &reg->intstatus[2]);
        DHD_ERROR(("reg: addr:%p val:0x%x\n", &reg->intstatus[2], val));
        val = R_REG(dhd->osh, &reg->intstatus[3]);
        DHD_ERROR(("reg: addr:%p val:0x%x\n", &reg->intstatus[3], val));
    }

    if (oob_base1) {
        DHD_ERROR(("%s: Second OOBR Reg\n", __FUNCTION__));

        dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSA, &val, TRUE);
        dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSB, &val, TRUE);
        dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSC, &val, TRUE);
        dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSD, &val, TRUE);
    }

    si_setcoreidx(dhd->bus->sih, save_idx);

    return 0;
}

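/*
 * Dump the hardware work-around register (REG_WORK_AROUND) of the
 * chipcommon, ARM CR4 and PCIe cores, plus the PMU minimum resource mask.
 */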
static void dhdpcie_hw_war_regdump(dhd_bus_t *bus)
{
    uint32 save_idx, val;
    volatile uint32 *reg;

    save_idx = si_coreidx(bus->sih);
    if ((reg = si_setcore(bus->sih, CC_CORE_ID, 0)) != NULL) {
        val = R_REG(bus->osh, reg + REG_WORK_AROUND);
        DHD_ERROR(("CC HW_WAR:0x%x\n", val));
    }

    if ((reg = si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) != NULL) {
        val = R_REG(bus->osh, reg + REG_WORK_AROUND);
        DHD_ERROR(("ARM HW_WAR:0x%x\n", val));
    }

    if ((reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0)) != NULL) {
        val = R_REG(bus->osh, reg + REG_WORK_AROUND);
        DHD_ERROR(("PCIE HW_WAR:0x%x\n", val));
    }
    si_setcoreidx(bus->sih, save_idx);

    val = PMU_REG_NEW(bus->sih, min_res_mask, 0, 0);
    DHD_ERROR(("MINRESMASK:0x%x\n", val));
}

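/*
 * Dump the host-to-device and device-to-host DMA engine registers of the
 * PCIe core. Skipped when the PCIe link is already known to be down.
 */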
int dhd_pcie_dma_info_dump(dhd_pub_t *dhd)
{
    if (dhd->bus->is_linkdown) {
        DHD_ERROR(("\n ------- SKIP DUMPING DMA Registers "
                   "due to PCIe link down ------- \r\n"));
        return 0;
    }

    DHD_ERROR(("\n ------- DUMPING DMA Registers ------- \r\n"));

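    /*
     * The fixed offsets below are the DMA engine register blocks inside the
     * PCIe core as labelled in each print: H2D TX at 0x200, H2D RX at 0x220,
     * D2H TX at 0x240 and D2H RX at 0x260, each exposing ctrl/ptr, addr
     * low/high and status0/status1 registers at 4-byte strides.
     */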
    // HostToDev
    DHD_ERROR(
        ("HostToDev TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x200, 0, 0),
         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x204, 0, 0)));
    DHD_ERROR(
        ("            : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x208, 0, 0),
         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x20C, 0, 0)));
    DHD_ERROR(
        ("            : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x210, 0, 0),
         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x214, 0, 0)));

    DHD_ERROR(
        ("HostToDev RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x220, 0, 0),
         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x224, 0, 0)));
    DHD_ERROR(
        ("            : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x228, 0, 0),
         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x22C, 0, 0)));
    DHD_ERROR(
        ("            : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x230, 0, 0),
         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x234, 0, 0)));

    // DevToHost
    DHD_ERROR(
        ("DevToHost TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x240, 0, 0),
         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x244, 0, 0)));
    DHD_ERROR(
        ("            : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x248, 0, 0),
         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x24C, 0, 0)));
    DHD_ERROR(
        ("            : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x250, 0, 0),
         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x254, 0, 0)));

    DHD_ERROR(
        ("DevToHost RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x260, 0, 0),
         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x264, 0, 0)));
    DHD_ERROR(
        ("            : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x268, 0, 0),
         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x26C, 0, 0)));
    DHD_ERROR(
        ("            : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x270, 0, 0),
         si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x274, 0, 0)));

    return 0;
}

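/*
 * Dump the PCIe mailbox interrupt status/mask, the D2H doorbell and the
 * D2H mailbox data. Returns FALSE if any register reads back as all ones,
 * which typically means the PCIe link is down.
 */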
bool dhd_pcie_dump_int_regs(dhd_pub_t *dhd)
{
    uint32 intstatus = 0;
    uint32 intmask = 0;
    uint32 d2h_db0 = 0;
    uint32 d2h_mb_data = 0;

    DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n"));
    intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
                           dhd->bus->pcie_mailbox_int, 0, 0);
    if (intstatus == (uint32)-1) {
        DHD_ERROR(("intstatus=0x%x\n", intstatus));
        return FALSE;
    }

    intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
                         dhd->bus->pcie_mailbox_mask, 0, 0);
    if (intmask == (uint32)-1) {
        DHD_ERROR(("intstatus=0x%x intmask=0x%x\n", intstatus, intmask));
        return FALSE;
    }

    d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
                         PCID2H_MailBox, 0, 0);
    if (d2h_db0 == (uint32)-1) {
        DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n", intstatus,
                   intmask, d2h_db0));
        return FALSE;
    }

    DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n", intstatus, intmask,
               d2h_db0));
    dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
    DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data,
               dhd->bus->def_intmask));

    return TRUE;
}

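/*
 * Dump the AER uncorrectable error status (and, under
 * EXTENDED_PCIE_DEBUG_DUMP, the AER header logs) from the root complex's
 * extended config space.
 */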
void dhd_pcie_dump_rc_conf_space_cap(dhd_pub_t *dhd)
{
    DHD_ERROR(
        ("\n ------- DUMPING PCIE RC config space Registers ------- \r\n"));
    DHD_ERROR(
        ("Pcie RC Uncorrectable Error Status Val=0x%x\n",
         dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
                               PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0)));
#ifdef EXTENDED_PCIE_DEBUG_DUMP
    DHD_ERROR(
        ("hdrlog0 =0x%08x hdrlog1 =0x%08x hdrlog2 =0x%08x hdrlog3 =0x%08x\n",
         dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
                               PCIE_EXTCAP_ERR_HEADER_LOG_0, TRUE, FALSE, 0),
         dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
                               PCIE_EXTCAP_ERR_HEADER_LOG_1, TRUE, FALSE, 0),
         dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
                               PCIE_EXTCAP_ERR_HEADER_LOG_2, TRUE, FALSE, 0),
         dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
                               PCIE_EXTCAP_ERR_HEADER_LOG_3, TRUE, FALSE, 0)));
#endif /* EXTENDED_PCIE_DEBUG_DUMP */
}

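/*
 * Top-level PCIe debug dump: host-side IRQ/tasklet state, RC and EP
 * config space and, if the link is still up, PCIe core, wrapper and DMA
 * engine registers.
 */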
int dhd_pcie_debug_info_dump(dhd_pub_t *dhd)
{
    int host_irq_disabled;

    DHD_ERROR(
        ("bus->bus_low_power_state = %d\n", dhd->bus->bus_low_power_state));
    host_irq_disabled = dhdpcie_irq_disabled(dhd->bus);
    DHD_ERROR(("host pcie_irq disabled = %d\n", host_irq_disabled));
    dhd_print_tasklet_status(dhd);
    dhd_pcie_intr_count_dump(dhd);

    DHD_ERROR(("\n ------- DUMPING PCIE EP Resource Info ------- \r\n"));
    dhdpcie_dump_resource(dhd->bus);

    dhd_pcie_dump_rc_conf_space_cap(dhd);

    DHD_ERROR(
        ("RootPort PCIe linkcap=0x%08x\n", dhd_debug_get_rc_linkcap(dhd->bus)));
    DHD_ERROR(
        ("\n ------- DUMPING PCIE EP config space Registers ------- \r\n"));
    DHD_ERROR(
        ("Status Command(0x%x)=0x%x, BaseAddress0(0x%x)=0x%x "
         "BaseAddress1(0x%x)=0x%x "
         "PCIE_CFG_PMCSR(0x%x)=0x%x\n",
         PCIECFGREG_STATUS_CMD,
         dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_STATUS_CMD,
                              sizeof(uint32)),
         PCIECFGREG_BASEADDR0,
         dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR0,
                              sizeof(uint32)),
         PCIECFGREG_BASEADDR1,
         dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR1,
                              sizeof(uint32)),
         PCIE_CFG_PMCSR,
         dhd_pcie_config_read(dhd->bus->osh, PCIE_CFG_PMCSR, sizeof(uint32))));
    DHD_ERROR(("LinkCtl(0x%x)=0x%x DeviceStatusControl2(0x%x)=0x%x "
               "L1SSControl(0x%x)=0x%x\n",
               PCIECFGREG_LINK_STATUS_CTRL,
               dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_LINK_STATUS_CTRL,
                                    sizeof(uint32)),
               PCIECFGGEN_DEV_STATUS_CTRL2,
               dhd_pcie_config_read(dhd->bus->osh, PCIECFGGEN_DEV_STATUS_CTRL2,
                                    sizeof(uint32)),
               PCIECFGREG_PML1_SUB_CTRL1,
               dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL1,
                                    sizeof(uint32))));
#ifdef EXTENDED_PCIE_DEBUG_DUMP
    DHD_ERROR(
        ("Pcie EP Uncorrectable Error Status Val=0x%x\n",
         dhdpcie_ep_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
                               PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0)));
    DHD_ERROR((
        "hdrlog0(0x%x)=0x%08x hdrlog1(0x%x)=0x%08x hdrlog2(0x%x)=0x%08x "
        "hdrlog3(0x%x)=0x%08x\n",
        PCI_TLP_HDR_LOG1,
        dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG1, sizeof(uint32)),
        PCI_TLP_HDR_LOG2,
        dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG2, sizeof(uint32)),
        PCI_TLP_HDR_LOG3,
        dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG3, sizeof(uint32)),
        PCI_TLP_HDR_LOG4,
        dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG4, sizeof(uint32))));
    if (dhd->bus->sih->buscorerev >= 24) {
        DHD_ERROR(
            ("DeviceStatusControl(0x%x)=0x%x SubsystemControl(0x%x)=0x%x "
             "L1SSControl2(0x%x)=0x%x\n",
             PCIECFGREG_DEV_STATUS_CTRL,
             dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_DEV_STATUS_CTRL,
                                  sizeof(uint32)),
             PCIE_CFG_SUBSYSTEM_CONTROL,
             dhd_pcie_config_read(dhd->bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL,
                                  sizeof(uint32)),
             PCIECFGREG_PML1_SUB_CTRL2,
             dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL2,
                                  sizeof(uint32))));
        dhd_bus_dump_dar_registers(dhd->bus);
    }
#endif /* EXTENDED_PCIE_DEBUG_DUMP */

    if (dhd->bus->is_linkdown) {
        DHD_ERROR(("Skip dumping the PCIe core registers; link may be down\n"));
        return 0;
    }

    DHD_ERROR(("\n ------- DUMPING PCIE core Registers ------- \r\n"));

    DHD_ERROR(
        ("ClkReq0(0x%x)=0x%x ClkReq1(0x%x)=0x%x ClkReq2(0x%x)=0x%x "
         "ClkReq3(0x%x)=0x%x\n",
         PCIECFGREG_PHY_DBG_CLKREQ0,
         dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ0),
         PCIECFGREG_PHY_DBG_CLKREQ1,
         dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ1),
         PCIECFGREG_PHY_DBG_CLKREQ2,
         dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ2),
         PCIECFGREG_PHY_DBG_CLKREQ3,
         dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ3)));

#ifdef EXTENDED_PCIE_DEBUG_DUMP
    if (dhd->bus->sih->buscorerev >= 24) {
        DHD_ERROR((
            "ltssm_hist_0(0x%x)=0x%x ltssm_hist_1(0x%x)=0x%x "
            "ltssm_hist_2(0x%x)=0x%x "
            "ltssm_hist_3(0x%x)=0x%x\n",
            PCIECFGREG_PHY_LTSSM_HIST_0,
            dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_0),
            PCIECFGREG_PHY_LTSSM_HIST_1,
            dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_1),
            PCIECFGREG_PHY_LTSSM_HIST_2,
            dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_2),
            PCIECFGREG_PHY_LTSSM_HIST_3,
            dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_3)));

        DHD_ERROR(
            ("trefup(0x%x)=0x%x trefup_ext(0x%x)=0x%x\n", PCIECFGREG_TREFUP,
             dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP),
             PCIECFGREG_TREFUP_EXT,
             dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP_EXT)));
        DHD_ERROR(
            ("errlog(0x%x)=0x%x errlog_addr(0x%x)=0x%x "
             "Function_Intstatus(0x%x)=0x%x "
             "Function_Intmask(0x%x)=0x%x Power_Intstatus(0x%x)=0x%x "
             "Power_Intmask(0x%x)=0x%x\n",
             PCIE_CORE_REG_ERRLOG,
             si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
                        PCIE_CORE_REG_ERRLOG, 0, 0),
             PCIE_CORE_REG_ERR_ADDR,
             si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
                        PCIE_CORE_REG_ERR_ADDR, 0, 0),
             PCIFunctionIntstatus(dhd->bus->sih->buscorerev),
             si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
                        PCIFunctionIntstatus(dhd->bus->sih->buscorerev), 0, 0),
             PCIFunctionIntmask(dhd->bus->sih->buscorerev),
             si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
                        PCIFunctionIntmask(dhd->bus->sih->buscorerev), 0, 0),
             PCIPowerIntstatus(dhd->bus->sih->buscorerev),
             si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
                        PCIPowerIntstatus(dhd->bus->sih->buscorerev), 0, 0),
             PCIPowerIntmask(dhd->bus->sih->buscorerev),
             si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
                        PCIPowerIntmask(dhd->bus->sih->buscorerev), 0, 0)));
        DHD_ERROR((
            "err_hdrlog1(0x%x)=0x%x err_hdrlog2(0x%x)=0x%x "
            "err_hdrlog3(0x%x)=0x%x err_hdrlog4(0x%x)=0x%x\n",
            (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1),
            si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
                       OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1), 0, 0),
            (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2),
            si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
                       OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2), 0, 0),
            (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3),
            si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
                       OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3), 0, 0),
            (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4),
            si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
                       OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4), 0, 0)));
        DHD_ERROR(("err_code(0x%x)=0x%x\n",
                   (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg),
                   si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
                              OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg),
                              0, 0)));

        dhd_pcie_dump_wrapper_regs(dhd);
        dhdpcie_hw_war_regdump(dhd->bus);
    }
#endif /* EXTENDED_PCIE_DEBUG_DUMP */

    dhd_pcie_dma_info_dump(dhd);

    return 0;
}

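/* Accessor for the bus force_bt_quiesce flag. */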
bool dhd_bus_force_bt_quiesce_enabled(struct dhd_bus *bus)
{
    return bus->force_bt_quiesce;
}

#ifdef DHD_HP2P
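/* Return the configured maximum HP2P TX or RX completion ring size. */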
uint16 dhd_bus_get_hp2p_ring_max_size(struct dhd_bus *bus, bool tx)
{
    if (tx) {
        return bus->hp2p_txcpl_max_items;
    } else {
        return bus->hp2p_rxcpl_max_items;
    }
}

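/* Set the maximum HP2P TX or RX completion ring size; returns the new value. */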
static uint16 dhd_bus_set_hp2p_ring_max_size(struct dhd_bus *bus, bool tx,
                                             uint16 val)
{
    if (tx) {
        bus->hp2p_txcpl_max_items = val;
    } else {
        bus->hp2p_rxcpl_max_items = val;
    }
    return val;
}
#endif /* DHD_HP2P */

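/*
 * Sanity-check dongle TCM: write each test pattern across the whole RAM in
 * MEMBLOCK-sized chunks, read it back and compare. Returns TRUE only if
 * every chunk matches for every pattern.
 */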
static bool dhd_bus_tcm_test(struct dhd_bus *bus)
{
    int ret = 0;
    int size;          /* Full mem size */
    int start;         /* Start address */
    int read_size = 0; /* Read size of each iteration */
    int num = 0;
    uint8 *read_buf, *write_buf;
    uint8 init_val[NUM_PATTERNS] = {
        0xFFu, /* 11111111 */
        0x00u, /* 00000000 */
    };

    if (!bus) {
        DHD_ERROR(("%s: bus is NULL!\n", __FUNCTION__));
        return FALSE;
    }

    read_buf = MALLOCZ(bus->dhd->osh, MEMBLOCK);
    if (!read_buf) {
        DHD_ERROR(("%s: MALLOC of read_buf failed\n", __FUNCTION__));
        return FALSE;
    }

    write_buf = MALLOCZ(bus->dhd->osh, MEMBLOCK);
    if (!write_buf) {
        MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
        DHD_ERROR(("%s: MALLOC of write_buf failed\n", __FUNCTION__));
        return FALSE;
    }

    DHD_ERROR(("%s: start %x, size: %x\n", __FUNCTION__, bus->dongle_ram_base,
               bus->ramsize));
    DHD_ERROR(("%s: memblock size %d, #pattern %d\n", __FUNCTION__, MEMBLOCK,
               NUM_PATTERNS));

    while (num < NUM_PATTERNS) {
        start = bus->dongle_ram_base;
        /* Get full mem size */
        size = bus->ramsize;

        memset(write_buf, init_val[num], MEMBLOCK);
        while (size > 0) {
            read_size = MIN(MEMBLOCK, size);
            memset(read_buf, 0, read_size);

            /* Write */
            if ((ret = dhdpcie_bus_membytes(bus, TRUE, start, write_buf,
                                            read_size))) {
                DHD_ERROR(("%s: Write Error membytes %d\n", __FUNCTION__, ret));
                MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
                MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
                return FALSE;
            }

            /* Read */
            if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, read_buf,
                                            read_size))) {
                DHD_ERROR(("%s: Read Error membytes %d\n", __FUNCTION__, ret));
                MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
                MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
                return FALSE;
            }

            /* Compare */
            if (memcmp(read_buf, write_buf, read_size)) {
                DHD_ERROR(("%s: Mismatch at %x, iter: %d\n", __FUNCTION__,
                           start, num));
                prhex("Readbuf", read_buf, read_size);
                prhex("Writebuf", write_buf, read_size);
                MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
                MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
                return FALSE;
            }

            /* Decrement size and increment start address */
            size -= read_size;
            start += read_size;
        }
        num++;
    }

    MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
    MFREE(bus->dhd->osh, write_buf, MEMBLOCK);

    DHD_ERROR(("%s: Success iter: %d\n", __FUNCTION__, num));
    return TRUE;
}