1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * DHD Bus Module for PCIE
4  *
5  * Copyright (C) 1999-2019, Broadcom.
6  *
7  *      Unless you and Broadcom execute a separate written software license
8  * agreement governing use of this software, this software is licensed to you
9  * under the terms of the GNU General Public License version 2 (the "GPL"),
10  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11  * following added to such license:
12  *
13  *      As a special exception, the copyright holders of this software give you
14  * permission to link this software with independent modules, and to copy and
15  * distribute the resulting executable under terms of your choice, provided that
16  * you also meet, for each linked independent module, the terms and conditions of
17  * the license of that module.  An independent module is a module which is not
18  * derived from this software.  The special exception does not apply to any
19  * modifications of the software.
20  *
21  *      Notwithstanding the above, under no circumstances may you combine this
22  * software in any way with any other Broadcom software provided under a license
23  * other than the GPL, without Broadcom's express prior written consent.
24  *
25  *
26  * <<Broadcom-WL-IPTag/Open:>>
27  *
28  * $Id: dhd_pcie.c 825481 2019-06-14 10:06:03Z $
29  */
30 
31 /* include files */
32 #include <typedefs.h>
33 #include <bcmutils.h>
34 #include <bcmdevs.h>
35 #include <siutils.h>
36 #include <hndoobr.h>
37 #include <hndsoc.h>
38 #include <hndpmu.h>
39 #include <etd.h>
40 #include <hnd_debug.h>
41 #include <sbchipc.h>
42 #include <sbhndarm.h>
43 #include <hnd_armtrap.h>
44 #if defined(DHD_DEBUG)
45 #include <hnd_cons.h>
46 #endif /* defined(DHD_DEBUG) */
47 #include <dngl_stats.h>
48 #include <pcie_core.h>
49 #include <dhd.h>
50 #include <dhd_bus.h>
51 #include <dhd_flowring.h>
52 #include <dhd_proto.h>
53 #include <dhd_dbg.h>
54 #include <dhd_debug.h>
55 #include <dhd_daemon.h>
56 #include <dhdioctl.h>
57 #include <sdiovar.h>
58 #include <bcmmsgbuf.h>
59 #include <pcicfg.h>
60 #include <dhd_pcie.h>
61 #include <bcmpcie.h>
62 #include <bcmendian.h>
63 #include <bcmstdlib_s.h>
64 #ifdef DHDTCPACK_SUPPRESS
65 #include <dhd_ip.h>
66 #endif /* DHDTCPACK_SUPPRESS */
67 #include <bcmevent.h>
68 #include <dhd_config.h>
69 
70 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
71 #include <linux/pm_runtime.h>
72 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
73 
74 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
75 #include <debugger.h>
76 #endif /* DEBUGGER || DHD_DSCOPE */
77 
78 #ifdef DNGL_AXI_ERROR_LOGGING
79 #include <dhd_linux_wq.h>
80 #include <dhd_linux.h>
81 #endif /* DNGL_AXI_ERROR_LOGGING */
82 
83 #if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
84 #include <dhd_linux_priv.h>
85 #endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
86 
87 #include <otpdefs.h>
88 #define EXTENDED_PCIE_DEBUG_DUMP 1	/* Enable extended PCIe registers dump */
89 
90 #define MEMBLOCK	2048		/* Block size used for downloading of dongle image */
91 #define MAX_WKLK_IDLE_CHECK	3	/* times wake_lock checked before deciding not to suspend */
92 
93 #define	DHD_MAX_ITEMS_HPP_TXCPL_RING	512
94 #define	DHD_MAX_ITEMS_HPP_RXCPL_RING	512
95 
96 #define ARMCR4REG_CORECAP	(0x4/sizeof(uint32))
97 #define ARMCR4REG_MPUCTRL	(0x90/sizeof(uint32))
98 #define ACC_MPU_SHIFT		25
99 #define ACC_MPU_MASK		(0x1u << ACC_MPU_SHIFT)
100 
101 #define REG_WORK_AROUND		(0x1e4/sizeof(uint32))
102 
103 #define ARMCR4REG_BANKIDX	(0x40/sizeof(uint32))
104 #define ARMCR4REG_BANKPDA	(0x4C/sizeof(uint32))
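
/*
 * Illustrative sketch (not part of the original file): the ARMCR4REG_*
 * values above are word indices (byte offset / sizeof(uint32)), so they
 * can subscript a volatile uint32 register window directly. The 'regs'
 * parameter here is a hypothetical mapped CR4 core register base.
 */
#if 0
static uint32
example_read_cr4_corecap(volatile uint32 *regs)
{
	return regs[ARMCR4REG_CORECAP]; /* byte offset 0x4 == word index 1 */
}
#endif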
105 /* Temporary WAR to fix precommit until the sync issue between the trunk & precommit branches is resolved */
106 
107 /* CTO Prevention Recovery */
108 #ifdef BCMQT_HW
109 #define CTO_TO_CLEAR_WAIT_MS 10000
110 #define CTO_TO_CLEAR_WAIT_MAX_CNT 100
111 #else
112 #define CTO_TO_CLEAR_WAIT_MS 1000
113 #define CTO_TO_CLEAR_WAIT_MAX_CNT 10
114 #endif // endif
115 
116 /* Fetch address of a member in the pciedev_shared structure in dongle memory */
117 #define DHD_PCIE_SHARED_MEMBER_ADDR(bus, member) \
118 	(bus)->shared_addr + OFFSETOF(pciedev_shared_t, member)
119 
120 /* Fetch address of a member in rings_info_ptr structure in dongle memory */
121 #define DHD_RING_INFO_MEMBER_ADDR(bus, member) \
122 	(bus)->pcie_sh->rings_info_ptr + OFFSETOF(ring_info_t, member)
123 
124 /* Fetch address of a member in the ring_mem structure in dongle memory */
125 #define DHD_RING_MEM_MEMBER_ADDR(bus, ringid, member) \
126 	(bus)->ring_sh[ringid].ring_mem_addr + OFFSETOF(ring_mem_t, member)
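
/*
 * Illustrative sketch (not part of the original file): these macros yield a
 * dongle (backplane) address, which is then handed to the TCM accessors
 * declared further down, in the same way dhd_bus_cmn_readshared() does.
 * The console_addr member is assumed here purely for illustration.
 */
#if 0
static uint32
example_read_shared_console_addr(dhd_bus_t *bus)
{
	ulong addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, console_addr);
	return dhdpcie_bus_rtcm32(bus, addr);
}
#endif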
127 
128 #if defined(SUPPORT_MULTIPLE_BOARD_REV)
129 	extern unsigned int system_rev;
130 #endif /* SUPPORT_MULTIPLE_BOARD_REV */
131 
132 #ifdef EWP_EDL
133 extern int host_edl_support;
134 #endif // endif
135 
136 /* This can be overridden by the module parameter (dma_ring_indices) defined in dhd_linux.c */
137 uint dma_ring_indices = 0;
138 /* This can be overridden by the module parameter (h2d_phase) defined in dhd_linux.c */
139 bool h2d_phase = 0;
140 /* This can be overridden by the module parameter (force_trap_bad_h2d_phase)
141  * defined in dhd_linux.c
142  */
143 bool force_trap_bad_h2d_phase = 0;
144 
145 int dhd_dongle_memsize;
146 int dhd_dongle_ramsize;
147 struct dhd_bus *g_dhd_bus = NULL;
148 #ifdef DNGL_AXI_ERROR_LOGGING
149 static void dhd_log_dump_axi_error(uint8 *axi_err);
150 #endif /* DNGL_AXI_ERROR_LOGGING */
151 
152 static int dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size);
153 static int dhdpcie_bus_readconsole(dhd_bus_t *bus);
154 #if defined(DHD_FW_COREDUMP)
155 static int dhdpcie_mem_dump(dhd_bus_t *bus);
156 static int dhdpcie_get_mem_dump(dhd_bus_t *bus);
157 #endif /* DHD_FW_COREDUMP */
158 
159 static int dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size);
160 static int dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid,
161 	const char *name, void *params,
162 	int plen, void *arg, int len, int val_size);
163 static int dhdpcie_bus_lpback_req(struct  dhd_bus *bus, uint32 intval);
164 static int dhdpcie_bus_dmaxfer_req(struct  dhd_bus *bus,
165 	uint32 len, uint32 srcdelay, uint32 destdelay,
166 	uint32 d11_lpbk, uint32 core_num, uint32 wait);
167 static int dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter);
168 static int _dhdpcie_download_firmware(struct dhd_bus *bus);
169 static int dhdpcie_download_firmware(dhd_bus_t *bus, osl_t *osh);
170 static int dhdpcie_bus_write_vars(dhd_bus_t *bus);
171 static bool dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus);
172 static bool dhdpci_bus_read_frames(dhd_bus_t *bus);
173 static int dhdpcie_readshared(dhd_bus_t *bus);
174 static void dhdpcie_init_shared_addr(dhd_bus_t *bus);
175 static bool dhdpcie_dongle_attach(dhd_bus_t *bus);
176 static void dhdpcie_bus_dongle_setmemsize(dhd_bus_t *bus, int mem_size);
177 static void dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh,
178 	bool dongle_isolation, bool reset_flag);
179 static void dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh);
180 static int dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len);
181 static void dhdpcie_setbar1win(dhd_bus_t *bus, uint32 addr);
182 static uint8 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset);
183 static void dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data);
184 static void dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data);
185 static uint16 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset);
186 static void dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data);
187 static uint32 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset);
188 #ifdef DHD_SUPPORT_64BIT
189 static void dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data) __attribute__ ((used));
190 static uint64 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset) __attribute__ ((used));
191 #endif /* DHD_SUPPORT_64BIT */
192 static void dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data);
193 static void dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size);
194 static int dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b);
195 static void dhdpcie_fw_trap(dhd_bus_t *bus);
196 static void dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info);
197 static void dhdpcie_handle_mb_data(dhd_bus_t *bus);
198 extern void dhd_dpc_enable(dhd_pub_t *dhdp);
199 extern void dhd_dpc_kill(dhd_pub_t *dhdp);
200 
201 #ifdef IDLE_TX_FLOW_MGMT
202 static void dhd_bus_check_idle_scan(dhd_bus_t *bus);
203 static void dhd_bus_idle_scan(dhd_bus_t *bus);
204 #endif /* IDLE_TX_FLOW_MGMT */
205 
206 #ifdef EXYNOS_PCIE_DEBUG
207 extern void exynos_pcie_register_dump(int ch_num);
208 #endif /* EXYNOS_PCIE_DEBUG */
209 
210 #if defined(DHD_H2D_LOG_TIME_SYNC)
211 static void dhdpci_bus_rte_log_time_sync_poll(dhd_bus_t *bus);
212 #endif /* DHD_H2D_LOG_TIME_SYNC */
213 
214 #define     PCI_VENDOR_ID_BROADCOM          0x14e4
215 
216 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
217 #define MAX_D3_ACK_TIMEOUT	100
218 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
219 
220 #define DHD_DEFAULT_DOORBELL_TIMEOUT 200	/* ms */
221 static bool dhdpcie_check_firmware_compatible(uint32 f_api_version, uint32 h_api_version);
222 static int dhdpcie_cto_error_recovery(struct dhd_bus *bus);
223 
224 static int dhdpcie_init_d11status(struct dhd_bus *bus);
225 
226 static int dhdpcie_wrt_rnd(struct dhd_bus *bus);
227 
228 extern uint16 dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd);
229 extern void dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost);
230 
231 #ifdef DHD_HP2P
232 extern enum hrtimer_restart dhd_hp2p_write(struct hrtimer *timer);
233 static uint16 dhd_bus_set_hp2p_ring_max_size(struct dhd_bus *bus, bool tx, uint16 val);
234 #endif // endif
235 #define NUM_PATTERNS 2
236 static bool dhd_bus_tcm_test(struct dhd_bus *bus);
237 
238 /* IOVar table */
239 enum {
240 	IOV_INTR = 1,
241 	IOV_MEMSIZE,
242 	IOV_SET_DOWNLOAD_STATE,
243 	IOV_DEVRESET,
244 	IOV_VARS,
245 	IOV_MSI_SIM,
246 	IOV_PCIE_LPBK,
247 	IOV_CC_NVMSHADOW,
248 	IOV_RAMSIZE,
249 	IOV_RAMSTART,
250 	IOV_SLEEP_ALLOWED,
251 	IOV_PCIE_DMAXFER,
252 	IOV_PCIE_SUSPEND,
253 	IOV_DONGLEISOLATION,
254 	IOV_LTRSLEEPON_UNLOOAD,
255 	IOV_METADATA_DBG,
256 	IOV_RX_METADATALEN,
257 	IOV_TX_METADATALEN,
258 	IOV_TXP_THRESHOLD,
259 	IOV_BUZZZ_DUMP,
260 	IOV_DUMP_RINGUPD_BLOCK,
261 	IOV_DMA_RINGINDICES,
262 	IOV_FORCE_FW_TRAP,
263 	IOV_DB1_FOR_MB,
264 	IOV_FLOW_PRIO_MAP,
265 	IOV_RXBOUND,
266 	IOV_TXBOUND,
267 	IOV_HANGREPORT,
268 	IOV_H2D_MAILBOXDATA,
269 	IOV_INFORINGS,
270 	IOV_H2D_PHASE,
271 	IOV_H2D_ENABLE_TRAP_BADPHASE,
272 	IOV_H2D_TXPOST_MAX_ITEM,
273 	IOV_TRAPDATA,
274 	IOV_TRAPDATA_RAW,
275 	IOV_CTO_PREVENTION,
276 	IOV_PCIE_WD_RESET,
277 	IOV_DUMP_DONGLE,
278 	IOV_HWA_ENAB_BMAP,
279 	IOV_IDMA_ENABLE,
280 	IOV_IFRM_ENABLE,
281 	IOV_CLEAR_RING,
282 	IOV_DAR_ENABLE,
283 	IOV_DNGL_CAPS,   /**< returns string with dongle capabilities */
284 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
285 	IOV_GDB_SERVER,  /**< starts gdb server on given interface */
286 #endif /* DEBUGGER || DHD_DSCOPE */
287 	IOV_INB_DW_ENABLE,
288 	IOV_CTO_THRESHOLD,
289 	IOV_HSCBSIZE, /* get HSCB buffer size */
290 	IOV_HP2P_ENABLE,
291 	IOV_HP2P_PKT_THRESHOLD,
292 	IOV_HP2P_TIME_THRESHOLD,
293 	IOV_HP2P_PKT_EXPIRY,
294 	IOV_HP2P_TXCPL_MAXITEMS,
295 	IOV_HP2P_RXCPL_MAXITEMS,
296 	IOV_EXTDTXS_IN_TXCPL,
297 	IOV_HOSTRDY_AFTER_INIT,
298 	IOV_PCIE_LAST /**< unused IOVAR */
299 };
300 
301 const bcm_iovar_t dhdpcie_iovars[] = {
302 	{"intr",	IOV_INTR,	0, 	0, IOVT_BOOL,	0 },
303 	{"memsize",	IOV_MEMSIZE,	0, 	0, IOVT_UINT32,	0 },
304 	{"dwnldstate",	IOV_SET_DOWNLOAD_STATE,	0, 	0, IOVT_BOOL,	0 },
305 	{"vars",	IOV_VARS,	0, 	0, IOVT_BUFFER,	0 },
306 	{"devreset",	IOV_DEVRESET,	0, 	0, IOVT_UINT8,	0 },
307 	{"pcie_device_trap", IOV_FORCE_FW_TRAP, 0, 	0, 0,	0 },
308 	{"pcie_lpbk",	IOV_PCIE_LPBK,	0,	0, IOVT_UINT32,	0 },
309 	{"cc_nvmshadow", IOV_CC_NVMSHADOW, 0,	0, IOVT_BUFFER, 0 },
310 	{"ramsize",	IOV_RAMSIZE,	0, 	0, IOVT_UINT32,	0 },
311 	{"ramstart",	IOV_RAMSTART,	0, 	0, IOVT_UINT32,	0 },
312 	{"pcie_dmaxfer", IOV_PCIE_DMAXFER, 0, 0, IOVT_BUFFER, sizeof(dma_xfer_info_t)},
313 	{"pcie_suspend", IOV_PCIE_SUSPEND,	DHD_IOVF_PWRREQ_BYPASS,	0, IOVT_UINT32,	0 },
314 	{"sleep_allowed",	IOV_SLEEP_ALLOWED,	0,	0, IOVT_BOOL,	0 },
315 	{"dngl_isolation", IOV_DONGLEISOLATION,	0, 	0, IOVT_UINT32,	0 },
316 	{"ltrsleep_on_unload", IOV_LTRSLEEPON_UNLOOAD,	0,	0, IOVT_UINT32,	0 },
317 	{"dump_ringupdblk", IOV_DUMP_RINGUPD_BLOCK,	0, 	0, IOVT_BUFFER,	0 },
318 	{"dma_ring_indices", IOV_DMA_RINGINDICES,	0, 	0, IOVT_UINT32,	0},
319 	{"metadata_dbg", IOV_METADATA_DBG,	0,	0, IOVT_BOOL,	0 },
320 	{"rx_metadata_len", IOV_RX_METADATALEN,	0, 	0, IOVT_UINT32,	0 },
321 	{"tx_metadata_len", IOV_TX_METADATALEN,	0, 	0, IOVT_UINT32,	0 },
322 	{"db1_for_mb", IOV_DB1_FOR_MB,	0, 	0, IOVT_UINT32,	0 },
323 	{"txp_thresh", IOV_TXP_THRESHOLD,	0,	0, IOVT_UINT32,	0 },
324 	{"buzzz_dump", IOV_BUZZZ_DUMP,		0, 	0, IOVT_UINT32,	0 },
325 	{"flow_prio_map", IOV_FLOW_PRIO_MAP,	0, 	0, IOVT_UINT32,	0 },
326 	{"rxbound",     IOV_RXBOUND,    0, 0,	IOVT_UINT32,    0 },
327 	{"txbound",     IOV_TXBOUND,    0, 0,	IOVT_UINT32,    0 },
328 	{"fw_hang_report", IOV_HANGREPORT,	0, 0,	IOVT_BOOL,	0 },
329 	{"h2d_mb_data",     IOV_H2D_MAILBOXDATA,    0, 0,      IOVT_UINT32,    0 },
330 	{"inforings",   IOV_INFORINGS,    0, 0,      IOVT_UINT32,    0 },
331 	{"h2d_phase",   IOV_H2D_PHASE,    0, 0,      IOVT_UINT32,    0 },
332 	{"force_trap_bad_h2d_phase", IOV_H2D_ENABLE_TRAP_BADPHASE,    0, 0,
333 	IOVT_UINT32,    0 },
334 	{"h2d_max_txpost",   IOV_H2D_TXPOST_MAX_ITEM,    0, 0,      IOVT_UINT32,    0 },
335 	{"trap_data",	IOV_TRAPDATA,	0, 0,	IOVT_BUFFER,	0 },
336 	{"trap_data_raw",	IOV_TRAPDATA_RAW,	0, 0,	IOVT_BUFFER,	0 },
337 	{"cto_prevention",	IOV_CTO_PREVENTION,	0, 0,	IOVT_UINT32,	0 },
338 	{"pcie_wd_reset",	IOV_PCIE_WD_RESET,	0,	0, IOVT_BOOL,	0 },
339 	{"dump_dongle", IOV_DUMP_DONGLE, 0, 0, IOVT_BUFFER,
340 	MAX(sizeof(dump_dongle_in_t), sizeof(dump_dongle_out_t))},
341 	{"clear_ring",   IOV_CLEAR_RING,    0, 0,  IOVT_UINT32,    0 },
342 	{"hwa_enab_bmap",   IOV_HWA_ENAB_BMAP,    0, 0,  IOVT_UINT32,    0 },
343 	{"idma_enable",   IOV_IDMA_ENABLE,    0, 0,  IOVT_UINT32,    0 },
344 	{"ifrm_enable",   IOV_IFRM_ENABLE,    0, 0,  IOVT_UINT32,    0 },
345 	{"dar_enable",   IOV_DAR_ENABLE,    0, 0,  IOVT_UINT32,    0 },
346 	{"cap", IOV_DNGL_CAPS,	0, 0, IOVT_BUFFER,	0},
347 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
348 	{"gdb_server", IOV_GDB_SERVER,    0, 0,      IOVT_UINT32,    0 },
349 #endif /* DEBUGGER || DHD_DSCOPE */
350 	{"inb_dw_enable",   IOV_INB_DW_ENABLE,    0, 0,  IOVT_UINT32,    0 },
351 	{"cto_threshold",	IOV_CTO_THRESHOLD,	0,	0, IOVT_UINT32,	0 },
352 	{"hscbsize",	IOV_HSCBSIZE,	0,	0,	IOVT_UINT32,	0 },
353 #ifdef DHD_HP2P
354 	{"hp2p_enable", IOV_HP2P_ENABLE,	0,	0, IOVT_UINT32,	0 },
355 	{"hp2p_pkt_thresh", IOV_HP2P_PKT_THRESHOLD,	0,	0, IOVT_UINT32,	0 },
356 	{"hp2p_time_thresh", IOV_HP2P_TIME_THRESHOLD,	0,	0, IOVT_UINT32,	0 },
357 	{"hp2p_pkt_expiry", IOV_HP2P_PKT_EXPIRY,	0,	0, IOVT_UINT32,	0 },
358 	{"hp2p_txcpl_maxitems", IOV_HP2P_TXCPL_MAXITEMS,	0,	0, IOVT_UINT32,	0 },
359 	{"hp2p_rxcpl_maxitems", IOV_HP2P_RXCPL_MAXITEMS,	0,	0, IOVT_UINT32,	0 },
360 #endif // endif
361 	{"extdtxs_in_txcpl", IOV_EXTDTXS_IN_TXCPL,	0,	0, IOVT_UINT32,	0 },
362 	{"hostrdy_after_init", IOV_HOSTRDY_AFTER_INIT,	0,	0, IOVT_UINT32,	0 },
363 	{NULL, 0, 0, 0, 0, 0 }
364 };
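
/*
 * Illustrative sketch (not part of the original file): the table above is
 * NULL-terminated, so resolving a name is a linear scan; this is roughly
 * what bcm_iovar_lookup() from bcmutils does before dhdpcie_bus_doiovar()
 * dispatches on the resulting varid.
 */
#if 0
static const bcm_iovar_t *
example_iovar_lookup(const char *name)
{
	const bcm_iovar_t *vi;

	for (vi = dhdpcie_iovars; vi->name != NULL; vi++) {
		if (!strcmp(vi->name, name))
			return vi;
	}
	return NULL;
}
#endif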
365 
366 #define MAX_READ_TIMEOUT	(2 * 1000 * 1000)
367 
368 #ifndef DHD_RXBOUND
369 #define DHD_RXBOUND		64
370 #endif // endif
371 #ifndef DHD_TXBOUND
372 #define DHD_TXBOUND		64
373 #endif // endif
374 
375 #define DHD_INFORING_BOUND	32
376 #define DHD_BTLOGRING_BOUND	32
377 
378 uint dhd_rxbound = DHD_RXBOUND;
379 uint dhd_txbound = DHD_TXBOUND;
380 
381 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
382 /** the GDB debugger layer will call back into this (bus) layer to read/write dongle memory */
383 static struct dhd_gdb_bus_ops_s  bus_ops = {
384 	.read_u16 = dhdpcie_bus_rtcm16,
385 	.read_u32 = dhdpcie_bus_rtcm32,
386 	.write_u32 = dhdpcie_bus_wtcm32,
387 };
388 #endif /* DEBUGGER || DHD_DSCOPE */
389 
390 bool
391 dhd_bus_get_flr_force_fail(struct dhd_bus *bus)
392 {
393 	return bus->flr_force_fail;
394 }
395 
396 /**
397  * Register/Unregister functions are called by the main DHD entry point (e.g. module insertion) to
398  * link with the bus driver, in order to look for or await the device.
399  */
400 int
401 dhd_bus_register(void)
402 {
403 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
404 
405 	return dhdpcie_bus_register();
406 }
407 
408 void
409 dhd_bus_unregister(void)
410 {
411 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
412 
413 	dhdpcie_bus_unregister();
414 	return;
415 }
416 
417 /** returns a host virtual address */
418 uint32 *
419 dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size)
420 {
421 	return (uint32 *)REG_MAP(addr, size);
422 }
423 
424 void
425 dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size)
426 {
427 	REG_UNMAP(addr);
428 	return;
429 }
430 
431 /**
432  * Return the H2D doorbell register address.
433  * Use DAR registers instead of the enum register for corerev >= 23 (4347B0).
434  */
435 static INLINE uint
436 dhd_bus_db0_addr_get(struct dhd_bus *bus)
437 {
438 	uint addr = PCIH2D_MailBox;
439 	uint dar_addr = DAR_PCIH2D_DB0_0(bus->sih->buscorerev);
440 
441 	return ((DAR_ACTIVE(bus->dhd)) ? dar_addr : addr);
442 }
443 
444 static INLINE uint
445 dhd_bus_db0_addr_2_get(struct dhd_bus *bus)
446 {
447 	return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB2_0(bus->sih->buscorerev) : PCIH2D_MailBox_2);
448 }
449 
450 static INLINE uint
451 dhd_bus_db1_addr_get(struct dhd_bus *bus)
452 {
453 	return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB0_1(bus->sih->buscorerev) : PCIH2D_DB1);
454 }
455 
456 static INLINE uint
457 dhd_bus_db1_addr_1_get(struct dhd_bus *bus)
458 {
459 	return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB1_1(bus->sih->buscorerev) : PCIH2D_DB1_1);
460 }
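
/*
 * Illustrative sketch (not part of the original file): how the address
 * helpers above are consumed. A doorbell ring is a plain register write at
 * whichever address (DAR or legacy mailbox) the helper selected; the
 * si_corereg() (mask, value) semantics match the rest of this file.
 */
#if 0
static void
example_ring_h2d_db0(dhd_bus_t *bus, uint32 value)
{
	uint addr = dhd_bus_db0_addr_get(bus);

	si_corereg(bus->sih, bus->sih->buscoreidx, addr, ~0, value);
}
#endif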
461 
462 /*
463  * WAR for SWWLAN-215055 - [4378B0] ARM fails to boot without DAR WL domain request
464  */
465 static INLINE void
466 dhd_bus_pcie_pwr_req_wl_domain(struct dhd_bus *bus, uint offset, bool enable)
467 {
468 	if (enable) {
469 		si_corereg(bus->sih, bus->sih->buscoreidx, offset,
470 			SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT,
471 			SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT);
472 	} else {
473 		si_corereg(bus->sih, bus->sih->buscoreidx, offset,
474 			SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT, 0);
475 	}
476 }
477 
478 static INLINE void
479 _dhd_bus_pcie_pwr_req_clear_cmn(struct dhd_bus *bus)
480 {
481 	uint mask;
482 
483 	/*
484 	 * If multiple de-asserts, decrement ref and return
485 	 * Clear power request when only one pending
486 	 * so initial request is not removed unexpectedly
487 	 */
488 	if (bus->pwr_req_ref > 1) {
489 		bus->pwr_req_ref--;
490 		return;
491 	}
492 
493 	ASSERT(bus->pwr_req_ref == 1);
494 
495 	if (MULTIBP_ENAB(bus->sih)) {
496 		/* Common BP controlled by HW so only need to toggle WL/ARM backplane */
497 		mask = SRPWR_DMN1_ARMBPSD_MASK;
498 	} else {
499 		mask = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
500 	}
501 
502 	si_srpwr_request(bus->sih, mask, 0);
503 	bus->pwr_req_ref = 0;
504 }
505 
506 static INLINE void
507 dhd_bus_pcie_pwr_req_clear(struct dhd_bus *bus)
508 {
509 	unsigned long flags = 0;
510 
511 	DHD_GENERAL_LOCK(bus->dhd, flags);
512 	_dhd_bus_pcie_pwr_req_clear_cmn(bus);
513 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
514 }
515 
516 static INLINE void
517 dhd_bus_pcie_pwr_req_clear_nolock(struct dhd_bus *bus)
518 {
519 	_dhd_bus_pcie_pwr_req_clear_cmn(bus);
520 }
521 
522 static INLINE void
523 _dhd_bus_pcie_pwr_req_cmn(struct dhd_bus *bus)
524 {
525 	uint mask, val;
526 
527 	/* If multiple request entries, increment reference and return */
528 	if (bus->pwr_req_ref > 0) {
529 		bus->pwr_req_ref++;
530 		return;
531 	}
532 
533 	ASSERT(bus->pwr_req_ref == 0);
534 
535 	if (MULTIBP_ENAB(bus->sih)) {
536 		/* Common BP controlled by HW so only need to toggle WL/ARM backplane */
537 		mask = SRPWR_DMN1_ARMBPSD_MASK;
538 		val = SRPWR_DMN1_ARMBPSD_MASK;
539 	} else {
540 		mask = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
541 		val = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
542 	}
543 
544 	si_srpwr_request(bus->sih, mask, val);
545 
546 	bus->pwr_req_ref = 1;
547 }
548 
549 static INLINE void
550 dhd_bus_pcie_pwr_req(struct dhd_bus *bus)
551 {
552 	unsigned long flags = 0;
553 
554 	DHD_GENERAL_LOCK(bus->dhd, flags);
555 	_dhd_bus_pcie_pwr_req_cmn(bus);
556 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
557 }
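
/*
 * Illustrative sketch (not part of the original file): the request/clear
 * helpers above are reference counted, so callers bracket backplane access
 * symmetrically and only the first request and the last clear actually
 * touch the SRPWR register.
 */
#if 0
static void
example_bracketed_backplane_access(dhd_bus_t *bus)
{
	if (MULTIBP_ENAB(bus->sih))
		dhd_bus_pcie_pwr_req(bus);

	/* ... read/write ARM or WL backplane registers here ... */

	if (MULTIBP_ENAB(bus->sih))
		dhd_bus_pcie_pwr_req_clear(bus);
}
#endif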
558 
559 static INLINE void
560 _dhd_bus_pcie_pwr_req_pd0123_cmn(struct dhd_bus *bus)
561 {
562 	uint mask, val;
563 
564 	mask = SRPWR_DMN_ALL_MASK(bus->sih);
565 	val = SRPWR_DMN_ALL_MASK(bus->sih);
566 
567 	si_srpwr_request(bus->sih, mask, val);
568 }
569 
570 static INLINE void
571 dhd_bus_pcie_pwr_req_reload_war(struct dhd_bus *bus)
572 {
573 	unsigned long flags = 0;
574 
575 	DHD_GENERAL_LOCK(bus->dhd, flags);
576 	_dhd_bus_pcie_pwr_req_pd0123_cmn(bus);
577 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
578 }
579 
580 static INLINE void
581 _dhd_bus_pcie_pwr_req_clear_pd0123_cmn(struct dhd_bus *bus)
582 {
583 	uint mask;
584 
585 	mask = SRPWR_DMN_ALL_MASK(bus->sih);
586 
587 	si_srpwr_request(bus->sih, mask, 0);
588 }
589 
590 static INLINE void
591 dhd_bus_pcie_pwr_req_clear_reload_war(struct dhd_bus *bus)
592 {
593 	unsigned long flags = 0;
594 
595 	DHD_GENERAL_LOCK(bus->dhd, flags);
596 	_dhd_bus_pcie_pwr_req_clear_pd0123_cmn(bus);
597 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
598 }
599 
600 static INLINE void
601 dhd_bus_pcie_pwr_req_nolock(struct dhd_bus *bus)
602 {
603 	_dhd_bus_pcie_pwr_req_cmn(bus);
604 }
605 
606 bool
607 dhdpcie_chip_support_msi(dhd_bus_t *bus)
608 {
609 	DHD_INFO(("%s: buscorerev=%d chipid=0x%x\n",
610 		__FUNCTION__, bus->sih->buscorerev, si_chipid(bus->sih)));
611 	if (bus->sih->buscorerev <= 14 ||
612 		si_chipid(bus->sih) == BCM4375_CHIP_ID ||
613 		si_chipid(bus->sih) == BCM4362_CHIP_ID ||
614 		si_chipid(bus->sih) == BCM43751_CHIP_ID ||
615 		si_chipid(bus->sih) == BCM4361_CHIP_ID ||
616 		si_chipid(bus->sih) == BCM4359_CHIP_ID) {
617 		return FALSE;
618 	} else {
619 		return TRUE;
620 	}
621 }
622 
623 /**
624  * Called once for each hardware (dongle) instance that this DHD manages.
625  *
626  * 'regs' is the host virtual address that maps to the start of the PCIe BAR0 window. The first 4096
627  * bytes in this window are mapped to the backplane address in the PCIEBAR0Window register. The
628  * precondition is that the PCIEBAR0Window register 'points' at the PCIe core.
629  *
630  * 'tcm' is the *host* virtual address at which tcm is mapped.
631  */
632 int dhdpcie_bus_attach(osl_t *osh, dhd_bus_t **bus_ptr,
633 	volatile char *regs, volatile char *tcm, void *pci_dev, wifi_adapter_info_t *adapter)
634 {
635 	dhd_bus_t *bus = NULL;
636 	int ret = BCME_OK;
637 
638 	DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
639 
640 	do {
641 		if (!(bus = MALLOCZ(osh, sizeof(dhd_bus_t)))) {
642 			DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
643 			ret = BCME_NORESOURCE;
644 			break;
645 		}
646 		bus->bus = adapter->bus_type;
647 		bus->bus_num = adapter->bus_num;
648 		bus->slot_num = adapter->slot_num;
649 
650 		bus->regs = regs;
651 		bus->tcm = tcm;
652 		bus->osh = osh;
653 		/* Save pci_dev into dhd_bus, as it may be needed in dhd_attach */
654 		bus->dev = (struct pci_dev *)pci_dev;
655 
656 		dll_init(&bus->flowring_active_list);
657 #ifdef IDLE_TX_FLOW_MGMT
658 		bus->active_list_last_process_ts = OSL_SYSUPTIME();
659 #endif /* IDLE_TX_FLOW_MGMT */
660 
661 		/* Attach pcie shared structure */
662 		if (!(bus->pcie_sh = MALLOCZ(osh, sizeof(pciedev_shared_t)))) {
663 			DHD_ERROR(("%s: MALLOC of bus->pcie_sh failed\n", __FUNCTION__));
664 			ret = BCME_NORESOURCE;
665 			break;
666 		}
667 
668 		/* dhd_common_init(osh); */
669 
670 		if (dhdpcie_dongle_attach(bus)) {
671 			DHD_ERROR(("%s: dhdpcie_probe_attach failed\n", __FUNCTION__));
672 			ret = BCME_NOTREADY;
673 			break;
674 		}
675 
676 		/* software resources */
677 		if (!(bus->dhd = dhd_attach(osh, bus, PCMSGBUF_HDRLEN))) {
678 			DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));
679 			ret = BCME_NORESOURCE;
680 			break;
681 		}
682 #if defined(GET_OTP_MAC_ENABLE) || defined(GET_OTP_MODULE_NAME)
683 		dhd_conf_get_otp(bus->dhd, bus->sih);
684 #endif
685 		DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
686 		bus->dhd->busstate = DHD_BUS_DOWN;
687 		bus->dhd->hostrdy_after_init = TRUE;
688 		bus->db1_for_mb = TRUE;
689 		bus->dhd->hang_report = TRUE;
690 		bus->use_mailbox = FALSE;
691 		bus->use_d0_inform = FALSE;
692 		bus->intr_enabled = FALSE;
693 		bus->flr_force_fail = FALSE;
694 		/* By default disable HWA and enable it via iovar */
695 		bus->hwa_enab_bmap = 0;
696 		/* update the dma indices if set through module parameter. */
697 		if (dma_ring_indices != 0) {
698 			dhdpcie_set_dma_ring_indices(bus->dhd, dma_ring_indices);
699 		}
700 		/* update h2d phase support if set through module parameter */
701 		bus->dhd->h2d_phase_supported = h2d_phase ? TRUE : FALSE;
702 		/* update force trap on bad phase if set through module parameter */
703 		bus->dhd->force_dongletrap_on_bad_h2d_phase =
704 			force_trap_bad_h2d_phase ? TRUE : FALSE;
705 #ifdef IDLE_TX_FLOW_MGMT
706 		bus->enable_idle_flowring_mgmt = FALSE;
707 #endif /* IDLE_TX_FLOW_MGMT */
708 		bus->irq_registered = FALSE;
709 
710 #ifdef DHD_MSI_SUPPORT
711 		bus->d2h_intr_method = enable_msi && dhdpcie_chip_support_msi(bus) ?
712 			PCIE_MSI : PCIE_INTX;
713 #else
714 		bus->d2h_intr_method = PCIE_INTX;
715 #endif /* DHD_MSI_SUPPORT */
716 
717 #ifdef DHD_HP2P
718 		bus->hp2p_txcpl_max_items = DHD_MAX_ITEMS_HPP_TXCPL_RING;
719 		bus->hp2p_rxcpl_max_items = DHD_MAX_ITEMS_HPP_RXCPL_RING;
720 #endif /* DHD_HP2P */
721 
722 		DHD_TRACE(("%s: EXIT SUCCESS\n",
723 			__FUNCTION__));
724 		g_dhd_bus = bus;
725 		*bus_ptr = bus;
726 		return ret;
727 	} while (0);
728 
729 	DHD_TRACE(("%s: EXIT FAILURE\n", __FUNCTION__));
730 
731 	if (bus && bus->pcie_sh) {
732 		MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
733 	}
734 
735 	if (bus) {
736 		MFREE(osh, bus, sizeof(dhd_bus_t));
737 	}
738 
739 	return ret;
740 }
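
/*
 * Illustrative sketch (not part of the original file): a probe path calls
 * dhdpcie_bus_attach() with the host-mapped BAR0 (regs) and BAR1 (tcm)
 * windows described in the comment above. All names below are
 * placeholders, not the actual probe code.
 */
#if 0
static int
example_probe(osl_t *osh, volatile char *bar0_va, volatile char *bar1_va,
	struct pci_dev *pdev, wifi_adapter_info_t *adapter)
{
	dhd_bus_t *bus = NULL;
	int err;

	err = dhdpcie_bus_attach(osh, &bus, bar0_va, bar1_va, pdev, adapter);
	if (err != BCME_OK)
		return err;
	/* on success, bus->dhd->busstate is DHD_BUS_DOWN until bus init */
	return BCME_OK;
}
#endif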
741 
742 bool
743 dhd_bus_skip_clm(dhd_pub_t *dhdp)
744 {
745 	switch (dhd_bus_chip_id(dhdp)) {
746 		case BCM4369_CHIP_ID:
747 			return TRUE;
748 		default:
749 			return FALSE;
750 	}
751 }
752 
753 uint
754 dhd_bus_chip(struct dhd_bus *bus)
755 {
756 	ASSERT(bus->sih != NULL);
757 	return bus->sih->chip;
758 }
759 
760 uint
761 dhd_bus_chiprev(struct dhd_bus *bus)
762 {
763 	ASSERT(bus);
764 	ASSERT(bus->sih != NULL);
765 	return bus->sih->chiprev;
766 }
767 
768 void *
769 dhd_bus_pub(struct dhd_bus *bus)
770 {
771 	return bus->dhd;
772 }
773 
774 void *
775 dhd_bus_sih(struct dhd_bus *bus)
776 {
777 	return (void *)bus->sih;
778 }
779 
780 void *
781 dhd_bus_txq(struct dhd_bus *bus)
782 {
783 	return &bus->txq;
784 }
785 
786 /** Get Chip ID version */
787 uint dhd_bus_chip_id(dhd_pub_t *dhdp)
788 {
789 	dhd_bus_t *bus = dhdp->bus;
790 	return  bus->sih->chip;
791 }
792 
793 /** Get Chip Rev ID version */
794 uint dhd_bus_chiprev_id(dhd_pub_t *dhdp)
795 {
796 	dhd_bus_t *bus = dhdp->bus;
797 	return bus->sih->chiprev;
798 }
799 
800 /** Get Chip Pkg ID version */
801 uint dhd_bus_chippkg_id(dhd_pub_t *dhdp)
802 {
803 	dhd_bus_t *bus = dhdp->bus;
804 	return bus->sih->chippkg;
805 }
806 
807 int dhd_bus_get_ids(struct dhd_bus *bus, uint32 *bus_type, uint32 *bus_num, uint32 *slot_num)
808 {
809 	*bus_type = bus->bus;
810 	*bus_num = bus->bus_num;
811 	*slot_num = bus->slot_num;
812 	return 0;
813 }
814 
815 /** Conduct Loopback test */
816 int
817 dhd_bus_dmaxfer_lpbk(dhd_pub_t *dhdp, uint32 type)
818 {
819 	dma_xfer_info_t dmaxfer_lpbk;
820 	int ret = BCME_OK;
821 
822 #define PCIE_DMAXFER_LPBK_LENGTH	4096
823 	memset(&dmaxfer_lpbk, 0, sizeof(dma_xfer_info_t));
824 	dmaxfer_lpbk.version = DHD_DMAXFER_VERSION;
825 	dmaxfer_lpbk.length = (uint16)sizeof(dma_xfer_info_t);
826 	dmaxfer_lpbk.num_bytes = PCIE_DMAXFER_LPBK_LENGTH;
827 	dmaxfer_lpbk.type = type;
828 	dmaxfer_lpbk.should_wait = TRUE;
829 
830 	ret = dhd_bus_iovar_op(dhdp, "pcie_dmaxfer", NULL, 0,
831 		(char *)&dmaxfer_lpbk, sizeof(dma_xfer_info_t), IOV_SET);
832 	if (ret < 0) {
833 		DHD_ERROR(("failed to start PCIe Loopback Test!!! "
834 			"Type:%d Reason:%d\n", type, ret));
835 		return ret;
836 	}
837 
838 	if (dmaxfer_lpbk.status != DMA_XFER_SUCCESS) {
839 		DHD_ERROR(("failed to check PCIe Loopback Test!!! "
840 			"Type:%d Status:%d Error code:%d\n", type,
841 			dmaxfer_lpbk.status, dmaxfer_lpbk.error_code));
842 		ret = BCME_ERROR;
843 	} else {
844 		DHD_ERROR(("PCIe Loopback Test passed"
845 			" Type:%d\n", type));
846 	}
847 #undef PCIE_DMAXFER_LPBK_LENGTH
848 
849 	return ret;
850 }
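
/*
 * Illustrative sketch (not part of the original file): kicking off the DMA
 * loopback self-test above; type 0 is assumed here to mean the default
 * (non-D11) loopback mode consumed by the "pcie_dmaxfer" handler.
 */
#if 0
static void
example_run_dma_loopback(dhd_pub_t *dhdp)
{
	if (dhd_bus_dmaxfer_lpbk(dhdp, 0) != BCME_OK) {
		DHD_ERROR(("%s: DMA loopback test failed\n", __FUNCTION__));
	}
}
#endif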
851 
852 /* Log the latest DPC schedule time */
853 void
854 dhd_bus_set_dpc_sched_time(dhd_pub_t *dhdp)
855 {
856 	dhdp->bus->dpc_sched_time = OSL_LOCALTIME_NS();
857 }
858 
859 /* Check if there are DPC scheduling errors */
860 bool
861 dhd_bus_query_dpc_sched_errors(dhd_pub_t *dhdp)
862 {
863 	dhd_bus_t *bus = dhdp->bus;
864 	bool sched_err;
865 
866 	if (bus->dpc_entry_time < bus->isr_exit_time) {
867 		/* Kernel doesn't schedule the DPC after processing PCIe IRQ */
868 		sched_err = TRUE;
869 	} else if (bus->dpc_entry_time < bus->resched_dpc_time) {
870 		/* Kernel doesn't schedule the DPC after DHD tries to reschedule
871 		 * the DPC due to pending work items to be processed.
872 		 */
873 		sched_err = TRUE;
874 	} else {
875 		sched_err = FALSE;
876 	}
877 
878 	if (sched_err) {
879 		/* print out minimum timestamp info */
880 		DHD_ERROR(("isr_entry_time="SEC_USEC_FMT
881 			" isr_exit_time="SEC_USEC_FMT
882 			" dpc_entry_time="SEC_USEC_FMT
883 			"\ndpc_exit_time="SEC_USEC_FMT
884 			" dpc_sched_time="SEC_USEC_FMT
885 			" resched_dpc_time="SEC_USEC_FMT"\n",
886 			GET_SEC_USEC(bus->isr_entry_time),
887 			GET_SEC_USEC(bus->isr_exit_time),
888 			GET_SEC_USEC(bus->dpc_entry_time),
889 			GET_SEC_USEC(bus->dpc_exit_time),
890 			GET_SEC_USEC(bus->dpc_sched_time),
891 			GET_SEC_USEC(bus->resched_dpc_time)));
892 	}
893 
894 	return sched_err;
895 }
896 
897 /** Read and clear intstatus. This should be called with interrupts disabled or inside isr */
898 uint32
899 dhdpcie_bus_intstatus(dhd_bus_t *bus)
900 {
901 	uint32 intstatus = 0;
902 	uint32 intmask = 0;
903 
904 	if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
905 		DHD_ERROR(("%s: trying to clear intstatus after D3 Ack\n", __FUNCTION__));
906 		return intstatus;
907 	}
908 	if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) ||
909 		(bus->sih->buscorerev == 2)) {
910 		intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
911 		dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus);
912 		intstatus &= I_MB;
913 	} else {
914 		/* this is a PCIE core register..not a config register... */
915 		intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, 0, 0);
916 
917 		/* this is a PCIE core register..not a config register... */
918 		intmask = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask, 0, 0);
919 		/* If the device has been removed, intstatus & intmask read 0xffffffff */
920 		if (intstatus == (uint32)-1 || intmask == (uint32)-1) {
921 			DHD_ERROR(("%s: Device is removed or Link is down.\n", __FUNCTION__));
922 			DHD_ERROR(("%s: INTSTAT : 0x%x INTMASK : 0x%x.\n",
923 			    __FUNCTION__, intstatus, intmask));
924 			bus->is_linkdown = TRUE;
925 			dhd_pcie_debug_info_dump(bus->dhd);
926 			return intstatus;
927 		}
928 
929 #ifndef DHD_READ_INTSTATUS_IN_DPC
930 		intstatus &= intmask;
931 #endif /* DHD_READ_INTSTATUS_IN_DPC */
932 
933 		/*
934 		 * The fourth argument to si_corereg is the "mask" fields of the register to update
935 		 * and the fifth field is the "value" to update. Now if we are interested in only
936 		 * few fields of the "mask" bit map, we should not be writing back what we read
937 		 * By doing so, we might clear/ack interrupts that are not handled yet.
938 		 */
939 		si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, bus->def_intmask,
940 			intstatus);
941 
942 		intstatus &= bus->def_intmask;
943 	}
944 
945 	return intstatus;
946 }
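
/*
 * Illustrative note (not part of the original file), expanding the comment
 * above: acking with a full write-back could clear interrupts that have
 * not been handled yet, so the ack mask is restricted to def_intmask.
 */
#if 0
/* risky: write back everything that was read, acking unhandled bits too */
si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int,
	intstatus, intstatus);
/* as done above: ack only the mailbox bits this driver actually owns */
si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int,
	bus->def_intmask, intstatus);
#endif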
947 
948 void
949 dhdpcie_cto_recovery_handler(dhd_pub_t *dhd)
950 {
951 	dhd_bus_t *bus = dhd->bus;
952 	int ret;
953 
954 	/* Disable PCIe Runtime PM to avoid D3_ACK timeout.
955 	 */
956 	DHD_DISABLE_RUNTIME_PM(dhd);
957 
958 	/* Sleep for 1 second so that any AXI timeout,
959 	 * if running on the ALP clock, is also captured
960 	 */
961 	OSL_SLEEP(1000);
962 
963 	/* Reset the backplane and CTO state,
964 	 * after which access through PCIe is recovered.
965 	 */
966 	ret = dhdpcie_cto_error_recovery(bus);
967 	if (!ret) {
968 		/* Waiting for backplane reset */
969 		OSL_SLEEP(10);
970 		/* Dump debug Info */
971 		dhd_prot_debug_info_print(bus->dhd);
972 		/* Dump console buffer */
973 		dhd_bus_dump_console_buffer(bus);
974 #if defined(DHD_FW_COREDUMP)
975 		/* save core dump or write to a file */
976 		if (!bus->is_linkdown && bus->dhd->memdump_enabled) {
977 #ifdef DHD_SSSR_DUMP
978 			bus->dhd->collect_sssr = TRUE;
979 #endif /* DHD_SSSR_DUMP */
980 			bus->dhd->memdump_type = DUMP_TYPE_CTO_RECOVERY;
981 			dhdpcie_mem_dump(bus);
982 		}
983 #endif /* DHD_FW_COREDUMP */
984 	}
985 	bus->is_linkdown = TRUE;
986 	bus->dhd->hang_reason = HANG_REASON_PCIE_CTO_DETECT;
987 	/* Send HANG event */
988 	dhd_os_send_hang_message(bus->dhd);
989 }
990 
991 /**
992  * Name:  dhdpcie_bus_isr
993  * Parameters:
994  * 1: IN int irq   -- interrupt vector
995  * 2: IN void *arg      -- handle to private data structure
996  * Return value:
997  * Status (TRUE or FALSE)
998  *
999  * Description:
1000  * Interrupt Service routine checks for the status register,
1001  * disable interrupt and queue DPC if mail box interrupts are raised.
1002  */
1003 int32
1004 dhdpcie_bus_isr(dhd_bus_t *bus)
1005 {
1006 	uint32 intstatus = 0;
1007 
1008 	do {
1009 		DHD_INTR(("%s: Enter\n", __FUNCTION__));
1010 		/* verify argument */
1011 		if (!bus) {
1012 			DHD_LOG_MEM(("%s : bus is null pointer, exit \n", __FUNCTION__));
1013 			break;
1014 		}
1015 
1016 		if (bus->dhd->dongle_reset) {
1017 			DHD_LOG_MEM(("%s : dongle is reset\n", __FUNCTION__));
1018 			break;
1019 		}
1020 
1021 		if (bus->dhd->busstate == DHD_BUS_DOWN) {
1022 			DHD_LOG_MEM(("%s : bus is down \n", __FUNCTION__));
1023 			break;
1024 		}
1025 
1026 		/* avoid processing of interrupts until msgbuf prot is inited */
1027 		if (!bus->intr_enabled) {
1028 			DHD_INFO(("%s, not ready to receive interrupts\n", __FUNCTION__));
1029 			break;
1030 		}
1031 
1032 		if (PCIECTO_ENAB(bus)) {
1033 			/* read pci_intstatus */
1034 			intstatus = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_STATUS, 4);
1035 
1036 			if (intstatus == (uint32)-1) {
1037 				DHD_ERROR(("%s : Invalid intstatus for cto recovery\n",
1038 					__FUNCTION__));
1039 				dhdpcie_disable_irq_nosync(bus);
1040 				break;
1041 			}
1042 
1043 			if (intstatus & PCI_CTO_INT_MASK) {
1044 				DHD_ERROR(("%s: ##### CTO RECOVERY REPORTED BY DONGLE "
1045 					"intstat=0x%x enab=%d\n", __FUNCTION__,
1046 					intstatus, bus->cto_enable));
1047 				bus->cto_triggered = 1;
1048 				/*
1049 				 * DAR still accessible
1050 				 */
1051 				dhd_bus_dump_dar_registers(bus);
1052 
1053 				/* Disable further PCIe interrupts */
1054 				dhdpcie_disable_irq_nosync(bus); /* Disable interrupt!! */
1055 				/* Stop Tx flow */
1056 				dhd_bus_stop_queue(bus);
1057 
1058 				/* Schedule CTO recovery */
1059 				dhd_schedule_cto_recovery(bus->dhd);
1060 
1061 				return TRUE;
1062 			}
1063 		}
1064 
1065 		if (bus->d2h_intr_method == PCIE_MSI) {
1066 			/* For MSI, as intstatus is cleared by firmware, no need to read */
1067 			goto skip_intstatus_read;
1068 		}
1069 
1070 #ifndef DHD_READ_INTSTATUS_IN_DPC
1071 		intstatus = dhdpcie_bus_intstatus(bus);
1072 
1073 		/* Check if the interrupt is ours or not */
1074 		if (intstatus == 0) {
1075 			/* in EFI, since we poll for interrupts, this message would flood the logs,
1076 			* so it is disabled for EFI
1077 			*/
1078 			DHD_LOG_MEM(("%s : this interrupt is not ours\n", __FUNCTION__));
1079 			bus->non_ours_irq_count++;
1080 			bus->last_non_ours_irq_time = OSL_LOCALTIME_NS();
1081 			break;
1082 		}
1083 
1084 		/* save the intstatus */
1085 		/* read interrupt status register!! Status bits will be cleared in DPC !! */
1086 		bus->intstatus = intstatus;
1087 
1088 		/* return error for 0xFFFFFFFF */
1089 		if (intstatus == (uint32)-1) {
1090 			DHD_LOG_MEM(("%s : wrong interrupt status val : 0x%x\n",
1091 				__FUNCTION__, intstatus));
1092 			dhdpcie_disable_irq_nosync(bus);
1093 			break;
1094 		}
1095 
1096 skip_intstatus_read:
1097 		/*  Overall operation:
1098 		 *    - Mask further interrupts
1099 		 *    - Read/ack intstatus
1100 		 *    - Take action based on bits and state
1101 		 *    - Reenable interrupts (as per state)
1102 		 */
1103 
1104 		/* Count the interrupt call */
1105 		bus->intrcount++;
1106 #endif /* DHD_READ_INTSTATUS_IN_DPC */
1107 
1108 		bus->ipend = TRUE;
1109 
1110 		bus->isr_intr_disable_count++;
1111 
1112 		/* For Linux, MacOS etc. (other than NDIS), instead of disabling
1113 		* the dongle interrupt by clearing the IntMask, disable the
1114 		* interrupt directly on the host side, so that the host will not
1115 		* receive any interrupts at all, even though the dongle raises them
1116 		*/
1117 		dhdpcie_disable_irq_nosync(bus); /* Disable interrupt!! */
1118 
1119 		bus->intdis = TRUE;
1120 
1121 #if defined(PCIE_ISR_THREAD)
1122 
1123 		DHD_TRACE(("Calling dhd_bus_dpc() from %s\n", __FUNCTION__));
1124 		DHD_OS_WAKE_LOCK(bus->dhd);
1125 		while (dhd_bus_dpc(bus));
1126 		DHD_OS_WAKE_UNLOCK(bus->dhd);
1127 #else
1128 		bus->dpc_sched = TRUE;
1129 		dhd_sched_dpc(bus->dhd);     /* queue DPC now!! */
1130 #endif /* defined(PCIE_ISR_THREAD) */
1131 
1132 		DHD_INTR(("%s: Exit Success DPC Queued\n", __FUNCTION__));
1133 		return TRUE;
1134 
1135 	} while (0);
1136 
1137 	DHD_INTR(("%s: Exit Failure\n", __FUNCTION__));
1138 	return FALSE;
1139 }
1140 
1141 int
1142 dhdpcie_set_pwr_state(dhd_bus_t *bus, uint state)
1143 {
1144 	uint32 cur_state = 0;
1145 	uint32 pm_csr = 0;
1146 	osl_t *osh = bus->osh;
1147 
1148 	pm_csr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
1149 	cur_state = pm_csr & PCIECFGREG_PM_CSR_STATE_MASK;
1150 
1151 	if (cur_state == state) {
1152 		DHD_ERROR(("%s: Already in state %u \n", __FUNCTION__, cur_state));
1153 		return BCME_OK;
1154 	}
1155 
1156 	if (state > PCIECFGREG_PM_CSR_STATE_D3_HOT)
1157 		return BCME_ERROR;
1158 
1159 	/* Validate the state transition:
1160 	* if already in a lower power state, return an error
1161 	*/
1162 	if (state != PCIECFGREG_PM_CSR_STATE_D0 &&
1163 			cur_state <= PCIECFGREG_PM_CSR_STATE_D3_COLD &&
1164 			cur_state > state) {
1165 		DHD_ERROR(("%s: Invalid power state transition !\n", __FUNCTION__));
1166 		return BCME_ERROR;
1167 	}
1168 
1169 	pm_csr &= ~PCIECFGREG_PM_CSR_STATE_MASK;
1170 	pm_csr |= state;
1171 
1172 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32), pm_csr);
1173 
1174 	/* need to wait for the specified mandatory pcie power transition delay time */
1175 	if (state == PCIECFGREG_PM_CSR_STATE_D3_HOT ||
1176 			cur_state == PCIECFGREG_PM_CSR_STATE_D3_HOT)
1177 			OSL_DELAY(DHDPCIE_PM_D3_DELAY);
1178 	else if (state == PCIECFGREG_PM_CSR_STATE_D2 ||
1179 			cur_state == PCIECFGREG_PM_CSR_STATE_D2)
1180 			OSL_DELAY(DHDPCIE_PM_D2_DELAY);
1181 
1182 	/* read back the power state and verify */
1183 	pm_csr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
1184 	cur_state = pm_csr & PCIECFGREG_PM_CSR_STATE_MASK;
1185 	if (cur_state != state) {
1186 		DHD_ERROR(("%s: power transition failed ! Current state is %u \n",
1187 				__FUNCTION__, cur_state));
1188 		return BCME_ERROR;
1189 	} else {
1190 		DHD_ERROR(("%s: power transition to %u success \n",
1191 				__FUNCTION__, cur_state));
1192 	}
1193 
1194 	return BCME_OK;
1195 }
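
/*
 * Illustrative sketch (not part of the original file): a D3hot/D0 cycle
 * using the helper above; the PCIECFGREG_PM_CSR_STATE_* encodings are the
 * standard PCI PM CSR power states referenced in the function.
 */
#if 0
static int
example_d3hot_cycle(dhd_bus_t *bus)
{
	int ret;

	ret = dhdpcie_set_pwr_state(bus, PCIECFGREG_PM_CSR_STATE_D3_HOT);
	if (ret != BCME_OK)
		return ret;
	return dhdpcie_set_pwr_state(bus, PCIECFGREG_PM_CSR_STATE_D0);
}
#endif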
1196 
1197 int
1198 dhdpcie_config_check(dhd_bus_t *bus)
1199 {
1200 	uint32 i, val;
1201 	int ret = BCME_ERROR;
1202 
1203 	for (i = 0; i < DHDPCIE_CONFIG_CHECK_RETRY_COUNT; i++) {
1204 		val = OSL_PCI_READ_CONFIG(bus->osh, PCI_CFG_VID, sizeof(uint32));
1205 		if ((val & 0xFFFF) == VENDOR_BROADCOM) {
1206 			ret = BCME_OK;
1207 			break;
1208 		}
1209 		OSL_DELAY(DHDPCIE_CONFIG_CHECK_DELAY_MS * 1000);
1210 	}
1211 
1212 	return ret;
1213 }
1214 
1215 int
1216 dhdpcie_config_restore(dhd_bus_t *bus, bool restore_pmcsr)
1217 {
1218 	uint32 i;
1219 	osl_t *osh = bus->osh;
1220 
1221 	if (BCME_OK != dhdpcie_config_check(bus)) {
1222 		return BCME_ERROR;
1223 	}
1224 
1225 	for (i = PCI_CFG_REV >> 2; i < DHDPCIE_CONFIG_HDR_SIZE; i++) {
1226 		OSL_PCI_WRITE_CONFIG(osh, i << 2, sizeof(uint32), bus->saved_config.header[i]);
1227 	}
1228 	OSL_PCI_WRITE_CONFIG(osh, PCI_CFG_CMD, sizeof(uint32), bus->saved_config.header[1]);
1229 
1230 	if (restore_pmcsr)
1231 		OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PM_CSR,
1232 			sizeof(uint32), bus->saved_config.pmcsr);
1233 
1234 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_CAP, sizeof(uint32), bus->saved_config.msi_cap);
1235 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_ADDR_L, sizeof(uint32),
1236 			bus->saved_config.msi_addr0);
1237 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_ADDR_H,
1238 			sizeof(uint32), bus->saved_config.msi_addr1);
1239 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_DATA,
1240 			sizeof(uint32), bus->saved_config.msi_data);
1241 
1242 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_DEV_STATUS_CTRL,
1243 			sizeof(uint32), bus->saved_config.exp_dev_ctrl_stat);
1244 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGGEN_DEV_STATUS_CTRL2,
1245 			sizeof(uint32), bus->saved_config.exp_dev_ctrl_stat2);
1246 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL,
1247 			sizeof(uint32), bus->saved_config.exp_link_ctrl_stat);
1248 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL2,
1249 			sizeof(uint32), bus->saved_config.exp_link_ctrl_stat2);
1250 
1251 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL1,
1252 			sizeof(uint32), bus->saved_config.l1pm0);
1253 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2,
1254 			sizeof(uint32), bus->saved_config.l1pm1);
1255 
1256 	OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, sizeof(uint32),
1257 			bus->saved_config.bar0_win);
1258 	dhdpcie_setbar1win(bus, bus->saved_config.bar1_win);
1259 
1260 	return BCME_OK;
1261 }
1262 
1263 int
1264 dhdpcie_config_save(dhd_bus_t *bus)
1265 {
1266 	uint32 i;
1267 	osl_t *osh = bus->osh;
1268 
1269 	if (BCME_OK != dhdpcie_config_check(bus)) {
1270 		return BCME_ERROR;
1271 	}
1272 
1273 	for (i = 0; i < DHDPCIE_CONFIG_HDR_SIZE; i++) {
1274 		bus->saved_config.header[i] = OSL_PCI_READ_CONFIG(osh, i << 2, sizeof(uint32));
1275 	}
1276 
1277 	bus->saved_config.pmcsr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
1278 
1279 	bus->saved_config.msi_cap = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_CAP,
1280 			sizeof(uint32));
1281 	bus->saved_config.msi_addr0 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_L,
1282 			sizeof(uint32));
1283 	bus->saved_config.msi_addr1 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_H,
1284 			sizeof(uint32));
1285 	bus->saved_config.msi_data = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_DATA,
1286 			sizeof(uint32));
1287 
1288 	bus->saved_config.exp_dev_ctrl_stat = OSL_PCI_READ_CONFIG(osh,
1289 			PCIECFGREG_DEV_STATUS_CTRL, sizeof(uint32));
1290 	bus->saved_config.exp_dev_ctrl_stat2 = OSL_PCI_READ_CONFIG(osh,
1291 			PCIECFGGEN_DEV_STATUS_CTRL2, sizeof(uint32));
1292 	bus->saved_config.exp_link_ctrl_stat = OSL_PCI_READ_CONFIG(osh,
1293 			PCIECFGREG_LINK_STATUS_CTRL, sizeof(uint32));
1294 	bus->saved_config.exp_link_ctrl_stat2 = OSL_PCI_READ_CONFIG(osh,
1295 			PCIECFGREG_LINK_STATUS_CTRL2, sizeof(uint32));
1296 
1297 	bus->saved_config.l1pm0 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL1,
1298 			sizeof(uint32));
1299 	bus->saved_config.l1pm1 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2,
1300 			sizeof(uint32));
1301 
1302 	bus->saved_config.bar0_win = OSL_PCI_READ_CONFIG(osh, PCI_BAR0_WIN,
1303 			sizeof(uint32));
1304 	bus->saved_config.bar1_win = OSL_PCI_READ_CONFIG(osh, PCI_BAR1_WIN,
1305 			sizeof(uint32));
1306 
1307 	return BCME_OK;
1308 }
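
/*
 * Illustrative sketch (not part of the original file): save/restore are
 * used as a pair around events that can wipe PCIe config space (D3cold,
 * FLR, link retrain). restore_pmcsr is TRUE only when the PM CSR itself
 * must be rewritten.
 */
#if 0
static int
example_cfg_save_restore(dhd_bus_t *bus)
{
	if (dhdpcie_config_save(bus) != BCME_OK)
		return BCME_ERROR;

	/* ... power transition / reset happens here ... */

	return dhdpcie_config_restore(bus, FALSE);
}
#endif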
1309 
1310 #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
1311 dhd_pub_t *link_recovery = NULL;
1312 #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
1313 
1314 static void
1315 dhdpcie_bus_intr_init(dhd_bus_t *bus)
1316 {
1317 	uint buscorerev = bus->sih->buscorerev;
1318 	bus->pcie_mailbox_int = PCIMailBoxInt(buscorerev);
1319 	bus->pcie_mailbox_mask = PCIMailBoxMask(buscorerev);
1320 	bus->d2h_mb_mask = PCIE_MB_D2H_MB_MASK(buscorerev);
1321 	bus->def_intmask = PCIE_MB_D2H_MB_MASK(buscorerev);
1322 	if (buscorerev < 64) {
1323 		bus->def_intmask |= PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1;
1324 	}
1325 }
1326 
1327 static void
1328 dhdpcie_cc_watchdog_reset(dhd_bus_t *bus)
1329 {
1330 	uint32 wd_en = (bus->sih->buscorerev >= 66) ? WD_SSRESET_PCIE_F0_EN :
1331 		(WD_SSRESET_PCIE_F0_EN | WD_SSRESET_PCIE_ALL_FN_EN);
1332 	pcie_watchdog_reset(bus->osh, bus->sih, WD_ENABLE_MASK, wd_en);
1333 }
1334 
1335 void
1336 dhdpcie_dongle_reset(dhd_bus_t *bus)
1337 {
1338 	/* if the pcie link is down, watchdog reset
1339 	 * should not be done, as it may hang
1340 	 */
1341 	if (bus->is_linkdown) {
1342 		return;
1343 	}
1344 
1345 	/* dhd_bus_perform_flr will return BCME_UNSUPPORTED if chip is not FLR capable */
1346 	if (dhd_bus_perform_flr(bus, FALSE) == BCME_UNSUPPORTED) {
1347 #ifdef DHD_USE_BP_RESET
1348 		/* Backplane reset using SPROM cfg register(0x88) for buscorerev <= 24 */
1349 		dhd_bus_perform_bp_reset(bus);
1350 #else
1351 		/* Legacy chipcommon watchdog reset */
1352 		dhdpcie_cc_watchdog_reset(bus);
1353 #endif /* DHD_USE_BP_RESET */
1354 	}
1355 }
1356 
1357 static bool
1358 dhdpcie_dongle_attach(dhd_bus_t *bus)
1359 {
1360 	osl_t *osh = bus->osh;
1361 	volatile void *regsva = (volatile void*)bus->regs;
1362 	uint16 devid;
1363 	uint32 val;
1364 	sbpcieregs_t *sbpcieregs;
1365 	bool dongle_isolation;
1366 
1367 	DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
1368 
1369 #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
1370 	link_recovery = bus->dhd;
1371 #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
1372 
1373 	bus->alp_only = TRUE;
1374 	bus->sih = NULL;
1375 
1376 	/* Check the PCIe bus status by reading the configuration space */
1377 	val = OSL_PCI_READ_CONFIG(osh, PCI_CFG_VID, sizeof(uint32));
1378 	if ((val & 0xFFFF) != VENDOR_BROADCOM) {
1379 		DHD_ERROR(("%s : failed to read PCI configuration space!\n", __FUNCTION__));
1380 		goto fail;
1381 	}
1382 	devid = (val >> 16) & 0xFFFF;
1383 	bus->cl_devid = devid;
1384 
1385 	/* Set bar0 window to si_enum_base */
1386 	dhdpcie_bus_cfg_set_bar0_win(bus, si_enum_base(devid));
1387 
1388 	/*
1389 	 * Check the PCI_SPROM_CONTROL register to prevent invalid address access
1390 	 * due to switching the address space from PCI_BUS to SI_BUS.
1391 	 */
1392 	val = OSL_PCI_READ_CONFIG(osh, PCI_SPROM_CONTROL, sizeof(uint32));
1393 	if (val == 0xffffffff) {
1394 		DHD_ERROR(("%s : failed to read SPROM control register\n", __FUNCTION__));
1395 		goto fail;
1396 	}
1397 
1398 	/* si_attach() will provide an SI handle and scan the backplane */
1399 	if (!(bus->sih = si_attach((uint)devid, osh, regsva, PCI_BUS, bus,
1400 	                           &bus->vars, &bus->varsz))) {
1401 		DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__));
1402 		goto fail;
1403 	}
1404 
1405 	/* Configure CTO Prevention functionality */
1406 #if defined(BCMFPGA_HW)
1407 	DHD_ERROR(("Disable CTO\n"));
1408 	bus->cto_enable = FALSE;
1409 #else
1410 #if defined(BCMPCIE_CTO_PREVENTION)
1411 	if (bus->sih->buscorerev >= 24) {
1412 		DHD_ERROR(("Enable CTO\n"));
1413 		bus->cto_enable = TRUE;
1414 	} else
1415 #endif /* BCMPCIE_CTO_PREVENTION */
1416 	{
1417 		DHD_ERROR(("Disable CTO\n"));
1418 		bus->cto_enable = FALSE;
1419 	}
1420 #endif /* BCMFPGA_HW */
1421 
1422 	if (PCIECTO_ENAB(bus)) {
1423 		dhdpcie_cto_init(bus, TRUE);
1424 	}
1425 
1426 	if (MULTIBP_ENAB(bus->sih) && (bus->sih->buscorerev >= 66)) {
1427 		/*
1428 		 * HW JIRA - CRWLPCIEGEN2-672
1429 		 * Producer Index Feature which is used by F1 gets reset on F0 FLR
1430 		 * fixed in REV68
1431 		 */
1432 		if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) {
1433 			dhdpcie_ssreset_dis_enum_rst(bus);
1434 		}
1435 
1436 		/* IOV_DEVRESET could exercise si_detach()/si_attach() again, so reset the ref count:
1437 		*   dhdpcie_bus_release_dongle() --> si_detach()
1438 		*   dhdpcie_dongle_attach() --> si_attach()
1439 		*/
1440 		bus->pwr_req_ref = 0;
1441 	}
1442 
1443 	if (MULTIBP_ENAB(bus->sih)) {
1444 		dhd_bus_pcie_pwr_req_nolock(bus);
1445 	}
1446 
1447 	/* Get info on the ARM and SOCRAM cores... */
1448 	/* Should really be qualified by device id */
1449 	if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
1450 	    (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) ||
1451 	    (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) ||
1452 	    (si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
1453 		bus->armrev = si_corerev(bus->sih);
1454 		bus->coreid = si_coreid(bus->sih);
1455 	} else {
1456 		DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
1457 		goto fail;
1458 	}
1459 
1460 	/* CA7 requires coherent bits on */
1461 	if (bus->coreid == ARMCA7_CORE_ID) {
1462 		val = dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4);
1463 		dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4,
1464 			(val | PCIE_BARCOHERENTACCEN_MASK));
1465 	}
1466 
1467 	/* Olympic EFI requirement - stop driver load if FW is already running.
1468 	*  This must be done here, before pcie_watchdog_reset, because
1469 	*  pcie_watchdog_reset puts the ARM back into the halt state.
1470 	*/
1471 	if (!dhdpcie_is_arm_halted(bus)) {
1472 		DHD_ERROR(("%s: ARM is not halted,FW is already running! Abort.\n",
1473 				__FUNCTION__));
1474 		goto fail;
1475 	}
1476 
1477 	BCM_REFERENCE(dongle_isolation);
1478 
1479 	/* For built-in drivers the PCIe clock request is done by the RC,
1480 	 * so do not issue clkreq from DHD
1481 	 */
1482 	if (dhd_download_fw_on_driverload)
1483 	{
1484 		/* Enable CLKREQ# */
1485 		dhdpcie_clkreq(bus->osh, 1, 1);
1486 	}
1487 
1488 	/*
1489 	 * bus->dhd will be NULL if it is called from dhd_bus_attach, so need to reset
1490 	 * without checking dongle_isolation flag, but if it is called via some other path
1491 	 * like quiesce FLR, then based on dongle_isolation flag, watchdog_reset should
1492 	 * be called.
1493 	 */
1494 	if (bus->dhd == NULL) {
1495 		/* dhd_attach not yet happened, do watchdog reset */
1496 		dongle_isolation = FALSE;
1497 	} else {
1498 		dongle_isolation = bus->dhd->dongle_isolation;
1499 	}
1500 
1501 #ifndef DHD_SKIP_DONGLE_RESET_IN_ATTACH
1502 	/*
1503 	 * Issue CC watchdog to reset all the cores on the chip - similar to rmmod dhd
1504 	 * This is required to avoid spurious interrupts to the Host and bring back
1505 	 * dongle to a sane state (on host soft-reboot / watchdog-reboot).
1506 	 */
1507 	if (dongle_isolation == FALSE) {
1508 		dhdpcie_dongle_reset(bus);
1509 	}
1510 #endif /* !DHD_SKIP_DONGLE_RESET_IN_ATTACH */
1511 
1512 	/* need to set the force_bt_quiesce flag here
1513 	 * before calling dhdpcie_dongle_flr_or_pwr_toggle
1514 	 */
1515 	bus->force_bt_quiesce = TRUE;
1516 	/*
1517 	 * For buscorerev = 66 and after, F0 FLR should be done independent from F1.
1518 	 * So don't need BT quiesce.
1519 	 */
1520 	if (bus->sih->buscorerev >= 66) {
1521 		bus->force_bt_quiesce = FALSE;
1522 	}
1523 
1524 	dhdpcie_dongle_flr_or_pwr_toggle(bus);
1525 
1526 	si_setcore(bus->sih, PCIE2_CORE_ID, 0);
1527 	sbpcieregs = (sbpcieregs_t*)(bus->regs);
1528 
1529 	/* WAR where the BAR1 window may not be sized properly */
1530 	W_REG(osh, &sbpcieregs->configaddr, 0x4e0);
1531 	val = R_REG(osh, &sbpcieregs->configdata);
1532 	W_REG(osh, &sbpcieregs->configdata, val);
1533 
1534 	if (si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
1535 		/* Only set dongle RAMSIZE to default value when BMC vs ARM usage of SYSMEM is not
1536 		 * adjusted.
1537 		 */
1538 		if (!bus->ramsize_adjusted) {
1539 			if (!(bus->orig_ramsize = si_sysmem_size(bus->sih))) {
1540 				DHD_ERROR(("%s: failed to find SYSMEM memory!\n", __FUNCTION__));
1541 				goto fail;
1542 			}
1543 			switch ((uint16)bus->sih->chip) {
1544 				default:
1545 					/* also populate base address */
1546 					bus->dongle_ram_base = CA7_4365_RAM_BASE;
1547 					bus->orig_ramsize = 0x1c0000; /* Reserve 1.75MB for CA7 */
1548 					break;
1549 			}
1550 		}
1551 	} else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
1552 		if (!(bus->orig_ramsize = si_socram_size(bus->sih))) {
1553 			DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__));
1554 			goto fail;
1555 		}
1556 	} else {
1557 		/* cr4 has a different way to find the RAM size from TCM's */
1558 		if (!(bus->orig_ramsize = si_tcm_size(bus->sih))) {
1559 			DHD_ERROR(("%s: failed to find CR4-TCM memory!\n", __FUNCTION__));
1560 			goto fail;
1561 		}
1562 		/* also populate base address */
1563 		switch ((uint16)bus->sih->chip) {
1564 		case BCM4339_CHIP_ID:
1565 		case BCM4335_CHIP_ID:
1566 			bus->dongle_ram_base = CR4_4335_RAM_BASE;
1567 			break;
1568 		case BCM4358_CHIP_ID:
1569 		case BCM4354_CHIP_ID:
1570 		case BCM43567_CHIP_ID:
1571 		case BCM43569_CHIP_ID:
1572 		case BCM4350_CHIP_ID:
1573 		case BCM43570_CHIP_ID:
1574 			bus->dongle_ram_base = CR4_4350_RAM_BASE;
1575 			break;
1576 		case BCM4360_CHIP_ID:
1577 			bus->dongle_ram_base = CR4_4360_RAM_BASE;
1578 			break;
1579 
1580 		case BCM4364_CHIP_ID:
1581 			bus->dongle_ram_base = CR4_4364_RAM_BASE;
1582 			break;
1583 
1584 		CASE_BCM4345_CHIP:
1585 			bus->dongle_ram_base = (bus->sih->chiprev < 6)  /* changed at 4345C0 */
1586 				? CR4_4345_LT_C0_RAM_BASE : CR4_4345_GE_C0_RAM_BASE;
1587 			break;
1588 		CASE_BCM43602_CHIP:
1589 			bus->dongle_ram_base = CR4_43602_RAM_BASE;
1590 			break;
1591 		case BCM4349_CHIP_GRPID:
1592 			/* RAM base changed from 4349c0 (revid=9) onwards */
1593 			bus->dongle_ram_base = ((bus->sih->chiprev < 9) ?
1594 				CR4_4349_RAM_BASE : CR4_4349_RAM_BASE_FROM_REV_9);
1595 			break;
1596 		case BCM4347_CHIP_ID:
1597 		case BCM4357_CHIP_ID:
1598 		case BCM4361_CHIP_ID:
1599 			bus->dongle_ram_base = CR4_4347_RAM_BASE;
1600 			break;
1601 		case BCM4362_CHIP_ID:
1602 			bus->dongle_ram_base = CR4_4362_RAM_BASE;
1603 			break;
1604 		case BCM43751_CHIP_ID:
1605 			bus->dongle_ram_base = CR4_43751_RAM_BASE;
1606 			break;
1607 		case BCM43752_CHIP_ID:
1608 			bus->dongle_ram_base = CR4_43752_RAM_BASE;
1609 			break;
1610 		case BCM4375_CHIP_ID:
1611 		case BCM4369_CHIP_ID:
1612 			bus->dongle_ram_base = CR4_4369_RAM_BASE;
1613 			break;
1614 		default:
1615 			bus->dongle_ram_base = 0;
1616 			DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n",
1617 			           __FUNCTION__, bus->dongle_ram_base));
1618 		}
1619 	}
1620 	bus->ramsize = bus->orig_ramsize;
1621 	if (dhd_dongle_memsize)
1622 		dhdpcie_bus_dongle_setmemsize(bus, dhd_dongle_memsize);
1623 
1624 	if (bus->ramsize > DONGLE_TCM_MAP_SIZE) {
1625 		DHD_ERROR(("%s : invalid ramsize %d(0x%x) is returned from dongle\n",
1626 				__FUNCTION__, bus->ramsize, bus->ramsize));
1627 		goto fail;
1628 	}
1629 
1630 	DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n",
1631 	           bus->ramsize, bus->orig_ramsize, bus->dongle_ram_base));
1632 
1633 	bus->srmemsize = si_socram_srmem_size(bus->sih);
1634 
1635 	dhdpcie_bus_intr_init(bus);
1636 
1637 	/* Set the poll and/or interrupt flags */
1638 	bus->intr = (bool)dhd_intr;
1639 	if ((bus->poll = (bool)dhd_poll))
1640 		bus->pollrate = 1;
1641 #ifdef DHD_DISABLE_ASPM
1642 	dhd_bus_aspm_enable_rc_ep(bus, FALSE);
1643 #endif /* DHD_DISABLE_ASPM */
1644 
1645 	bus->idma_enabled = TRUE;
1646 	bus->ifrm_enabled = TRUE;
1647 	DHD_TRACE(("%s: EXIT: SUCCESS\n", __FUNCTION__));
1648 
1649 	if (MULTIBP_ENAB(bus->sih)) {
1650 		dhd_bus_pcie_pwr_req_clear_nolock(bus);
1651 
1652 		/*
1653 		 * One time clearing of Common Power Domain since HW default is set
1654 		 * Needs to be after FLR because FLR resets PCIe enum back to HW defaults
1655 		 * for 4378B0 (rev 68).
1656 		 * On 4378A0 (rev 66), PCIe enum reset is disabled due to CRWLPCIEGEN2-672
1657 		 */
1658 		si_srpwr_request(bus->sih, SRPWR_DMN0_PCIE_MASK, 0);
1659 
1660 		/*
1661 		 * WAR to fix ARM cold boot;
1662 		 * Assert WL domain in DAR helps but not enum
1663 		 */
1664 		if (bus->sih->buscorerev >= 68) {
1665 			dhd_bus_pcie_pwr_req_wl_domain(bus,
1666 				DAR_PCIE_PWR_CTRL((bus->sih)->buscorerev), TRUE);
1667 		}
1668 	}
1669 
1670 	return 0;
1671 
1672 fail:
1673 	if (bus->sih != NULL) {
1674 		if (MULTIBP_ENAB(bus->sih)) {
1675 			dhd_bus_pcie_pwr_req_clear_nolock(bus);
1676 		}
1677 		/* For EFI, even if there is an error, the load still succeeds,
1678 		* so si_detach should not be called here; it is called during unload.
1679 		*/
1680 		si_detach(bus->sih);
1681 		bus->sih = NULL;
1682 	}
1683 	DHD_TRACE(("%s: EXIT: FAILURE\n", __FUNCTION__));
1684 	return -1;
1685 }
1686 
1687 int
1688 dhpcie_bus_unmask_interrupt(dhd_bus_t *bus)
1689 {
1690 	dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, I_MB);
1691 	return 0;
1692 }
1693 int
1694 dhpcie_bus_mask_interrupt(dhd_bus_t *bus)
1695 {
1696 	dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, 0x0);
1697 	return 0;
1698 }
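
/*
 * Usage sketch (illustrative only, not a driver entry point): on the legacy
 * bus core revisions (2/4/6) that use the config-space pair above, a caller
 * could bracket a quiet window like this; dhd_do_quiet_work() is a
 * hypothetical placeholder.
 *
 *	dhpcie_bus_mask_interrupt(bus);		// write 0x0 to PCIIntmask
 *	dhd_do_quiet_work(bus);			// no doorbell interrupts delivered
 *	dhpcie_bus_unmask_interrupt(bus);	// restore the I_MB doorbell bits
 */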
1699 
1700 /* Non atomic function, caller should hold appropriate lock */
1701 void
1702 dhdpcie_bus_intr_enable(dhd_bus_t *bus)
1703 {
1704 	DHD_TRACE(("%s Enter\n", __FUNCTION__));
1705 	if (bus) {
1706 		if (bus->sih && !bus->is_linkdown) {
1707 			/* Skip after receiving D3 ACK */
1708 			if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
1709 				return;
1710 			}
1711 			if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
1712 				(bus->sih->buscorerev == 4)) {
1713 				dhpcie_bus_unmask_interrupt(bus);
1714 			} else {
1715 	#if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP)
1716 				dhd_bus_mmio_trace(bus, bus->pcie_mailbox_mask,
1717 					bus->def_intmask, TRUE);
1718 	#endif
1719 				si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask,
1720 					bus->def_intmask, bus->def_intmask);
1721 			}
1722 		}
1723 
1724 	}
1725 
1726 	DHD_TRACE(("%s Exit\n", __FUNCTION__));
1727 }
1728 
1729 /* Non atomic function, caller should hold appropriate lock */
1730 void
1731 dhdpcie_bus_intr_disable(dhd_bus_t *bus)
1732 {
1733 	DHD_TRACE(("%s Enter\n", __FUNCTION__));
1734 	if (bus && bus->sih && !bus->is_linkdown) {
1735 		/* Skip after receiving D3 ACK */
1736 		if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
1737 			return;
1738 		}
1739 		if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
1740 			(bus->sih->buscorerev == 4)) {
1741 			dhpcie_bus_mask_interrupt(bus);
1742 		} else {
1743 			si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask,
1744 				bus->def_intmask, 0);
1745 		}
1746 	}
1747 
1748 	DHD_TRACE(("%s Exit\n", __FUNCTION__));
1749 }
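
/*
 * Locking sketch: the enable/disable helpers above are not atomic, so callers
 * in this file take the bus spinlock around them, as dhdpcie_bus_release()
 * does later in this file:
 *
 *	unsigned long flags_bus;
 *	DHD_BUS_LOCK(bus->bus_lock, flags_bus);
 *	dhdpcie_bus_intr_disable(bus);
 *	DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
 */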
1750 
1751 /*
1752  * dhdpcie_advertise_bus_cleanup advertises that cleanup is in progress to
1753  * other bus user contexts (Tx, Rx, IOVAR, WD, etc.) and waits for them to
1754  * exit gracefully. Before marking dhd_bus_busy_state as BUSY, each bus usage
1755  * context checks whether busstate is DHD_BUS_DOWN or DHD_BUS_DOWN_IN_PROGRESS
1756  * and, if so, exits right there without marking dhd_bus_busy_state as BUSY.
1757  */
1758 void
1759 dhdpcie_advertise_bus_cleanup(dhd_pub_t *dhdp)
1760 {
1761 	unsigned long flags;
1762 	int timeleft;
1763 
1764 	dhdp->dhd_watchdog_ms_backup = dhd_watchdog_ms;
1765 	if (dhdp->dhd_watchdog_ms_backup) {
1766 		DHD_ERROR(("%s: Disabling wdtick before dhd deinit\n",
1767 			__FUNCTION__));
1768 		dhd_os_wd_timer(dhdp, 0);
1769 	}
1770 	if (dhdp->busstate != DHD_BUS_DOWN) {
1771 		DHD_GENERAL_LOCK(dhdp, flags);
1772 		dhdp->busstate = DHD_BUS_DOWN_IN_PROGRESS;
1773 		DHD_GENERAL_UNLOCK(dhdp, flags);
1774 	}
1775 
1776 	timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state);
1777 	if ((timeleft == 0) || (timeleft == 1)) {
1778 		DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n",
1779 				__FUNCTION__, dhdp->dhd_bus_busy_state));
1780 		ASSERT(0);
1781 	}
1782 
1783 	return;
1784 }
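
/*
 * Minimal sketch of the contract described above, mirroring the pattern
 * dhd_bus_watchdog() uses later in this file (the surrounding function is
 * hypothetical): each bus user checks busstate before marking itself busy,
 * so cleanup can drain all users.
 *
 *	DHD_GENERAL_LOCK(dhdp, flags);
 *	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
 *		DHD_GENERAL_UNLOCK(dhdp, flags);
 *		return;				// exit without setting BUSY
 *	}
 *	DHD_BUS_BUSY_SET_IN_WD(dhdp);		// mark this context busy
 *	DHD_GENERAL_UNLOCK(dhdp, flags);
 *	... do bus work ...
 *	DHD_GENERAL_LOCK(dhdp, flags);
 *	DHD_BUS_BUSY_CLEAR_IN_WD(dhdp);
 *	dhd_os_busbusy_wake(dhdp);		// wakes dhd_os_busbusy_wait_negation()
 *	DHD_GENERAL_UNLOCK(dhdp, flags);
 */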
1785 
1786 static void
1787 dhdpcie_advertise_bus_remove(dhd_pub_t *dhdp)
1788 {
1789 	unsigned long flags;
1790 	int timeleft;
1791 
1792 	DHD_GENERAL_LOCK(dhdp, flags);
1793 	dhdp->busstate = DHD_BUS_REMOVE;
1794 	DHD_GENERAL_UNLOCK(dhdp, flags);
1795 
1796 	timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state);
1797 	if ((timeleft == 0) || (timeleft == 1)) {
1798 		DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n",
1799 				__FUNCTION__, dhdp->dhd_bus_busy_state));
1800 		ASSERT(0);
1801 	}
1802 
1803 	return;
1804 }
1805 
1806 static void
1807 dhdpcie_bus_remove_prep(dhd_bus_t *bus)
1808 {
1809 	unsigned long flags;
1810 	DHD_TRACE(("%s Enter\n", __FUNCTION__));
1811 
1812 	DHD_GENERAL_LOCK(bus->dhd, flags);
1813 	DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
1814 	bus->dhd->busstate = DHD_BUS_DOWN;
1815 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
1816 
1817 	dhd_os_sdlock(bus->dhd);
1818 
1819 	if (bus->sih && !bus->dhd->dongle_isolation) {
1820 		if (PCIE_RELOAD_WAR_ENAB(bus->sih->buscorerev)) {
1821 			dhd_bus_pcie_pwr_req_reload_war(bus);
1822 		}
1823 
1824 		/* WAR for the issue where insmod fails after rmmod on Brix Android */
1825 
1826 		/* if the pcie link is down, watchdog reset
1827 		* should not be done, as it may hang
1828 		*/
1829 
1830 		if (!bus->is_linkdown) {
1831 #ifndef DHD_SKIP_DONGLE_RESET_IN_ATTACH
1832 			/* For EFI, depending on the BT-over-PCIe mode,
1833 			*  we either power toggle or do an F0 FLR
1834 			* from dhdpcie_bus_release_dongle, so there is no
1835 			* need to reset the dongle from here.
1836 			*/
1837 			dhdpcie_dongle_reset(bus);
1838 #endif /* !DHD_SKIP_DONGLE_RESET_IN_ATTACH */
1839 		}
1840 
1841 		bus->dhd->is_pcie_watchdog_reset = TRUE;
1842 	}
1843 
1844 	dhd_os_sdunlock(bus->dhd);
1845 
1846 	DHD_TRACE(("%s Exit\n", __FUNCTION__));
1847 }
1848 
1849 void
1850 dhd_init_bus_lock(dhd_bus_t *bus)
1851 {
1852 	if (!bus->bus_lock) {
1853 		bus->bus_lock = dhd_os_spin_lock_init(bus->dhd->osh);
1854 	}
1855 }
1856 
1857 void
1858 dhd_deinit_bus_lock(dhd_bus_t *bus)
1859 {
1860 	if (bus->bus_lock) {
1861 		dhd_os_spin_lock_deinit(bus->dhd->osh, bus->bus_lock);
1862 		bus->bus_lock = NULL;
1863 	}
1864 }
1865 
1866 void
1867 dhd_init_backplane_access_lock(dhd_bus_t *bus)
1868 {
1869 	if (!bus->backplane_access_lock) {
1870 		bus->backplane_access_lock = dhd_os_spin_lock_init(bus->dhd->osh);
1871 	}
1872 }
1873 
1874 void
1875 dhd_deinit_backplane_access_lock(dhd_bus_t *bus)
1876 {
1877 	if (bus->backplane_access_lock) {
1878 		dhd_os_spin_lock_deinit(bus->dhd->osh, bus->backplane_access_lock);
1879 		bus->backplane_access_lock = NULL;
1880 	}
1881 }
1882 
1883 /** Detach and free everything */
1884 void
1885 dhdpcie_bus_release(dhd_bus_t *bus)
1886 {
1887 	bool dongle_isolation = FALSE;
1888 	osl_t *osh = NULL;
1889 	unsigned long flags_bus;
1890 
1891 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
1892 
1893 	if (bus) {
1894 
1895 		osh = bus->osh;
1896 		ASSERT(osh);
1897 
1898 		if (bus->dhd) {
1899 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
1900 			debugger_close();
1901 #endif /* DEBUGGER || DHD_DSCOPE */
1902 			dhdpcie_advertise_bus_remove(bus->dhd);
1903 			dongle_isolation = bus->dhd->dongle_isolation;
1904 			bus->dhd->is_pcie_watchdog_reset = FALSE;
1905 			dhdpcie_bus_remove_prep(bus);
1906 
1907 			if (bus->intr) {
1908 				DHD_BUS_LOCK(bus->bus_lock, flags_bus);
1909 				dhdpcie_bus_intr_disable(bus);
1910 				DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
1911 				dhdpcie_free_irq(bus);
1912 			}
1913 			dhd_deinit_bus_lock(bus);
1914 			dhd_deinit_backplane_access_lock(bus);
1915 			/**
1916 			 * dhdpcie_bus_release_dongle frees the bus->sih handle, which is needed
1917 			 * to access dongle registers.
1918 			 * dhd_detach communicates with the dongle to delete flowrings etc.,
1919 			 * so dhdpcie_bus_release_dongle must be called only after dhd_detach.
1920 			 */
1921 			dhd_detach(bus->dhd);
1922 			dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
1923 			dhd_free(bus->dhd);
1924 			bus->dhd = NULL;
1925 		}
1926 		/* unmap the regs and tcm here!! */
1927 		if (bus->regs) {
1928 			dhdpcie_bus_reg_unmap(osh, bus->regs, DONGLE_REG_MAP_SIZE);
1929 			bus->regs = NULL;
1930 		}
1931 		if (bus->tcm) {
1932 			dhdpcie_bus_reg_unmap(osh, bus->tcm, DONGLE_TCM_MAP_SIZE);
1933 			bus->tcm = NULL;
1934 		}
1935 
1936 		dhdpcie_bus_release_malloc(bus, osh);
1937 		/* Detach pcie shared structure */
1938 		if (bus->pcie_sh) {
1939 			MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
1940 			bus->pcie_sh = NULL;
1941 		}
1942 
1943 		if (bus->console.buf != NULL) {
1944 			MFREE(osh, bus->console.buf, bus->console.bufsize);
1945 		}
1946 
1947 		/* Finally free bus info */
1948 		MFREE(osh, bus, sizeof(dhd_bus_t));
1949 
1950 		g_dhd_bus = NULL;
1951 	}
1952 
1953 	DHD_TRACE(("%s: Exit\n", __FUNCTION__));
1954 } /* dhdpcie_bus_release */
1955 
1956 void
1957 dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag)
1958 {
1959 	DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__,
1960 		bus->dhd, bus->dhd->dongle_reset));
1961 
1962 	if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag) {
1963 		DHD_TRACE(("%s Exit\n", __FUNCTION__));
1964 		return;
1965 	}
1966 
1967 	if (bus->is_linkdown) {
1968 		DHD_ERROR(("%s : Skip release dongle due to linkdown \n", __FUNCTION__));
1969 		return;
1970 	}
1971 
1972 	if (bus->sih) {
1973 
1974 		if (!dongle_isolation &&
1975 			(bus->dhd && !bus->dhd->is_pcie_watchdog_reset)) {
1976 			dhdpcie_dongle_reset(bus);
1977 		}
1978 
1979 		dhdpcie_dongle_flr_or_pwr_toggle(bus);
1980 
1981 		if (bus->ltrsleep_on_unload) {
1982 			si_corereg(bus->sih, bus->sih->buscoreidx,
1983 				OFFSETOF(sbpcieregs_t, u.pcie2.ltr_state), ~0, 0);
1984 		}
1985 
1986 		if (bus->sih->buscorerev == 13)
1987 			 pcie_serdes_iddqdisable(bus->osh, bus->sih,
1988 			                         (sbpcieregs_t *) bus->regs);
1989 
1990 		/* For inbuilt drivers pcie clk req will be done by RC,
1991 		 * so do not do clkreq from dhd
1992 		 */
1993 		if (dhd_download_fw_on_driverload)
1994 		{
1995 			/* Disable CLKREQ# */
1996 			dhdpcie_clkreq(bus->osh, 1, 0);
1997 		}
1998 
1999 		if (bus->sih != NULL) {
2000 			si_detach(bus->sih);
2001 			bus->sih = NULL;
2002 		}
2003 		if (bus->vars && bus->varsz)
2004 			MFREE(osh, bus->vars, bus->varsz);
2005 		bus->vars = NULL;
2006 	}
2007 
2008 	DHD_TRACE(("%s Exit\n", __FUNCTION__));
2009 }
2010 
2011 uint32
2012 dhdpcie_bus_cfg_read_dword(dhd_bus_t *bus, uint32 addr, uint32 size)
2013 {
2014 	uint32 data = OSL_PCI_READ_CONFIG(bus->osh, addr, size);
2015 	return data;
2016 }
2017 
2018 /** 32 bit config write */
2019 void
2020 dhdpcie_bus_cfg_write_dword(dhd_bus_t *bus, uint32 addr, uint32 size, uint32 data)
2021 {
2022 	OSL_PCI_WRITE_CONFIG(bus->osh, addr, size, data);
2023 }
2024 
2025 void
2026 dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data)
2027 {
2028 	OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, 4, data);
2029 }
2030 
2031 void
2032 dhdpcie_bus_dongle_setmemsize(struct dhd_bus *bus, int mem_size)
2033 {
2034 	int32 min_size =  DONGLE_MIN_MEMSIZE;
2035 	/* Restrict the memsize to user specified limit */
2036 	DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n",
2037 		dhd_dongle_memsize, min_size));
2038 	if ((dhd_dongle_memsize > min_size) &&
2039 		(dhd_dongle_memsize < (int32)bus->orig_ramsize))
2040 		bus->ramsize = dhd_dongle_memsize;
2041 }
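
/*
 * Worked example for the clamp above (values are illustrative): with
 * orig_ramsize = 0x1c0000 (1.75 MB) and a user override dhd_dongle_memsize =
 * 0x100000 (1 MB), the bounds check passes and bus->ramsize is restricted to
 * 0x100000; an override of 0 or one larger than orig_ramsize is ignored and
 * the probed size is kept.
 */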
2042 
2043 void
2044 dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh)
2045 {
2046 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2047 
2048 	if (bus->dhd && bus->dhd->dongle_reset)
2049 		return;
2050 
2051 	if (bus->vars && bus->varsz) {
2052 		MFREE(osh, bus->vars, bus->varsz);
2053 		bus->vars = NULL;
2054 	}
2055 
2056 	DHD_TRACE(("%s: Exit\n", __FUNCTION__));
2057 	return;
2058 
2059 }
2060 
2061 /** Stop bus module: clear pending frames, disable data flow */
2062 void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
2063 {
2064 	unsigned long flags, flags_bus;
2065 
2066 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2067 
2068 	if (!bus->dhd)
2069 		return;
2070 
2071 	if (bus->dhd->busstate == DHD_BUS_DOWN) {
2072 		DHD_ERROR(("%s: already down by net_dev_reset\n", __FUNCTION__));
2073 		goto done;
2074 	}
2075 
2076 	DHD_DISABLE_RUNTIME_PM(bus->dhd);
2077 
2078 	DHD_GENERAL_LOCK(bus->dhd, flags);
2079 	DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
2080 	bus->dhd->busstate = DHD_BUS_DOWN;
2081 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
2082 
2083 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
2084 	atomic_set(&bus->dhd->block_bus, TRUE);
2085 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
2086 
2087 	DHD_BUS_LOCK(bus->bus_lock, flags_bus);
2088 	dhdpcie_bus_intr_disable(bus);
2089 	DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
2090 
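	/*
	 * Acknowledge any pending dongle interrupt. The PCIIntstatus bits are
	 * assumed to be write-1-to-clear here, so reading the status and
	 * writing the same value back clears exactly the bits that were set.
	 */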
2091 	if (!bus->is_linkdown) {
2092 		uint32 status;
2093 		status = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
2094 		dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, status);
2095 	}
2096 
2097 	if (!dhd_download_fw_on_driverload) {
2098 		dhd_dpc_kill(bus->dhd);
2099 	}
2100 
2101 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
2102 	pm_runtime_disable(dhd_bus_to_dev(bus));
2103 	pm_runtime_set_suspended(dhd_bus_to_dev(bus));
2104 	pm_runtime_enable(dhd_bus_to_dev(bus));
2105 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
2106 
2107 	/* Clear rx control and wake any waiters */
2108 	dhd_os_set_ioctl_resp_timeout(IOCTL_DISABLE_TIMEOUT);
2109 	dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_BUS_STOP);
2110 
2111 done:
2112 	return;
2113 }
2114 
2115 /**
2116  * Watchdog timer function.
2117  * @param dhd   Represents a specific hardware (dongle) instance that this DHD manages
2118  */
2119 bool dhd_bus_watchdog(dhd_pub_t *dhd)
2120 {
2121 	unsigned long flags;
2122 	dhd_bus_t *bus = dhd->bus;
2123 
2124 	DHD_GENERAL_LOCK(dhd, flags);
2125 	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd) ||
2126 			DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd)) {
2127 		DHD_GENERAL_UNLOCK(dhd, flags);
2128 		return FALSE;
2129 	}
2130 	DHD_BUS_BUSY_SET_IN_WD(dhd);
2131 	DHD_GENERAL_UNLOCK(dhd, flags);
2132 
2133 	/* Poll for console output periodically */
2134 	if (dhd->busstate == DHD_BUS_DATA &&
2135 		dhd->dhd_console_ms != 0 &&
2136 		bus->bus_low_power_state == DHD_BUS_NO_LOW_POWER_STATE) {
2137 		bus->console.count += dhd_watchdog_ms;
2138 		if (bus->console.count >= dhd->dhd_console_ms) {
2139 			bus->console.count -= dhd->dhd_console_ms;
2140 
2141 			if (MULTIBP_ENAB(bus->sih)) {
2142 				dhd_bus_pcie_pwr_req(bus);
2143 			}
2144 
2145 			/* Make sure backplane clock is on */
2146 			if (dhdpcie_bus_readconsole(bus) < 0) {
2147 				dhd->dhd_console_ms = 0; /* On error, stop trying */
2148 			}
2149 
2150 			if (MULTIBP_ENAB(bus->sih)) {
2151 				dhd_bus_pcie_pwr_req_clear(bus);
2152 			}
2153 		}
2154 	}
2155 
2156 #ifdef DHD_READ_INTSTATUS_IN_DPC
2157 	if (bus->poll) {
2158 		bus->ipend = TRUE;
2159 		bus->dpc_sched = TRUE;
2160 		dhd_sched_dpc(bus->dhd);     /* queue DPC now!! */
2161 	}
2162 #endif /* DHD_READ_INTSTATUS_IN_DPC */
2163 
2164 	DHD_GENERAL_LOCK(dhd, flags);
2165 	DHD_BUS_BUSY_CLEAR_IN_WD(dhd);
2166 	dhd_os_busbusy_wake(dhd);
2167 	DHD_GENERAL_UNLOCK(dhd, flags);
2168 
2169 	return TRUE;
2170 } /* dhd_bus_watchdog */
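
/*
 * Console poll cadence example (illustrative numbers): with dhd_watchdog_ms =
 * 10 and dhd->dhd_console_ms = 250, console.count grows by 10 per watchdog
 * tick and dhdpcie_bus_readconsole() runs on every 25th tick; subtracting
 * dhd_console_ms (rather than resetting the count to 0) preserves any
 * remainder, so the average poll period stays at dhd_console_ms.
 */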
2171 
2172 #if defined(SUPPORT_MULTIPLE_REVISION)
2173 static int concate_revision_bcm4358(dhd_bus_t *bus, char *fw_path, char *nv_path)
2174 {
2175 	uint32 chiprev;
2176 #if defined(SUPPORT_MULTIPLE_CHIPS)
2177 	char chipver_tag[20] = "_4358";
2178 #else
2179 	char chipver_tag[10] = {0, };
2180 #endif /* SUPPORT_MULTIPLE_CHIPS */
2181 
2182 	chiprev = dhd_bus_chiprev(bus);
2183 	if (chiprev == 0) {
2184 		DHD_ERROR(("----- CHIP 4358 A0 -----\n"));
2185 		strcat(chipver_tag, "_a0");
2186 	} else if (chiprev == 1) {
2187 		DHD_ERROR(("----- CHIP 4358 A1 -----\n"));
2188 #if defined(SUPPORT_MULTIPLE_CHIPS) || defined(SUPPORT_MULTIPLE_MODULE_CIS)
2189 		strcat(chipver_tag, "_a1");
2190 #endif /* defined(SUPPORT_MULTIPLE_CHIPS) || defined(SUPPORT_MULTIPLE_MODULE_CIS) */
2191 	} else if (chiprev == 3) {
2192 		DHD_ERROR(("----- CHIP 4358 A3 -----\n"));
2193 #if defined(SUPPORT_MULTIPLE_CHIPS)
2194 		strcat(chipver_tag, "_a3");
2195 #endif /* SUPPORT_MULTIPLE_CHIPS */
2196 	} else {
2197 		DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chiprev));
2198 	}
2199 
2200 	strcat(fw_path, chipver_tag);
2201 
2202 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK)
2203 	if (chiprev == 1 || chiprev == 3) {
2204 		int ret = dhd_check_module_b85a();
2205 		if ((chiprev == 1) && (ret < 0)) {
2206 			memset(chipver_tag, 0x00, sizeof(chipver_tag));
2207 			strcat(chipver_tag, "_b85");
2208 			strcat(chipver_tag, "_a1");
2209 		}
2210 	}
2211 
2212 	DHD_ERROR(("%s: chipver_tag %s \n", __FUNCTION__, chipver_tag));
2213 #endif /* defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) */
2214 
2215 #if defined(SUPPORT_MULTIPLE_BOARD_REV)
2216 	if (system_rev >= 10) {
2217 		DHD_ERROR(("----- Board Rev  [%d]-----\n", system_rev));
2218 		strcat(chipver_tag, "_r10");
2219 	}
2220 #endif /* SUPPORT_MULTIPLE_BOARD_REV */
2221 	strcat(nv_path, chipver_tag);
2222 
2223 	return 0;
2224 }
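
/*
 * Worked example (path shape assumed for illustration): for chiprev 0 the tag
 * "_a0" is produced, so an fw_path of ".../fw_bcm4358" becomes
 * ".../fw_bcm4358_a0"; nv_path is extended the same way, gaining "_r10" as
 * well when SUPPORT_MULTIPLE_BOARD_REV is set and system_rev >= 10.
 */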
2225 
2226 static int concate_revision_bcm4359(dhd_bus_t *bus, char *fw_path, char *nv_path)
2227 {
2228 	uint32 chip_ver;
2229 	char chipver_tag[10] = {0, };
2230 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
2231 	defined(SUPPORT_BCM4359_MIXED_MODULES)
2232 	int module_type = -1;
2233 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
2234 
2235 	chip_ver = bus->sih->chiprev;
2236 	if (chip_ver == 4) {
2237 		DHD_ERROR(("----- CHIP 4359 B0 -----\n"));
2238 		strncat(chipver_tag, "_b0", strlen("_b0"));
2239 	} else if (chip_ver == 5) {
2240 		DHD_ERROR(("----- CHIP 4359 B1 -----\n"));
2241 		strncat(chipver_tag, "_b1", strlen("_b1"));
2242 	} else if (chip_ver == 9) {
2243 		DHD_ERROR(("----- CHIP 4359 C0 -----\n"));
2244 		strncat(chipver_tag, "_c0", strlen("_c0"));
2245 	} else {
2246 		DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chip_ver));
2247 		return -1;
2248 	}
2249 
2250 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
2251 	defined(SUPPORT_BCM4359_MIXED_MODULES)
2252 	module_type =  dhd_check_module_b90();
2253 
2254 	switch (module_type) {
2255 		case BCM4359_MODULE_TYPE_B90B:
2256 			strcat(fw_path, chipver_tag);
2257 			break;
2258 		case BCM4359_MODULE_TYPE_B90S:
2259 		default:
2260 			/*
2261 			 * If the .cid.info file does not exist, force-load
2262 			 * the B90S FW for the initial MFG boot-up.
2263 			*/
2264 			if (chip_ver == 5) {
2265 				strncat(fw_path, "_b90s", strlen("_b90s"));
2266 			}
2267 			strcat(fw_path, chipver_tag);
2268 			strcat(nv_path, chipver_tag);
2269 			break;
2270 	}
2271 #else /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
2272 	strcat(fw_path, chipver_tag);
2273 	strcat(nv_path, chipver_tag);
2274 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
2275 
2276 	return 0;
2277 }
2278 
2279 #if defined(USE_CID_CHECK)
2280 
2281 #define MAX_EXTENSION 20
2282 #define MODULE_BCM4361_INDEX	3
2283 #define CHIP_REV_A0	1
2284 #define CHIP_REV_A1	2
2285 #define CHIP_REV_B0	3
2286 #define CHIP_REV_B1	4
2287 #define CHIP_REV_B2	5
2288 #define CHIP_REV_C0	6
2289 #define BOARD_TYPE_EPA				0x080f
2290 #define BOARD_TYPE_IPA				0x0827
2291 #define BOARD_TYPE_IPA_OLD			0x081a
2292 #define DEFAULT_CIDINFO_FOR_EPA		"r00a_e000_a0_ePA"
2293 #define DEFAULT_CIDINFO_FOR_IPA		"r00a_e000_a0_iPA"
2294 #define DEFAULT_CIDINFO_FOR_A1		"r01a_e30a_a1"
2295 #define DEFAULT_CIDINFO_FOR_B0		"r01i_e32_b0"
2296 #define MAX_VID_LEN					8
2297 #define CIS_TUPLE_HDR_LEN		2
2298 #if defined(BCM4361_CHIP)
2299 #define CIS_TUPLE_START_ADDRESS		0x18011110
2300 #define CIS_TUPLE_END_ADDRESS		0x18011167
2301 #elif defined(BCM4375_CHIP)
2302 #define CIS_TUPLE_START_ADDRESS		0x18011120
2303 #define CIS_TUPLE_END_ADDRESS		0x18011177
2304 #endif /* defined(BCM4361_CHIP) */
2305 #define CIS_TUPLE_MAX_COUNT		(uint32)((CIS_TUPLE_END_ADDRESS - CIS_TUPLE_START_ADDRESS\
2306 						+ 1) / sizeof(uint32))
2307 #define CIS_TUPLE_TAG_START			0x80
2308 #define CIS_TUPLE_TAG_VENDOR		0x81
2309 #define CIS_TUPLE_TAG_BOARDTYPE		0x1b
2310 #define CIS_TUPLE_TAG_LENGTH		1
2311 #define NVRAM_FEM_MURATA			"_murata"
2312 #define CID_FEM_MURATA				"_mur_"
2313 
2314 typedef struct cis_tuple_format {
2315 	uint8	id;
2316 	uint8	len;	/* total length of tag and data */
2317 	uint8	tag;
2318 	uint8	data[1];
2319 } cis_tuple_format_t;
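
/*
 * Byte-layout sketch of one OTP tuple as walked by
 * dhd_parse_board_information_bcm() below (example bytes are made up):
 *
 *	offset 0:  id   = 0x80	(CIS_TUPLE_TAG_START)
 *	offset 1:  len  = 0x07	(tag byte plus 6 data bytes)
 *	offset 2:  tag  = 0x81	(CIS_TUPLE_TAG_VENDOR)
 *	offset 3+: data = 6-byte vendor id (len - CIS_TUPLE_TAG_LENGTH bytes)
 *
 * The next tuple begins (len + CIS_TUPLE_HDR_LEN) bytes after the current
 * one. For BCM4361 the scan window is (CIS_TUPLE_END_ADDRESS -
 * CIS_TUPLE_START_ADDRESS + 1) = 88 bytes, i.e. CIS_TUPLE_MAX_COUNT = 22
 * uint32 words.
 */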
2320 
2321 typedef struct {
2322 	char cid_ext[MAX_EXTENSION];
2323 	char nvram_ext[MAX_EXTENSION];
2324 	char fw_ext[MAX_EXTENSION];
2325 } naming_info_t;
2326 
2327 naming_info_t bcm4361_naming_table[] = {
2328 	{ {""}, {""}, {""} },
2329 	{ {"r00a_e000_a0_ePA"}, {"_a0_ePA"}, {"_a0_ePA"} },
2330 	{ {"r00a_e000_a0_iPA"}, {"_a0"}, {"_a1"} },
2331 	{ {"r01a_e30a_a1"}, {"_r01a_a1"}, {"_a1"} },
2332 	{ {"r02a_e30a_a1"}, {"_r02a_a1"}, {"_a1"} },
2333 	{ {"r02c_e30a_a1"}, {"_r02c_a1"}, {"_a1"} },
2334 	{ {"r01d_e31_b0"}, {"_r01d_b0"}, {"_b0"} },
2335 	{ {"r01f_e31_b0"}, {"_r01f_b0"}, {"_b0"} },
2336 	{ {"r02g_e31_b0"}, {"_r02g_b0"}, {"_b0"} },
2337 	{ {"r01h_e32_b0"}, {"_r01h_b0"}, {"_b0"} },
2338 	{ {"r01i_e32_b0"}, {"_r01i_b0"}, {"_b0"} },
2339 	{ {"r02j_e32_b0"}, {"_r02j_b0"}, {"_b0"} },
2340 	{ {"r012_1kl_a1"}, {"_r012_a1"}, {"_a1"} },
2341 	{ {"r013_1kl_b0"}, {"_r013_b0"}, {"_b0"} },
2342 	{ {"r013_1kl_b0"}, {"_r013_b0"}, {"_b0"} },
2343 	{ {"r014_1kl_b0"}, {"_r014_b0"}, {"_b0"} },
2344 	{ {"r015_1kl_b0"}, {"_r015_b0"}, {"_b0"} },
2345 	{ {"r020_1kl_b0"}, {"_r020_b0"}, {"_b0"} },
2346 	{ {"r021_1kl_b0"}, {"_r021_b0"}, {"_b0"} },
2347 	{ {"r022_1kl_b0"}, {"_r022_b0"}, {"_b0"} },
2348 	{ {"r023_1kl_b0"}, {"_r023_b0"}, {"_b0"} },
2349 	{ {"r024_1kl_b0"}, {"_r024_b0"}, {"_b0"} },
2350 	{ {"r030_1kl_b0"}, {"_r030_b0"}, {"_b0"} },
2351 	{ {"r031_1kl_b0"}, {"_r030_b0"}, {"_b0"} },	/* exceptional case : r31 -> r30 */
2352 	{ {"r032_1kl_b0"}, {"_r032_b0"}, {"_b0"} },
2353 	{ {"r033_1kl_b0"}, {"_r033_b0"}, {"_b0"} },
2354 	{ {"r034_1kl_b0"}, {"_r034_b0"}, {"_b0"} },
2355 	{ {"r02a_e32a_b2"}, {"_r02a_b2"}, {"_b2"} },
2356 	{ {"r02b_e32a_b2"}, {"_r02b_b2"}, {"_b2"} },
2357 	{ {"r020_1qw_b2"}, {"_r020_b2"}, {"_b2"} },
2358 	{ {"r021_1qw_b2"}, {"_r021_b2"}, {"_b2"} },
2359 	{ {"r022_1qw_b2"}, {"_r022_b2"}, {"_b2"} },
2360 	{ {"r031_1qw_b2"}, {"_r031_b2"}, {"_b2"} },
2361 	{ {"r032_1qw_b2"}, {"_r032_b2"}, {"_b2"} },
2362 	{ {"r041_1qw_b2"}, {"_r041_b2"}, {"_b2"} }
2363 };
2364 
2365 #define MODULE_BCM4375_INDEX	3
2366 
2367 naming_info_t bcm4375_naming_table[] = {
2368 	{ {""}, {""}, {""} },
2369 	{ {"e41_es11"}, {"_ES00_semco_b0"}, {"_b0"} },
2370 	{ {"e43_es33"}, {"_ES01_semco_b0"}, {"_b0"} },
2371 	{ {"e43_es34"}, {"_ES02_semco_b0"}, {"_b0"} },
2372 	{ {"e43_es35"}, {"_ES02_semco_b0"}, {"_b0"} },
2373 	{ {"e43_es36"}, {"_ES03_semco_b0"}, {"_b0"} },
2374 	{ {"e43_cs41"}, {"_CS00_semco_b1"}, {"_b1"} },
2375 	{ {"e43_cs51"}, {"_CS01_semco_b1"}, {"_b1"} },
2376 	{ {"e43_cs53"}, {"_CS01_semco_b1"}, {"_b1"} },
2377 	{ {"e43_cs61"}, {"_CS00_skyworks_b1"}, {"_b1"} },
2378 	{ {"1rh_es10"}, {"_1rh_es10_b0"}, {"_b0"} },
2379 	{ {"1rh_es11"}, {"_1rh_es11_b0"}, {"_b0"} },
2380 	{ {"1rh_es12"}, {"_1rh_es12_b0"}, {"_b0"} },
2381 	{ {"1rh_es13"}, {"_1rh_es13_b0"}, {"_b0"} },
2382 	{ {"1rh_es20"}, {"_1rh_es20_b0"}, {"_b0"} },
2383 	{ {"1rh_es32"}, {"_1rh_es32_b0"}, {"_b0"} },
2384 	{ {"1rh_es41"}, {"_1rh_es41_b1"}, {"_b1"} },
2385 	{ {"1rh_es42"}, {"_1rh_es42_b1"}, {"_b1"} },
2386 	{ {"1rh_es43"}, {"_1rh_es43_b1"}, {"_b1"} },
2387 	{ {"1rh_es44"}, {"_1rh_es44_b1"}, {"_b1"} }
2388 };
2389 
2390 static naming_info_t *
2391 dhd_find_naming_info(naming_info_t table[], int table_size, char *module_type)
2392 {
2393 	int index_found = 0, i = 0;
2394 
2395 	if (module_type && strlen(module_type) > 0) {
2396 		for (i = 1; i < table_size; i++) {
2397 			if (!strncmp(table[i].cid_ext, module_type, strlen(table[i].cid_ext))) {
2398 				index_found = i;
2399 				break;
2400 			}
2401 		}
2402 	}
2403 
2404 	DHD_INFO(("%s: index_found=%d\n", __FUNCTION__, index_found));
2405 
2406 	return &table[index_found];
2407 }
2408 
2409 static naming_info_t *
2410 dhd_find_naming_info_by_cid(naming_info_t table[], int table_size,
2411 	char *cid_info)
2412 {
2413 	int index_found = 0, i = 0;
2414 	char *ptr;
2415 
2416 	/* truncate extension */
2417 	for (i = 1, ptr = cid_info; i < MODULE_BCM4361_INDEX && ptr; i++) {
2418 		ptr = bcmstrstr(ptr, "_");
2419 		if (ptr) {
2420 			ptr++;
2421 		}
2422 	}
2423 
2424 	for (i = 1; i < table_size && ptr; i++) {
2425 		if (!strncmp(table[i].cid_ext, ptr, strlen(table[i].cid_ext))) {
2426 			index_found = i;
2427 			break;
2428 		}
2429 	}
2430 
2431 	DHD_INFO(("%s: index_found=%d\n", __FUNCTION__, index_found));
2432 
2433 	return &table[index_found];
2434 }
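
/*
 * Truncation example (the cid string is hypothetical): with
 * MODULE_BCM4361_INDEX = 3, the first loop above skips past two '_'
 * separators, so a cid_info of "semco_sky_r013_1kl_b0" leaves ptr at
 * "r013_1kl_b0", which matches the bcm4361_naming_table entry
 * {"r013_1kl_b0"} and yields nvram_ext "_r013_b0" and fw_ext "_b0".
 */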
2435 
2436 static int
2437 dhd_parse_board_information_bcm(dhd_bus_t *bus, int *boardtype,
2438 	unsigned char *vid, int *vid_length)
2439 {
2440 	int boardtype_backplane_addr[] = {
2441 		0x18010324, /* OTP Control 1 */
2442 		0x18012618, /* PMU min resource mask */
2443 	};
2444 	int boardtype_backplane_data[] = {
2445 		0x00fa0000,
2446 		0x0e4fffff /* Keep on ARMHTAVAIL */
2447 	};
2448 	int int_val = 0, i = 0;
2449 	cis_tuple_format_t *tuple;
2450 	int totlen, len;
2451 	uint32 raw_data[CIS_TUPLE_MAX_COUNT];
2452 
2453 	for (i = 0; i < ARRAYSIZE(boardtype_backplane_addr); i++) {
2454 		/* Write new OTP and PMU configuration */
2455 		if (si_backplane_access(bus->sih, boardtype_backplane_addr[i], sizeof(int),
2456 				&boardtype_backplane_data[i], FALSE) != BCME_OK) {
2457 			DHD_ERROR(("invalid size/addr combination\n"));
2458 			return BCME_ERROR;
2459 		}
2460 
2461 		if (si_backplane_access(bus->sih, boardtype_backplane_addr[i], sizeof(int),
2462 				&int_val, TRUE) != BCME_OK) {
2463 			DHD_ERROR(("invalid size/addr combination\n"));
2464 			return BCME_ERROR;
2465 		}
2466 
2467 		DHD_INFO(("%s: boardtype_backplane_addr 0x%08x rdata 0x%04x\n",
2468 			__FUNCTION__, boardtype_backplane_addr[i], int_val));
2469 	}
2470 
2471 	/* read tuple raw data */
2472 	for (i = 0; i < CIS_TUPLE_MAX_COUNT; i++) {
2473 		if (si_backplane_access(bus->sih, CIS_TUPLE_START_ADDRESS + i * sizeof(uint32),
2474 				sizeof(uint32),	&raw_data[i], TRUE) != BCME_OK) {
2475 			break;
2476 		}
2477 	}
2478 
2479 	totlen = i * sizeof(uint32);
2480 	tuple = (cis_tuple_format_t *)raw_data;
2481 
2482 	/* check the first tuple has tag 'start' */
2483 	if (tuple->id != CIS_TUPLE_TAG_START) {
2484 		return BCME_ERROR;
2485 	}
2486 
2487 	*vid_length = *boardtype = 0;
2488 
2489 	/* find tagged parameter */
2490 	while ((totlen >= (tuple->len + CIS_TUPLE_HDR_LEN)) &&
2491 			(*vid_length == 0 || *boardtype == 0)) {
2492 		len = tuple->len;
2493 
2494 		if ((tuple->tag == CIS_TUPLE_TAG_VENDOR) &&
2495 				(totlen >= (int)(len + CIS_TUPLE_HDR_LEN))) {
2496 			/* found VID */
2497 			memcpy(vid, tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH);
2498 			*vid_length = tuple->len - CIS_TUPLE_TAG_LENGTH;
2499 			prhex("OTP VID", tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH);
2500 		}
2501 		else if ((tuple->tag == CIS_TUPLE_TAG_BOARDTYPE) &&
2502 				(totlen >= (int)(len + CIS_TUPLE_HDR_LEN))) {
2503 			/* found boardtype */
2504 			*boardtype = (int)tuple->data[0];
2505 			prhex("OTP boardtype", tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH);
2506 		}
2507 
2508 		tuple = (cis_tuple_format_t*)((uint8*)tuple + (len + CIS_TUPLE_HDR_LEN));
2509 		totlen -= (len + CIS_TUPLE_HDR_LEN);
2510 	}
2511 
2512 	if (*vid_length <= 0 || *boardtype <= 0) {
2513 		DHD_ERROR(("failed to parse information (vid=%d, boardtype=%d)\n",
2514 			*vid_length, *boardtype));
2515 		return BCME_ERROR;
2516 	}
2517 
2518 	return BCME_OK;
2519 
2520 }
2521 
2522 static naming_info_t *
2523 dhd_find_naming_info_by_chip_rev(naming_info_t table[], int table_size,
2524 	dhd_bus_t *bus, bool *is_murata_fem)
2525 {
2526 	int board_type = 0, chip_rev = 0, vid_length = 0;
2527 	unsigned char vid[MAX_VID_LEN];
2528 	naming_info_t *info = &table[0];
2529 	char *cid_info = NULL;
2530 
2531 	if (!bus || !bus->sih) {
2532 		DHD_ERROR(("%s:bus(%p) or bus->sih is NULL\n", __FUNCTION__, bus));
2533 		return NULL;
2534 	}
2535 	chip_rev = bus->sih->chiprev;
2536 
2537 	if (dhd_parse_board_information_bcm(bus, &board_type, vid, &vid_length)
2538 			!= BCME_OK) {
2539 		DHD_ERROR(("%s:failed to parse board information\n", __FUNCTION__));
2540 		return NULL;
2541 	}
2542 
2543 	DHD_INFO(("%s:chip version %d\n", __FUNCTION__, chip_rev));
2544 
2545 #if defined(BCM4361_CHIP)
2546 	/* A0 chipset has exception only */
2547 	if (chip_rev == CHIP_REV_A0) {
2548 		if (board_type == BOARD_TYPE_EPA) {
2549 			info = dhd_find_naming_info(table, table_size,
2550 				DEFAULT_CIDINFO_FOR_EPA);
2551 		} else if ((board_type == BOARD_TYPE_IPA) ||
2552 				(board_type == BOARD_TYPE_IPA_OLD)) {
2553 			info = dhd_find_naming_info(table, table_size,
2554 				DEFAULT_CIDINFO_FOR_IPA);
2555 		}
2556 	} else {
2557 		cid_info = dhd_get_cid_info(vid, vid_length);
2558 		if (cid_info) {
2559 			info = dhd_find_naming_info_by_cid(table, table_size, cid_info);
2560 			if (strstr(cid_info, CID_FEM_MURATA)) {
2561 				*is_murata_fem = TRUE;
2562 			}
2563 		}
2564 	}
2565 #else
2566 	cid_info = dhd_get_cid_info(vid, vid_length);
2567 	if (cid_info) {
2568 		info = dhd_find_naming_info_by_cid(table, table_size, cid_info);
2569 		if (strstr(cid_info, CID_FEM_MURATA)) {
2570 			*is_murata_fem = TRUE;
2571 		}
2572 	}
2573 #endif /* BCM4361_CHIP */
2574 
2575 	return info;
2576 }
2577 #endif /* USE_CID_CHECK */
2578 
2579 static int
2580 concate_revision_bcm4361(dhd_bus_t *bus, char *fw_path, char *nv_path)
2581 {
2582 	int ret = BCME_OK;
2583 #if defined(SUPPORT_BCM4361_MIXED_MODULES) && defined(USE_CID_CHECK)
2584 	char module_type[MAX_VNAME_LEN];
2585 	naming_info_t *info = NULL;
2586 	bool is_murata_fem = FALSE;
2587 
2588 	memset(module_type, 0, sizeof(module_type));
2589 
2590 	if (dhd_check_module_bcm(module_type,
2591 			MODULE_BCM4361_INDEX, &is_murata_fem) == BCME_OK) {
2592 		info = dhd_find_naming_info(bcm4361_naming_table,
2593 			ARRAYSIZE(bcm4361_naming_table), module_type);
2594 	} else {
2595 		/* in case the .cid.info file does not exist */
2596 		info = dhd_find_naming_info_by_chip_rev(bcm4361_naming_table,
2597 			ARRAYSIZE(bcm4361_naming_table), bus, &is_murata_fem);
2598 	}
2599 
2600 	if (bcmstrnstr(nv_path, PATH_MAX,  "_murata", 7)) {
2601 		is_murata_fem = FALSE;
2602 	}
2603 
2604 	if (info) {
2605 		if (is_murata_fem) {
2606 			strncat(nv_path, NVRAM_FEM_MURATA, strlen(NVRAM_FEM_MURATA));
2607 		}
2608 		strncat(nv_path, info->nvram_ext, strlen(info->nvram_ext));
2609 		strncat(fw_path, info->fw_ext, strlen(info->fw_ext));
2610 	} else {
2611 		DHD_ERROR(("%s:failed to find extension for nvram and firmware\n", __FUNCTION__));
2612 		ret = BCME_ERROR;
2613 	}
2614 #else /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK */
2615 	char chipver_tag[10] = {0, };
2616 
2617 	strcat(fw_path, chipver_tag);
2618 	strcat(nv_path, chipver_tag);
2619 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK */
2620 
2621 	return ret;
2622 }
2623 
2624 static int
2625 concate_revision_bcm4375(dhd_bus_t *bus, char *fw_path, char *nv_path)
2626 {
2627 	int ret = BCME_OK;
2628 #if defined(SUPPORT_BCM4375_MIXED_MODULES) && defined(USE_CID_CHECK)
2629 	char module_type[MAX_VNAME_LEN];
2630 	naming_info_t *info = NULL;
2631 	bool is_murata_fem = FALSE;
2632 
2633 	memset(module_type, 0, sizeof(module_type));
2634 
2635 	if (dhd_check_module_bcm(module_type,
2636 			MODULE_BCM4375_INDEX, &is_murata_fem) == BCME_OK) {
2637 		info = dhd_find_naming_info(bcm4375_naming_table,
2638 				ARRAYSIZE(bcm4375_naming_table), module_type);
2639 	} else {
2640 		/* in case the .cid.info file does not exist */
2641 		info = dhd_find_naming_info_by_chip_rev(bcm4375_naming_table,
2642 				ARRAYSIZE(bcm4375_naming_table), bus, &is_murata_fem);
2643 	}
2644 
2645 	if (info) {
2646 		strncat(nv_path, info->nvram_ext, strlen(info->nvram_ext));
2647 		strncat(fw_path, info->fw_ext, strlen(info->fw_ext));
2648 	} else {
2649 		DHD_ERROR(("%s:failed to find extension for nvram and firmware\n", __FUNCTION__));
2650 		ret = BCME_ERROR;
2651 	}
2652 #else /* SUPPORT_BCM4375_MIXED_MODULES && USE_CID_CHECK */
2653 	char chipver_tag[10] = {0, };
2654 
2655 	strcat(fw_path, chipver_tag);
2656 	strcat(nv_path, chipver_tag);
2657 #endif /* SUPPORT_BCM4375_MIXED_MODULES && USE_CID_CHECK */
2658 
2659 	return ret;
2660 }
2661 
2662 int
2663 concate_revision(dhd_bus_t *bus, char *fw_path, char *nv_path)
2664 {
2665 	int res = 0;
2666 
2667 	if (!bus || !bus->sih) {
2668 		DHD_ERROR(("%s:Bus is Invalid\n", __FUNCTION__));
2669 		return -1;
2670 	}
2671 
2672 	if (!fw_path || !nv_path) {
2673 		DHD_ERROR(("fw_path or nv_path is null.\n"));
2674 		return res;
2675 	}
2676 
2677 	switch (si_chipid(bus->sih)) {
2678 
2679 	case BCM43569_CHIP_ID:
2680 	case BCM4358_CHIP_ID:
2681 		res = concate_revision_bcm4358(bus, fw_path, nv_path);
2682 		break;
2683 	case BCM4355_CHIP_ID:
2684 	case BCM4359_CHIP_ID:
2685 		res = concate_revision_bcm4359(bus, fw_path, nv_path);
2686 		break;
2687 	case BCM4361_CHIP_ID:
2688 	case BCM4347_CHIP_ID:
2689 		res = concate_revision_bcm4361(bus, fw_path, nv_path);
2690 		break;
2691 	case BCM4375_CHIP_ID:
2692 		res = concate_revision_bcm4375(bus, fw_path, nv_path);
2693 		break;
2694 	default:
2695 		DHD_ERROR(("REVISION SPECIFIC feature is not required\n"));
2696 		return res;
2697 	}
2698 
2699 	return res;
2700 }
2701 #endif /* SUPPORT_MULTIPLE_REVISION */
2702 
2703 uint16
2704 dhd_get_chipid(dhd_pub_t *dhd)
2705 {
2706 	dhd_bus_t *bus = dhd->bus;
2707 
2708 	if (bus && bus->sih)
2709 		return (uint16)si_chipid(bus->sih);
2710 	else
2711 		return 0;
2712 }
2713 
2714 /**
2715  * Loads the firmware given by the caller-supplied path, and the nvram image, into the PCIe dongle.
2716  *
2717  * BCM_REQUEST_FW specific :
2718  * Given the chip type, determines the file paths to use within /lib/firmware/brcm/ containing
2719  * firmware and nvm for that chip. If the download fails, retries with a different nvm file.
2720  *
2721  * BCMEMBEDIMAGE specific:
2722  * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header
2723  * file will be used instead.
2724  *
2725  * @return BCME_OK on success
2726  */
2727 int
2728 dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
2729                           char *pfw_path, char *pnv_path,
2730                           char *pclm_path, char *pconf_path)
2731 {
2732 	int ret;
2733 
2734 	bus->fw_path = pfw_path;
2735 	bus->nv_path = pnv_path;
2736 	bus->dhd->clm_path = pclm_path;
2737 	bus->dhd->conf_path = pconf_path;
2738 
2739 #if defined(SUPPORT_MULTIPLE_REVISION)
2740 	if (concate_revision(bus, bus->fw_path, bus->nv_path) != 0) {
2741 		DHD_ERROR(("%s: fail to concatnate revison \n",
2742 			__FUNCTION__));
2743 		return BCME_BADARG;
2744 	}
2745 #endif /* SUPPORT_MULTIPLE_REVISION */
2746 
2747 #if defined(DHD_BLOB_EXISTENCE_CHECK)
2748 	dhd_set_blob_support(bus->dhd, bus->fw_path);
2749 #endif /* DHD_BLOB_EXISTENCE_CHECK */
2750 
2751 	DHD_ERROR(("%s: firmware path=%s, nvram path=%s\n",
2752 		__FUNCTION__, bus->fw_path, bus->nv_path));
2753 	dhdpcie_dump_resource(bus);
2754 
2755 	ret = dhdpcie_download_firmware(bus, osh);
2756 
2757 	return ret;
2758 }
2759 
2760 void
2761 dhd_set_bus_params(struct dhd_bus *bus)
2762 {
2763 	if (bus->dhd->conf->dhd_poll >= 0) {
2764 		bus->poll = bus->dhd->conf->dhd_poll;
2765 		if (!bus->pollrate)
2766 			bus->pollrate = 1;
2767 		printf("%s: set polling mode %d\n", __FUNCTION__, bus->dhd->conf->dhd_poll);
2768 	}
2769 }
2770 
2771 /**
2772  * Loads firmware given by 'bus->fw_path' into PCIe dongle.
2773  *
2774  * BCM_REQUEST_FW specific :
2775  * Given the chip type, determines the file paths to use within /lib/firmware/brcm/ containing
2776  * firmware and nvm for that chip. If the download fails, retries with a different nvm file.
2777  *
2778  * BCMEMBEDIMAGE specific:
2779  * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header
2780  * file will be used instead.
2781  *
2782  * @return BCME_OK on success
2783  */
2784 static int
2785 dhdpcie_download_firmware(struct dhd_bus *bus, osl_t *osh)
2786 {
2787 	int ret = 0;
2788 #if defined(BCM_REQUEST_FW)
2789 	uint chipid = bus->sih->chip;
2790 	uint revid = bus->sih->chiprev;
2791 	char fw_path[64] = "/lib/firmware/brcm/bcm";	/* path to firmware image */
2792 	char nv_path[64];		/* path to nvram vars file */
2793 	bus->fw_path = fw_path;
2794 	bus->nv_path = nv_path;
2795 	switch (chipid) {
2796 	case BCM43570_CHIP_ID:
2797 		bcmstrncat(fw_path, "43570", 5);
2798 		switch (revid) {
2799 		case 0:
2800 			bcmstrncat(fw_path, "a0", 2);
2801 			break;
2802 		case 2:
2803 			bcmstrncat(fw_path, "a2", 2);
2804 			break;
2805 		default:
2806 			DHD_ERROR(("%s: revid is not found %x\n", __FUNCTION__,
2807 			revid));
2808 			break;
2809 		}
2810 		break;
2811 	default:
2812 		DHD_ERROR(("%s: unsupported device %x\n", __FUNCTION__,
2813 		chipid));
2814 		return 0;
2815 	}
2816 	/* load board specific nvram file */
2817 	snprintf(bus->nv_path, sizeof(nv_path), "%s.nvm", fw_path);
2818 	/* load firmware */
2819 	snprintf(bus->fw_path, sizeof(fw_path), "%s-firmware.bin", fw_path);
2820 #endif /* BCM_REQUEST_FW */
2821 
2822 	DHD_OS_WAKE_LOCK(bus->dhd);
2823 
2824 	dhd_conf_set_path_params(bus->dhd, bus->fw_path, bus->nv_path);
2825 	dhd_set_bus_params(bus);
2826 
2827 	ret = _dhdpcie_download_firmware(bus);
2828 
2829 	DHD_OS_WAKE_UNLOCK(bus->dhd);
2830 	return ret;
2831 } /* dhdpcie_download_firmware */
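
/*
 * Example of the BCM_REQUEST_FW path construction above for a 43570a2 part:
 * fw_path is built up as "/lib/firmware/brcm/bcm" + "43570" + "a2", and the
 * two snprintf() calls then derive
 *	nv_path = "/lib/firmware/brcm/bcm43570a2.nvm"
 *	fw_path = "/lib/firmware/brcm/bcm43570a2-firmware.bin"
 * (the nvm name is derived first, before fw_path gains its own suffix).
 */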
2832 
2833 #define DHD_MEMORY_SET_PATTERN 0xAA
2834 
2835 /**
2836  * Downloads a file containing firmware into dongle memory. In case of a .bea file, the DHD
2837  * is updated with the event logging partitions within that file as well.
2838  *
2839  * @param pfw_path    Path to .bin or .bea file
2840  */
2841 static int
2842 dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path)
2843 {
2844 	int bcmerror = BCME_ERROR;
2845 	int offset = 0;
2846 	int len = 0;
2847 	bool store_reset;
2848 	char *imgbuf = NULL;
2849 	uint8 *memblock = NULL, *memptr = NULL;
2850 	uint8 *memptr_tmp = NULL; // terence: check downloaded firmware is correct
2851 	int offset_end = bus->ramsize;
2852 	uint32 file_size = 0, read_len = 0;
2853 
2854 #if defined(DHD_FW_MEM_CORRUPTION)
2855 	if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
2856 		dhd_tcm_test_enable = TRUE;
2857 	} else {
2858 		dhd_tcm_test_enable = FALSE;
2859 	}
2860 #endif /* DHD_FW_MEM_CORRUPTION */
2861 	DHD_ERROR(("%s: dhd_tcm_test_enable %u\n", __FUNCTION__, dhd_tcm_test_enable));
2862 	/* TCM check */
2863 	if (dhd_tcm_test_enable && !dhd_bus_tcm_test(bus)) {
2864 		DHD_ERROR(("dhd_bus_tcm_test failed\n"));
2865 		bcmerror = BCME_ERROR;
2866 		goto err;
2867 	}
2868 	DHD_ERROR(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
2869 
2870 	/* Should succeed in opening image if it is actually given through registry
2871 	 * entry or in module param.
2872 	 */
2873 	imgbuf = dhd_os_open_image1(bus->dhd, pfw_path);
2874 	if (imgbuf == NULL) {
2875 		printf("%s: Open firmware file failed %s\n", __FUNCTION__, pfw_path);
2876 		goto err;
2877 	}
2878 
2879 	file_size = dhd_os_get_image_size(imgbuf);
2880 	if (!file_size) {
2881 		DHD_ERROR(("%s: get file size fails ! \n", __FUNCTION__));
2882 		goto err;
2883 	}
2884 
2885 	memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
2886 	if (memblock == NULL) {
2887 		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
2888 		bcmerror = BCME_NOMEM;
2889 		goto err;
2890 	}
2891 	if (dhd_msg_level & DHD_TRACE_VAL) {
2892 		memptr_tmp = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
2893 		if (memptr_tmp == NULL) {
2894 			DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
2895 			goto err;
2896 		}
2897 	}
2898 	if ((uint32)(uintptr)memblock % DHD_SDALIGN) {
2899 		memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
2900 	}
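
	/*
	 * Alignment example (addresses and DHD_SDALIGN value are illustrative):
	 * if MALLOC returned memblock = 0x1004 and DHD_SDALIGN = 32, then
	 * 0x1004 % 32 = 4, so memptr is advanced by 28 bytes to 0x1020, the
	 * next 32-byte boundary. The extra DHD_SDALIGN bytes allocated above
	 * guarantee there is room for this adjustment.
	 */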
2901 
2902 	/* check if CR4/CA7 */
2903 	store_reset = (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
2904 			si_setcore(bus->sih, ARMCA7_CORE_ID, 0));
2905 	/* Download image with MEMBLOCK size */
2906 	while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, imgbuf))) {
2907 		if (len < 0) {
2908 			DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len));
2909 			bcmerror = BCME_ERROR;
2910 			goto err;
2911 		}
2912 		read_len += len;
2913 		if (read_len > file_size) {
2914 			DHD_ERROR(("%s: WARNING! reading beyond EOF, len=%d; read_len=%u;"
2915 				" file_size=%u truncating len to %d \n", __FUNCTION__,
2916 				len, read_len, file_size, (len - (read_len - file_size))));
2917 			len -= (read_len - file_size);
2918 		}
2919 
2920 		/* if address is 0, store the reset instruction to be written at address 0 */
2921 		if (store_reset) {
2922 			ASSERT(offset == 0);
2923 			bus->resetinstr = *(((uint32*)memptr));
2924 			/* Add start of RAM address to the address given by user */
2925 			offset += bus->dongle_ram_base;
2926 			offset_end += offset;
2927 			store_reset = FALSE;
2928 		}
2929 
2930 		bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len);
2931 		if (bcmerror) {
2932 			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
2933 				__FUNCTION__, bcmerror, MEMBLOCK, offset));
2934 			goto err;
2935 		}
2936 
2937 		if (dhd_msg_level & DHD_TRACE_VAL) {
2938 			bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset, memptr_tmp, len);
2939 			if (bcmerror) {
2940 				DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
2941 				        __FUNCTION__, bcmerror, MEMBLOCK, offset));
2942 				goto err;
2943 			}
2944 			if (memcmp(memptr_tmp, memptr, len)) {
2945 				DHD_ERROR(("%s: Downloaded image is corrupted.\n", __FUNCTION__));
2946 				goto err;
2947 			} else
2948 				DHD_INFO(("%s: Download, Upload and compare succeeded.\n", __FUNCTION__));
2949 		}
2950 		offset += MEMBLOCK;
2951 
2952 		if (offset >= offset_end) {
2953 			DHD_ERROR(("%s: invalid address access to %x (offset end: %x)\n",
2954 				__FUNCTION__, offset, offset_end));
2955 			bcmerror = BCME_ERROR;
2956 			goto err;
2957 		}
2958 
2959 		if (read_len >= file_size) {
2960 			break;
2961 		}
2962 	}
2963 err:
2964 	if (memblock) {
2965 		MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
2966 		if (dhd_msg_level & DHD_TRACE_VAL) {
2967 			if (memptr_tmp)
2968 				MFREE(bus->dhd->osh, memptr_tmp, MEMBLOCK + DHD_SDALIGN);
2969 		}
2970 	}
2971 
2972 	if (imgbuf) {
2973 		dhd_os_close_image1(bus->dhd, imgbuf);
2974 	}
2975 
2976 	return bcmerror;
2977 } /* dhdpcie_download_code_file */
2978 
2979 static int
2980 dhdpcie_download_nvram(struct dhd_bus *bus)
2981 {
2982 	int bcmerror = BCME_ERROR;
2983 	uint len;
2984 	char * memblock = NULL;
2985 	char *bufp;
2986 	char *pnv_path;
2987 	bool nvram_file_exists;
2988 	bool nvram_uefi_exists = FALSE;
2989 	bool local_alloc = FALSE;
2990 	pnv_path = bus->nv_path;
2991 
2992 	nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0'));
2993 
2994 	/* First try UEFI */
2995 	len = MAX_NVRAMBUF_SIZE;
2996 	dhd_get_download_buffer(bus->dhd, NULL, NVRAM, &memblock, (int *)&len);
2997 
2998 	/* If UEFI empty, then read from file system */
2999 	if ((len <= 0) || (memblock == NULL)) {
3000 
3001 		if (nvram_file_exists) {
3002 			len = MAX_NVRAMBUF_SIZE;
3003 			dhd_get_download_buffer(bus->dhd, pnv_path, NVRAM, &memblock, (int *)&len);
3004 			if ((len <= 0 || len > MAX_NVRAMBUF_SIZE)) {
3005 				goto err;
3006 			}
3007 		}
3008 		else {
3009 			/* For SROM OTP no external file or UEFI required */
3010 			bcmerror = BCME_OK;
3011 		}
3012 	} else {
3013 		nvram_uefi_exists = TRUE;
3014 	}
3015 
3016 	DHD_ERROR(("%s: dhd_get_download_buffer len %d\n", __FUNCTION__, len));
3017 
3018 	if (len > 0 && len <= MAX_NVRAMBUF_SIZE && memblock != NULL) {
3019 		bufp = (char *) memblock;
3020 
3021 		{
3022 			bufp[len] = 0;
3023 			if (nvram_uefi_exists || nvram_file_exists) {
3024 				len = process_nvram_vars(bufp, len);
3025 			}
3026 		}
3027 
3028 		DHD_ERROR(("%s: process_nvram_vars len %d\n", __FUNCTION__, len));
3029 
3030 		if (len % 4) {
3031 			len += 4 - (len % 4);
3032 		}
3033 		bufp += len;
3034 		*bufp++ = 0;
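		/*
		 * Padding example (length is illustrative): a 27-byte vars blob
		 * is rounded up to len = 28 (the next multiple of 4), a NUL
		 * terminator is written just past it, and len + 1 = 29 bytes
		 * are handed to dhdpcie_downloadvars() below.
		 */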
3035 		if (len)
3036 			bcmerror = dhdpcie_downloadvars(bus, memblock, len + 1);
3037 		if (bcmerror) {
3038 			DHD_ERROR(("%s: error downloading vars: %d\n",
3039 				__FUNCTION__, bcmerror));
3040 		}
3041 	}
3042 
3043 err:
3044 	if (memblock) {
3045 		if (local_alloc) {
3046 			MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE);
3047 		} else {
3048 			dhd_free_download_buffer(bus->dhd, memblock, MAX_NVRAMBUF_SIZE);
3049 		}
3050 	}
3051 
3052 	return bcmerror;
3053 }
3054 
3055 static int
3056 dhdpcie_ramsize_read_image(struct dhd_bus *bus, char *buf, int len)
3057 {
3058 	int bcmerror = BCME_ERROR;
3059 	char *imgbuf = NULL;
3060 
3061 	if (buf == NULL || len == 0)
3062 		goto err;
3063 
3064 	/* External image takes precedence if specified */
3065 	if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
3066 		// opens and seeks to correct file offset:
3067 		imgbuf = dhd_os_open_image1(bus->dhd, bus->fw_path);
3068 		if (imgbuf == NULL) {
3069 			DHD_ERROR(("%s: Failed to open firmware file\n", __FUNCTION__));
3070 			goto err;
3071 		}
3072 
3073 		/* Read it */
3074 		if (len != dhd_os_get_image_block(buf, len, imgbuf)) {
3075 			DHD_ERROR(("%s: Failed to read %d bytes data\n", __FUNCTION__, len));
3076 			goto err;
3077 		}
3078 
3079 		bcmerror = BCME_OK;
3080 	}
3081 
3082 err:
3083 	if (imgbuf)
3084 		dhd_os_close_image1(bus->dhd, imgbuf);
3085 
3086 	return bcmerror;
3087 }
3088 
3089 /* The ramsize can be changed in the dongle image; for example, the 4365 chip shares sysmem
3090  * with the BMC, and how much sysmem belongs to the CA7 can be adjusted at dongle compile time.
3091  * So DHD needs to detect this case and update the dongle RAMSIZE accordingly.
3092  */
3093 static void
3094 dhdpcie_ramsize_adj(struct dhd_bus *bus)
3095 {
3096 	int i, search_len = 0;
3097 	uint8 *memptr = NULL;
3098 	uint8 *ramsizeptr = NULL;
3099 	uint ramsizelen;
3100 	uint32 ramsize_ptr_ptr[] = {RAMSIZE_PTR_PTR_LIST};
3101 	hnd_ramsize_ptr_t ramsize_info;
3102 
3103 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3104 
3105 	/* Adjust dongle RAMSIZE already called. */
3106 	if (bus->ramsize_adjusted) {
3107 		return;
3108 	}
3109 
3110 	/* success or failure,  we don't want to be here
3111 	 * more than once.
3112 	 */
3113 	bus->ramsize_adjusted = TRUE;
3114 
3115 	/* Do not handle this if a user-restricted dongle RAM size is in effect */
3116 	if (dhd_dongle_memsize) {
3117 		DHD_ERROR(("%s: user restricted dongle ram size to %d.\n", __FUNCTION__,
3118 			dhd_dongle_memsize));
3119 		return;
3120 	}
3121 
3122 	/* Out immediately if no image to download */
3123 	if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
3124 		DHD_ERROR(("%s: no fimrware file\n", __FUNCTION__));
3125 		return;
3126 	}
3127 
3128 	/* Get maximum RAMSIZE info search length */
3129 	for (i = 0; ; i++) {
3130 		if (ramsize_ptr_ptr[i] == RAMSIZE_PTR_PTR_END)
3131 			break;
3132 
3133 		if (search_len < (int)ramsize_ptr_ptr[i])
3134 			search_len = (int)ramsize_ptr_ptr[i];
3135 	}
3136 
3137 	if (!search_len)
3138 		return;
3139 
3140 	search_len += sizeof(hnd_ramsize_ptr_t);
3141 
3142 	memptr = MALLOC(bus->dhd->osh, search_len);
3143 	if (memptr == NULL) {
3144 		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, search_len));
3145 		return;
3146 	}
3147 
3148 	/* External image takes precedence if specified */
3149 	if (dhdpcie_ramsize_read_image(bus, (char *)memptr, search_len) != BCME_OK) {
3150 		goto err;
3151 	}
3152 	else {
3153 		ramsizeptr = memptr;
3154 		ramsizelen = search_len;
3155 	}
3156 
3157 	if (ramsizeptr) {
3158 		/* Check Magic */
3159 		for (i = 0; ; i++) {
3160 			if (ramsize_ptr_ptr[i] == RAMSIZE_PTR_PTR_END)
3161 				break;
3162 
3163 			if (ramsize_ptr_ptr[i] + sizeof(hnd_ramsize_ptr_t) > ramsizelen)
3164 				continue;
3165 
3166 			memcpy((char *)&ramsize_info, ramsizeptr + ramsize_ptr_ptr[i],
3167 				sizeof(hnd_ramsize_ptr_t));
3168 
3169 			if (ramsize_info.magic == HTOL32(HND_RAMSIZE_PTR_MAGIC)) {
3170 				bus->orig_ramsize = LTOH32(ramsize_info.ram_size);
3171 				bus->ramsize = LTOH32(ramsize_info.ram_size);
3172 				DHD_ERROR(("%s: Adjust dongle RAMSIZE to 0x%x\n", __FUNCTION__,
3173 					bus->ramsize));
3174 				break;
3175 			}
3176 		}
3177 	}
3178 
3179 err:
3180 	if (memptr)
3181 		MFREE(bus->dhd->osh, memptr, search_len);
3182 
3183 	return;
3184 } /* dhdpcie_ramsize_adj */
3185 
3186 /**
3187  * Downloads firmware file given by 'bus->fw_path' into PCIe dongle
3188  *
3189  * BCMEMBEDIMAGE specific:
3190  * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header
3191  * file will be used instead.
3192  *
3193  */
3194 static int
3195 _dhdpcie_download_firmware(struct dhd_bus *bus)
3196 {
3197 	int bcmerror = -1;
3198 
3199 	bool embed = FALSE;	/* download embedded firmware */
3200 	bool dlok = FALSE;	/* download firmware succeeded */
3201 
3202 	/* Out immediately if no image to download */
3203 	if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
3204 		DHD_ERROR(("%s: no fimrware file\n", __FUNCTION__));
3205 		return 0;
3206 	}
3207 	/* Adjust ram size */
3208 	dhdpcie_ramsize_adj(bus);
3209 
3210 	/* Keep arm in reset */
3211 	if (dhdpcie_bus_download_state(bus, TRUE)) {
3212 		DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__));
3213 		goto err;
3214 	}
3215 
3216 	/* External image takes precedence if specified */
3217 	if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
3218 		if (dhdpcie_download_code_file(bus, bus->fw_path)) {
3219 			DHD_ERROR(("%s:%d dongle image file download failed\n", __FUNCTION__,
3220 				__LINE__));
3221 			goto err;
3222 		} else {
3223 			embed = FALSE;
3224 			dlok = TRUE;
3225 		}
3226 	}
3227 
3228 	BCM_REFERENCE(embed);
3229 	if (!dlok) {
3230 		DHD_ERROR(("%s:%d dongle image download failed\n", __FUNCTION__, __LINE__));
3231 		goto err;
3232 	}
3233 
3234 	/* EXAMPLE: nvram_array */
3235 	/* If a valid nvram_arry is specified as above, it can be passed down to dongle */
3236 	/* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */
3237 
3238 	/* External nvram takes precedence if specified */
3239 	if (dhdpcie_download_nvram(bus)) {
3240 		DHD_ERROR(("%s:%d dongle nvram file download failed\n", __FUNCTION__, __LINE__));
3241 		goto err;
3242 	}
3243 
3244 	/* Take arm out of reset */
3245 	if (dhdpcie_bus_download_state(bus, FALSE)) {
3246 		DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__));
3247 		goto err;
3248 	}
3249 
3250 	bcmerror = 0;
3251 
3252 err:
3253 	return bcmerror;
3254 } /* _dhdpcie_download_firmware */
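
/*
 * Recap of the download sequence implemented above:
 *	1. dhdpcie_bus_download_state(bus, TRUE)  - hold the ARM core in reset
 *	2. dhdpcie_download_code_file()           - write the image into RAM/TCM
 *	3. dhdpcie_download_nvram()               - download the NVRAM vars
 *	4. dhdpcie_bus_download_state(bus, FALSE) - release reset, boot firmware
 */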
3255 
3256 static int
3257 dhdpcie_bus_readconsole(dhd_bus_t *bus)
3258 {
3259 	dhd_console_t *c = &bus->console;
3260 	uint8 line[CONSOLE_LINE_MAX], ch;
3261 	uint32 n, idx, addr;
3262 	int rv;
3263 	uint readlen = 0;
3264 	uint i = 0;
3265 
3266 	/* Don't do anything until FWREADY updates console address */
3267 	if (bus->console_addr == 0)
3268 		return -1;
3269 
3270 	/* Read console log struct */
3271 	addr = bus->console_addr + OFFSETOF(hnd_cons_t, log);
3272 
3273 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0)
3274 		return rv;
3275 
3276 	/* Allocate console buffer (one time only) */
3277 	if (c->buf == NULL) {
3278 		c->bufsize = ltoh32(c->log.buf_size);
3279 		if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL)
3280 			return BCME_NOMEM;
3281 		DHD_INFO(("conlog: bufsize=0x%x\n", c->bufsize));
3282 	}
3283 	idx = ltoh32(c->log.idx);
3284 
3285 	/* Protect against corrupt value */
3286 	if (idx > c->bufsize)
3287 		return BCME_ERROR;
3288 
3289 	/* Skip reading the console buffer if the index pointer has not moved */
3290 	if (idx == c->last)
3291 		return BCME_OK;
3292 
3293 	DHD_INFO(("conlog: addr=0x%x, idx=0x%x, last=0x%x \n", c->log.buf,
3294 	   idx, c->last));
3295 
3296 	/* Read the console buffer data to a local buffer */
3297 	/* As an optimization, read only the portion of the buffer needed,
3298 	 * but take care to handle wrap-around.
3299 	 */
3300 	addr = ltoh32(c->log.buf);
3301 
3302 	/* wrap around case - write ptr < read ptr */
3303 	if (idx < c->last) {
3304 		/* from read ptr to end of buffer */
3305 		readlen = c->bufsize - c->last;
3306 		if ((rv = dhdpcie_bus_membytes(bus, FALSE,
3307 				addr + c->last, c->buf, readlen)) < 0) {
3308 			DHD_ERROR(("conlog: read error[1] ! \n"));
3309 			return rv;
3310 		}
3311 		/* from beginning of buffer to write ptr */
3312 		if ((rv = dhdpcie_bus_membytes(bus, FALSE,
3313 				addr, c->buf + readlen,
3314 				idx)) < 0) {
3315 			DHD_ERROR(("conlog: read error[2] ! \n"));
3316 			return rv;
3317 		}
3318 		readlen += idx;
3319 	} else {
3320 		/* non-wraparound case, write ptr > read ptr */
3321 		readlen = (uint)idx - c->last;
3322 		if ((rv = dhdpcie_bus_membytes(bus, FALSE,
3323 				addr + c->last, c->buf, readlen)) < 0) {
3324 			DHD_ERROR(("conlog: read error[3] ! \n"));
3325 			return rv;
3326 		}
3327 	}
3328 	/* update read ptr */
3329 	c->last = idx;
3330 
3331 	/* now output the read data from the local buffer to the host console */
3332 	while (i < readlen) {
3333 		for (n = 0; n < CONSOLE_LINE_MAX - 2 && i < readlen; n++) {
3334 			ch = c->buf[i];
3335 			++i;
3336 			if (ch == '\n')
3337 				break;
3338 			line[n] = ch;
3339 		}
3340 
3341 		if (n > 0) {
3342 			if (line[n - 1] == '\r')
3343 				n--;
3344 			line[n] = 0;
3345 			printf("CONSOLE: %s\n", line);
3346 		}
3347 	}
3348 
3349 	return BCME_OK;
3350 
3351 } /* dhdpcie_bus_readconsole */
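/* EXAMPLE: a minimal sketch of the split ring-buffer read above, with a
 * plain memcpy() standing in for dhdpcie_bus_membytes(); 'ring_read' and its
 * parameters are illustrative names, not driver symbols. Returns the number
 * of bytes copied to 'out'.
 */
#if 0
#include <string.h>

static uint
ring_read(const uint8 *ring, uint bufsize, uint last, uint idx, uint8 *out)
{
	uint readlen;

	if (idx < last) {			/* write ptr wrapped past end of buffer */
		readlen = bufsize - last;	/* tail: read ptr to end */
		memcpy(out, ring + last, readlen);
		memcpy(out + readlen, ring, idx); /* head: start to write ptr */
		readlen += idx;
	} else {				/* contiguous: read ptr to write ptr */
		readlen = idx - last;
		memcpy(out, ring + last, readlen);
	}
	return readlen;
}
#endif /* EXAMPLE */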
3352 
3353 void
3354 dhd_bus_dump_console_buffer(dhd_bus_t *bus)
3355 {
3356 	uint32 n, i;
3357 	uint32 addr;
3358 	char *console_buffer = NULL;
3359 	uint32 console_ptr, console_size, console_index;
3360 	uint8 line[CONSOLE_LINE_MAX], ch;
3361 	int rv;
3362 
3363 	DHD_ERROR(("%s: Dump Complete Console Buffer\n", __FUNCTION__));
3364 
3365 	if (bus->is_linkdown) {
3366 		DHD_ERROR(("%s: Skip dump Console Buffer due to PCIe link down\n", __FUNCTION__));
3367 		return;
3368 	}
3369 
3370 	addr =	bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log);
3371 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
3372 		(uint8 *)&console_ptr, sizeof(console_ptr))) < 0) {
3373 		goto exit;
3374 	}
3375 
3376 	addr =	bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.buf_size);
3377 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
3378 		(uint8 *)&console_size, sizeof(console_size))) < 0) {
3379 		goto exit;
3380 	}
3381 
3382 	addr =	bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.idx);
3383 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
3384 		(uint8 *)&console_index, sizeof(console_index))) < 0) {
3385 		goto exit;
3386 	}
3387 
3388 	console_ptr = ltoh32(console_ptr);
3389 	console_size = ltoh32(console_size);
3390 	console_index = ltoh32(console_index);
3391 
3392 	if (console_size > CONSOLE_BUFFER_MAX ||
3393 		!(console_buffer = MALLOC(bus->dhd->osh, console_size))) {
3394 		goto exit;
3395 	}
3396 
3397 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, console_ptr,
3398 		(uint8 *)console_buffer, console_size)) < 0) {
3399 		goto exit;
3400 	}
3401 
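	/* Note: the scan below starts at console_index, the dongle's current
	 * write position; in a full circular log that is also the oldest byte,
	 * so the modulo walk emits lines in chronological order.
	 */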
3402 	for (i = 0, n = 0; i < console_size; i += n + 1) {
3403 		for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
3404 			ch = console_buffer[(console_index + i + n) % console_size];
3405 			if (ch == '\n')
3406 				break;
3407 			line[n] = ch;
3408 		}
3409 
3410 		if (n > 0) {
3411 			if (line[n - 1] == '\r')
3412 				n--;
3413 			line[n] = 0;
3414 			/* Don't use DHD_ERROR macro since we print
3415 			 * a lot of information quickly. The macro
3416 			 * will truncate a lot of the printfs
3417 			 */
3418 
3419 			printf("CONSOLE: %s\n", line);
3420 		}
3421 	}
3422 
3423 exit:
3424 	if (console_buffer)
3425 		MFREE(bus->dhd->osh, console_buffer, console_size);
3426 	return;
3427 }
3428 
3429 /**
3430  * Checks the shared area for a dongle trap or assert (e.g. after a rx ctrl
3431  * timeout) and dumps trap info, the console log and, optionally, a memory dump.
3432  * @return BCME_OK on success
3433  */
3434 static int
3435 dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size)
3436 {
3437 	int bcmerror = 0;
3438 	uint msize = 512;
3439 	char *mbuffer = NULL;
3440 	uint maxstrlen = 256;
3441 	char *str = NULL;
3442 	pciedev_shared_t *local_pciedev_shared = bus->pcie_sh;
3443 	struct bcmstrbuf strbuf;
3444 	unsigned long flags;
3445 	bool dongle_trap_occured = FALSE;
3446 
3447 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3448 
3449 	if (DHD_NOCHECKDIED_ON()) {
3450 		return 0;
3451 	}
3452 
3453 	if (data == NULL) {
3454 		/*
3455 		 * Called after a rx ctrl timeout. "data" is NULL.
3456 		 * allocate memory to trace the trap or assert.
3457 		 */
3458 		size = msize;
3459 		mbuffer = data = MALLOC(bus->dhd->osh, msize);
3460 
3461 		if (mbuffer == NULL) {
3462 			DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize));
3463 			bcmerror = BCME_NOMEM;
3464 			goto done2;
3465 		}
3466 	}
3467 
3468 	if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) {
3469 		DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen));
3470 		bcmerror = BCME_NOMEM;
3471 		goto done2;
3472 	}
3473 	DHD_GENERAL_LOCK(bus->dhd, flags);
3474 	DHD_BUS_BUSY_SET_IN_CHECKDIED(bus->dhd);
3475 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
3476 
3477 	if (MULTIBP_ENAB(bus->sih)) {
3478 		dhd_bus_pcie_pwr_req(bus);
3479 	}
3480 	if ((bcmerror = dhdpcie_readshared(bus)) < 0) {
3481 		goto done1;
3482 	}
3483 
3484 	bcm_binit(&strbuf, data, size);
3485 
3486 	bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address  : 0x%08X\n",
3487 	            local_pciedev_shared->msgtrace_addr, local_pciedev_shared->console_addr);
3488 
3489 	if ((local_pciedev_shared->flags & PCIE_SHARED_ASSERT_BUILT) == 0) {
3490 		/* NOTE: Misspelled assert is intentional - DO NOT FIX.
3491 		 * (Avoids conflict with real asserts for programmatic parsing of output.)
3492 		 */
3493 		bcm_bprintf(&strbuf, "Assrt not built in dongle\n");
3494 	}
3495 
3496 	if ((bus->pcie_sh->flags & (PCIE_SHARED_ASSERT|PCIE_SHARED_TRAP)) == 0) {
3497 		/* NOTE: Misspelled assert is intentional - DO NOT FIX.
3498 		 * (Avoids conflict with real asserts for programmatic parsing of output.)
3499 		 */
3500 		bcm_bprintf(&strbuf, "No trap%s in dongle",
3501 		          (bus->pcie_sh->flags & PCIE_SHARED_ASSERT_BUILT)
3502 		          ?"/assrt" :"");
3503 	} else {
3504 		if (bus->pcie_sh->flags & PCIE_SHARED_ASSERT) {
3505 			/* Download assert */
3506 			bcm_bprintf(&strbuf, "Dongle assert");
3507 			if (bus->pcie_sh->assert_exp_addr != 0) {
3508 				str[0] = '\0';
3509 				if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
3510 					bus->pcie_sh->assert_exp_addr,
3511 					(uint8 *)str, maxstrlen)) < 0) {
3512 					goto done1;
3513 				}
3514 
3515 				str[maxstrlen - 1] = '\0';
3516 				bcm_bprintf(&strbuf, " expr \"%s\"", str);
3517 			}
3518 
3519 			if (bus->pcie_sh->assert_file_addr != 0) {
3520 				str[0] = '\0';
3521 				if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
3522 					bus->pcie_sh->assert_file_addr,
3523 					(uint8 *)str, maxstrlen)) < 0) {
3524 					goto done1;
3525 				}
3526 
3527 				str[maxstrlen - 1] = '\0';
3528 				bcm_bprintf(&strbuf, " file \"%s\"", str);
3529 			}
3530 
3531 			bcm_bprintf(&strbuf, " line %d ",  bus->pcie_sh->assert_line);
3532 		}
3533 
3534 		if (bus->pcie_sh->flags & PCIE_SHARED_TRAP) {
3535 			trap_t *tr = &bus->dhd->last_trap_info;
3536 			dongle_trap_occured = TRUE;
3537 			if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
3538 				bus->pcie_sh->trap_addr, (uint8*)tr, sizeof(trap_t))) < 0) {
3539 				bus->dhd->dongle_trap_occured = TRUE;
3540 				goto done1;
3541 			}
3542 			dhd_bus_dump_trap_info(bus, &strbuf);
3543 		}
3544 	}
3545 
3546 	if (bus->pcie_sh->flags & (PCIE_SHARED_ASSERT | PCIE_SHARED_TRAP)) {
3547 		printf("%s: %s\n", __FUNCTION__, strbuf.origbuf);
3548 
3549 		dhd_bus_dump_console_buffer(bus);
3550 		dhd_prot_debug_info_print(bus->dhd);
3551 
3552 #if defined(DHD_FW_COREDUMP)
3553 		/* save core dump or write to a file */
3554 		if (bus->dhd->memdump_enabled) {
3555 #ifdef DHD_SSSR_DUMP
3556 			bus->dhd->collect_sssr = TRUE;
3557 #endif /* DHD_SSSR_DUMP */
3558 			bus->dhd->memdump_type = DUMP_TYPE_DONGLE_TRAP;
3559 			dhdpcie_mem_dump(bus);
3560 		}
3561 #endif /* DHD_FW_COREDUMP */
3562 
3563 		/* set the trap occurred flag only after all the memdump,
3564 		 * logdump and sssr dump collection has been scheduled
3565 		 */
3566 		if (dongle_trap_occured) {
3567 			bus->dhd->dongle_trap_occured = TRUE;
3568 		}
3569 
3570 		/* wake up IOCTL wait event */
3571 		dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_TRAP);
3572 
3573 		dhd_schedule_reset(bus->dhd);
3574 
3575 	}
3576 
3577 done1:
3578 	if (MULTIBP_ENAB(bus->sih)) {
3579 		dhd_bus_pcie_pwr_req_clear(bus);
3580 	}
3581 
3582 	DHD_GENERAL_LOCK(bus->dhd, flags);
3583 	DHD_BUS_BUSY_CLEAR_IN_CHECKDIED(bus->dhd);
3584 	dhd_os_busbusy_wake(bus->dhd);
3585 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
3586 done2:
3587 	if (mbuffer)
3588 		MFREE(bus->dhd->osh, mbuffer, msize);
3589 	if (str)
3590 		MFREE(bus->dhd->osh, str, maxstrlen);
3591 
3592 	return bcmerror;
3593 } /* dhdpcie_checkdied */
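/* EXAMPLE: the shared-flags word tested above encodes both capability and
 * state bits. A minimal decoder sketch, using the same PCIE_SHARED_* flag
 * names as the code; 'decode_dngl_state' is an illustrative name only.
 */
#if 0
static void
decode_dngl_state(uint32 flags)
{
	if (!(flags & PCIE_SHARED_ASSERT_BUILT))
		printf("asserts not compiled into this firmware\n");
	if (flags & PCIE_SHARED_ASSERT)
		printf("firmware assert recorded (expr/file/line valid)\n");
	if (flags & PCIE_SHARED_TRAP)
		printf("CPU trap recorded (trap_addr holds a trap_t)\n");
}
#endif /* EXAMPLE */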
3594 
3595 /* Custom copy of dhdpcie_mem_dump() that can be called at interrupt level */
3596 void dhdpcie_mem_dump_bugcheck(dhd_bus_t *bus, uint8 *buf)
3597 {
3598 	int ret = 0;
3599 	int size; /* Full mem size */
3600 	int start; /* Start address */
3601 	int read_size = 0; /* Read size of each iteration */
3602 	uint8 *databuf = buf;
3603 
3604 	if (bus == NULL) {
3605 		return;
3606 	}
3607 
3608 	start = bus->dongle_ram_base;
3609 	read_size = 4;
3610 	/* check for dead bus */
3611 	{
3612 		uint test_word = 0;
3613 		ret = dhdpcie_bus_membytes(bus, FALSE, start, (uint8*)&test_word, read_size);
3614 		/* if read error or bus timeout */
3615 		if (ret || (test_word == 0xFFFFFFFF)) {
3616 			return;
3617 		}
3618 	}
3619 
3620 	/* Get full mem size */
3621 	size = bus->ramsize;
3622 	/* Read mem content */
3623 	while (size)
3624 	{
3625 		read_size = MIN(MEMBLOCK, size);
3626 		if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size))) {
3627 			return;
3628 		}
3629 
3630 		/* Decrement size and increment start address */
3631 		size -= read_size;
3632 		start += read_size;
3633 		databuf += read_size;
3634 	}
3635 	bus->dhd->soc_ram = buf;
3636 	bus->dhd->soc_ram_length = bus->ramsize;
3637 	return;
3638 }
3639 
3640 #if defined(DHD_FW_COREDUMP)
3641 static int
3642 dhdpcie_get_mem_dump(dhd_bus_t *bus)
3643 {
3644 	int ret = BCME_OK;
3645 	int size = 0;
3646 	int start = 0;
3647 	int read_size = 0; /* Read size of each iteration */
3648 	uint8 *p_buf = NULL, *databuf = NULL;
3649 
3650 	if (!bus) {
3651 		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
3652 		return BCME_ERROR;
3653 	}
3654 
3655 	if (!bus->dhd) {
3656 		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
3657 		return BCME_ERROR;
3658 	}
3659 
3660 	size = bus->ramsize; /* Full mem size */
3661 	start = bus->dongle_ram_base; /* Start address */
3662 
3663 	/* Get full mem size */
3664 	p_buf = dhd_get_fwdump_buf(bus->dhd, size);
3665 	if (!p_buf) {
3666 		DHD_ERROR(("%s: Out of memory (%d bytes)\n",
3667 			__FUNCTION__, size));
3668 		return BCME_ERROR;
3669 	}
3670 
3671 	/* Read mem content */
3672 	DHD_TRACE_HW4(("Dump dongle memory\n"));
3673 	databuf = p_buf;
3674 	while (size > 0) {
3675 		read_size = MIN(MEMBLOCK, size);
3676 		ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size);
3677 		if (ret) {
3678 			DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__, ret));
3679 #ifdef DHD_DEBUG_UART
3680 			bus->dhd->memdump_success = FALSE;
3681 #endif	/* DHD_DEBUG_UART */
3682 			break;
3683 		}
3684 		DHD_TRACE(("."));
3685 
3686 		/* Decrement size and increment start address */
3687 		size -= read_size;
3688 		start += read_size;
3689 		databuf += read_size;
3690 	}
3691 
3692 	return ret;
3693 }
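/* EXAMPLE: with MEMBLOCK = 2048, dumping an (illustrative) 0x120000-byte
 * (1152 KB) RAM takes 0x120000 / 2048 = 576 full-size membytes reads; the
 * MIN(MEMBLOCK, size) term only matters for the final partial block when
 * ramsize is not a multiple of MEMBLOCK.
 */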
3694 
3695 static int
3696 dhdpcie_mem_dump(dhd_bus_t *bus)
3697 {
3698 	dhd_pub_t *dhdp;
3699 	int ret;
3700 
3701 #ifdef EXYNOS_PCIE_DEBUG
3702 	exynos_pcie_register_dump(1);
3703 #endif /* EXYNOS_PCIE_DEBUG */
3704 
3705 	dhdp = bus->dhd;
3706 	if (!dhdp) {
3707 		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
3708 		return BCME_ERROR;
3709 	}
3710 
3711 	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
3712 		DHD_ERROR(("%s: bus is down! can't collect mem dump. \n", __FUNCTION__));
3713 		return BCME_ERROR;
3714 	}
3715 
3716 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
3717 	if (pm_runtime_get_sync(dhd_bus_to_dev(bus)) < 0)
3718 		return BCME_ERROR;
3719 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
3720 
3721 	ret = dhdpcie_get_mem_dump(bus);
3722 	if (ret) {
3723 		DHD_ERROR(("%s: failed to get mem dump, err=%d\n",
3724 			__FUNCTION__, ret));
3725 		return ret;
3726 	}
3727 
3728 	dhd_schedule_memdump(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
3729 	/* buf, actually soc_ram free handled in dhd_{free,clear} */
3730 
3731 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
3732 	pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
3733 	pm_runtime_put_autosuspend(dhd_bus_to_dev(bus));
3734 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
3735 
3736 	return ret;
3737 }
3738 
3739 int
3740 dhd_bus_get_mem_dump(dhd_pub_t *dhdp)
3741 {
3742 	if (!dhdp) {
3743 		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
3744 		return BCME_ERROR;
3745 	}
3746 
3747 	return dhdpcie_get_mem_dump(dhdp->bus);
3748 }
3749 
3750 int
3751 dhd_bus_mem_dump(dhd_pub_t *dhdp)
3752 {
3753 	dhd_bus_t *bus = dhdp->bus;
3754 	int ret = BCME_ERROR;
3755 
3756 	if (dhdp->busstate == DHD_BUS_DOWN) {
3757 		DHD_ERROR(("%s bus is down\n", __FUNCTION__));
3758 		return BCME_ERROR;
3759 	}
3760 
3761 	/* No resume is attempted here: skip the dump if the bus is suspended
3762 	 * or a suspend is in progress.
3763 	 */
3764 	if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhdp)) {
3765 		DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
3766 			__FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
3767 		return BCME_ERROR;
3768 	}
3769 
3770 	DHD_OS_WAKE_LOCK(dhdp);
3771 	ret = dhdpcie_mem_dump(bus);
3772 	DHD_OS_WAKE_UNLOCK(dhdp);
3773 	return ret;
3774 }
3775 #endif	/* DHD_FW_COREDUMP */
3776 
3777 int
3778 dhd_socram_dump(dhd_bus_t *bus)
3779 {
3780 #if defined(DHD_FW_COREDUMP)
3781 	DHD_OS_WAKE_LOCK(bus->dhd);
3782 	dhd_bus_mem_dump(bus->dhd);
3783 	DHD_OS_WAKE_UNLOCK(bus->dhd);
3784 	return 0;
3785 #else
3786 	return -1;
3787 #endif // endif
3788 }
3789 
3790 /**
3791  * Transfers bytes from host to dongle using pio mode.
3792  * Parameter 'address' is a backplane address.
3793  */
3794 static int
3795 dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size)
3796 {
3797 	uint dsize;
3798 	int detect_endian_flag = 0x01;
3799 	bool little_endian;
3800 
3801 	if (write && bus->is_linkdown) {
3802 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
3803 		return BCME_ERROR;
3804 	}
3805 
3806 	if (MULTIBP_ENAB(bus->sih)) {
3807 		dhd_bus_pcie_pwr_req(bus);
3808 	}
3809 	/* Detect endianness. */
3810 	little_endian = *(char *)&detect_endian_flag;
3811 
3812 	/* In remap mode, adjust address beyond socram and redirect
3813 	 * to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize
3814 	 * is not backplane accessible
3815 	 */
3816 
3817 	/* Determine initial transfer parameters */
3818 #ifdef DHD_SUPPORT_64BIT
3819 	dsize = sizeof(uint64);
3820 #else /* !DHD_SUPPORT_64BIT */
3821 	dsize = sizeof(uint32);
3822 #endif /* DHD_SUPPORT_64BIT */
3823 
3824 	/* Do the transfer(s) */
3825 	DHD_INFO(("%s: %s %d bytes in window 0x%08lx\n",
3826 	          __FUNCTION__, (write ? "write" : "read"), size, address));
3827 	if (write) {
3828 		while (size) {
3829 #ifdef DHD_SUPPORT_64BIT
3830 			if (size >= sizeof(uint64) && little_endian && !(address % 8)) {
3831 				dhdpcie_bus_wtcm64(bus, address, *((uint64 *)data));
3832 			}
3833 #else /* !DHD_SUPPORT_64BIT */
3834 			if (size >= sizeof(uint32) && little_endian && !(address % 4)) {
3835 				dhdpcie_bus_wtcm32(bus, address, *((uint32*)data));
3836 			}
3837 #endif /* DHD_SUPPORT_64BIT */
3838 			else {
3839 				dsize = sizeof(uint8);
3840 				dhdpcie_bus_wtcm8(bus, address, *data);
3841 			}
3842 
3843 			/* Adjust for next transfer (if any) */
3844 			if ((size -= dsize)) {
3845 				data += dsize;
3846 				address += dsize;
3847 			}
3848 		}
3849 	} else {
3850 		while (size) {
3851 #ifdef DHD_SUPPORT_64BIT
3852 			if (size >= sizeof(uint64) && little_endian && !(address % 8))
3853 			{
3854 				*(uint64 *)data = dhdpcie_bus_rtcm64(bus, address);
3855 			}
3856 #else /* !DHD_SUPPORT_64BIT */
3857 			if (size >= sizeof(uint32) && little_endian && !(address % 4))
3858 			{
3859 				*(uint32 *)data = dhdpcie_bus_rtcm32(bus, address);
3860 			}
3861 #endif /* DHD_SUPPORT_64BIT */
3862 			else {
3863 				dsize = sizeof(uint8);
3864 				*data = dhdpcie_bus_rtcm8(bus, address);
3865 			}
3866 
3867 			/* Adjust for next transfer (if any) */
3868 			if ((size -= dsize) > 0) {
3869 				data += dsize;
3870 				address += dsize;
3871 			}
3872 		}
3873 	}
3874 	if (MULTIBP_ENAB(bus->sih)) {
3875 		dhd_bus_pcie_pwr_req_clear(bus);
3876 	}
3877 	return BCME_OK;
3878 } /* dhdpcie_bus_membytes */
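/* EXAMPLE: the run-time endianness probe above exploits C object
 * representation: storing 0x01 in an int and reading its first byte yields
 * 1 only on a little-endian host. Standalone sketch ('host_is_little_endian'
 * is an illustrative name):
 */
#if 0
static bool
host_is_little_endian(void)
{
	int probe = 0x01;
	return (*(char *)&probe == 1);	/* LSB stored first => little-endian */
}
#endif /* EXAMPLE */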
3879 
3880 /**
3881  * Transfers one transmit (ethernet) packet that was queued in the (flow controlled) flow ring queue
3882  * to the (non flow controlled) flow ring.
3883  */
3884 int BCMFASTPATH
3885 dhd_bus_schedule_queue(struct dhd_bus *bus, uint16 flow_id, bool txs)
3886 {
3887 	flow_ring_node_t *flow_ring_node;
3888 	int ret = BCME_OK;
3889 #ifdef DHD_LOSSLESS_ROAMING
3890 	dhd_pub_t *dhdp = bus->dhd;
3891 #endif // endif
3892 	DHD_INFO(("%s: flow_id is %d\n", __FUNCTION__, flow_id));
3893 
3894 	/* Validate flow_id; out-of-range ids are rejected rather than asserted */
3895 	if (flow_id >= bus->max_submission_rings) {
3896 		DHD_ERROR(("%s: flow_id is invalid %d, max %d\n", __FUNCTION__,
3897 			flow_id, bus->max_submission_rings));
3898 		return 0;
3899 	}
3900 
3901 	flow_ring_node = DHD_FLOW_RING(bus->dhd, flow_id);
3902 
3903 	if (flow_ring_node->prot_info == NULL) {
3904 		DHD_ERROR((" %s : invalid flow_ring_node \n", __FUNCTION__));
3905 		return BCME_NOTREADY;
3906 	}
3907 
3908 #ifdef DHD_LOSSLESS_ROAMING
3909 	if ((dhdp->dequeue_prec_map & (1 << flow_ring_node->flow_info.tid)) == 0) {
3910 		DHD_INFO(("%s: tid %d is not in precedence map. block scheduling\n",
3911 			__FUNCTION__, flow_ring_node->flow_info.tid));
3912 		return BCME_OK;
3913 	}
3914 #endif /* DHD_LOSSLESS_ROAMING */
3915 
3916 	{
3917 		unsigned long flags;
3918 		void *txp = NULL;
3919 		flow_queue_t *queue;
3920 #ifdef DHD_LOSSLESS_ROAMING
3921 		struct ether_header *eh;
3922 		uint8 *pktdata;
3923 #endif /* DHD_LOSSLESS_ROAMING */
3924 
3925 		queue = &flow_ring_node->queue; /* queue associated with flow ring */
3926 
3927 		DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
3928 
3929 		if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) {
3930 			DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
3931 			return BCME_NOTREADY;
3932 		}
3933 
3934 		while ((txp = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
3935 			if (bus->dhd->conf->orphan_move <= 1)
3936 				PKTORPHAN(txp, bus->dhd->conf->tsq);
3937 
3938 			/*
3939 			 * Modifying the packet length caused P2P cert failures.
3940 			 * Specifically on test cases where a packet of size 52 bytes
3941 			 * was injected, the sniffer capture showed 62 bytes because of
3942 			 * which the cert tests failed. So making the below change
3943 			 * only Router specific.
3944 			 */
3945 
3946 #ifdef DHDTCPACK_SUPPRESS
3947 			if (bus->dhd->tcpack_sup_mode != TCPACK_SUP_HOLD) {
3948 				ret = dhd_tcpack_check_xmit(bus->dhd, txp);
3949 				if (ret != BCME_OK) {
3950 					DHD_ERROR(("%s: dhd_tcpack_check_xmit() error.\n",
3951 						__FUNCTION__));
3952 				}
3953 			}
3954 #endif /* DHDTCPACK_SUPPRESS */
3955 #ifdef DHD_LOSSLESS_ROAMING
3956 			pktdata = (uint8 *)PKTDATA(OSH_NULL, txp);
3957 			eh = (struct ether_header *) pktdata;
3958 			if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) {
3959 				uint8 prio = (uint8)PKTPRIO(txp);
3960 				/* Restore to original priority for 802.1X packet */
3961 				if (prio == PRIO_8021D_NC) {
3962 					PKTSETPRIO(txp, dhdp->prio_8021x);
3963 				}
3964 			}
3965 #endif /* DHD_LOSSLESS_ROAMING */
3966 			/* Attempt to transfer packet over flow ring */
3967 			ret = dhd_prot_txdata(bus->dhd, txp, flow_ring_node->flow_info.ifindex);
3968 			if (ret != BCME_OK) { /* may not have resources in flow ring */
3969 				DHD_INFO(("%s: Reinsert %d\n", __FUNCTION__, ret));
3970 				dhd_prot_txdata_write_flush(bus->dhd, flow_id);
3971 				/* reinsert at head */
3972 				dhd_flow_queue_reinsert(bus->dhd, queue, txp);
3973 				DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
3974 
3975 				/* If we are able to requeue back, return success */
3976 				return BCME_OK;
3977 			}
3978 		}
3979 
3980 #ifdef DHD_HP2P
3981 		if (!flow_ring_node->hp2p_ring) {
3982 			dhd_prot_txdata_write_flush(bus->dhd, flow_id);
3983 		}
3984 #else
3985 		dhd_prot_txdata_write_flush(bus->dhd, flow_id);
3986 #endif // endif
3987 		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
3988 	}
3989 
3990 	return ret;
3991 } /* dhd_bus_schedule_queue */
3992 
3993 /** Sends an (ethernet) data frame (in 'txp') to the dongle. Callee disposes of txp. */
3994 int BCMFASTPATH
3995 dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx)
3996 {
3997 	uint16 flowid;
3998 #ifdef IDLE_TX_FLOW_MGMT
3999 	uint8	node_status;
4000 #endif /* IDLE_TX_FLOW_MGMT */
4001 	flow_queue_t *queue;
4002 	flow_ring_node_t *flow_ring_node;
4003 	unsigned long flags;
4004 	int ret = BCME_OK;
4005 	void *txp_pend = NULL;
4006 
4007 	if (!bus->dhd->flowid_allocator) {
4008 		DHD_ERROR(("%s: Flow ring not inited yet\n", __FUNCTION__));
4009 		goto toss;
4010 	}
4011 
4012 	flowid = DHD_PKT_GET_FLOWID(txp);
4013 
4014 	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
4015 
4016 	DHD_TRACE(("%s: pkt flowid %d, status %d active %d\n",
4017 		__FUNCTION__, flowid, flow_ring_node->status, flow_ring_node->active));
4018 
4019 	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
4020 	if ((flowid >= bus->dhd->num_flow_rings) ||
4021 #ifdef IDLE_TX_FLOW_MGMT
4022 		(!flow_ring_node->active))
4023 #else
4024 		(!flow_ring_node->active) ||
4025 		(flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) ||
4026 		(flow_ring_node->status == FLOW_RING_STATUS_STA_FREEING))
4027 #endif /* IDLE_TX_FLOW_MGMT */
4028 	{
4029 		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4030 		DHD_INFO(("%s: Dropping pkt flowid %d, status %d active %d\n",
4031 			__FUNCTION__, flowid, flow_ring_node->status,
4032 			flow_ring_node->active));
4033 		ret = BCME_ERROR;
4034 		goto toss;
4035 	}
4036 
4037 #ifdef IDLE_TX_FLOW_MGMT
4038 	node_status = flow_ring_node->status;
4039 
4040 	/* handle different status states here!! */
4041 	switch (node_status)
4042 	{
4043 		case FLOW_RING_STATUS_OPEN:
4044 
4045 			if (bus->enable_idle_flowring_mgmt) {
4046 				/* Move the node to the head of active list */
4047 				dhd_flow_ring_move_to_active_list_head(bus, flow_ring_node);
4048 			}
4049 			break;
4050 
4051 		case FLOW_RING_STATUS_SUSPENDED:
4052 			DHD_INFO(("Need to Initiate TX Flow resume\n"));
4053 			/* Issue resume_ring request */
4054 			dhd_bus_flow_ring_resume_request(bus,
4055 					flow_ring_node);
4056 			break;
4057 
4058 		case FLOW_RING_STATUS_CREATE_PENDING:
4059 		case FLOW_RING_STATUS_RESUME_PENDING:
4060 			/* Dont do anything here!! */
4061 			DHD_INFO(("Waiting for Flow create/resume! status is %u\n",
4062 				node_status));
4063 			break;
4064 
4065 		case FLOW_RING_STATUS_DELETE_PENDING:
4066 		default:
4067 			DHD_ERROR(("Dropping packet!! flowid %u status is %u\n",
4068 				flowid, node_status));
4069 			/* error here!! */
4070 			ret = BCME_ERROR;
4071 			DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4072 			goto toss;
4073 	}
4074 	/* Now queue the packet */
4075 #endif /* IDLE_TX_FLOW_MGMT */
4076 
4077 	queue = &flow_ring_node->queue; /* queue associated with flow ring */
4078 
4079 	if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK)
4080 		txp_pend = txp;
4081 
4082 	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4083 
4084 	if (flow_ring_node->status) {
4085 		DHD_INFO(("%s: Enq pkt flowid %d, status %d active %d\n",
4086 		    __FUNCTION__, flowid, flow_ring_node->status,
4087 		    flow_ring_node->active));
4088 		if (txp_pend) {
4089 			txp = txp_pend;
4090 			goto toss;
4091 		}
4092 		return BCME_OK;
4093 	}
4094 	ret = dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */
4095 
4096 	/* If we have anything pending, try to push into q */
4097 	if (txp_pend) {
4098 		DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
4099 
4100 		if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp_pend)) != BCME_OK) {
4101 			DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4102 			txp = txp_pend;
4103 			goto toss;
4104 		}
4105 
4106 		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4107 	}
4108 
4109 	return ret;
4110 
4111 toss:
4112 	DHD_INFO(("%s: Toss %d\n", __FUNCTION__, ret));
4113 	PKTCFREE(bus->dhd->osh, txp, TRUE);
4114 	return ret;
4115 } /* dhd_bus_txdata */
4116 
4117 void
4118 dhd_bus_stop_queue(struct dhd_bus *bus)
4119 {
4120 	dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
4121 }
4122 
4123 void
4124 dhd_bus_start_queue(struct dhd_bus *bus)
4125 {
4126 	/*
4127 	 * Tx queue has been stopped due to resource shortage (or)
4128 	 * bus is not in a state to turn on.
4129 	 *
4130 	 * Note that we try to re-start network interface only
4131 	 * when we have enough resources, one has to first change the
4132 	 * flag indicating we have all the resources.
4133 	 */
4134 	if (dhd_prot_check_tx_resource(bus->dhd)) {
4135 		DHD_ERROR(("%s: Interface NOT started, previously stopped "
4136 			"due to resource shortage\n", __FUNCTION__));
4137 		return;
4138 	}
4139 	dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
4140 }
4141 
4142 /* Device console input function */
4143 int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen)
4144 {
4145 	dhd_bus_t *bus = dhd->bus;
4146 	uint32 addr, val;
4147 	int rv;
4148 	/* Address could be zero if CONSOLE := 0 in dongle Makefile */
4149 	if (bus->console_addr == 0)
4150 		return BCME_UNSUPPORTED;
4151 
4152 	/* Don't allow input if dongle is in reset */
4153 	if (bus->dhd->dongle_reset) {
4154 		return BCME_NOTREADY;
4155 	}
4156 
4157 	/* Zero cbuf_index */
4158 	addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf_idx);
4159 	val = htol32(0);
4160 	if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
4161 		goto done;
4162 
4163 	/* Write message into cbuf */
4164 	addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf);
4165 	if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0)
4166 		goto done;
4167 
4168 	/* Write length into vcons_in */
4169 	addr = bus->console_addr + OFFSETOF(hnd_cons_t, vcons_in);
4170 	val = htol32(msglen);
4171 	if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
4172 		goto done;
4173 
4174 	/* generate an interrupt to dongle to indicate that it needs to process cons command */
4175 	dhdpcie_send_mb_data(bus, H2D_HOST_CONS_INT);
4176 done:
4177 	return rv;
4178 } /* dhd_bus_console_in */
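/* EXAMPLE: the console input handshake above, condensed. Each step is a
 * dhdpcie_bus_membytes() write into hnd_cons_t at the OFFSETOF() locations
 * used in the code, followed by a mailbox doorbell:
 *
 *   1. cbuf_idx <- 0             reset the dongle's input cursor
 *   2. cbuf     <- msg, msglen   copy the command text
 *   3. vcons_in <- msglen        publish the length ("data ready" flag)
 *   4. H2D_HOST_CONS_INT         interrupt the dongle to consume it
 *
 * The length is written last so the dongle never observes a nonzero
 * vcons_in before the message bytes have landed.
 */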
4179 
4180 /**
4181  * Called on frame reception, the frame was received from the dongle on interface 'ifidx' and is
4182  * contained in 'pkt'. Processes rx frame, forwards up the layer to netif.
4183  */
4184 void BCMFASTPATH
4185 dhd_bus_rx_frame(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count)
4186 {
4187 	dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, 0);
4188 }
4189 
4190 void
4191 dhdpcie_setbar1win(dhd_bus_t *bus, uint32 addr)
4192 {
4193 	dhdpcie_os_setbar1win(bus, addr);
4194 }
4195 
4196 /** 'offset' is a backplane address */
4197 void
4198 dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data)
4199 {
4200 	if (bus->is_linkdown) {
4201 		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4202 		return;
4203 	} else {
4204 		dhdpcie_os_wtcm8(bus, offset, data);
4205 	}
4206 }
4207 
4208 uint8
4209 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset)
4210 {
4211 	volatile uint8 data;
4212 	if (bus->is_linkdown) {
4213 		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4214 		data = (uint8)-1;
4215 	} else {
4216 		data = dhdpcie_os_rtcm8(bus, offset);
4217 	}
4218 	return data;
4219 }
4220 
4221 void
4222 dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data)
4223 {
4224 	if (bus->is_linkdown) {
4225 		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4226 		return;
4227 	} else {
4228 		dhdpcie_os_wtcm32(bus, offset, data);
4229 	}
4230 }
4231 void
4232 dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data)
4233 {
4234 	if (bus->is_linkdown) {
4235 		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4236 		return;
4237 	} else {
4238 		dhdpcie_os_wtcm16(bus, offset, data);
4239 	}
4240 }
4241 #ifdef DHD_SUPPORT_64BIT
4242 void
4243 dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data)
4244 {
4245 	if (bus->is_linkdown) {
4246 		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4247 		return;
4248 	} else {
4249 		dhdpcie_os_wtcm64(bus, offset, data);
4250 	}
4251 }
4252 #endif /* DHD_SUPPORT_64BIT */
4253 
4254 uint16
4255 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset)
4256 {
4257 	volatile uint16 data;
4258 	if (bus->is_linkdown) {
4259 		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4260 		data = (uint16)-1;
4261 	} else {
4262 		data = dhdpcie_os_rtcm16(bus, offset);
4263 	}
4264 	return data;
4265 }
4266 
4267 uint32
4268 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset)
4269 {
4270 	volatile uint32 data;
4271 	if (bus->is_linkdown) {
4272 		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4273 		data = (uint32)-1;
4274 	} else {
4275 		data = dhdpcie_os_rtcm32(bus, offset);
4276 	}
4277 	return data;
4278 }
4279 
4280 #ifdef DHD_SUPPORT_64BIT
4281 uint64
4282 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset)
4283 {
4284 	volatile uint64 data;
4285 	if (bus->is_linkdown) {
4286 		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4287 		data = (uint64)-1;
4288 	} else {
4289 		data = dhdpcie_os_rtcm64(bus, offset);
4290 	}
4291 	return data;
4292 }
4293 #endif /* DHD_SUPPORT_64BIT */
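/* EXAMPLE: the rtcm* accessors above return all-ones ((uintN)-1) when the
 * link is down, the same value a PCIe read returns on a master abort, so a
 * caller can use one sentinel test for both conditions. A minimal sketch
 * ('tcm_read_valid32' is an illustrative helper, not a driver symbol):
 */
#if 0
static bool
tcm_read_valid32(dhd_bus_t *bus, ulong offset, uint32 *val)
{
	*val = dhdpcie_bus_rtcm32(bus, offset);
	/* caveat: a register legitimately holding 0xFFFFFFFF also fails this */
	return (*val != (uint32)-1);
}
#endif /* EXAMPLE */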
4294 
4295 /** A snippet of dongle memory is shared between host and dongle */
4296 void
4297 dhd_bus_cmn_writeshared(dhd_bus_t *bus, void *data, uint32 len, uint8 type, uint16 ringid)
4298 {
4299 	uint64 long_data;
4300 	ulong addr; /* dongle address */
4301 
4302 	DHD_INFO(("%s: writing to dongle type %d len %d\n", __FUNCTION__, type, len));
4303 
4304 	if (bus->is_linkdown) {
4305 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
4306 		return;
4307 	}
4308 
4309 	if (MULTIBP_ENAB(bus->sih)) {
4310 		dhd_bus_pcie_pwr_req(bus);
4311 	}
4312 	switch (type) {
4313 		case D2H_DMA_SCRATCH_BUF:
4314 			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_dma_scratch_buffer);
4315 			long_data = HTOL64(*(uint64 *)data);
4316 			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
4317 			if (dhd_msg_level & DHD_INFO_VAL) {
4318 				prhex(__FUNCTION__, data, len);
4319 			}
4320 			break;
4321 
4322 		case D2H_DMA_SCRATCH_BUF_LEN :
4323 			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_dma_scratch_buffer_len);
4324 			dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
4325 			if (dhd_msg_level & DHD_INFO_VAL) {
4326 				prhex(__FUNCTION__, data, len);
4327 			}
4328 			break;
4329 
4330 		case H2D_DMA_INDX_WR_BUF:
4331 			long_data = HTOL64(*(uint64 *)data);
4332 			addr = DHD_RING_INFO_MEMBER_ADDR(bus, h2d_w_idx_hostaddr);
4333 			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
4334 			if (dhd_msg_level & DHD_INFO_VAL) {
4335 				prhex(__FUNCTION__, data, len);
4336 			}
4337 			break;
4338 
4339 		case H2D_DMA_INDX_RD_BUF:
4340 			long_data = HTOL64(*(uint64 *)data);
4341 			addr = DHD_RING_INFO_MEMBER_ADDR(bus, h2d_r_idx_hostaddr);
4342 			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
4343 			if (dhd_msg_level & DHD_INFO_VAL) {
4344 				prhex(__FUNCTION__, data, len);
4345 			}
4346 			break;
4347 
4348 		case D2H_DMA_INDX_WR_BUF:
4349 			long_data = HTOL64(*(uint64 *)data);
4350 			addr = DHD_RING_INFO_MEMBER_ADDR(bus, d2h_w_idx_hostaddr);
4351 			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
4352 			if (dhd_msg_level & DHD_INFO_VAL) {
4353 				prhex(__FUNCTION__, data, len);
4354 			}
4355 			break;
4356 
4357 		case D2H_DMA_INDX_RD_BUF:
4358 			long_data = HTOL64(*(uint64 *)data);
4359 			addr = DHD_RING_INFO_MEMBER_ADDR(bus, d2h_r_idx_hostaddr);
4360 			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
4361 			if (dhd_msg_level & DHD_INFO_VAL) {
4362 				prhex(__FUNCTION__, data, len);
4363 			}
4364 			break;
4365 
4366 		case H2D_IFRM_INDX_WR_BUF:
4367 			long_data = HTOL64(*(uint64 *)data);
4368 			addr = DHD_RING_INFO_MEMBER_ADDR(bus, ifrm_w_idx_hostaddr);
4369 			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
4370 			if (dhd_msg_level & DHD_INFO_VAL) {
4371 				prhex(__FUNCTION__, data, len);
4372 			}
4373 			break;
4374 
4375 		case RING_ITEM_LEN :
4376 			addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, len_items);
4377 			dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
4378 			break;
4379 
4380 		case RING_MAX_ITEMS :
4381 			addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, max_item);
4382 			dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
4383 			break;
4384 
4385 		case RING_BUF_ADDR :
4386 			long_data = HTOL64(*(uint64 *)data);
4387 			addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, base_addr);
4388 			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *) &long_data, len);
4389 			if (dhd_msg_level & DHD_INFO_VAL) {
4390 				prhex(__FUNCTION__, data, len);
4391 			}
4392 			break;
4393 
4394 		case RING_WR_UPD :
4395 			addr = bus->ring_sh[ringid].ring_state_w;
4396 			dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
4397 			break;
4398 
4399 		case RING_RD_UPD :
4400 			addr = bus->ring_sh[ringid].ring_state_r;
4401 			dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
4402 			break;
4403 
4404 		case D2H_MB_DATA:
4405 			addr = bus->d2h_mb_data_ptr_addr;
4406 			dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
4407 			break;
4408 
4409 		case H2D_MB_DATA:
4410 			addr = bus->h2d_mb_data_ptr_addr;
4411 			dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
4412 			break;
4413 
4414 		case HOST_API_VERSION:
4415 			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_cap);
4416 			dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
4417 			break;
4418 
4419 		case DNGL_TO_HOST_TRAP_ADDR:
4420 			long_data = HTOL64(*(uint64 *)data);
4421 			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_trap_addr);
4422 			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *) &long_data, len);
4423 			DHD_INFO(("Wrote trap addr:0x%x\n", (uint32) HTOL32(*(uint32 *)data)));
4424 			break;
4425 
4426 		case HOST_SCB_ADDR:
4427 			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_scb_addr);
4428 #ifdef DHD_SUPPORT_64BIT
4429 			dhdpcie_bus_wtcm64(bus, addr, (uint64) HTOL64(*(uint64 *)data));
4430 #else /* !DHD_SUPPORT_64BIT */
4431 			dhdpcie_bus_wtcm32(bus, addr, *((uint32*)data));
4432 #endif /* DHD_SUPPORT_64BIT */
4433 			DHD_INFO(("Wrote host_scb_addr:0x%x\n",
4434 				(uint32) HTOL32(*(uint32 *)data)));
4435 			break;
4436 
4437 		default:
4438 			break;
4439 	}
4440 	if (MULTIBP_ENAB(bus->sih)) {
4441 		dhd_bus_pcie_pwr_req_clear(bus);
4442 	}
4443 } /* dhd_bus_cmn_writeshared */
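/* EXAMPLE: every write in the switch above converts host to little-endian
 * byte order first, because the shared area is laid out in dongle (little
 * endian) format. The field width selects the macro and the accessor:
 *
 *   16-bit: dhdpcie_bus_wtcm16(bus, addr, (uint16)HTOL16(v16));
 *   32-bit: dhdpcie_bus_wtcm32(bus, addr, (uint32)HTOL32(v32));
 *   64-bit: uint64 le = HTOL64(v64);
 *           dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&le, sizeof(le));
 *
 * On a little-endian host the HTOLxx macros compile away to no-ops.
 */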
4444 
4445 /** A snippet of dongle memory is shared between host and dongle */
4446 void
4447 dhd_bus_cmn_readshared(dhd_bus_t *bus, void* data, uint8 type, uint16 ringid)
4448 {
4449 	ulong addr; /* dongle address */
4450 
4451 	if (MULTIBP_ENAB(bus->sih)) {
4452 		dhd_bus_pcie_pwr_req(bus);
4453 	}
4454 	switch (type) {
4455 		case RING_WR_UPD :
4456 			addr = bus->ring_sh[ringid].ring_state_w;
4457 			*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
4458 			break;
4459 
4460 		case RING_RD_UPD :
4461 			addr = bus->ring_sh[ringid].ring_state_r;
4462 			*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
4463 			break;
4464 
4465 		case TOTAL_LFRAG_PACKET_CNT :
4466 			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, total_lfrag_pkt_cnt);
4467 			*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
4468 			break;
4469 
4470 		case H2D_MB_DATA:
4471 			addr = bus->h2d_mb_data_ptr_addr;
4472 			*(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr));
4473 			break;
4474 
4475 		case D2H_MB_DATA:
4476 			addr = bus->d2h_mb_data_ptr_addr;
4477 			*(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr));
4478 			break;
4479 
4480 		case MAX_HOST_RXBUFS :
4481 			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, max_host_rxbufs);
4482 			*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
4483 			break;
4484 
4485 		case HOST_SCB_ADDR:
4486 			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_scb_size);
4487 			*(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr));
4488 			break;
4489 
4490 		default :
4491 			break;
4492 	}
4493 	if (MULTIBP_ENAB(bus->sih)) {
4494 		dhd_bus_pcie_pwr_req_clear(bus);
4495 	}
4496 }
4497 
4498 uint32 dhd_bus_get_sharedflags(dhd_bus_t *bus)
4499 {
4500 	return ((pciedev_shared_t*)bus->pcie_sh)->flags;
4501 }
4502 
4503 void
4504 dhd_bus_clearcounts(dhd_pub_t *dhdp)
4505 {
4506 }
4507 
4508 /**
4509  * @param params    input buffer, NULL for 'set' operation.
4510  * @param plen      length of 'params' buffer, 0 for 'set' operation.
4511  * @param arg       output buffer
4512  */
4513 int
4514 dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
4515                  void *params, int plen, void *arg, int len, bool set)
4516 {
4517 	dhd_bus_t *bus = dhdp->bus;
4518 	const bcm_iovar_t *vi = NULL;
4519 	int bcmerror = BCME_UNSUPPORTED;
4520 	int val_size;
4521 	uint32 actionid;
4522 
4523 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4524 
4525 	ASSERT(name);
4526 	ASSERT(len >= 0);
4527 	if (!name || len < 0)
4528 		return BCME_BADARG;
4529 
4530 	/* Get MUST have return space */
4531 	ASSERT(set || (arg && len));
4532 	if (!(set || (arg && len)))
4533 		return BCME_BADARG;
4534 
4535 	/* Set does NOT take qualifiers */
4536 	ASSERT(!set || (!params && !plen));
4537 	if (!(!set || (!params && !plen)))
4538 		return BCME_BADARG;
4539 
4540 	DHD_INFO(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
4541 	         name, (set ? "set" : "get"), len, plen));
4542 
4543 	/* Look up var locally; if not found pass to host driver */
4544 	if ((vi = bcm_iovar_lookup(dhdpcie_iovars, name)) == NULL) {
4545 		goto exit;
4546 	}
4547 
4548 	if (MULTIBP_ENAB(bus->sih)) {
4549 		if (vi->flags & DHD_IOVF_PWRREQ_BYPASS) {
4550 			DHD_ERROR(("%s: Bypass pwr request\n", __FUNCTION__));
4551 		} else {
4552 			dhd_bus_pcie_pwr_req(bus);
4553 		}
4554 	}
4555 
4556 	/* set up 'params' pointer in case this is a set command so that
4557 	 * the convenience int and bool code can be common to set and get
4558 	 */
4559 	if (params == NULL) {
4560 		params = arg;
4561 		plen = len;
4562 	}
4563 
4564 	if (vi->type == IOVT_VOID)
4565 		val_size = 0;
4566 	else if (vi->type == IOVT_BUFFER)
4567 		val_size = len;
4568 	else
4569 		/* all other types are integer sized */
4570 		val_size = sizeof(int);
4571 
4572 	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
4573 	bcmerror = dhdpcie_bus_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size);
4574 
4575 exit:
4576 	/* In DEVRESET_QUIESCE/DEVRESET_ON,
4577 	 * this includes dongle re-attach which initialize pwr_req_ref count to 0 and
4578 	 * causes pwr_req_ref count miss-match in pwr req clear function and hang.
4579 	 * In this case, bypass pwr req clear.
4580 	 */
4581 	if (bcmerror == BCME_DNGL_DEVRESET) {
4582 		bcmerror = BCME_OK;
4583 	} else {
4584 		if (MULTIBP_ENAB(bus->sih)) {
4585 			if (vi && (vi->flags & DHD_IOVF_PWRREQ_BYPASS)) {
4586 				DHD_ERROR(("%s: Bypass pwr request clear\n", __FUNCTION__));
4587 			} else {
4588 				dhd_bus_pcie_pwr_req_clear(bus);
4589 			}
4590 		}
4591 	}
4592 	return bcmerror;
4593 } /* dhd_bus_iovar_op */
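/* EXAMPLE: the dispatch above in miniature. bcm_iovar_lookup() maps a name
 * to its bcm_iovar_t entry, IOV_GVAL()/IOV_SVAL() derive the action id from
 * vi->varid, and dhdpcie_bus_doiovar() switches on that id. A hypothetical
 * integer "get", skipping the locking and power-request handling:
 */
#if 0
static int
iovar_get_int(dhd_bus_t *bus, const char *name, int *out)
{
	const bcm_iovar_t *vi = bcm_iovar_lookup(dhdpcie_iovars, name);

	if (vi == NULL)
		return BCME_UNSUPPORTED;
	return dhdpcie_bus_doiovar(bus, vi, IOV_GVAL(vi->varid), name,
		out, sizeof(*out), out, sizeof(*out), sizeof(int));
}
#endif /* EXAMPLE */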
4594 
4595 #ifdef BCM_BUZZZ
4596 #include <bcm_buzzz.h>
4597 
4598 int
4599 dhd_buzzz_dump_cntrs(char *p, uint32 *core, uint32 *log,
4600 	const int num_counters)
4601 {
4602 	int bytes = 0;
4603 	uint32 ctr;
4604 	uint32 curr[BCM_BUZZZ_COUNTERS_MAX], prev[BCM_BUZZZ_COUNTERS_MAX];
4605 	uint32 delta[BCM_BUZZZ_COUNTERS_MAX];
4606 
4607 	/* Compute elapsed counter values per counter event type */
4608 	for (ctr = 0U; ctr < num_counters; ctr++) {
4609 		prev[ctr] = core[ctr];
4610 		curr[ctr] = *log++;
4611 		core[ctr] = curr[ctr];  /* saved for next log */
4612 
4613 		if (curr[ctr] < prev[ctr])
4614 			delta[ctr] = curr[ctr] + (~0U - prev[ctr]);
4615 		else
4616 			delta[ctr] = (curr[ctr] - prev[ctr]);
4617 
4618 		bytes += sprintf(p + bytes, "%12u ", delta[ctr]);
4619 	}
4620 
4621 	return bytes;
4622 }
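/* EXAMPLE: a worked wrap case for the delta computation above. With
 * prev = 0xFFFFFFF0 and curr = 0x10, the true elapsed count mod 2^32 is
 * 0x20, but curr + (~0U - prev) = 0x10 + 0xF = 0x1F: the explicit branch
 * appears to undercount by one on wrap, since ~0U is 2^32 - 1 rather than
 * 2^32. Plain unsigned subtraction already wraps correctly:
 */
#if 0
static uint32
ctr_delta(uint32 prev, uint32 curr)
{
	return curr - prev;	/* modulo-2^32 arithmetic handles wrap */
}
#endif /* EXAMPLE */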
4623 
4624 typedef union cm3_cnts { /* export this in bcm_buzzz.h */
4625 	uint32 u32;
4626 	uint8  u8[4];
4627 	struct {
4628 		uint8 cpicnt;
4629 		uint8 exccnt;
4630 		uint8 sleepcnt;
4631 		uint8 lsucnt;
4632 	};
4633 } cm3_cnts_t;
4634 
4635 int
4636 dhd_bcm_buzzz_dump_cntrs6(char *p, uint32 *core, uint32 *log)
4637 {
4638 	int bytes = 0;
4639 
4640 	uint32 cyccnt, instrcnt;
4641 	cm3_cnts_t cm3_cnts;
4642 	uint8 foldcnt;
4643 
4644 	{   /* 32bit cyccnt */
4645 		uint32 curr, prev, delta;
4646 		prev = core[0]; curr = *log++; core[0] = curr;
4647 		if (curr < prev)
4648 			delta = curr + (~0U - prev);
4649 		else
4650 			delta = (curr - prev);
4651 
4652 		bytes += sprintf(p + bytes, "%12u ", delta);
4653 		cyccnt = delta;
4654 	}
4655 
4656 	{	/* Extract the 4 cnts: cpi, exc, sleep and lsu */
4657 		int i;
4658 		uint8 max8 = ~0;
4659 		cm3_cnts_t curr, prev, delta;
4660 		prev.u32 = core[1]; curr.u32 = *log++; core[1] = curr.u32;
4661 		for (i = 0; i < 4; i++) {
4662 			if (curr.u8[i] < prev.u8[i])
4663 				delta.u8[i] = curr.u8[i] + (max8 - prev.u8[i]);
4664 			else
4665 				delta.u8[i] = (curr.u8[i] - prev.u8[i]);
4666 			bytes += sprintf(p + bytes, "%4u ", delta.u8[i]);
4667 		}
4668 		cm3_cnts.u32 = delta.u32;
4669 	}
4670 
4671 	{   /* Extract the foldcnt from arg0 */
4672 		uint8 curr, prev, delta, max8 = ~0;
4673 		bcm_buzzz_arg0_t arg0; arg0.u32 = *log;
4674 		prev = core[2]; curr = arg0.klog.cnt; core[2] = curr;
4675 		if (curr < prev)
4676 			delta = curr + (max8 - prev);
4677 		else
4678 			delta = (curr - prev);
4679 		bytes += sprintf(p + bytes, "%4u ", delta);
4680 		foldcnt = delta;
4681 	}
4682 
4683 	instrcnt = cyccnt - (cm3_cnts.u8[0] + cm3_cnts.u8[1] + cm3_cnts.u8[2]
4684 		                 + cm3_cnts.u8[3]) + foldcnt;
4685 	if (instrcnt > 0xFFFFFF00)
4686 		bytes += sprintf(p + bytes, "[%10s] ", "~");
4687 	else
4688 		bytes += sprintf(p + bytes, "[%10u] ", instrcnt);
4689 	return bytes;
4690 }
4691 
4692 int
4693 dhd_buzzz_dump_log(char *p, uint32 *core, uint32 *log, bcm_buzzz_t *buzzz)
4694 {
4695 	int bytes = 0;
4696 	bcm_buzzz_arg0_t arg0;
4697 	static uint8 * fmt[] = BCM_BUZZZ_FMT_STRINGS;
4698 
4699 	if (buzzz->counters == 6) {
4700 		bytes += dhd_bcm_buzzz_dump_cntrs6(p, core, log);
4701 		log += 2; /* 32bit cyccnt + (4 x 8bit) CM3 */
4702 	} else {
4703 		bytes += dhd_buzzz_dump_cntrs(p, core, log, buzzz->counters);
4704 		log += buzzz->counters; /* (N x 32bit) CR4=3, CA7=4 */
4705 	}
4706 
4707 	/* Dump the logged arguments using the registered formats */
4708 	arg0.u32 = *log++;
4709 
4710 	switch (arg0.klog.args) {
4711 		case 0:
4712 			bytes += sprintf(p + bytes, fmt[arg0.klog.id]);
4713 			break;
4714 		case 1:
4715 		{
4716 			uint32 arg1 = *log++;
4717 			bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1);
4718 			break;
4719 		}
4720 		case 2:
4721 		{
4722 			uint32 arg1, arg2;
4723 			arg1 = *log++; arg2 = *log++;
4724 			bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2);
4725 			break;
4726 		}
4727 		case 3:
4728 		{
4729 			uint32 arg1, arg2, arg3;
4730 			arg1 = *log++; arg2 = *log++; arg3 = *log++;
4731 			bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3);
4732 			break;
4733 		}
4734 		case 4:
4735 		{
4736 			uint32 arg1, arg2, arg3, arg4;
4737 			arg1 = *log++; arg2 = *log++;
4738 			arg3 = *log++; arg4 = *log++;
4739 			bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3, arg4);
4740 			break;
4741 		}
4742 		default:
4743 			printf("%s: Maximum of 4 arguments supported\n", __FUNCTION__);
4744 			break;
4745 	}
4746 
4747 	bytes += sprintf(p + bytes, "\n");
4748 
4749 	return bytes;
4750 }
4751 
4752 void dhd_buzzz_dump(bcm_buzzz_t *buzzz_p, void *buffer_p, char *p)
4753 {
4754 	int i;
4755 	uint32 total, part1, part2, log_sz, core[BCM_BUZZZ_COUNTERS_MAX];
4756 	void * log;
4757 
4758 	for (i = 0; i < BCM_BUZZZ_COUNTERS_MAX; i++) {
4759 		core[i] = 0;
4760 	}
4761 
4762 	log_sz = buzzz_p->log_sz;
4763 
4764 	part1 = ((uint32)buzzz_p->cur - (uint32)buzzz_p->log) / log_sz;
4765 
4766 	if (buzzz_p->wrap == TRUE) {
4767 		part2 = ((uint32)buzzz_p->end - (uint32)buzzz_p->cur) / log_sz;
4768 		total = (buzzz_p->buffer_sz - BCM_BUZZZ_LOGENTRY_MAXSZ) / log_sz;
4769 	} else {
4770 		part2 = 0U;
4771 		total = buzzz_p->count;
4772 	}
4773 
4774 	if (total == 0U) {
4775 		printf("%s: bcm_buzzz_dump total<%u> done\n", __FUNCTION__, total);
4776 		return;
4777 	} else {
4778 		printf("%s: bcm_buzzz_dump total<%u> : part2<%u> + part1<%u>\n", __FUNCTION__,
4779 		       total, part2, part1);
4780 	}
4781 
4782 	if (part2) {   /* with wrap */
4783 		log = (void*)((size_t)buffer_p + (buzzz_p->cur - buzzz_p->log));
4784 		while (part2--) {   /* from cur to end : part2 */
4785 			p[0] = '\0';
4786 			dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
4787 			printf("%s", p);
4788 			log = (void*)((size_t)log + buzzz_p->log_sz);
4789 		}
4790 	}
4791 
4792 	log = (void*)buffer_p;
4793 	while (part1--) {
4794 		p[0] = '\0';
4795 		dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
4796 		printf("%s", p);
4797 		log = (void*)((size_t)log + buzzz_p->log_sz);
4798 	}
4799 
4800 	printf("%s: bcm_buzzz_dump done.\n", __FUNCTION__);
4801 }
4802 
4803 int dhd_buzzz_dump_dngl(dhd_bus_t *bus)
4804 {
4805 	bcm_buzzz_t * buzzz_p = NULL;
4806 	void * buffer_p = NULL;
4807 	char * page_p = NULL;
4808 	pciedev_shared_t *sh;
4809 	int ret = 0;
4810 
4811 	if (bus->dhd->busstate != DHD_BUS_DATA) {
4812 		return BCME_UNSUPPORTED;
4813 	}
4814 	if ((page_p = (char *)MALLOC(bus->dhd->osh, 4096)) == NULL) {
4815 		printf("%s: Page memory allocation failure\n", __FUNCTION__);
4816 		goto done;
4817 	}
4818 	if ((buzzz_p = MALLOC(bus->dhd->osh, sizeof(bcm_buzzz_t))) == NULL) {
4819 		printf("%s: BCM BUZZZ memory allocation failure\n", __FUNCTION__);
4820 		goto done;
4821 	}
4822 
4823 	ret = dhdpcie_readshared(bus);
4824 	if (ret < 0) {
4825 		DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__));
4826 		goto done;
4827 	}
4828 
4829 	sh = bus->pcie_sh;
4830 
4831 	DHD_INFO(("%s buzzz:%08x\n", __FUNCTION__, sh->buzz_dbg_ptr));
4832 
4833 	if (sh->buzz_dbg_ptr != 0U) {	/* Fetch and display dongle BUZZZ Trace */
4834 
4835 		dhdpcie_bus_membytes(bus, FALSE, (ulong)sh->buzz_dbg_ptr,
4836 		                     (uint8 *)buzzz_p, sizeof(bcm_buzzz_t));
4837 
4838 		printf("BUZZZ[0x%08x]: log<0x%08x> cur<0x%08x> end<0x%08x> "
4839 			"count<%u> status<%u> wrap<%u>\n"
4840 			"cpu<0x%02X> counters<%u> group<%u> buffer_sz<%u> log_sz<%u>\n",
4841 			(int)sh->buzz_dbg_ptr,
4842 			(int)buzzz_p->log, (int)buzzz_p->cur, (int)buzzz_p->end,
4843 			buzzz_p->count, buzzz_p->status, buzzz_p->wrap,
4844 			buzzz_p->cpu_idcode, buzzz_p->counters, buzzz_p->group,
4845 			buzzz_p->buffer_sz, buzzz_p->log_sz);
4846 
4847 		if (buzzz_p->count == 0) {
4848 			printf("%s: Empty dongle BUZZZ trace\n\n", __FUNCTION__);
4849 			goto done;
4850 		}
4851 
4852 		/* Allocate memory for trace buffer and format strings */
4853 		buffer_p = MALLOC(bus->dhd->osh, buzzz_p->buffer_sz);
4854 		if (buffer_p == NULL) {
4855 			printf("%s: Buffer memory allocation failure\n", __FUNCTION__);
4856 			goto done;
4857 		}
4858 
4859 		/* Fetch the trace. format strings are exported via bcm_buzzz.h */
4860 		dhdpcie_bus_membytes(bus, FALSE, (uint32)buzzz_p->log,   /* Trace */
4861 		                     (uint8 *)buffer_p, buzzz_p->buffer_sz);
4862 
4863 		/* Process and display the trace using formatted output */
4864 
4865 		{
4866 			int ctr;
4867 			for (ctr = 0; ctr < buzzz_p->counters; ctr++) {
4868 				printf("<Evt[%02X]> ", buzzz_p->eventid[ctr]);
4869 			}
4870 			printf("<code execution point>\n");
4871 		}
4872 
4873 		dhd_buzzz_dump(buzzz_p, buffer_p, page_p);
4874 
4875 		printf("%s: ----- End of dongle BCM BUZZZ Trace -----\n\n", __FUNCTION__);
4876 
4877 		MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz); buffer_p = NULL;
4878 	}
4879 
4880 done:
4881 
4882 	if (page_p)   MFREE(bus->dhd->osh, page_p, 4096);
4883 	if (buffer_p) MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz);
4884 	if (buzzz_p)  MFREE(bus->dhd->osh, buzzz_p, sizeof(bcm_buzzz_t));
4885 
4886 	return BCME_OK;
4887 }
4888 #endif /* BCM_BUZZZ */
4889 
4890 #define PCIE_GEN2(sih) ((BUSTYPE((sih)->bustype) == PCI_BUS) &&	\
4891 	((sih)->buscoretype == PCIE2_CORE_ID))
4892 
4893 #define PCIE_FLR_CAPAB_BIT		28
4894 #define PCIE_FUNCTION_LEVEL_RESET_BIT	15
4895 
4896 /* Change delays for only QT HW, FPGA and silicon uses same delay */
4897 #ifdef BCMQT_HW
4898 #define DHD_FUNCTION_LEVEL_RESET_DELAY		300000u
4899 #define DHD_SSRESET_STATUS_RETRY_DELAY	10000u
4900 #else
4901 #define DHD_FUNCTION_LEVEL_RESET_DELAY	70u	/* 70 msec delay */
4902 #define DHD_SSRESET_STATUS_RETRY_DELAY	40u
4903 #endif // endif
4904 /*
4905  * Increase SSReset de-assert time to 8ms,
4906  * since re-scan can take longer on 4378B0.
4907  */
4908 #define DHD_SSRESET_STATUS_RETRIES	200u
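/* Worst-case SSReset wait = DHD_SSRESET_STATUS_RETRIES *
 * DHD_SSRESET_STATUS_RETRY_DELAY = 200 * 40us = 8ms on silicon/FPGA
 * (200 * 10000us = 2s under QT), which is where the 8ms above comes from.
 */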
4909 
4910 static void
4911 dhdpcie_enum_reg_init(dhd_bus_t *bus)
4912 {
4913 	/* initialize Function control register (clear bit 4) to HW init value */
4914 	si_corereg(bus->sih, bus->sih->buscoreidx,
4915 		OFFSETOF(sbpcieregs_t, ftn_ctrl.control), ~0,
4916 		PCIE_CPLCA_ENABLE | PCIE_DLY_PERST_TO_COE);
4917 
4918 	/* clear IntMask */
4919 	si_corereg(bus->sih, bus->sih->buscoreidx,
4920 		OFFSETOF(sbpcieregs_t, ftn_ctrl.intmask), ~0, 0);
4921 	/* clear IntStatus */
4922 	si_corereg(bus->sih, bus->sih->buscoreidx,
4923 		OFFSETOF(sbpcieregs_t, ftn_ctrl.intstatus), ~0,
4924 		si_corereg(bus->sih, bus->sih->buscoreidx,
4925 			OFFSETOF(sbpcieregs_t, ftn_ctrl.intstatus), 0, 0));
4926 
4927 	/* clear MSIVector */
4928 	si_corereg(bus->sih, bus->sih->buscoreidx,
4929 		OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_vector), ~0, 0);
4930 	/* clear MSIIntMask */
4931 	si_corereg(bus->sih, bus->sih->buscoreidx,
4932 		OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intmask), ~0, 0);
4933 	/* clear MSIIntStatus */
4934 	si_corereg(bus->sih, bus->sih->buscoreidx,
4935 		OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intstatus), ~0,
4936 		si_corereg(bus->sih, bus->sih->buscoreidx,
4937 			OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intstatus), 0, 0));
4938 
4939 	/* clear PowerIntMask */
4940 	si_corereg(bus->sih, bus->sih->buscoreidx,
4941 		OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intmask), ~0, 0);
4942 	/* clear PowerIntStatus */
4943 	si_corereg(bus->sih, bus->sih->buscoreidx,
4944 		OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intstatus), ~0,
4945 		si_corereg(bus->sih, bus->sih->buscoreidx,
4946 			OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intstatus), 0, 0));
4947 
4948 	/* clear MailboxIntMask */
4949 	si_corereg(bus->sih, bus->sih->buscoreidx,
4950 		OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intmask), ~0, 0);
4951 	/* clear MailboxInt */
4952 	si_corereg(bus->sih, bus->sih->buscoreidx,
4953 		OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intstatus), ~0,
4954 		si_corereg(bus->sih, bus->sih->buscoreidx,
4955 			OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intstatus), 0, 0));
4956 }
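/* EXAMPLE: the nested si_corereg() calls above implement the usual
 * write-1-to-clear idiom: the inner call (mask 0, val 0) is a pure read of
 * the status register, and the outer call writes that value back so exactly
 * the bits found set get cleared. Spelled out for a single register
 * ('clear_w1c_status' is an illustrative name):
 */
#if 0
static void
clear_w1c_status(dhd_bus_t *bus, uint offset)
{
	uint32 status;

	/* mask = 0: modify no bits, i.e. a plain read */
	status = si_corereg(bus->sih, bus->sih->buscoreidx, offset, 0, 0);
	/* write the value back: each set bit clears itself (W1C) */
	si_corereg(bus->sih, bus->sih->buscoreidx, offset, ~0, status);
}
#endif /* EXAMPLE */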
4957 
4958 int
4959 dhd_bus_perform_flr(dhd_bus_t *bus, bool force_fail)
4960 {
4961 	uint flr_capab;
4962 	uint val;
4963 	int retry = 0;
4964 
4965 	DHD_ERROR(("******** Perform FLR ********\n"));
4966 
4967 	if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) {
4968 		if (bus->pcie_mailbox_mask != 0) {
4969 			dhdpcie_bus_intr_disable(bus);
4970 		}
4971 		/* initialize F0 enum registers before FLR for rev66/67 */
4972 		dhdpcie_enum_reg_init(bus);
4973 	}
4974 
4975 	/* Read PCIE_CFG_DEVICE_CAPABILITY bit 28 to check FLR capability */
4976 	val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CAPABILITY, sizeof(val));
4977 	flr_capab =  val & (1 << PCIE_FLR_CAPAB_BIT);
4978 	DHD_INFO(("Read Device Capability: reg=0x%x read val=0x%x flr_capab=0x%x\n",
4979 		PCIE_CFG_DEVICE_CAPABILITY, val, flr_capab));
4980 	if (!flr_capab) {
4981 		DHD_ERROR(("Chip does not support FLR\n"));
4982 		return BCME_UNSUPPORTED;
4983 	}
4984 
4985 	/* Save pcie config space */
4986 	DHD_INFO(("Save Pcie Config Space\n"));
4987 	DHD_PCIE_CONFIG_SAVE(bus);
4988 
4989 	/* Set bit 15 of PCIE_CFG_DEVICE_CONTROL */
4990 	DHD_INFO(("Set PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
4991 		PCIE_FUNCTION_LEVEL_RESET_BIT, PCIE_CFG_DEVICE_CONTROL));
4992 	val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val));
4993 	DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
4994 	val = val | (1 << PCIE_FUNCTION_LEVEL_RESET_BIT);
4995 	DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
4996 	OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val), val);
4997 
4998 	/* wait for DHD_FUNCTION_LEVEL_RESET_DELAY msec */
4999 	DHD_INFO(("Delay of %d msec\n", DHD_FUNCTION_LEVEL_RESET_DELAY));
5000 	OSL_DELAY(DHD_FUNCTION_LEVEL_RESET_DELAY * 1000u);
5001 
5002 	if (force_fail) {
5003 		DHD_ERROR(("Set PCIE_SSRESET_DISABLE_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)\n",
5004 			PCIE_SSRESET_DISABLE_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
5005 		val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
5006 		DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL,
5007 			val));
5008 		val = val | (1 << PCIE_SSRESET_DISABLE_BIT);
5009 		DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL,
5010 			val));
5011 		OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val), val);
5012 
5013 		val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
5014 		DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL,
5015 			val));
5016 	}
5017 
5018 	/* Clear bit 15 of PCIE_CFG_DEVICE_CONTROL */
5019 	DHD_INFO(("Clear PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
5020 		PCIE_FUNCTION_LEVEL_RESET_BIT, PCIE_CFG_DEVICE_CONTROL));
5021 	val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val));
5022 	DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
5023 	val = val & ~(1 << PCIE_FUNCTION_LEVEL_RESET_BIT);
5024 	DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
5025 	OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val), val);
5026 
5027 	/* Wait till bit 13 of PCIE_CFG_SUBSYSTEM_CONTROL is cleared */
5028 	DHD_INFO(("Wait till PCIE_SSRESET_STATUS_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x) "
5029 		"is cleared\n", PCIE_SSRESET_STATUS_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
5030 	do {
5031 		val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
5032 		DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n",
5033 			PCIE_CFG_SUBSYSTEM_CONTROL, val));
5034 		val = val & (1 << PCIE_SSRESET_STATUS_BIT);
5035 		OSL_DELAY(DHD_SSRESET_STATUS_RETRY_DELAY);
5036 	} while (val && (retry++ < DHD_SSRESET_STATUS_RETRIES));
5037 
5038 	if (val) {
5039 		DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
5040 			PCIE_CFG_SUBSYSTEM_CONTROL, PCIE_SSRESET_STATUS_BIT));
5041 		/* User has to fire the IOVAR again, if force_fail is needed */
5042 		if (force_fail) {
5043 			bus->flr_force_fail = FALSE;
5044 			DHD_ERROR(("%s cleared flr_force_fail flag\n", __FUNCTION__));
5045 		}
5046 		return BCME_DONGLE_DOWN;
5047 	}
5048 
5049 	/* Restore pcie config space */
5050 	DHD_INFO(("Restore Pcie Config Space\n"));
5051 	DHD_PCIE_CONFIG_RESTORE(bus);
5052 
5053 	DHD_ERROR(("******** FLR Succeeded ********\n"));
5054 
5055 	return BCME_OK;
5056 }
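
/*
 * Illustrative sketch, not part of the driver: the routine above is the
 * standard PCIe Function Level Reset sequence (check Device Capability
 * bit 28, save config space, set Device Control bit 15, wait the
 * spec-mandated ~100 ms, restore config space) plus a Broadcom-specific
 * poll of PCIE_SSRESET_STATUS_BIT. On a host where the Linux PCI core is
 * usable directly, the generic part of the same flow would look roughly
 * like this (assuming a struct pci_dev *pdev; helper names vary across
 * kernel versions):
 *
 *	pci_save_state(pdev);		// snapshot config space
 *	pcie_flr(pdev);			// set Initiate-FLR, wait 100 ms
 *	pci_restore_state(pdev);	// restore config space
 *
 * DHD open-codes this via OSL_PCI_READ/WRITE_CONFIG because it must also
 * poll the vendor-specific SubsystemControl status bit before declaring
 * the reset complete.
 */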
5057 
5058 #ifdef DHD_USE_BP_RESET
5059 #define DHD_BP_RESET_ASPM_DISABLE_DELAY	500u	/* usec */
5060 
5061 #define DHD_BP_RESET_STATUS_RETRY_DELAY	40u	/* usec */
5062 #define DHD_BP_RESET_STATUS_RETRIES	50u
5063 
5064 #define PCIE_CFG_SPROM_CTRL_SB_RESET_BIT	10
5065 #define PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT	21
5066 int
5067 dhd_bus_perform_bp_reset(struct dhd_bus *bus)
5068 {
5069 	uint val;
5070 	int retry = 0;
5071 	uint dar_clk_ctrl_status_reg = DAR_CLK_CTRL(bus->sih->buscorerev);
5072 	int ret = BCME_OK;
5073 	bool cond;
5074 
5075 	DHD_ERROR(("******** Perform BP reset ********\n"));
5076 
5077 	/* Disable ASPM */
5078 	DHD_INFO(("Disable ASPM: Clear bits(1-0) of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
5079 		PCIECFGREG_LINK_STATUS_CTRL));
5080 	val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val));
5081 	DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
5082 	val = val & (~PCIE_ASPM_ENAB);
5083 	DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
5084 	OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val);
5085 
5086 	/* wait for delay usec */
5087 	DHD_INFO(("Delay of %d usec\n", DHD_BP_RESET_ASPM_DISABLE_DELAY));
5088 	OSL_DELAY(DHD_BP_RESET_ASPM_DISABLE_DELAY);
5089 
5090 	/* Set bit 10 of PCIECFGREG_SPROM_CTRL */
5091 	DHD_INFO(("Set PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of PCIECFGREG_SPROM_CTRL(0x%x)\n",
5092 		PCIE_CFG_SPROM_CTRL_SB_RESET_BIT, PCIECFGREG_SPROM_CTRL));
5093 	val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val));
5094 	DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
5095 	val = val | (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT);
5096 	DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
5097 	OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val), val);
5098 
5099 	/* Wait till backplane reset is ASSERTED, i.e.
5100 	 * bit 10 of PCIECFGREG_SPROM_CTRL is cleared.
5101 	 * Only after this is polling bit 21 of DAR reg 0xAE0 valid;
5102 	 * otherwise the DAR register will return its previous, stale value.
5103 	 */
5104 	DHD_INFO(("Wait till PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of "
5105 		"PCIECFGREG_SPROM_CTRL(0x%x) is cleared\n",
5106 		PCIE_CFG_SPROM_CTRL_SB_RESET_BIT, PCIECFGREG_SPROM_CTRL));
5107 	do {
5108 		val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val));
5109 		DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
5110 		cond = val & (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT);
5111 		OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY);
5112 	} while (cond && (retry++ < DHD_BP_RESET_STATUS_RETRIES));
5113 
5114 	if (cond) {
5115 		DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
5116 			PCIECFGREG_SPROM_CTRL, PCIE_CFG_SPROM_CTRL_SB_RESET_BIT));
5117 		ret = BCME_ERROR;
5118 		goto aspm_enab;
5119 	}
5120 
5121 	/* Wait till bit 21 of dar_clk_ctrl_status_reg is cleared */
5122 	DHD_INFO(("Wait till PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT(%d) of "
5123 		"dar_clk_ctrl_status_reg(0x%x) is cleared\n",
5124 		PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT, dar_clk_ctrl_status_reg));
5125 	do {
5126 		val = si_corereg(bus->sih, bus->sih->buscoreidx,
5127 			dar_clk_ctrl_status_reg, 0, 0);
5128 		DHD_INFO(("read_dar si_corereg: reg=0x%x read val=0x%x\n",
5129 			dar_clk_ctrl_status_reg, val));
5130 		cond = val & (1 << PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT);
5131 		OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY);
5132 	} while (cond && (retry++ < DHD_BP_RESET_STATUS_RETRIES));
5133 
5134 	if (cond) {
5135 		DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
5136 			dar_clk_ctrl_status_reg, PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT));
5137 		ret = BCME_ERROR;
5138 	}
5139 
5140 aspm_enab:
5141 	/* Enable ASPM */
5142 	DHD_INFO(("Enable ASPM: set bit 1 of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
5143 		PCIECFGREG_LINK_STATUS_CTRL));
5144 	val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val));
5145 	DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
5146 	val = val | (PCIE_ASPM_L1_ENAB);
5147 	DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
5148 	OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val);
5149 
5150 	DHD_ERROR(("******** BP reset Succeeded ********\n"));
5151 
5152 	return ret;
5153 }
5154 #endif /* DHD_USE_BP_RESET */
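
/*
 * Both reset paths above use the same bounded-poll idiom: kick a
 * self-clearing control bit, then spin on its status with a retry cap.
 * A minimal sketch of the pattern (illustrative; read_status() and
 * DONE_BIT are hypothetical stand-ins for the real register reads):
 *
 *	int retry = 0;
 *	uint val;
 *	do {
 *		val = read_status();
 *		OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY);	// 40 usec
 *	} while ((val & DONE_BIT) && (retry++ < DHD_BP_RESET_STATUS_RETRIES));
 *	if (val & DONE_BIT)
 *		return BCME_ERROR;	// bit never cleared in time
 *
 * With the defaults above the worst-case wait is bounded at roughly
 * 50 retries * 40 usec = 2 ms per polled register.
 */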
5155 
5156 int
5157 dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
5158 {
5159 	dhd_bus_t *bus = dhdp->bus;
5160 	int bcmerror = 0;
5161 	unsigned long flags;
5162 	unsigned long flags_bus;
5163 #ifdef CONFIG_ARCH_MSM
5164 	int retry = POWERUP_MAX_RETRY;
5165 #endif /* CONFIG_ARCH_MSM */
5166 
5167 	if (flag == TRUE) { /* Turn off WLAN */
5168 		/* Removing Power */
5169 		DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__));
5170 		DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__));
5171 		bus->dhd->up = FALSE;
5172 
5173 		/* wait for other contexts to finish -- if required, a 1 s
5174 		 * OSL_DELAY call can be added here to give other contexts
5175 		 * a chance to finish
5176 		 */
5177 		dhdpcie_advertise_bus_cleanup(bus->dhd);
5178 
5179 		if (bus->dhd->busstate != DHD_BUS_DOWN) {
5180 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5181 			atomic_set(&bus->dhd->block_bus, TRUE);
5182 			dhd_flush_rx_tx_wq(bus->dhd);
5183 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5184 
5185 #ifdef BCMPCIE_OOB_HOST_WAKE
5186 			/* Clean up any pending host wake IRQ */
5187 			dhd_bus_oob_intr_set(bus->dhd, FALSE);
5188 			dhd_bus_oob_intr_unregister(bus->dhd);
5189 #endif /* BCMPCIE_OOB_HOST_WAKE */
5190 			dhd_os_wd_timer(dhdp, 0);
5191 			dhd_bus_stop(bus, TRUE);
5192 			if (bus->intr) {
5193 				DHD_BUS_LOCK(bus->bus_lock, flags_bus);
5194 				dhdpcie_bus_intr_disable(bus);
5195 				DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
5196 				dhdpcie_free_irq(bus);
5197 			}
5198 			dhd_deinit_bus_lock(bus);
5199 			dhd_deinit_backplane_access_lock(bus);
5200 			dhd_bus_release_dongle(bus);
5201 			dhdpcie_bus_free_resource(bus);
5202 			bcmerror = dhdpcie_bus_disable_device(bus);
5203 			if (bcmerror) {
5204 				DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
5205 					__FUNCTION__, bcmerror));
5206 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5207 				atomic_set(&bus->dhd->block_bus, FALSE);
5208 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5209 			}
5210 			/* Clean up protocol data after the Bus Master Enable bit is
5211 			 * cleared, so that the host can safely unmap DMA and remove the
5212 			 * allocated buffers from the PKTID MAP. On some Application
5213 			 * Processors the System MMU triggers a kernel panic when it
5214 			 * detects an attempted access to DMA-unmapped memory from a
5215 			 * device behind it. Such a panic is possible here because the
5216 			 * dongle may still access DMA-unmapped memory after
5217 			 * dhd_prot_reset() is called.
5218 			 * For this reason, the dhd_prot_reset() and dhd_clear() calls
5219 			 * must come after dhdpcie_bus_disable_device().
5220 			 */
5221 			dhd_prot_reset(dhdp);
5222 			dhd_clear(dhdp);
5223 #ifdef CONFIG_ARCH_MSM
5224 			bcmerror = dhdpcie_bus_clock_stop(bus);
5225 			if (bcmerror) {
5226 				DHD_ERROR(("%s: host clock stop failed: %d\n",
5227 					__FUNCTION__, bcmerror));
5228 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5229 				atomic_set(&bus->dhd->block_bus, FALSE);
5230 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5231 				goto done;
5232 			}
5233 #endif /* CONFIG_ARCH_MSM */
5234 			DHD_GENERAL_LOCK(bus->dhd, flags);
5235 			DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
5236 			bus->dhd->busstate = DHD_BUS_DOWN;
5237 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
5238 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5239 			atomic_set(&bus->dhd->block_bus, FALSE);
5240 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5241 		} else {
5242 			if (bus->intr) {
5243 				dhdpcie_free_irq(bus);
5244 			}
5245 #ifdef BCMPCIE_OOB_HOST_WAKE
5246 			/* Clean up any pending host wake IRQ */
5247 			dhd_bus_oob_intr_set(bus->dhd, FALSE);
5248 			dhd_bus_oob_intr_unregister(bus->dhd);
5249 #endif /* BCMPCIE_OOB_HOST_WAKE */
5250 			dhd_dpc_kill(bus->dhd);
5251 			if (!bus->no_bus_init) {
5252 				dhd_bus_release_dongle(bus);
5253 				dhdpcie_bus_free_resource(bus);
5254 				bcmerror = dhdpcie_bus_disable_device(bus);
5255 				if (bcmerror) {
5256 					DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
5257 						__FUNCTION__, bcmerror));
5258 				}
5259 
5260 				/* Clean up protocol data after the Bus Master Enable bit
5261 				 * is cleared, so that the host can safely unmap DMA and
5262 				 * remove the allocated buffers from the PKTID MAP. On some
5263 				 * Application Processors the System MMU triggers a kernel
5264 				 * panic when it detects an attempted access to DMA-unmapped
5265 				 * memory from a device behind it.
5266 				 * Such a panic is possible here because the dongle may still
5267 				 * access DMA-unmapped memory after dhd_prot_reset() is
5268 				 * called.
5269 				 * For this reason, the dhd_prot_reset() and dhd_clear() calls
5270 				 * must come after dhdpcie_bus_disable_device().
5271 				 */
5272 				dhd_prot_reset(dhdp);
5273 				dhd_clear(dhdp);
5274 			} else {
5275 				bus->no_bus_init = FALSE;
5276 			}
5277 #ifdef CONFIG_ARCH_MSM
5278 			bcmerror = dhdpcie_bus_clock_stop(bus);
5279 			if (bcmerror) {
5280 				DHD_ERROR(("%s: host clock stop failed: %d\n",
5281 					__FUNCTION__, bcmerror));
5282 				goto done;
5283 			}
5284 #endif  /* CONFIG_ARCH_MSM */
5285 		}
5286 
5287 		bus->dhd->dongle_reset = TRUE;
5288 		DHD_ERROR(("%s:  WLAN OFF Done\n", __FUNCTION__));
5289 
5290 	} else { /* Turn on WLAN */
5291 		if (bus->dhd->busstate == DHD_BUS_DOWN) {
5292 			/* Powering On */
5293 			DHD_ERROR(("%s: == Power ON ==\n", __FUNCTION__));
5294 #ifdef CONFIG_ARCH_MSM
5295 			while (--retry) {
5296 				bcmerror = dhdpcie_bus_clock_start(bus);
5297 				if (!bcmerror) {
5298 					DHD_ERROR(("%s: dhdpcie_bus_clock_start OK\n",
5299 						__FUNCTION__));
5300 					break;
5301 				} else {
5302 					OSL_SLEEP(10);
5303 				}
5304 			}
5305 
5306 			if (bcmerror && !retry) {
5307 				DHD_ERROR(("%s: host pcie clock enable failed: %d\n",
5308 					__FUNCTION__, bcmerror));
5309 				goto done;
5310 			}
5311 #if defined(DHD_CONTROL_PCIE_ASPM_WIFI_TURNON)
5312 			dhd_bus_aspm_enable_rc_ep(bus, FALSE);
5313 #endif /* DHD_CONTROL_PCIE_ASPM_WIFI_TURNON */
5314 #endif /* CONFIG_ARCH_MSM */
5315 			bus->is_linkdown = 0;
5316 			bus->cto_triggered = 0;
5317 			bcmerror = dhdpcie_bus_enable_device(bus);
5318 			if (bcmerror) {
5319 				DHD_ERROR(("%s: host configuration restore failed: %d\n",
5320 					__FUNCTION__, bcmerror));
5321 				goto done;
5322 			}
5323 
5324 			bcmerror = dhdpcie_bus_alloc_resource(bus);
5325 			if (bcmerror) {
5326 				DHD_ERROR(("%s: dhdpcie_bus_resource_alloc failed: %d\n",
5327 					__FUNCTION__, bcmerror));
5328 				goto done;
5329 			}
5330 
5331 			bcmerror = dhdpcie_bus_dongle_attach(bus);
5332 			if (bcmerror) {
5333 				DHD_ERROR(("%s: dhdpcie_bus_dongle_attach failed: %d\n",
5334 					__FUNCTION__, bcmerror));
5335 				goto done;
5336 			}
5337 
5338 			bcmerror = dhd_bus_request_irq(bus);
5339 			if (bcmerror) {
5340 				DHD_ERROR(("%s: dhd_bus_request_irq failed: %d\n",
5341 					__FUNCTION__, bcmerror));
5342 				goto done;
5343 			}
5344 
5345 			bus->dhd->dongle_reset = FALSE;
5346 
5347 #if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
5348 			dhd_irq_set_affinity(bus->dhd, cpumask_of(1));
5349 #endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
5350 
5351 			bcmerror = dhd_bus_start(dhdp);
5352 			if (bcmerror) {
5353 				DHD_ERROR(("%s: dhd_bus_start: %d\n",
5354 					__FUNCTION__, bcmerror));
5355 				goto done;
5356 			}
5357 
5358 			bus->dhd->up = TRUE;
5359 			/* Re-enable the watchdog, which was disabled in dhdpcie_advertise_bus_cleanup() */
5360 			if (bus->dhd->dhd_watchdog_ms_backup) {
5361 				DHD_ERROR(("%s: Enabling wdtick after dhd init\n",
5362 					__FUNCTION__));
5363 				dhd_os_wd_timer(bus->dhd, bus->dhd->dhd_watchdog_ms_backup);
5364 			}
5365 			DHD_ERROR(("%s: WLAN Power On Done\n", __FUNCTION__));
5366 		} else {
5367 			DHD_ERROR(("%s: what should we do here\n", __FUNCTION__));
5368 			goto done;
5369 		}
5370 	}
5371 
5372 done:
5373 	if (bcmerror) {
5374 		DHD_GENERAL_LOCK(bus->dhd, flags);
5375 		DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
5376 		bus->dhd->busstate = DHD_BUS_DOWN;
5377 		DHD_GENERAL_UNLOCK(bus->dhd, flags);
5378 	}
5379 	return bcmerror;
5380 }
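
/*
 * Call-flow summary (illustrative): dhd_bus_devreset() is the on/off pair
 * behind the DEVRESET iovar handled in dhdpcie_bus_doiovar() below, e.g.:
 *
 *	dhd_bus_devreset(dhdp, TRUE);	// WLAN off: stop bus, free IRQ,
 *					// disable device, dhd_prot_reset()
 *	dhd_bus_devreset(dhdp, FALSE);	// WLAN on: enable device, re-alloc
 *					// resources, re-attach dongle,
 *					// request IRQ, dhd_bus_start()
 *
 * Note the ordering constraint documented above: dhd_prot_reset() and
 * dhd_clear() must follow dhdpcie_bus_disable_device() so that the dongle
 * can never DMA into memory the host has already unmapped.
 */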
5381 
5382 /* si_backplane_access() manages a shared resource - the BAR0 mapping - hence
5383  * its calls shall be serialized. This wrapper provides that serialization
5384  * and shall be used everywhere instead of calling si_backplane_access()
5385  * directly.
5386  *
5387  * The Linux DHD driver calls si_backplane_access() from three contexts:
5388  * tasklet (e.g. dhdpcie_sssr_dump()), iovar handlers ("sbreg", "membytes",
5389  * etc.) and procfs (used by the GDB proxy). To avoid race conditions, calls
5390  * to si_backplane_access() shall be serialized; the presence of a tasklet
5391  * context implies a spinlock-based scheme, hence the Linux implementation
5392  * of dhd_pcie_backplane_access_[un]lock() uses a spinlock.
5393  *
5394  * Other platforms may add their own implementations of
5395  * dhd_pcie_backplane_access_[un]lock() as needed (e.g. if serialization is
5396  * not needed, the implementation might be empty).
5397  */
5398 static uint
5399 serialized_backplane_access(dhd_bus_t *bus, uint addr, uint size, uint *val, bool read)
5400 {
5401 	uint ret;
5402 	unsigned long flags;
5403 	DHD_BACKPLANE_ACCESS_LOCK(bus->backplane_access_lock, flags);
5404 	ret = si_backplane_access(bus->sih, addr, size, val, read);
5405 	DHD_BACKPLANE_ACCESS_UNLOCK(bus->backplane_access_lock, flags);
5406 	return ret;
5407 }
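
/*
 * Usage sketch (illustrative): every backplane reader/writer goes through
 * the wrapper so the BAR0 window move plus the access itself is atomic
 * with respect to the other contexts listed above, e.g. for some
 * hypothetical backplane address addr:
 *
 *	uint v = 0;
 *	(void)serialized_backplane_access(bus, addr, sizeof(v), &v, TRUE);
 *	v |= 0x1;
 *	(void)serialized_backplane_access(bus, addr, sizeof(v), &v, FALSE);
 *
 * Only each individual access is serialized; a read-modify-write sequence
 * like the one above is still not atomic as a whole, so callers needing
 * that must hold their own lock across both calls.
 */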
5408 
5409 static int
5410 dhdpcie_get_dma_ring_indices(dhd_pub_t *dhd)
5411 {
5412 	int h2d_support, d2h_support;
5413 
5414 	d2h_support = dhd->dma_d2h_ring_upd_support ? 1 : 0;
5415 	h2d_support = dhd->dma_h2d_ring_upd_support ? 1 : 0;
5416 	return (d2h_support | (h2d_support << 1));
5417 
5418 }
5419 int
5420 dhdpcie_set_dma_ring_indices(dhd_pub_t *dhd, int32 int_val)
5421 {
5422 	int bcmerror = 0;
5423 	/* Can change it only during initialization/FW download */
5424 	if (dhd->busstate == DHD_BUS_DOWN) {
5425 		if ((int_val > 3) || (int_val < 0)) {
5426 			DHD_ERROR(("Bad argument. Possible values: 0, 1, 2 & 3\n"));
5427 			bcmerror = BCME_BADARG;
5428 		} else {
5429 			dhd->dma_d2h_ring_upd_support = (int_val & 1) ? TRUE : FALSE;
5430 			dhd->dma_h2d_ring_upd_support = (int_val & 2) ? TRUE : FALSE;
5431 			dhd->dma_ring_upd_overwrite = TRUE;
5432 		}
5433 	} else {
5434 		DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
5435 			__FUNCTION__));
5436 		bcmerror = BCME_NOTDOWN;
5437 	}
5438 
5439 	return bcmerror;
5440 
5441 }
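
/*
 * The two DMA-index capabilities are packed into the low two bits of the
 * iovar value: bit 0 = D2H ring updates via DMA, bit 1 = H2D ring updates
 * via DMA. A sketch of the encoding (hypothetical enum, for illustration):
 *
 *	enum { DMA_INDX_NONE = 0,	// neither direction uses DMA indices
 *	       DMA_INDX_D2H  = 1,	// dma_d2h_ring_upd_support only
 *	       DMA_INDX_H2D  = 2,	// dma_h2d_ring_upd_support only
 *	       DMA_INDX_BOTH = 3 };	// both directions
 *
 * So a set of 3 is decoded above as d2h = (3 & 1) != 0 and h2d = (3 & 2)
 * != 0, and dhdpcie_get_dma_ring_indices() re-encodes the same two flags.
 */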
5442 
5443 /**
5444  * IOVAR handler of the DHD bus layer (in this case, the PCIe bus).
5445  *
5446  * @param actionid  e.g. IOV_SVAL(IOV_PCIEREG)
5447  * @param params    input buffer
5448  * @param plen      length in [bytes] of input buffer 'params'
5449  * @param arg       output buffer
5450  * @param len       length in [bytes] of output buffer 'arg'
5451  */
5452 static int
5453 dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
5454                 void *params, int plen, void *arg, int len, int val_size)
5455 {
5456 	int bcmerror = 0;
5457 	int32 int_val = 0;
5458 	int32 int_val2 = 0;
5459 	int32 int_val3 = 0;
5460 	bool bool_val = 0;
5461 
5462 	DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n",
5463 	           __FUNCTION__, actionid, name, params, plen, arg, len, val_size));
5464 
5465 	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
5466 		goto exit;
5467 
5468 	if (plen >= (int)sizeof(int_val))
5469 		bcopy(params, &int_val, sizeof(int_val));
5470 
5471 	if (plen >= (int)sizeof(int_val) * 2)
5472 		bcopy((void*)((uintptr)params + sizeof(int_val)), &int_val2, sizeof(int_val2));
5473 
5474 	if (plen >= (int)sizeof(int_val) * 3)
5475 		bcopy((void*)((uintptr)params + 2 * sizeof(int_val)), &int_val3, sizeof(int_val3));
5476 
5477 	bool_val = (int_val != 0) ? TRUE : FALSE;
5478 
5479 	/* Check if dongle is in reset. If so, only allow DEVRESET iovars */
5480 	if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) ||
5481 	                                actionid == IOV_GVAL(IOV_DEVRESET))) {
5482 		bcmerror = BCME_NOTREADY;
5483 		goto exit;
5484 	}
5485 
5486 	switch (actionid) {
5487 
5488 	case IOV_SVAL(IOV_VARS):
5489 		bcmerror = dhdpcie_downloadvars(bus, arg, len);
5490 		break;
5491 	case IOV_SVAL(IOV_PCIE_LPBK):
5492 		bcmerror = dhdpcie_bus_lpback_req(bus, int_val);
5493 		break;
5494 
5495 	case IOV_SVAL(IOV_PCIE_DMAXFER): {
5496 		dma_xfer_info_t *dmaxfer = (dma_xfer_info_t *)arg;
5497 
5498 		if (!dmaxfer)
5499 			return BCME_BADARG;
5500 		if (dmaxfer->version != DHD_DMAXFER_VERSION)
5501 			return BCME_VERSION;
5502 		if (dmaxfer->length != sizeof(dma_xfer_info_t)) {
5503 			return BCME_BADLEN;
5504 		}
5505 
5506 		bcmerror = dhdpcie_bus_dmaxfer_req(bus, dmaxfer->num_bytes,
5507 				dmaxfer->src_delay, dmaxfer->dest_delay,
5508 				dmaxfer->type, dmaxfer->core_num,
5509 				dmaxfer->should_wait);
5510 
5511 		if (dmaxfer->should_wait && bcmerror >= 0) {
5512 			bcmerror = dhdmsgbuf_dmaxfer_status(bus->dhd, dmaxfer);
5513 		}
5514 		break;
5515 	}
5516 
5517 	case IOV_GVAL(IOV_PCIE_DMAXFER): {
5518 		dma_xfer_info_t *dmaxfer = (dma_xfer_info_t *)params;
5519 		if (!dmaxfer)
5520 			return BCME_BADARG;
5521 		if (dmaxfer->version != DHD_DMAXFER_VERSION)
5522 			return BCME_VERSION;
5523 		if (dmaxfer->length != sizeof(dma_xfer_info_t)) {
5524 			return BCME_BADLEN;
5525 		}
5526 		bcmerror = dhdmsgbuf_dmaxfer_status(bus->dhd, dmaxfer);
5527 		break;
5528 	}
5529 
5530 	case IOV_GVAL(IOV_PCIE_SUSPEND):
5531 		int_val = (bus->dhd->busstate == DHD_BUS_SUSPEND) ? 1 : 0;
5532 		bcopy(&int_val, arg, val_size);
5533 		break;
5534 
5535 	case IOV_SVAL(IOV_PCIE_SUSPEND):
5536 		if (bool_val) { /* Suspend */
5537 			int ret;
5538 			unsigned long flags;
5539 
5540 			/*
5541 			 * If some other context is busy, wait until it is done
5542 			 * before starting suspend
5543 			 */
5544 			ret = dhd_os_busbusy_wait_condition(bus->dhd,
5545 				&bus->dhd->dhd_bus_busy_state, DHD_BUS_BUSY_IN_DHD_IOVAR);
5546 			if (ret == 0) {
5547 				DHD_ERROR(("%s: Wait timed out, dhd_bus_busy_state = 0x%x\n",
5548 					__FUNCTION__, bus->dhd->dhd_bus_busy_state));
5549 				return BCME_BUSY;
5550 			}
5551 
5552 			DHD_GENERAL_LOCK(bus->dhd, flags);
5553 			DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd);
5554 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
5555 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5556 			dhdpcie_bus_suspend(bus, TRUE, TRUE);
5557 #else
5558 			dhdpcie_bus_suspend(bus, TRUE);
5559 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5560 
5561 			DHD_GENERAL_LOCK(bus->dhd, flags);
5562 			DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
5563 			dhd_os_busbusy_wake(bus->dhd);
5564 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
5565 		} else { /* Resume */
5566 			unsigned long flags;
5567 			DHD_GENERAL_LOCK(bus->dhd, flags);
5568 			DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd);
5569 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
5570 
5571 			dhdpcie_bus_suspend(bus, FALSE);
5572 
5573 			DHD_GENERAL_LOCK(bus->dhd, flags);
5574 			DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd);
5575 			dhd_os_busbusy_wake(bus->dhd);
5576 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
5577 		}
5578 		break;
5579 
5580 	case IOV_GVAL(IOV_MEMSIZE):
5581 		int_val = (int32)bus->ramsize;
5582 		bcopy(&int_val, arg, val_size);
5583 		break;
5584 
5585 	/* Debug related. Dumps core registers or one of the dongle memories */
5586 	case IOV_GVAL(IOV_DUMP_DONGLE):
5587 	{
5588 		dump_dongle_in_t ddi = *(dump_dongle_in_t*)params;
5589 		dump_dongle_out_t *ddo = (dump_dongle_out_t*)arg;
5590 		uint32 *p = ddo->val;
5591 		const uint max_offset = 4096 - 1; /* one core contains max 4096/4 registers */
5592 
5593 		if (plen < sizeof(ddi) || len < sizeof(ddo)) {
5594 			bcmerror = BCME_BADARG;
5595 			break;
5596 		}
5597 
5598 		switch (ddi.type) {
5599 		case DUMP_DONGLE_COREREG:
5600 			ddo->n_bytes = 0;
5601 
5602 			if (si_setcoreidx(bus->sih, ddi.index) == NULL) {
5603 				break; // beyond last core: core enumeration ended
5604 			}
5605 
5606 			ddo->address = si_addrspace(bus->sih, CORE_SLAVE_PORT_0, CORE_BASE_ADDR_0);
5607 			ddo->address += ddi.offset; // BP address at which this dump starts
5608 
5609 			ddo->id = si_coreid(bus->sih);
5610 			ddo->rev = si_corerev(bus->sih);
5611 
5612 			while (ddi.offset < max_offset &&
5613 				sizeof(dump_dongle_out_t) + ddo->n_bytes < (uint)len) {
5614 				*p++ = si_corereg(bus->sih, ddi.index, ddi.offset, 0, 0);
5615 				ddi.offset += sizeof(uint32);
5616 				ddo->n_bytes += sizeof(uint32);
5617 			}
5618 			break;
5619 		default:
5620 			// TODO: implement d11 SHM/TPL dumping
5621 			bcmerror = BCME_BADARG;
5622 			break;
5623 		}
5624 		break;
5625 	}
5626 
5627 	/* Debug related. Returns a string with dongle capabilities */
5628 	case IOV_GVAL(IOV_DNGL_CAPS):
5629 	{
5630 		strncpy(arg, bus->dhd->fw_capabilities,
5631 			MIN(strlen(bus->dhd->fw_capabilities), (size_t)len));
5632 		((char*)arg)[len - 1] = '\0';
5633 		break;
5634 	}
5635 
5636 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
5637 	case IOV_SVAL(IOV_GDB_SERVER):
5638 		/* debugger_*() functions may sleep, so cannot hold spinlock */
5639 		DHD_PERIM_UNLOCK(bus->dhd);
5640 		if (int_val > 0) {
5641 			debugger_init((void *) bus, &bus_ops, int_val, SI_ENUM_BASE(bus->sih));
5642 		} else {
5643 			debugger_close();
5644 		}
5645 		DHD_PERIM_LOCK(bus->dhd);
5646 		break;
5647 #endif /* DEBUGGER || DHD_DSCOPE */
5648 
5649 #ifdef BCM_BUZZZ
5650 	/* Dump dongle side buzzz trace to console */
5651 	case IOV_GVAL(IOV_BUZZZ_DUMP):
5652 		bcmerror = dhd_buzzz_dump_dngl(bus);
5653 		break;
5654 #endif /* BCM_BUZZZ */
5655 
5656 	case IOV_SVAL(IOV_SET_DOWNLOAD_STATE):
5657 		bcmerror = dhdpcie_bus_download_state(bus, bool_val);
5658 		break;
5659 
5660 	case IOV_GVAL(IOV_RAMSIZE):
5661 		int_val = (int32)bus->ramsize;
5662 		bcopy(&int_val, arg, val_size);
5663 		break;
5664 
5665 	case IOV_SVAL(IOV_RAMSIZE):
5666 		bus->ramsize = int_val;
5667 		bus->orig_ramsize = int_val;
5668 		break;
5669 
5670 	case IOV_GVAL(IOV_RAMSTART):
5671 		int_val = (int32)bus->dongle_ram_base;
5672 		bcopy(&int_val, arg, val_size);
5673 		break;
5674 
5675 	case IOV_GVAL(IOV_CC_NVMSHADOW):
5676 	{
5677 		struct bcmstrbuf dump_b;
5678 
5679 		bcm_binit(&dump_b, arg, len);
5680 		bcmerror = dhdpcie_cc_nvmshadow(bus, &dump_b);
5681 		break;
5682 	}
5683 
5684 	case IOV_GVAL(IOV_SLEEP_ALLOWED):
5685 		bool_val = bus->sleep_allowed;
5686 		bcopy(&bool_val, arg, val_size);
5687 		break;
5688 
5689 	case IOV_SVAL(IOV_SLEEP_ALLOWED):
5690 		bus->sleep_allowed = bool_val;
5691 		break;
5692 
5693 	case IOV_GVAL(IOV_DONGLEISOLATION):
5694 		int_val = bus->dhd->dongle_isolation;
5695 		bcopy(&int_val, arg, val_size);
5696 		break;
5697 
5698 	case IOV_SVAL(IOV_DONGLEISOLATION):
5699 		bus->dhd->dongle_isolation = bool_val;
5700 		break;
5701 
5702 	case IOV_GVAL(IOV_LTRSLEEPON_UNLOOAD):
5703 		int_val = bus->ltrsleep_on_unload;
5704 		bcopy(&int_val, arg, val_size);
5705 		break;
5706 
5707 	case IOV_SVAL(IOV_LTRSLEEPON_UNLOOAD):
5708 		bus->ltrsleep_on_unload = bool_val;
5709 		break;
5710 
5711 	case IOV_GVAL(IOV_DUMP_RINGUPD_BLOCK):
5712 	{
5713 		struct bcmstrbuf dump_b;
5714 		bcm_binit(&dump_b, arg, len);
5715 		bcmerror = dhd_prot_ringupd_dump(bus->dhd, &dump_b);
5716 		break;
5717 	}
5718 	case IOV_GVAL(IOV_DMA_RINGINDICES):
5719 	{
5720 		int_val = dhdpcie_get_dma_ring_indices(bus->dhd);
5721 		bcopy(&int_val, arg, sizeof(int_val));
5722 		break;
5723 	}
5724 	case IOV_SVAL(IOV_DMA_RINGINDICES):
5725 		bcmerror = dhdpcie_set_dma_ring_indices(bus->dhd, int_val);
5726 		break;
5727 
5728 	case IOV_GVAL(IOV_METADATA_DBG):
5729 		int_val = dhd_prot_metadata_dbg_get(bus->dhd);
5730 		bcopy(&int_val, arg, val_size);
5731 		break;
5732 	case IOV_SVAL(IOV_METADATA_DBG):
5733 		dhd_prot_metadata_dbg_set(bus->dhd, (int_val != 0));
5734 		break;
5735 
5736 	case IOV_GVAL(IOV_RX_METADATALEN):
5737 		int_val = dhd_prot_metadatalen_get(bus->dhd, TRUE);
5738 		bcopy(&int_val, arg, val_size);
5739 		break;
5740 
5741 	case IOV_SVAL(IOV_RX_METADATALEN):
5742 		if (int_val > 64) {
5743 			bcmerror = BCME_BUFTOOLONG;
5744 			break;
5745 		}
5746 		dhd_prot_metadatalen_set(bus->dhd, int_val, TRUE);
5747 		break;
5748 
5749 	case IOV_SVAL(IOV_TXP_THRESHOLD):
5750 		dhd_prot_txp_threshold(bus->dhd, TRUE, int_val);
5751 		break;
5752 
5753 	case IOV_GVAL(IOV_TXP_THRESHOLD):
5754 		int_val = dhd_prot_txp_threshold(bus->dhd, FALSE, int_val);
5755 		bcopy(&int_val, arg, val_size);
5756 		break;
5757 
5758 	case IOV_SVAL(IOV_DB1_FOR_MB):
5759 		if (int_val)
5760 			bus->db1_for_mb = TRUE;
5761 		else
5762 			bus->db1_for_mb = FALSE;
5763 		break;
5764 
5765 	case IOV_GVAL(IOV_DB1_FOR_MB):
5766 		if (bus->db1_for_mb)
5767 			int_val = 1;
5768 		else
5769 			int_val = 0;
5770 		bcopy(&int_val, arg, val_size);
5771 		break;
5772 
5773 	case IOV_GVAL(IOV_TX_METADATALEN):
5774 		int_val = dhd_prot_metadatalen_get(bus->dhd, FALSE);
5775 		bcopy(&int_val, arg, val_size);
5776 		break;
5777 
5778 	case IOV_SVAL(IOV_TX_METADATALEN):
5779 		if (int_val > 64) {
5780 			bcmerror = BCME_BUFTOOLONG;
5781 			break;
5782 		}
5783 		dhd_prot_metadatalen_set(bus->dhd, int_val, FALSE);
5784 		break;
5785 
5786 	case IOV_SVAL(IOV_DEVRESET):
5787 		switch (int_val) {
5788 			case DHD_BUS_DEVRESET_ON:
5789 				bcmerror = dhd_bus_devreset(bus->dhd, (uint8)int_val);
5790 				break;
5791 			case DHD_BUS_DEVRESET_OFF:
5792 				bcmerror = dhd_bus_devreset(bus->dhd, (uint8)int_val);
5793 				break;
5794 			case DHD_BUS_DEVRESET_FLR:
5795 				bcmerror = dhd_bus_perform_flr(bus, bus->flr_force_fail);
5796 				break;
5797 			case DHD_BUS_DEVRESET_FLR_FORCE_FAIL:
5798 				bus->flr_force_fail = TRUE;
5799 				break;
5800 			default:
5801 				DHD_ERROR(("%s: invalid argument for devreset\n", __FUNCTION__));
5802 				break;
5803 		}
5804 		break;
5805 	case IOV_SVAL(IOV_FORCE_FW_TRAP):
5806 		if (bus->dhd->busstate == DHD_BUS_DATA)
5807 			dhdpcie_fw_trap(bus);
5808 		else {
5809 			DHD_ERROR(("%s: Bus is NOT up\n", __FUNCTION__));
5810 			bcmerror = BCME_NOTUP;
5811 		}
5812 		break;
5813 	case IOV_GVAL(IOV_FLOW_PRIO_MAP):
5814 		int_val = bus->dhd->flow_prio_map_type;
5815 		bcopy(&int_val, arg, val_size);
5816 		break;
5817 
5818 	case IOV_SVAL(IOV_FLOW_PRIO_MAP):
5819 		int_val = (int32)dhd_update_flow_prio_map(bus->dhd, (uint8)int_val);
5820 		bcopy(&int_val, arg, val_size);
5821 		break;
5822 
5823 	case IOV_GVAL(IOV_TXBOUND):
5824 		int_val = (int32)dhd_txbound;
5825 		bcopy(&int_val, arg, val_size);
5826 		break;
5827 
5828 	case IOV_SVAL(IOV_TXBOUND):
5829 		dhd_txbound = (uint)int_val;
5830 		break;
5831 
5832 	case IOV_SVAL(IOV_H2D_MAILBOXDATA):
5833 		dhdpcie_send_mb_data(bus, (uint)int_val);
5834 		break;
5835 
5836 	case IOV_SVAL(IOV_INFORINGS):
5837 		dhd_prot_init_info_rings(bus->dhd);
5838 		break;
5839 
5840 	case IOV_SVAL(IOV_H2D_PHASE):
5841 		if (bus->dhd->busstate != DHD_BUS_DOWN) {
5842 			DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
5843 				__FUNCTION__));
5844 			bcmerror = BCME_NOTDOWN;
5845 			break;
5846 		}
5847 		if (int_val)
5848 			bus->dhd->h2d_phase_supported = TRUE;
5849 		else
5850 			bus->dhd->h2d_phase_supported = FALSE;
5851 		break;
5852 
5853 	case IOV_GVAL(IOV_H2D_PHASE):
5854 		int_val = (int32) bus->dhd->h2d_phase_supported;
5855 		bcopy(&int_val, arg, val_size);
5856 		break;
5857 
5858 	case IOV_SVAL(IOV_H2D_ENABLE_TRAP_BADPHASE):
5859 		if (bus->dhd->busstate != DHD_BUS_DOWN) {
5860 			DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
5861 				__FUNCTION__));
5862 			bcmerror = BCME_NOTDOWN;
5863 			break;
5864 		}
5865 		if (int_val)
5866 			bus->dhd->force_dongletrap_on_bad_h2d_phase = TRUE;
5867 		else
5868 			bus->dhd->force_dongletrap_on_bad_h2d_phase = FALSE;
5869 		break;
5870 
5871 	case IOV_GVAL(IOV_H2D_ENABLE_TRAP_BADPHASE):
5872 		int_val = (int32) bus->dhd->force_dongletrap_on_bad_h2d_phase;
5873 		bcopy(&int_val, arg, val_size);
5874 		break;
5875 
5876 	case IOV_SVAL(IOV_H2D_TXPOST_MAX_ITEM):
5877 		if (bus->dhd->busstate != DHD_BUS_DOWN) {
5878 			DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
5879 				__FUNCTION__));
5880 			bcmerror = BCME_NOTDOWN;
5881 			break;
5882 		}
5883 		dhd_prot_set_h2d_max_txpost(bus->dhd, (uint16)int_val);
5884 		break;
5885 
5886 	case IOV_GVAL(IOV_H2D_TXPOST_MAX_ITEM):
5887 		int_val = dhd_prot_get_h2d_max_txpost(bus->dhd);
5888 		bcopy(&int_val, arg, val_size);
5889 		break;
5890 
5891 	case IOV_GVAL(IOV_RXBOUND):
5892 		int_val = (int32)dhd_rxbound;
5893 		bcopy(&int_val, arg, val_size);
5894 		break;
5895 
5896 	case IOV_SVAL(IOV_RXBOUND):
5897 		dhd_rxbound = (uint)int_val;
5898 		break;
5899 
5900 	case IOV_GVAL(IOV_TRAPDATA):
5901 	{
5902 		struct bcmstrbuf dump_b;
5903 		bcm_binit(&dump_b, arg, len);
5904 		bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, FALSE);
5905 		break;
5906 	}
5907 
5908 	case IOV_GVAL(IOV_TRAPDATA_RAW):
5909 	{
5910 		struct bcmstrbuf dump_b;
5911 		bcm_binit(&dump_b, arg, len);
5912 		bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, TRUE);
5913 		break;
5914 	}
5915 	case IOV_SVAL(IOV_HANGREPORT):
5916 		bus->dhd->hang_report = bool_val;
5917 		DHD_ERROR(("%s: Set hang_report as %d\n",
5918 			__FUNCTION__, bus->dhd->hang_report));
5919 		break;
5920 
5921 	case IOV_GVAL(IOV_HANGREPORT):
5922 		int_val = (int32)bus->dhd->hang_report;
5923 		bcopy(&int_val, arg, val_size);
5924 		break;
5925 
5926 	case IOV_SVAL(IOV_CTO_PREVENTION):
5927 		bcmerror = dhdpcie_cto_init(bus, bool_val);
5928 		break;
5929 
5930 	case IOV_GVAL(IOV_CTO_PREVENTION):
5931 		if (bus->sih->buscorerev < 19) {
5932 			bcmerror = BCME_UNSUPPORTED;
5933 			break;
5934 		}
5935 		int_val = (int32)bus->cto_enable;
5936 		bcopy(&int_val, arg, val_size);
5937 		break;
5938 
5939 	case IOV_SVAL(IOV_CTO_THRESHOLD):
5940 		{
5941 			if (bus->sih->buscorerev < 19) {
5942 				bcmerror = BCME_UNSUPPORTED;
5943 				break;
5944 			}
5945 			bus->cto_threshold = (uint32)int_val;
5946 		}
5947 		break;
5948 
5949 	case IOV_GVAL(IOV_CTO_THRESHOLD):
5950 		if (bus->sih->buscorerev < 19) {
5951 			bcmerror = BCME_UNSUPPORTED;
5952 			break;
5953 		}
5954 		if (bus->cto_threshold)
5955 			int_val = (int32)bus->cto_threshold;
5956 		else
5957 			int_val = (int32)PCIE_CTO_TO_THRESH_DEFAULT;
5958 
5959 		bcopy(&int_val, arg, val_size);
5960 		break;
5961 
5962 	case IOV_SVAL(IOV_PCIE_WD_RESET):
5963 		if (bool_val) {
5964 			/* Legacy chipcommon watchdog reset */
5965 			dhdpcie_cc_watchdog_reset(bus);
5966 		}
5967 		break;
5968 
5969 	case IOV_GVAL(IOV_HWA_ENAB_BMAP):
5970 		int_val = bus->hwa_enab_bmap;
5971 		bcopy(&int_val, arg, val_size);
5972 		break;
5973 	case IOV_SVAL(IOV_HWA_ENAB_BMAP):
5974 		bus->hwa_enab_bmap = (uint8)int_val;
5975 		break;
5976 	case IOV_GVAL(IOV_IDMA_ENABLE):
5977 		int_val = bus->idma_enabled;
5978 		bcopy(&int_val, arg, val_size);
5979 		break;
5980 	case IOV_SVAL(IOV_IDMA_ENABLE):
5981 		bus->idma_enabled = (bool)int_val;
5982 		break;
5983 	case IOV_GVAL(IOV_IFRM_ENABLE):
5984 		int_val = bus->ifrm_enabled;
5985 		bcopy(&int_val, arg, val_size);
5986 		break;
5987 	case IOV_SVAL(IOV_IFRM_ENABLE):
5988 		bus->ifrm_enabled = (bool)int_val;
5989 		break;
5990 	case IOV_GVAL(IOV_CLEAR_RING):
5991 		bcopy(&int_val, arg, val_size);
5992 		dhd_flow_rings_flush(bus->dhd, 0);
5993 		break;
5994 	case IOV_GVAL(IOV_DAR_ENABLE):
5995 		int_val = bus->dar_enabled;
5996 		bcopy(&int_val, arg, val_size);
5997 		break;
5998 	case IOV_SVAL(IOV_DAR_ENABLE):
5999 		bus->dar_enabled = (bool)int_val;
6000 		break;
6001 	case IOV_GVAL(IOV_HSCBSIZE):
6002 		bcmerror = dhd_get_hscb_info(bus->dhd, NULL, (uint32 *)arg);
6003 		break;
6004 
6005 #ifdef DHD_HP2P
6006 	case IOV_SVAL(IOV_HP2P_ENABLE):
6007 		dhd_prot_hp2p_enable(bus->dhd, TRUE, int_val);
6008 		break;
6009 
6010 	case IOV_GVAL(IOV_HP2P_ENABLE):
6011 		int_val = dhd_prot_hp2p_enable(bus->dhd, FALSE, int_val);
6012 		bcopy(&int_val, arg, val_size);
6013 		break;
6014 
6015 	case IOV_SVAL(IOV_HP2P_PKT_THRESHOLD):
6016 		dhd_prot_pkt_threshold(bus->dhd, TRUE, int_val);
6017 		break;
6018 
6019 	case IOV_GVAL(IOV_HP2P_PKT_THRESHOLD):
6020 		int_val = dhd_prot_pkt_threshold(bus->dhd, FALSE, int_val);
6021 		bcopy(&int_val, arg, val_size);
6022 		break;
6023 
6024 	case IOV_SVAL(IOV_HP2P_TIME_THRESHOLD):
6025 		dhd_prot_time_threshold(bus->dhd, TRUE, int_val);
6026 		break;
6027 
6028 	case IOV_GVAL(IOV_HP2P_TIME_THRESHOLD):
6029 		int_val = dhd_prot_time_threshold(bus->dhd, FALSE, int_val);
6030 		bcopy(&int_val, arg, val_size);
6031 		break;
6032 
6033 	case IOV_SVAL(IOV_HP2P_PKT_EXPIRY):
6034 		dhd_prot_pkt_expiry(bus->dhd, TRUE, int_val);
6035 		break;
6036 
6037 	case IOV_GVAL(IOV_HP2P_PKT_EXPIRY):
6038 		int_val = dhd_prot_pkt_expiry(bus->dhd, FALSE, int_val);
6039 		bcopy(&int_val, arg, val_size);
6040 		break;
6041 	case IOV_SVAL(IOV_HP2P_TXCPL_MAXITEMS):
6042 		if (bus->dhd->busstate != DHD_BUS_DOWN) {
6043 			return BCME_NOTDOWN;
6044 		}
6045 		dhd_bus_set_hp2p_ring_max_size(bus, TRUE, int_val);
6046 		break;
6047 
6048 	case IOV_GVAL(IOV_HP2P_TXCPL_MAXITEMS):
6049 		int_val = dhd_bus_get_hp2p_ring_max_size(bus, TRUE);
6050 		bcopy(&int_val, arg, val_size);
6051 		break;
6052 	case IOV_SVAL(IOV_HP2P_RXCPL_MAXITEMS):
6053 		if (bus->dhd->busstate != DHD_BUS_DOWN) {
6054 			return BCME_NOTDOWN;
6055 		}
6056 		dhd_bus_set_hp2p_ring_max_size(bus, FALSE, int_val);
6057 		break;
6058 
6059 	case IOV_GVAL(IOV_HP2P_RXCPL_MAXITEMS):
6060 		int_val = dhd_bus_get_hp2p_ring_max_size(bus, FALSE);
6061 		bcopy(&int_val, arg, val_size);
6062 		break;
6063 #endif /* DHD_HP2P */
6064 	case IOV_SVAL(IOV_EXTDTXS_IN_TXCPL):
6065 		if (bus->dhd->busstate != DHD_BUS_DOWN) {
6066 			return BCME_NOTDOWN;
6067 		}
6068 		if (int_val)
6069 			bus->dhd->extdtxs_in_txcpl = TRUE;
6070 		else
6071 			bus->dhd->extdtxs_in_txcpl = FALSE;
6072 		break;
6073 
6074 	case IOV_GVAL(IOV_EXTDTXS_IN_TXCPL):
6075 		int_val = bus->dhd->extdtxs_in_txcpl;
6076 		bcopy(&int_val, arg, val_size);
6077 		break;
6078 
6079 	case IOV_SVAL(IOV_HOSTRDY_AFTER_INIT):
6080 		if (bus->dhd->busstate != DHD_BUS_DOWN) {
6081 			return BCME_NOTDOWN;
6082 		}
6083 		if (int_val)
6084 			bus->dhd->hostrdy_after_init = TRUE;
6085 		else
6086 			bus->dhd->hostrdy_after_init = FALSE;
6087 		break;
6088 
6089 	case IOV_GVAL(IOV_HOSTRDY_AFTER_INIT):
6090 		int_val = bus->dhd->hostrdy_after_init;
6091 		bcopy(&int_val, arg, val_size);
6092 		break;
6093 
6094 	default:
6095 		bcmerror = BCME_UNSUPPORTED;
6096 		break;
6097 	}
6098 
6099 exit:
6100 	return bcmerror;
6101 } /* dhdpcie_bus_doiovar */
6102 
6103 /** Transfers bytes from host to dongle using PIO mode */
6104 static int
6105 dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 len)
6106 {
6107 	if (bus->dhd == NULL) {
6108 		DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
6109 		return 0;
6110 	}
6111 	if (bus->dhd->prot == NULL) {
6112 		DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
6113 		return 0;
6114 	}
6115 	if (bus->dhd->busstate != DHD_BUS_DATA) {
6116 		DHD_ERROR(("%s: bus not in a ready state for LPBK\n", __FUNCTION__));
6117 		return 0;
6118 	}
6119 	dhdmsgbuf_lpbk_req(bus->dhd, len);
6120 	return 0;
6121 }
6122 
6123 void
6124 dhd_bus_dump_dar_registers(struct dhd_bus *bus)
6125 {
6126 	uint32 dar_clk_ctrl_val, dar_pwr_ctrl_val, dar_intstat_val,
6127 		dar_errlog_val, dar_erraddr_val, dar_pcie_mbint_val;
6128 	uint32 dar_clk_ctrl_reg, dar_pwr_ctrl_reg, dar_intstat_reg,
6129 		dar_errlog_reg, dar_erraddr_reg, dar_pcie_mbint_reg;
6130 
6131 	if (bus->is_linkdown && !bus->cto_triggered) {
6132 		DHD_ERROR(("%s: link is down\n", __FUNCTION__));
6133 		return;
6134 	}
6135 
6136 	dar_clk_ctrl_reg = (uint32)DAR_CLK_CTRL(bus->sih->buscorerev);
6137 	dar_pwr_ctrl_reg = (uint32)DAR_PCIE_PWR_CTRL(bus->sih->buscorerev);
6138 	dar_intstat_reg = (uint32)DAR_INTSTAT(bus->sih->buscorerev);
6139 	dar_errlog_reg = (uint32)DAR_ERRLOG(bus->sih->buscorerev);
6140 	dar_erraddr_reg = (uint32)DAR_ERRADDR(bus->sih->buscorerev);
6141 	dar_pcie_mbint_reg = (uint32)DAR_PCIMailBoxInt(bus->sih->buscorerev);
6142 
6143 	if (bus->sih->buscorerev < 24) {
6144 		DHD_ERROR(("%s: DAR not supported for corerev(%d) < 24\n",
6145 			__FUNCTION__, bus->sih->buscorerev));
6146 		return;
6147 	}
6148 
6149 	dar_clk_ctrl_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_clk_ctrl_reg, 0, 0);
6150 	dar_pwr_ctrl_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_pwr_ctrl_reg, 0, 0);
6151 	dar_intstat_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_intstat_reg, 0, 0);
6152 	dar_errlog_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_errlog_reg, 0, 0);
6153 	dar_erraddr_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_erraddr_reg, 0, 0);
6154 	dar_pcie_mbint_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_pcie_mbint_reg, 0, 0);
6155 
6156 	DHD_ERROR(("%s: dar_clk_ctrl(0x%x:0x%x) dar_pwr_ctrl(0x%x:0x%x) dar_intstat(0x%x:0x%x)\n",
6157 		__FUNCTION__, dar_clk_ctrl_reg, dar_clk_ctrl_val,
6158 		dar_pwr_ctrl_reg, dar_pwr_ctrl_val, dar_intstat_reg, dar_intstat_val));
6159 
6160 	DHD_ERROR(("%s: dar_errlog(0x%x:0x%x) dar_erraddr(0x%x:0x%x) dar_pcie_mbint(0x%x:0x%x)\n",
6161 		__FUNCTION__, dar_errlog_reg, dar_errlog_val,
6162 		dar_erraddr_reg, dar_erraddr_val, dar_pcie_mbint_reg, dar_pcie_mbint_val));
6163 }
6164 
6165 /* Ring DoorBell1 to indicate Hostready i.e. D3 Exit */
6166 void
6167 dhd_bus_hostready(struct dhd_bus *bus)
6168 {
6169 	if (!bus->dhd->d2h_hostrdy_supported) {
6170 		return;
6171 	}
6172 
6173 	if (bus->is_linkdown) {
6174 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
6175 		return;
6176 	}
6177 
6178 	DHD_ERROR(("%s : Read PCICMD Reg: 0x%08X\n", __FUNCTION__,
6179 		dhd_pcie_config_read(bus->osh, PCI_CFG_CMD, sizeof(uint32))));
6180 
6181 	if (DAR_PWRREQ(bus)) {
6182 		dhd_bus_pcie_pwr_req(bus);
6183 	}
6184 
6185 	dhd_bus_dump_dar_registers(bus);
6186 
6187 	si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db1_addr_get(bus), ~0, 0x12345678);
6188 	bus->hostready_count++;
6189 	DHD_ERROR(("%s: Ring Hostready:%d\n", __FUNCTION__, bus->hostready_count));
6190 }
6191 
6192 /* Clear INTSTATUS */
6193 void
6194 dhdpcie_bus_clear_intstatus(struct dhd_bus *bus)
6195 {
6196 	uint32 intstatus = 0;
6197 	if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) ||
6198 		(bus->sih->buscorerev == 2)) {
6199 		intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
6200 		dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus);
6201 	} else {
6202 		/* this is a PCIe core register, not a config register */
6203 		intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, 0, 0);
6204 		si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, bus->def_intmask,
6205 			intstatus);
6206 	}
6207 }
6208 
6209 int
6210 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
6211 dhdpcie_bus_suspend(struct dhd_bus *bus, bool state, bool byint)
6212 #else
6213 dhdpcie_bus_suspend(struct dhd_bus *bus, bool state)
6214 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
6215 {
6216 	int timeleft;
6217 	int rc = 0;
6218 	unsigned long flags, flags_bus;
6219 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
6220 	int d3_read_retry = 0;
6221 	uint32 d2h_mb_data = 0;
6222 	uint32 zero = 0;
6223 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
6224 
6225 	printf("%s: state=%d\n", __FUNCTION__, state);
6226 	if (bus->dhd == NULL) {
6227 		DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
6228 		return BCME_ERROR;
6229 	}
6230 	if (bus->dhd->prot == NULL) {
6231 		DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
6232 		return BCME_ERROR;
6233 	}
6234 
6235 	if (dhd_query_bus_erros(bus->dhd)) {
6236 		return BCME_ERROR;
6237 	}
6238 
6239 	DHD_GENERAL_LOCK(bus->dhd, flags);
6240 	if (!(bus->dhd->busstate == DHD_BUS_DATA || bus->dhd->busstate == DHD_BUS_SUSPEND)) {
6241 		DHD_ERROR(("%s: not in a ready state\n", __FUNCTION__));
6242 		DHD_GENERAL_UNLOCK(bus->dhd, flags);
6243 		return BCME_ERROR;
6244 	}
6245 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
6246 	if (bus->dhd->dongle_reset) {
6247 		DHD_ERROR(("Dongle is in reset state.\n"));
6248 		return -EIO;
6249 	}
6250 
6251 	/* Check whether we are already in the requested state.
6252 	 * state=TRUE means Suspend
6253 	 * state=FALSE means Resume
6254 	 */
6255 	if (state == TRUE && bus->dhd->busstate == DHD_BUS_SUSPEND) {
6256 		DHD_ERROR(("Bus is already in SUSPEND state.\n"));
6257 		return BCME_OK;
6258 	} else if (state == FALSE && bus->dhd->busstate == DHD_BUS_DATA) {
6259 		DHD_ERROR(("Bus is already in RESUME state.\n"));
6260 		return BCME_OK;
6261 	}
6262 
6263 	if (state) {
6264 		int idle_retry = 0;
6265 		int active;
6266 
6267 		if (bus->is_linkdown) {
6268 			DHD_ERROR(("%s: PCIe link was down, state=%d\n",
6269 				__FUNCTION__, state));
6270 			return BCME_ERROR;
6271 		}
6272 
6273 		/* Suspend */
6274 		DHD_ERROR(("%s: Entering suspend state\n", __FUNCTION__));
6275 
6276 		bus->dhd->dhd_watchdog_ms_backup = dhd_watchdog_ms;
6277 		if (bus->dhd->dhd_watchdog_ms_backup) {
6278 			DHD_ERROR(("%s: Disabling wdtick before going to suspend\n",
6279 				__FUNCTION__));
6280 			dhd_os_wd_timer(bus->dhd, 0);
6281 		}
6282 
6283 		DHD_GENERAL_LOCK(bus->dhd, flags);
6284 		if (DHD_BUS_BUSY_CHECK_IN_TX(bus->dhd)) {
6285 			DHD_ERROR(("Tx Request is not ended\n"));
6286 			bus->dhd->busstate = DHD_BUS_DATA;
6287 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
6288 			return -EBUSY;
6289 		}
6290 
6291 		bus->last_suspend_start_time = OSL_LOCALTIME_NS();
6292 
6293 		/* stop all interface network queue. */
6294 		dhd_bus_stop_queue(bus);
6295 		DHD_GENERAL_UNLOCK(bus->dhd, flags);
6296 
6297 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
6298 		if (byint) {
6299 			DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
6300 			/* Clear wait_for_d3_ack before sending D3_INFORM */
6301 			bus->wait_for_d3_ack = 0;
6302 			dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
6303 
6304 			timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
6305 			DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
6306 		} else {
6307 			/* Clear wait_for_d3_ack before sending D3_INFORM */
6308 			bus->wait_for_d3_ack = 0;
6309 			dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM | H2D_HOST_ACK_NOINT);
6310 			while (!bus->wait_for_d3_ack && d3_read_retry < MAX_D3_ACK_TIMEOUT) {
6311 				dhdpcie_handle_mb_data(bus);
6312 				usleep_range(1000, 1500);
6313 				d3_read_retry++;
6314 			}
6315 		}
6316 #else
6317 		DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
6318 		/* Clear wait_for_d3_ack before sending D3_INFORM */
6319 		bus->wait_for_d3_ack = 0;
6320 		/*
6321 		 * Send H2D_HOST_D3_INFORM to dongle and mark bus->bus_low_power_state
6322 		 * to DHD_BUS_D3_INFORM_SENT in dhd_prot_ring_write_complete_mbdata
6323 		 * inside atomic context, so that no more DBs will be
6324 		 * rung after sending D3_INFORM
6325 		 */
6326 		dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
6327 
6328 		/* Wait for D3 ACK for D3_ACK_RESP_TIMEOUT seconds */
6329 
6330 		timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
6331 
6332 #ifdef DHD_RECOVER_TIMEOUT
6333 		if (bus->wait_for_d3_ack == 0) {
6334 			/* If wait_for_d3_ack was not updated because D2H MB was not received */
6335 			uint32 intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
6336 				bus->pcie_mailbox_int, 0, 0);
6337 			int host_irq_disabled = dhdpcie_irq_disabled(bus);
6338 			if ((intstatus) && (intstatus != (uint32)-1) &&
6339 				(timeleft == 0) && (!dhd_query_bus_erros(bus->dhd))) {
6340 				DHD_ERROR(("%s: D3 ACK trying again intstatus=%x"
6341 					" host_irq_disabled=%d\n",
6342 					__FUNCTION__, intstatus, host_irq_disabled));
6343 				dhd_pcie_intr_count_dump(bus->dhd);
6344 				dhd_print_tasklet_status(bus->dhd);
6345 				if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6 &&
6346 					!bus->use_mailbox) {
6347 					dhd_prot_process_ctrlbuf(bus->dhd);
6348 				} else {
6349 					dhdpcie_handle_mb_data(bus);
6350 				}
6351 				timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
6352 				/* Clear Interrupts */
6353 				dhdpcie_bus_clear_intstatus(bus);
6354 			}
6355 		} /* bus->wait_for_d3_ack was 0 */
6356 #endif /* DHD_RECOVER_TIMEOUT */
6357 
6358 		DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
6359 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
6360 
6361 		/* To allow threads that got pre-empted to complete.
6362 		 */
6363 		while ((active = dhd_os_check_wakelock_all(bus->dhd)) &&
6364 			(idle_retry < MAX_WKLK_IDLE_CHECK)) {
6365 			OSL_SLEEP(1);
6366 			idle_retry++;
6367 		}
6368 
6369 		if (bus->wait_for_d3_ack) {
6370 			DHD_ERROR(("%s: Got D3 Ack \n", __FUNCTION__));
6371 			/* Got D3 Ack. Suspend the bus */
6372 			if (active) {
6373 				DHD_ERROR(("%s(): Suspend failed because of wakelock; "
6374 					"restoring Dongle to D0\n", __FUNCTION__));
6375 
6376 				if (bus->dhd->dhd_watchdog_ms_backup) {
6377 					DHD_ERROR(("%s: Enabling wdtick due to wakelock active\n",
6378 						__FUNCTION__));
6379 					dhd_os_wd_timer(bus->dhd,
6380 						bus->dhd->dhd_watchdog_ms_backup);
6381 				}
6382 
6383 				/*
6384 				 * The dongle still thinks it has to stay in D3 until
6385 				 * it gets a D0 Inform, but we are backing off from
6386 				 * suspend. Ensure that the dongle is brought back to D0.
6387 				 *
6388 				 * Bringing the dongle from the D3 Ack state back to D0 is
6389 				 * a 2-step process: a D0 Inform sent as a mailbox
6390 				 * interrupt, followed by the hostready doorbell. So we
6391 				 * have to send both.
6392 				 */
6393 
6394 				/* Clear wait_for_d3_ack to send D0_INFORM or host_ready */
6395 				bus->wait_for_d3_ack = 0;
6396 
6397 				DHD_BUS_LOCK(bus->bus_lock, flags_bus);
6398 				bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
6399 				/* Enable back the intmask which was cleared in DPC
6400 				 * after getting D3_ACK.
6401 				 */
6402 				bus->resume_intr_enable_count++;
6403 
6404 				/* For Linux, MacOS etc. (other than NDIS) re-enable the
6405 				 * dongle interrupts using the intmask, and the host
6406 				 * interrupts, both disabled in dhdpcie_bus_isr()->
6407 				 * dhd_bus_handle_d3_ack().
6408 				 */
6409 				/* Enable back interrupt using Intmask!! */
6410 				dhdpcie_bus_intr_enable(bus);
6411 				/* Enable back interrupt from Host side!! */
6412 				dhdpcie_enable_irq(bus);
6413 
6414 				DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
6415 
6416 				if (bus->use_d0_inform) {
6417 					DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
6418 					dhdpcie_send_mb_data(bus,
6419 						(H2D_HOST_D0_INFORM_IN_USE | H2D_HOST_D0_INFORM));
6420 					DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
6421 				}
6422 				/* ring doorbell 1 (hostready) */
6423 				dhd_bus_hostready(bus);
6424 
6425 				DHD_GENERAL_LOCK(bus->dhd, flags);
6426 				bus->dhd->busstate = DHD_BUS_DATA;
6427 				/* resume all interface network queue. */
6428 				dhd_bus_start_queue(bus);
6429 				DHD_GENERAL_UNLOCK(bus->dhd, flags);
6430 				rc = BCME_ERROR;
6431 			} else {
6432 				/* Actual suspend after no wakelock */
6433 				/* At this time bus->bus_low_power_state will have been
6434 				 * set to DHD_BUS_D3_ACK_RECIEVED after receiving D3_ACK
6435 				 * in dhd_bus_handle_d3_ack()
6436 				 */
6437 				if (bus->use_d0_inform &&
6438 					(bus->api.fw_rev < PCIE_SHARED_VERSION_6)) {
6439 					DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
6440 					dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM_IN_USE));
6441 					DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
6442 				}
6443 
6444 #if defined(BCMPCIE_OOB_HOST_WAKE)
6445 				if (bus->dhd->dhd_induce_error == DHD_INDUCE_DROP_OOB_IRQ) {
6446 					DHD_ERROR(("%s: Inducing DROP OOB IRQ\n", __FUNCTION__));
6447 				} else {
6448 					dhdpcie_oob_intr_set(bus, TRUE);
6449 				}
6450 #endif /* BCMPCIE_OOB_HOST_WAKE */
6451 
6452 				DHD_GENERAL_LOCK(bus->dhd, flags);
6453 				/* The Host cannot process interrupts now, so disable them.
6454 				 * No need to disable the dongle INTR using intmask, as we
6455 				 * already disable INTRs from DPC context after
6456 				 * getting D3_ACK in dhd_bus_handle_d3_ack.
6457 				 * Code may not look symmetric between Suspend and
6458 				 * Resume paths but this is done to close down the timing window
6459 				 * between DPC and suspend context and bus->bus_low_power_state
6460 				 * will be set to DHD_BUS_D3_ACK_RECIEVED in DPC.
6461 				 */
6462 				bus->dhd->d3ackcnt_timeout = 0;
6463 				bus->dhd->busstate = DHD_BUS_SUSPEND;
6464 				DHD_GENERAL_UNLOCK(bus->dhd, flags);
6465 				dhdpcie_dump_resource(bus);
6466 				/* Handle Host Suspend */
6467 				rc = dhdpcie_pci_suspend_resume(bus, state);
6468 				if (!rc) {
6469 					bus->last_suspend_end_time = OSL_LOCALTIME_NS();
6470 				}
6471 			}
6472 		} else if (timeleft == 0) { /* D3 ACK Timeout */
6473 #ifdef DHD_FW_COREDUMP
6474 			uint32 cur_memdump_mode = bus->dhd->memdump_enabled;
6475 #endif /* DHD_FW_COREDUMP */
6476 
6477 			/* check if the D3 ACK timeout is due to a scheduling issue */
6478 			bus->dhd->is_sched_error = !dhd_query_bus_erros(bus->dhd) &&
6479 				bus->isr_entry_time > bus->last_d3_inform_time &&
6480 				dhd_bus_query_dpc_sched_errors(bus->dhd);
6481 			bus->dhd->d3ack_timeout_occured = TRUE;
6482 			/* The D3 Ack has timed out */
6483 			bus->dhd->d3ackcnt_timeout++;
6484 			DHD_ERROR(("%s: resumed on timeout for D3 ACK%s d3_inform_cnt %d\n",
6485 				__FUNCTION__, bus->dhd->is_sched_error ?
6486 				" due to scheduling problem" : "", bus->dhd->d3ackcnt_timeout));
6487 #if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
6488 			if (bus->dhd->is_sched_error && cur_memdump_mode == DUMP_MEMFILE_BUGON) {
6489 				/* change g_assert_type to trigger Kernel panic */
6490 				g_assert_type = 2;
6491 				/* use ASSERT() to trigger panic */
6492 				ASSERT(0);
6493 			}
6494 #endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */
6495 			DHD_BUS_LOCK(bus->bus_lock, flags_bus);
6496 			bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
6497 			DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
6498 			DHD_GENERAL_LOCK(bus->dhd, flags);
6499 			bus->dhd->busstate = DHD_BUS_DATA;
6500 			/* resume all interface network queue. */
6501 			dhd_bus_start_queue(bus);
6502 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
6503 			if (!bus->dhd->dongle_trap_occured &&
6504 				!bus->is_linkdown &&
6505 				!bus->cto_triggered) {
6506 				uint32 intstatus = 0;
6507 
6508 				/* Check if PCIe bus status is valid */
6509 				intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
6510 					bus->pcie_mailbox_int, 0, 0);
6511 				if (intstatus == (uint32)-1) {
6512 					/* Invalidate PCIe bus status */
6513 					bus->is_linkdown = 1;
6514 				}
6515 
6516 				dhd_bus_dump_console_buffer(bus);
6517 				dhd_prot_debug_info_print(bus->dhd);
6518 #ifdef DHD_FW_COREDUMP
6519 				if (cur_memdump_mode) {
6520 					/* write core dump to file */
6521 					bus->dhd->memdump_type = DUMP_TYPE_D3_ACK_TIMEOUT;
6522 					dhdpcie_mem_dump(bus);
6523 				}
6524 #endif /* DHD_FW_COREDUMP */
6525 
6526 				DHD_ERROR(("%s: Event HANG send up due to D3_ACK timeout\n",
6527 					__FUNCTION__));
6528 				dhd_os_check_hang(bus->dhd, 0, -ETIMEDOUT);
6529 			}
6530 #if defined(DHD_ERPOM)
6531 			dhd_schedule_reset(bus->dhd);
6532 #endif // endif
6533 			rc = -ETIMEDOUT;
6534 		}
6535 	} else {
6536 		/* Resume */
6537 		DHD_ERROR(("%s: Entering resume state\n", __FUNCTION__));
6538 		bus->last_resume_start_time = OSL_LOCALTIME_NS();
6539 
6540 		/**
6541 		 * PCIE2_BAR0_CORE2_WIN gets reset after D3 cold.
6542 		 * si_backplane_access() (the backplane read/write function)
6543 		 * updates the window (PCIE2_BAR0_CORE2_WIN) only if the
6544 		 * window being accessed is different from the window
6545 		 * pointed to by second_bar0win.
6546 		 * Since PCIE2_BAR0_CORE2_WIN was reset by D3 cold,
6547 		 * invalidating second_bar0win after resume ensures that
6548 		 * PCIE2_BAR0_CORE2_WIN is updated with the right window.
6549 		 */
6550 		si_invalidate_second_bar0win(bus->sih);
6551 #if defined(BCMPCIE_OOB_HOST_WAKE)
6552 		DHD_OS_OOB_IRQ_WAKE_UNLOCK(bus->dhd);
6553 #endif /* BCMPCIE_OOB_HOST_WAKE */
6554 		rc = dhdpcie_pci_suspend_resume(bus, state);
6555 		dhdpcie_dump_resource(bus);
6556 
6557 		DHD_BUS_LOCK(bus->bus_lock, flags_bus);
6558 		/* set bus_low_power_state to DHD_BUS_NO_LOW_POWER_STATE */
6559 		bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
6560 		DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
6561 
6562 		if (!rc && bus->dhd->busstate == DHD_BUS_SUSPEND) {
6563 			if (bus->use_d0_inform) {
6564 				DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
6565 				dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM));
6566 				DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
6567 			}
6568 			/* ring doorbell 1 (hostready) */
6569 			dhd_bus_hostready(bus);
6570 		}
6571 		DHD_GENERAL_LOCK(bus->dhd, flags);
6572 		bus->dhd->busstate = DHD_BUS_DATA;
6573 		/* resume all interface network queue. */
6574 		dhd_bus_start_queue(bus);
6575 
6576 		/* TODO: for NDIS also we need to use enable_irq in future */
6577 		bus->resume_intr_enable_count++;
6578 
6579 		/* For Linux, MacOS etc. (other than NDIS) re-enable the dongle
6580 		 * interrupts using the intmask, and the host interrupts, both of
6581 		 * which were disabled in dhdpcie_bus_isr()->dhd_bus_handle_d3_ack().
6582 		 */
6583 		dhdpcie_bus_intr_enable(bus); /* Enable back interrupt using Intmask!! */
6584 		dhdpcie_enable_irq(bus); /* Enable back interrupt from Host side!! */
6585 
6586 		DHD_GENERAL_UNLOCK(bus->dhd, flags);
6587 
6588 		if (bus->dhd->dhd_watchdog_ms_backup) {
6589 			DHD_ERROR(("%s: Enabling wdtick after resume\n",
6590 				__FUNCTION__));
6591 			dhd_os_wd_timer(bus->dhd, bus->dhd->dhd_watchdog_ms_backup);
6592 		}
6593 
6594 		bus->last_resume_end_time = OSL_LOCALTIME_NS();
6595 		/* Update TCM rd index for EDL ring */
6596 		DHD_EDL_RING_TCM_RD_UPDATE(bus->dhd);
6597 	}
6598 	return rc;
6599 }
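
/*
 * Handshake summary (illustrative): dhdpcie_bus_suspend() implements the
 * host side of the D3/D0 mailbox protocol coded above:
 *
 *	host                                   dongle
 *	----                                   ------
 *	H2D_HOST_D3_INFORM ----------------->  prepare for D3
 *	dhd_os_d3ack_wait() ... <-----------   D3_ACK (D2H mailbox)
 *	dhdpcie_pci_suspend_resume(bus, TRUE)
 *	        ... host sleeps ...
 *	dhdpcie_pci_suspend_resume(bus, FALSE)
 *	H2D_HOST_D0_INFORM ----------------->  (only if bus->use_d0_inform)
 *	doorbell 1 / dhd_bus_hostready() --->  exit D3
 *
 * A D3_ACK timeout is treated as fatal: the console buffer and protocol
 * debug info are dumped, an optional core dump is taken, and a HANG event
 * is sent up (see the timeleft == 0 branch above).
 */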
6600 
6601 uint32
6602 dhdpcie_force_alp(struct dhd_bus *bus, bool enable)
6603 {
6604 	ASSERT(bus && bus->sih);
6605 	if (enable) {
6606 		si_corereg(bus->sih, bus->sih->buscoreidx,
6607 			OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP, CCS_FORCEALP);
6608 	} else {
6609 		si_corereg(bus->sih, bus->sih->buscoreidx,
6610 			OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP, 0);
6611 	}
6612 	return 0;
6613 }
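/*
 * Annotation (not part of the original source): si_corereg() performs a
 * masked read-modify-write on a core register, roughly
 * new = (old & ~mask) | (val & mask), and returns the register value.
 * A mask of 0 therefore makes it a pure read; a mask of ~0 overwrites
 * the whole register. Both idioms are used throughout this file.
 */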
6614 
6615 /* set pcie l1 entry time: dhd pciereg 0x1004[22:16] */
6616 uint32
6617 dhdpcie_set_l1_entry_time(struct dhd_bus *bus, int l1_entry_time)
6618 {
6619 	uint reg_val;
6620 
6621 	ASSERT(bus && bus->sih);
6622 
6623 	si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0,
6624 		0x1004);
6625 	reg_val = si_corereg(bus->sih, bus->sih->buscoreidx,
6626 		OFFSETOF(sbpcieregs_t, configdata), 0, 0);
6627 	reg_val = (reg_val & ~(0x7f << 16)) | ((l1_entry_time & 0x7f) << 16);
6628 	si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), ~0,
6629 		reg_val);
6630 
6631 	return 0;
6632 }
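/*
 * Worked example (annotation, values hypothetical): for l1_entry_time
 * == 0x10 the function above selects PCIe config register 0x1004 through
 * the configaddr/configdata indirection, then clears bits [22:16] and
 * writes 0x10 into them:
 *
 *   reg_val = (reg_val & ~0x007F0000) | (0x10 << 16);
 */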
6633 
6634 static uint32
6635 dhd_apply_d11_war_length(struct dhd_bus *bus, uint32 len, uint32 d11_lpbk)
6636 {
6637 	uint16 chipid = si_chipid(bus->sih);
6638 	if ((chipid == BCM4375_CHIP_ID ||
6639 		chipid == BCM4362_CHIP_ID ||
6640 		chipid == BCM43751_CHIP_ID ||
6641 		chipid == BCM43752_CHIP_ID ||
6642 		chipid == BCM4377_CHIP_ID) &&
6643 		(d11_lpbk != M2M_DMA_LPBK && d11_lpbk != M2M_NON_DMA_LPBK)) {
6644 			len += 8;
6645 	}
6646 	DHD_ERROR(("%s: len %d\n", __FUNCTION__, len));
6647 	return len;
6648 }
6649 
6650 /** Transfers bytes from host to dongle and to host again using DMA */
6651 static int
6652 dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus,
6653 		uint32 len, uint32 srcdelay, uint32 destdelay,
6654 		uint32 d11_lpbk, uint32 core_num, uint32 wait)
6655 {
6656 	int ret = 0;
6657 
6658 	if (bus->dhd == NULL) {
6659 		DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
6660 		return BCME_ERROR;
6661 	}
6662 	if (bus->dhd->prot == NULL) {
6663 		DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
6664 		return BCME_ERROR;
6665 	}
6666 	if (bus->dhd->busstate != DHD_BUS_DATA) {
6667 		DHD_ERROR(("%s: bus is not in a ready state for LPBK\n", __FUNCTION__));
6668 		return BCME_ERROR;
6669 	}
6670 
6671 	if (len < 5 || len > 4194296) {
6672 		DHD_ERROR(("%s: len is too small or too large\n", __FUNCTION__));
6673 		return BCME_ERROR;
6674 	}
6675 
6676 	len = dhd_apply_d11_war_length(bus, len, d11_lpbk);
6677 
6678 	bus->dmaxfer_complete = FALSE;
6679 	ret = dhdmsgbuf_dmaxfer_req(bus->dhd, len, srcdelay, destdelay,
6680 		d11_lpbk, core_num);
6681 	if (ret != BCME_OK || !wait) {
6682 		DHD_INFO(("%s: dmaxfer req returns status %u; wait = %u\n", __FUNCTION__,
6683 				ret, wait));
6684 	} else {
6685 		ret = dhd_os_dmaxfer_wait(bus->dhd, &bus->dmaxfer_complete);
6686 		if (ret < 0)
6687 			ret = BCME_NOTREADY;
6688 	}
6689 
6690 	return ret;
6691 
6692 }
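/*
 * Usage sketch (annotation; the argument values are hypothetical): a
 * blocking 1KB host->dongle->host loopback on DMA core 0, with no
 * artificial source/destination delays:
 *
 *   ret = dhdpcie_bus_dmaxfer_req(bus, 1024, 0, 0, M2M_DMA_LPBK, 0, TRUE);
 *
 * With wait == TRUE the call blocks in dhd_os_dmaxfer_wait() until the
 * completion handler sets bus->dmaxfer_complete.
 */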
6693 
6694 bool
6695 dhd_bus_is_multibp_capable(struct dhd_bus *bus)
6696 {
6697 	return MULTIBP_CAP(bus->sih);
6698 }
6699 
6700 #define PCIE_REV_FOR_4378A0	66	/* dhd_bus_perform_flr_with_quiesce() causes problems */
6701 #define PCIE_REV_FOR_4378B0	68
6702 
6703 static int
6704 dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter)
6705 {
6706 	int bcmerror = 0;
6707 	volatile uint32 *cr4_regs;
6708 	bool do_flr;
6709 
6710 	if (!bus->sih) {
6711 		DHD_ERROR(("%s: NULL sih!!\n", __FUNCTION__));
6712 		return BCME_ERROR;
6713 	}
6714 
6715 	do_flr = ((bus->sih->buscorerev != PCIE_REV_FOR_4378A0) &&
6716 			(bus->sih->buscorerev != PCIE_REV_FOR_4378B0));
6717 
6718 	if (MULTIBP_ENAB(bus->sih) && !do_flr) {
6719 		dhd_bus_pcie_pwr_req(bus);
6720 	}
6721 
6722 	/* To enter download state, disable ARM and reset SOCRAM.
6723 	 * To exit download state, simply reset ARM (default is RAM boot).
6724 	 */
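	/*
	 * Flow summary (annotation): on enter, the ARM core (CA7, CR4 or
	 * CM3/7S) is halted and held while SOCRAM/sysmem is brought up so the
	 * host can write the firmware image; on exit, the NVRAM vars and
	 * length token are written, address 0 is populated with the reset
	 * vector (CA7/CR4), and the ARM is released from reset to boot from
	 * RAM.
	 */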
6725 	if (enter) {
6726 
6727 		/* Make sure BAR1 maps to backplane address 0 */
6728 		dhdpcie_setbar1win(bus, 0x00000000);
6729 		bus->alp_only = TRUE;
6730 
6731 		/* some chips (e.g. 43602) have two ARM cores; the CR4 receives the firmware. */
6732 		cr4_regs = si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
6733 
6734 		if (cr4_regs == NULL && !(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
6735 		    !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) &&
6736 		    !(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
6737 			DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
6738 			bcmerror = BCME_ERROR;
6739 			goto fail;
6740 		}
6741 
6742 		if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
6743 			/* Halt ARM & remove reset */
6744 			si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
6745 			if (!(si_setcore(bus->sih, SYSMEM_CORE_ID, 0))) {
6746 				DHD_ERROR(("%s: Failed to find SYSMEM core!\n", __FUNCTION__));
6747 				bcmerror = BCME_ERROR;
6748 				goto fail;
6749 			}
6750 			si_core_reset(bus->sih, 0, 0);
6751 			/* Reset the last 4 bytes of RAM; to be used for the shared area */
6752 			dhdpcie_init_shared_addr(bus);
6753 		} else if (cr4_regs == NULL) { /* no CR4 present on chip */
6754 			si_core_disable(bus->sih, 0);
6755 
6756 			if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
6757 				DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
6758 				bcmerror = BCME_ERROR;
6759 				goto fail;
6760 			}
6761 
6762 			si_core_reset(bus->sih, 0, 0);
6763 
6764 			/* Clear the top word (last 4 bytes) of memory */
6765 			if (bus->ramsize) {
6766 				uint32 zeros = 0;
6767 				if (dhdpcie_bus_membytes(bus, TRUE, bus->ramsize - 4,
6768 				                     (uint8*)&zeros, 4) < 0) {
6769 					bcmerror = BCME_ERROR;
6770 					goto fail;
6771 				}
6772 			}
6773 		} else {
6774 			/* For CR4,
6775 			 * Halt ARM
6776 			 * Remove ARM reset
6777 			 * Read RAM base address [0x18_0000]
6778 			 * [next] Download firmware
6779 			 * [done at else] Populate the reset vector
6780 			 * [done at else] Remove ARM halt
6781 			 */
6782 			/* Halt ARM & remove reset */
6783 			si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
6784 			if (BCM43602_CHIP(bus->sih->chip)) {
6785 				W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 5);
6786 				W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
6787 				W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 7);
6788 				W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
6789 			}
6790 			/* Reset the last 4 bytes of RAM; to be used for the shared area */
6791 			dhdpcie_init_shared_addr(bus);
6792 		}
6793 	} else {
6794 		if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
6795 			/* write vars */
6796 			if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
6797 				DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
6798 				goto fail;
6799 			}
6800 			/* write random numbers to sysmem for the purpose of
6801 			 * randomizing heap address space.
6802 			 */
6803 			if ((bcmerror = dhdpcie_wrt_rnd(bus)) != BCME_OK) {
6804 				DHD_ERROR(("%s: Failed to get random seed to write to TCM !\n",
6805 					__FUNCTION__));
6806 				goto fail;
6807 			}
6808 			/* switch back to arm core again */
6809 			if (!(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
6810 				DHD_ERROR(("%s: Failed to find ARM CA7 core!\n", __FUNCTION__));
6811 				bcmerror = BCME_ERROR;
6812 				goto fail;
6813 			}
6814 			/* write address 0 with reset instruction */
6815 			bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
6816 				(uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
6817 			/* now remove reset and halt and continue to run CA7 */
6818 		} else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
6819 			if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
6820 				DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
6821 				bcmerror = BCME_ERROR;
6822 				goto fail;
6823 			}
6824 
6825 			if (!si_iscoreup(bus->sih)) {
6826 				DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__));
6827 				bcmerror = BCME_ERROR;
6828 				goto fail;
6829 			}
6830 
6831 			/* Enable remap before ARM reset but after vars.
6832 			 * No backplane access in remap mode
6833 			 */
6834 			if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
6835 			    !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
6836 				DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
6837 				bcmerror = BCME_ERROR;
6838 				goto fail;
6839 			}
6840 
6841 			if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
6842 			    !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
6843 				DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
6844 				bcmerror = BCME_ERROR;
6845 				goto fail;
6846 			}
6847 		} else {
6848 			if (BCM43602_CHIP(bus->sih->chip)) {
6849 				/* Firmware crashes on SOCSRAM access when core is in reset */
6850 				if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
6851 					DHD_ERROR(("%s: Failed to find SOCRAM core!\n",
6852 						__FUNCTION__));
6853 					bcmerror = BCME_ERROR;
6854 					goto fail;
6855 				}
6856 				si_core_reset(bus->sih, 0, 0);
6857 				si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
6858 			}
6859 
6860 			/* write vars */
6861 			if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
6862 				DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
6863 				goto fail;
6864 			}
6865 
6866 			/* write a random number to TCM for the purpose of
6867 			 * randomizing heap address space.
6868 			 */
6869 			if ((bcmerror = dhdpcie_wrt_rnd(bus)) != BCME_OK) {
6870 				DHD_ERROR(("%s: Failed to get random seed to write to TCM !\n",
6871 					__FUNCTION__));
6872 				goto fail;
6873 			}
6874 
6875 			/* switch back to arm core again */
6876 			if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
6877 				DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__));
6878 				bcmerror = BCME_ERROR;
6879 				goto fail;
6880 			}
6881 
6882 			/* write address 0 with reset instruction */
6883 			bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
6884 				(uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
6885 
6886 			if (bcmerror == BCME_OK) {
6887 				uint32 tmp;
6888 
6889 				bcmerror = dhdpcie_bus_membytes(bus, FALSE, 0,
6890 				                                (uint8 *)&tmp, sizeof(tmp));
6891 
6892 				if (bcmerror == BCME_OK && tmp != bus->resetinstr) {
6893 					DHD_ERROR(("%s: Failed to write 0x%08x to addr 0\n",
6894 					          __FUNCTION__, bus->resetinstr));
6895 					DHD_ERROR(("%s: contents of addr 0 is 0x%08x\n",
6896 					          __FUNCTION__, tmp));
6897 					bcmerror = BCME_ERROR;
6898 					goto fail;
6899 				}
6900 			}
6901 
6902 			/* now remove reset and halt and continue to run CR4 */
6903 		}
6904 
6905 		si_core_reset(bus->sih, 0, 0);
6906 
6907 		/* Allow HT Clock now that the ARM is running. */
6908 		bus->alp_only = FALSE;
6909 
6910 		bus->dhd->busstate = DHD_BUS_LOAD;
6911 	}
6912 
6913 fail:
6914 	/* Always return to PCIE core */
6915 	si_setcore(bus->sih, PCIE2_CORE_ID, 0);
6916 
6917 	if (MULTIBP_ENAB(bus->sih) && !do_flr) {
6918 		dhd_bus_pcie_pwr_req_clear(bus);
6919 	}
6920 
6921 	return bcmerror;
6922 } /* dhdpcie_bus_download_state */
6923 
6924 static int
6925 dhdpcie_bus_write_vars(dhd_bus_t *bus)
6926 {
6927 	int bcmerror = 0;
6928 	uint32 varsize, phys_size;
6929 	uint32 varaddr;
6930 	uint8 *vbuffer;
6931 	uint32 varsizew;
6932 #ifdef DHD_DEBUG
6933 	uint8 *nvram_ularray;
6934 #endif /* DHD_DEBUG */
6935 
6936 	/* Even if there are no vars to be written, we still need to set the ramsize. */
6937 	varsize = bus->varsz ? ROUNDUP(bus->varsz, 4) : 0;
6938 	varaddr = (bus->ramsize - 4) - varsize;
6939 
6940 	varaddr += bus->dongle_ram_base;
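	/*
	 * RAM layout sketch (annotation, example values hypothetical; assumes
	 * phys_size == ramsize): with dongle_ram_base 0x180000, ramsize
	 * 0x100000 and varsize 0x200, the vars are packed at the top of RAM
	 * just below the 4-byte length token:
	 *
	 *   0x27FFFC..0x27FFFF  length token (written at the end of this fn)
	 *   0x27FDFC..0x27FFFB  NVRAM vars
	 */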
6941 
6942 	if (bus->vars) {
6943 
6944 		vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize);
6945 		if (!vbuffer)
6946 			return BCME_NOMEM;
6947 
6948 		bzero(vbuffer, varsize);
6949 		bcopy(bus->vars, vbuffer, bus->varsz);
6950 		/* Write the vars list */
6951 		bcmerror = dhdpcie_bus_membytes(bus, TRUE, varaddr, vbuffer, varsize);
6952 
6953 		/* Implement read back and verify later */
6954 #ifdef DHD_DEBUG
6955 		/* Verify NVRAM bytes */
6956 		DHD_INFO(("%s: Compare NVRAM dl & ul; varsize=%d\n", __FUNCTION__, varsize));
6957 		nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize);
6958 		if (!nvram_ularray) {
6959 			MFREE(bus->dhd->osh, vbuffer, varsize);
6960 			return BCME_NOMEM;
6961 		}
6962 
6963 		/* Upload image to verify downloaded contents. */
6964 		memset(nvram_ularray, 0xaa, varsize);
6965 
6966 		/* Read the vars list to temp buffer for comparison */
6967 		bcmerror = dhdpcie_bus_membytes(bus, FALSE, varaddr, nvram_ularray, varsize);
6968 		if (bcmerror) {
6969 			DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n",
6970 				__FUNCTION__, bcmerror, varsize, varaddr));
6971 		}
6972 
6973 		/* Compare the org NVRAM with the one read from RAM */
6974 		if (memcmp(vbuffer, nvram_ularray, varsize)) {
6975 			DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__));
6976 		} else
6977 			DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n",
6978 				__FUNCTION__));
6979 
6980 		MFREE(bus->dhd->osh, nvram_ularray, varsize);
6981 #endif /* DHD_DEBUG */
6982 
6983 		MFREE(bus->dhd->osh, vbuffer, varsize);
6984 	}
6985 
6986 	phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize;
6987 
6988 	phys_size += bus->dongle_ram_base;
6989 
6990 	/* adjust to the user specified RAM */
6991 	DHD_INFO(("%s: Physical memory size: %d, usable memory size: %d\n", __FUNCTION__,
6992 		phys_size, bus->ramsize));
6993 	DHD_INFO(("%s: Vars are at %d, orig varsize is %d\n", __FUNCTION__,
6994 		varaddr, varsize));
6995 	varsize = ((phys_size - 4) - varaddr);
6996 
6997 	/*
6998 	 * Determine the length token:
6999 	 * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits.
7000 	 */
7001 	if (bcmerror) {
7002 		varsizew = 0;
7003 		bus->nvram_csm = varsizew;
7004 	} else {
7005 		varsizew = varsize / 4;
7006 		varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
7007 		bus->nvram_csm = varsizew;
7008 		varsizew = htol32(varsizew);
7009 	}
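	/*
	 * Worked example (annotation): for varsize == 0x200 bytes,
	 * varsizew == 0x80 words, so the token is
	 * (~0x80 << 16) | 0x80 == 0xFF7F0080. The upper halfword is the
	 * one's complement of the lower halfword, which is what makes the
	 * token self-checking.
	 */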
7010 
7011 	DHD_INFO(("%s: New varsize is %d, length token=0x%08x\n", __FUNCTION__, varsize, varsizew));
7012 
7013 	/* Write the length token to the last word */
7014 	bcmerror = dhdpcie_bus_membytes(bus, TRUE, (phys_size - 4),
7015 		(uint8*)&varsizew, 4);
7016 
7017 	return bcmerror;
7018 } /* dhdpcie_bus_write_vars */
7019 
7020 int
7021 dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len)
7022 {
7023 	int bcmerror = BCME_OK;
7024 
7025 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7026 
7027 	/* Basic sanity checks */
7028 	if (bus->dhd->up) {
7029 		bcmerror = BCME_NOTDOWN;
7030 		goto err;
7031 	}
7032 	if (!len) {
7033 		bcmerror = BCME_BUFTOOSHORT;
7034 		goto err;
7035 	}
7036 
7037 	/* Free the old ones and replace with passed variables */
7038 	if (bus->vars)
7039 		MFREE(bus->dhd->osh, bus->vars, bus->varsz);
7040 
7041 	bus->vars = MALLOC(bus->dhd->osh, len);
7042 	bus->varsz = bus->vars ? len : 0;
7043 	if (bus->vars == NULL) {
7044 		bcmerror = BCME_NOMEM;
7045 		goto err;
7046 	}
7047 
7048 	/* Copy the passed variables, which should include the terminating double-null */
7049 	bcopy(arg, bus->vars, bus->varsz);
7050 
7051 #ifdef DHD_USE_SINGLE_NVRAM_FILE
7052 	if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
7053 		char *sp = NULL;
7054 		char *ep = NULL;
7055 		int i;
7056 		char tag[2][8] = {"ccode=", "regrev="};
7057 
7058 		/* Find ccode and regrev info */
7059 		for (i = 0; i < 2; i++) {
7060 			sp = strnstr(bus->vars, tag[i], bus->varsz);
7061 			if (!sp) {
7062 				DHD_ERROR(("%s: Could not find '%s' info in the nvram %s\n",
7063 					__FUNCTION__, tag[i], bus->nv_path));
7064 				bcmerror = BCME_ERROR;
7065 				goto err;
7066 			}
7067 			sp = strchr(sp, '=');
7068 			ep = strchr(sp, '\0');
7069 			/* We assume that the string lengths of both the ccode and
7070 			 * regrev values do not exceed WLC_CNTRY_BUF_SZ
7071 			 */
7072 			if (ep && ((ep - sp) <= WLC_CNTRY_BUF_SZ)) {
7073 				sp++;
7074 				while (*sp != '\0') {
7075 					DHD_INFO(("%s: parse '%s', current sp = '%c'\n",
7076 						__FUNCTION__, tag[i], *sp));
7077 					*sp++ = '0';
7078 				}
7079 			} else {
7080 				DHD_ERROR(("%s: Invalid parameter format when parsing for %s\n",
7081 					__FUNCTION__, tag[i]));
7082 				bcmerror = BCME_ERROR;
7083 				goto err;
7084 			}
7085 		}
7086 	}
7087 #endif /* DHD_USE_SINGLE_NVRAM_FILE */
7088 
7089 err:
7090 	return bcmerror;
7091 }
7092 
7093 /* loop through the capability list and see if the pcie capability exists */
7094 uint8
7095 dhdpcie_find_pci_capability(osl_t *osh, uint8 req_cap_id)
7096 {
7097 	uint8 cap_id;
7098 	uint8 cap_ptr = 0;
7099 	uint8 byte_val;
7100 
7101 	/* check for Header type 0 */
7102 	byte_val = read_pci_cfg_byte(PCI_CFG_HDR);
7103 	if ((byte_val & 0x7f) != PCI_HEADER_NORMAL) {
7104 		DHD_ERROR(("%s : PCI config header not normal.\n", __FUNCTION__));
7105 		goto end;
7106 	}
7107 
7108 	/* check if the capability pointer field exists */
7109 	byte_val = read_pci_cfg_byte(PCI_CFG_STAT);
7110 	if (!(byte_val & PCI_CAPPTR_PRESENT)) {
7111 		DHD_ERROR(("%s : PCI CAP pointer not present.\n", __FUNCTION__));
7112 		goto end;
7113 	}
7114 
7115 	cap_ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR);
7116 	/* check if the capability pointer is 0x00 */
7117 	if (cap_ptr == 0x00) {
7118 		DHD_ERROR(("%s : PCI CAP pointer is 0x00.\n", __FUNCTION__));
7119 		goto end;
7120 	}
7121 
7122 	/* loop through the capability list and see if the requested capability exists */
7123 
7124 	cap_id = read_pci_cfg_byte(cap_ptr);
7125 
7126 	while (cap_id != req_cap_id) {
7127 		cap_ptr = read_pci_cfg_byte((cap_ptr + 1));
7128 		if (cap_ptr == 0x00) break;
7129 		cap_id = read_pci_cfg_byte(cap_ptr);
7130 	}
7131 
7132 end:
7133 	return cap_ptr;
7134 }
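/*
 * Worked example (annotation, offsets hypothetical): to find the PCIe
 * capability (ID 0x10) in a config space where PCI_CFG_CAPPTR points to a
 * capability at 0x40 (ID 0x01, next ptr 0x60) followed by one at 0x60
 * (ID 0x10, next ptr 0x00), the loop reads ID 0x01 at 0x40, follows the
 * next pointer to 0x60, matches ID 0x10 and returns cap_ptr == 0x60.
 */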
7135 
7136 void
7137 dhdpcie_pme_active(osl_t *osh, bool enable)
7138 {
7139 	uint8 cap_ptr;
7140 	uint32 pme_csr;
7141 
7142 	cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);
7143 
7144 	if (!cap_ptr) {
7145 		DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__));
7146 		return;
7147 	}
7148 
7149 	pme_csr = OSL_PCI_READ_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32));
7150 	DHD_ERROR(("%s : pme_sts_ctrl 0x%x\n", __FUNCTION__, pme_csr));
7151 
7152 	pme_csr |= PME_CSR_PME_STAT;
7153 	if (enable) {
7154 		pme_csr |= PME_CSR_PME_EN;
7155 	} else {
7156 		pme_csr &= ~PME_CSR_PME_EN;
7157 	}
7158 
7159 	OSL_PCI_WRITE_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32), pme_csr);
7160 }
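/*
 * Note (annotation): per the PCI PM spec, PME_Status is a write-1-to-clear
 * bit, so OR-ing PME_CSR_PME_STAT into the value written above clears any
 * latched PME status while PME_EN is being changed.
 */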
7161 
7162 bool
7163 dhdpcie_pme_cap(osl_t *osh)
7164 {
7165 	uint8 cap_ptr;
7166 	uint32 pme_cap;
7167 
7168 	cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);
7169 
7170 	if (!cap_ptr) {
7171 		DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__));
7172 		return FALSE;
7173 	}
7174 
7175 	pme_cap = OSL_PCI_READ_CONFIG(osh, cap_ptr, sizeof(uint32));
7176 
7177 	DHD_ERROR(("%s : pme_cap 0x%x\n", __FUNCTION__, pme_cap));
7178 
7179 	return ((pme_cap & PME_CAP_PM_STATES) != 0);
7180 }
7181 
7182 uint32
7183 dhdpcie_lcreg(osl_t *osh, uint32 mask, uint32 val)
7184 {
7185 
7186 	uint8	pcie_cap;
7187 	uint8	lcreg_offset;	/* PCIE capability LCreg offset in the config space */
7188 	uint32	reg_val;
7189 
7190 	pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID);
7191 
7192 	if (!pcie_cap) {
7193 		DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__));
7194 		return 0;
7195 	}
7196 
7197 	lcreg_offset = pcie_cap + PCIE_CAP_LINKCTRL_OFFSET;
7198 
7199 	/* set operation */
7200 	if (mask) {
7201 		/* read */
7202 		reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
7203 
7204 		/* modify */
7205 		reg_val &= ~mask;
7206 		reg_val |= (mask & val);
7207 
7208 		/* write */
7209 		OSL_PCI_WRITE_CONFIG(osh, lcreg_offset, sizeof(uint32), reg_val);
7210 	}
7211 	return OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
7212 }
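/*
 * Usage sketch (annotation; PCIE_ASPM_ENAB/PCIE_ASPM_DISAB are assumed to
 * be the ASPM control masks from pcicfg.h): a mask of 0 gives a pure read
 * of the Link Control register, while a non-zero mask does a
 * read-modify-write, e.g. to disable ASPM (Link Control bits [1:0]):
 *
 *   uint32 lc = dhdpcie_lcreg(osh, 0, 0);
 *   dhdpcie_lcreg(osh, PCIE_ASPM_ENAB, PCIE_ASPM_DISAB);
 */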
7213 
7214 uint8
7215 dhdpcie_clkreq(osl_t *osh, uint32 mask, uint32 val)
7216 {
7217 	uint8	pcie_cap;
7218 	uint32	reg_val;
7219 	uint8	lcreg_offset;	/* PCIE capability LCreg offset in the config space */
7220 
7221 	pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID);
7222 
7223 	if (!pcie_cap) {
7224 		DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__));
7225 		return 0;
7226 	}
7227 
7228 	lcreg_offset = pcie_cap + PCIE_CAP_LINKCTRL_OFFSET;
7229 
7230 	reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
7231 	/* set operation */
7232 	if (mask) {
7233 		if (val)
7234 			reg_val |= PCIE_CLKREQ_ENAB;
7235 		else
7236 			reg_val &= ~PCIE_CLKREQ_ENAB;
7237 		OSL_PCI_WRITE_CONFIG(osh, lcreg_offset, sizeof(uint32), reg_val);
7238 		reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
7239 	}
7240 	if (reg_val & PCIE_CLKREQ_ENAB)
7241 		return 1;
7242 	else
7243 		return 0;
7244 }
7245 
7246 void dhd_dump_intr_counters(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
7247 {
7248 	dhd_bus_t *bus;
7249 	uint64 current_time = OSL_LOCALTIME_NS();
7250 
7251 	if (!dhd) {
7252 		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
7253 		return;
7254 	}
7255 
7256 	bus = dhd->bus;
7257 	if (!bus) {
7258 		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
7259 		return;
7260 	}
7261 
7262 	bcm_bprintf(strbuf, "\n ------- DUMPING INTR enable/disable counters-------\n");
7263 	bcm_bprintf(strbuf, "resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n"
7264 		"isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n"
7265 		"dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n",
7266 		bus->resume_intr_enable_count, bus->dpc_intr_enable_count,
7267 		bus->isr_intr_disable_count, bus->suspend_intr_disable_count,
7268 		bus->dpc_return_busdown_count, bus->non_ours_irq_count);
7269 #ifdef BCMPCIE_OOB_HOST_WAKE
7270 	bcm_bprintf(strbuf, "oob_intr_count=%lu oob_intr_enable_count=%lu"
7271 		" oob_intr_disable_count=%lu\noob_irq_num=%d last_oob_irq_time="SEC_USEC_FMT
7272 		" last_oob_irq_enable_time="SEC_USEC_FMT"\nlast_oob_irq_disable_time="SEC_USEC_FMT
7273 		" oob_irq_enabled=%d oob_gpio_level=%d\n",
7274 		bus->oob_intr_count, bus->oob_intr_enable_count,
7275 		bus->oob_intr_disable_count, dhdpcie_get_oob_irq_num(bus),
7276 		GET_SEC_USEC(bus->last_oob_irq_time), GET_SEC_USEC(bus->last_oob_irq_enable_time),
7277 		GET_SEC_USEC(bus->last_oob_irq_disable_time), dhdpcie_get_oob_irq_status(bus),
7278 		dhdpcie_get_oob_irq_level());
7279 #endif /* BCMPCIE_OOB_HOST_WAKE */
7280 	bcm_bprintf(strbuf, "\ncurrent_time="SEC_USEC_FMT" isr_entry_time="SEC_USEC_FMT
7281 		" isr_exit_time="SEC_USEC_FMT"\ndpc_sched_time="SEC_USEC_FMT
7282 		" last_non_ours_irq_time="SEC_USEC_FMT" dpc_entry_time="SEC_USEC_FMT"\n"
7283 		"last_process_ctrlbuf_time="SEC_USEC_FMT " last_process_flowring_time="SEC_USEC_FMT
7284 		" last_process_txcpl_time="SEC_USEC_FMT"\nlast_process_rxcpl_time="SEC_USEC_FMT
7285 		" last_process_infocpl_time="SEC_USEC_FMT" last_process_edl_time="SEC_USEC_FMT
7286 		"\ndpc_exit_time="SEC_USEC_FMT" resched_dpc_time="SEC_USEC_FMT"\n"
7287 		"last_d3_inform_time="SEC_USEC_FMT"\n",
7288 		GET_SEC_USEC(current_time), GET_SEC_USEC(bus->isr_entry_time),
7289 		GET_SEC_USEC(bus->isr_exit_time), GET_SEC_USEC(bus->dpc_sched_time),
7290 		GET_SEC_USEC(bus->last_non_ours_irq_time), GET_SEC_USEC(bus->dpc_entry_time),
7291 		GET_SEC_USEC(bus->last_process_ctrlbuf_time),
7292 		GET_SEC_USEC(bus->last_process_flowring_time),
7293 		GET_SEC_USEC(bus->last_process_txcpl_time),
7294 		GET_SEC_USEC(bus->last_process_rxcpl_time),
7295 		GET_SEC_USEC(bus->last_process_infocpl_time),
7296 		GET_SEC_USEC(bus->last_process_edl_time),
7297 		GET_SEC_USEC(bus->dpc_exit_time), GET_SEC_USEC(bus->resched_dpc_time),
7298 		GET_SEC_USEC(bus->last_d3_inform_time));
7299 
7300 	bcm_bprintf(strbuf, "\nlast_suspend_start_time="SEC_USEC_FMT" last_suspend_end_time="
7301 		SEC_USEC_FMT" last_resume_start_time="SEC_USEC_FMT" last_resume_end_time="
7302 		SEC_USEC_FMT"\n", GET_SEC_USEC(bus->last_suspend_start_time),
7303 		GET_SEC_USEC(bus->last_suspend_end_time),
7304 		GET_SEC_USEC(bus->last_resume_start_time),
7305 		GET_SEC_USEC(bus->last_resume_end_time));
7306 
7307 #if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE)
7308 		bcm_bprintf(strbuf, "logtrace_thread_entry_time="SEC_USEC_FMT
7309 			" logtrace_thread_sem_down_time="SEC_USEC_FMT
7310 			"\nlogtrace_thread_flush_time="SEC_USEC_FMT
7311 			" logtrace_thread_unexpected_break_time="SEC_USEC_FMT
7312 			"\nlogtrace_thread_complete_time="SEC_USEC_FMT"\n",
7313 			GET_SEC_USEC(dhd->logtrace_thr_ts.entry_time),
7314 			GET_SEC_USEC(dhd->logtrace_thr_ts.sem_down_time),
7315 			GET_SEC_USEC(dhd->logtrace_thr_ts.flush_time),
7316 			GET_SEC_USEC(dhd->logtrace_thr_ts.unexpected_break_time),
7317 			GET_SEC_USEC(dhd->logtrace_thr_ts.complete_time));
7318 #endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */
7319 }
7320 
7321 void dhd_dump_intr_registers(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
7322 {
7323 	uint32 intstatus = 0;
7324 	uint32 intmask = 0;
7325 	uint32 d2h_db0 = 0;
7326 	uint32 d2h_mb_data = 0;
7327 
7328 	intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
7329 		dhd->bus->pcie_mailbox_int, 0, 0);
7330 	intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
7331 		dhd->bus->pcie_mailbox_mask, 0, 0);
7332 	d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCID2H_MailBox, 0, 0);
7333 	dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
7334 
7335 	bcm_bprintf(strbuf, "intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
7336 		intstatus, intmask, d2h_db0);
7337 	bcm_bprintf(strbuf, "d2h_mb_data=0x%x def_intmask=0x%x\n",
7338 		d2h_mb_data, dhd->bus->def_intmask);
7339 }
7340 /** Add bus dump output to a buffer */
7341 void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
7342 {
7343 	uint16 flowid;
7344 	int ix = 0;
7345 	flow_ring_node_t *flow_ring_node;
7346 	flow_info_t *flow_info;
7347 #ifdef TX_STATUS_LATENCY_STATS
7348 	uint8 ifindex;
7349 	if_flow_lkup_t *if_flow_lkup;
7350 	dhd_if_tx_status_latency_t if_tx_status_latency[DHD_MAX_IFS];
7351 #endif /* TX_STATUS_LATENCY_STATS */
7352 
7353 	if (dhdp->busstate != DHD_BUS_DATA)
7354 		return;
7355 
7356 #ifdef TX_STATUS_LATENCY_STATS
7357 	memset(if_tx_status_latency, 0, sizeof(if_tx_status_latency));
7358 #endif /* TX_STATUS_LATENCY_STATS */
7359 #ifdef DHD_WAKE_STATUS
7360 	bcm_bprintf(strbuf, "wake %u rxwake %u readctrlwake %u\n",
7361 		bcmpcie_get_total_wake(dhdp->bus), dhdp->bus->wake_counts.rxwake,
7362 		dhdp->bus->wake_counts.rcwake);
7363 #ifdef DHD_WAKE_RX_STATUS
7364 	bcm_bprintf(strbuf, " unicast %u multicast %u broadcast %u arp %u\n",
7365 		dhdp->bus->wake_counts.rx_ucast, dhdp->bus->wake_counts.rx_mcast,
7366 		dhdp->bus->wake_counts.rx_bcast, dhdp->bus->wake_counts.rx_arp);
7367 	bcm_bprintf(strbuf, " multi4 %u multi6 %u icmp6 %u multiother %u\n",
7368 		dhdp->bus->wake_counts.rx_multi_ipv4, dhdp->bus->wake_counts.rx_multi_ipv6,
7369 		dhdp->bus->wake_counts.rx_icmpv6, dhdp->bus->wake_counts.rx_multi_other);
7370 	bcm_bprintf(strbuf, " icmp6_ra %u, icmp6_na %u, icmp6_ns %u\n",
7371 		dhdp->bus->wake_counts.rx_icmpv6_ra, dhdp->bus->wake_counts.rx_icmpv6_na,
7372 		dhdp->bus->wake_counts.rx_icmpv6_ns);
7373 #endif /* DHD_WAKE_RX_STATUS */
7374 #ifdef DHD_WAKE_EVENT_STATUS
7375 	for (flowid = 0; flowid < WLC_E_LAST; flowid++)
7376 		if (dhdp->bus->wake_counts.rc_event[flowid] != 0)
7377 			bcm_bprintf(strbuf, " %s = %u\n", bcmevent_get_name(flowid),
7378 				dhdp->bus->wake_counts.rc_event[flowid]);
7379 	bcm_bprintf(strbuf, "\n");
7380 #endif /* DHD_WAKE_EVENT_STATUS */
7381 #endif /* DHD_WAKE_STATUS */
7382 
7383 	dhd_prot_print_info(dhdp, strbuf);
7384 	dhd_dump_intr_registers(dhdp, strbuf);
7385 	dhd_dump_intr_counters(dhdp, strbuf);
7386 	bcm_bprintf(strbuf, "h2d_mb_data_ptr_addr 0x%x, d2h_mb_data_ptr_addr 0x%x\n",
7387 		dhdp->bus->h2d_mb_data_ptr_addr, dhdp->bus->d2h_mb_data_ptr_addr);
7388 	bcm_bprintf(strbuf, "dhd cumm_ctr %d\n", DHD_CUMM_CTR_READ(&dhdp->cumm_ctr));
7389 #ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
7390 	bcm_bprintf(strbuf, "multi_client_flow_rings:%d max_multi_client_flow_rings:%d\n",
7391 		dhdp->multi_client_flow_rings, dhdp->max_multi_client_flow_rings);
7392 #endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */
7393 	bcm_bprintf(strbuf,
7394 		"%4s %4s %2s %4s %17s %4s %4s %6s %10s %4s %4s ",
7395 		"Num:", "Flow", "If", "Prio", ":Dest_MacAddress:", "Qlen", "CLen", "L2CLen",
7396 		" Overflows", "  RD", "  WR");
7397 
7398 #ifdef TX_STATUS_LATENCY_STATS
7399 	/* Average Tx status/Completion Latency in micro secs */
7400 	bcm_bprintf(strbuf, "%16s %16s ", "       NumTxPkts", "    AvgTxCmpL_Us");
7401 #endif /* TX_STATUS_LATENCY_STATS */
7402 
7403 	bcm_bprintf(strbuf, "\n");
7404 
7405 	for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) {
7406 		flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
7407 		if (!flow_ring_node->active)
7408 			continue;
7409 
7410 		flow_info = &flow_ring_node->flow_info;
7411 		bcm_bprintf(strbuf,
7412 			"%4d %4d %2d %4d "MACDBG" %4d %4d %6d %10u ", ix++,
7413 			flow_ring_node->flowid, flow_info->ifindex, flow_info->tid,
7414 			MAC2STRDBG(flow_info->da),
7415 			DHD_FLOW_QUEUE_LEN(&flow_ring_node->queue),
7416 			DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_CLEN_PTR(&flow_ring_node->queue)),
7417 			DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_L2CLEN_PTR(&flow_ring_node->queue)),
7418 			DHD_FLOW_QUEUE_FAILURES(&flow_ring_node->queue));
7419 		dhd_prot_print_flow_ring(dhdp, flow_ring_node->prot_info, strbuf,
7420 			"%4d %4d ");
7421 
7422 #ifdef TX_STATUS_LATENCY_STATS
7423 		bcm_bprintf(strbuf, "%16d %16d ",
7424 			flow_info->num_tx_pkts,
7425 			flow_info->num_tx_status ?
7426 			DIV_U64_BY_U64(flow_info->cum_tx_status_latency,
7427 			flow_info->num_tx_status) : 0);
7428 
7429 		ifindex = flow_info->ifindex;
7430 		ASSERT(ifindex < DHD_MAX_IFS);
7431 		if (ifindex < DHD_MAX_IFS) {
7432 			if_tx_status_latency[ifindex].num_tx_status += flow_info->num_tx_status;
7433 			if_tx_status_latency[ifindex].cum_tx_status_latency +=
7434 				flow_info->cum_tx_status_latency;
7435 		} else {
7436 			DHD_ERROR(("%s: Bad IF index: %d associated with flowid: %d\n",
7437 				__FUNCTION__, ifindex, flowid));
7438 		}
7439 #endif /* TX_STATUS_LATENCY_STATS */
7440 		bcm_bprintf(strbuf, "\n");
7441 	}
7442 
7443 #ifdef TX_STATUS_LATENCY_STATS
7444 	bcm_bprintf(strbuf, "\n%s  %16s  %16s\n", "If", "AvgTxCmpL_Us", "NumTxStatus");
7445 	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
7446 	for (ix = 0; ix < DHD_MAX_IFS; ix++) {
7447 		if (!if_flow_lkup[ix].status) {
7448 			continue;
7449 		}
7450 		bcm_bprintf(strbuf, "%2d  %16d  %16d\n",
7451 			ix,
7452 			if_tx_status_latency[ix].num_tx_status ?
7453 			DIV_U64_BY_U64(if_tx_status_latency[ix].cum_tx_status_latency,
7454 			if_tx_status_latency[ix].num_tx_status): 0,
7455 			if_tx_status_latency[ix].num_tx_status);
7456 	}
7457 #endif /* TX_STATUS_LATENCY_STATS */
7458 
7459 #ifdef DHD_HP2P
7460 	if (dhdp->hp2p_capable) {
7461 		bcm_bprintf(strbuf, "\n%s  %16s  %16s", "Flowid", "Tx_t0", "Tx_t1");
7462 
7463 		for (flowid = 0; flowid < MAX_HP2P_FLOWS; flowid++) {
7464 			hp2p_info_t *hp2p_info;
7465 			int bin;
7466 
7467 			hp2p_info = &dhdp->hp2p_info[flowid];
7468 			if (hp2p_info->num_timer_start == 0)
7469 				continue;
7470 
7471 			bcm_bprintf(strbuf, "\n%d", hp2p_info->flowid);
7472 			bcm_bprintf(strbuf, "\n%s", "Bin");
7473 
7474 			for (bin = 0; bin < MAX_TX_HIST_BIN; bin++) {
7475 				bcm_bprintf(strbuf, "\n%2d %20d  %16d", bin,
7476 					hp2p_info->tx_t0[bin], hp2p_info->tx_t1[bin]);
7477 			}
7478 
7479 			bcm_bprintf(strbuf, "\n%s  %16s", "Flowid", "Rx_t0");
7480 			bcm_bprintf(strbuf, "\n%d", hp2p_info->flowid);
7481 			bcm_bprintf(strbuf, "\n%s", "Bin");
7482 
7483 			for (bin = 0; bin < MAX_RX_HIST_BIN; bin++) {
7484 				bcm_bprintf(strbuf, "\n%d %20d", bin,
7485 					hp2p_info->rx_t0[bin]);
7486 			}
7487 
7488 			bcm_bprintf(strbuf, "\n%s  %16s  %16s",
7489 				"Packet limit", "Timer limit", "Timer start");
7490 			bcm_bprintf(strbuf, "\n%d %24d %16d", hp2p_info->num_pkt_limit,
7491 				hp2p_info->num_timer_limit, hp2p_info->num_timer_start);
7492 		}
7493 
7494 		bcm_bprintf(strbuf, "\n");
7495 	}
7496 #endif /* DHD_HP2P */
7497 
7498 	bcm_bprintf(strbuf, "D3 inform cnt %d\n", dhdp->bus->d3_inform_cnt);
7499 	bcm_bprintf(strbuf, "D0 inform cnt %d\n", dhdp->bus->d0_inform_cnt);
7500 	bcm_bprintf(strbuf, "D0 inform in use cnt %d\n", dhdp->bus->d0_inform_in_use_cnt);
7501 	if (dhdp->d2h_hostrdy_supported) {
7502 		bcm_bprintf(strbuf, "hostready count:%d\n", dhdp->bus->hostready_count);
7503 	}
7504 	bcm_bprintf(strbuf, "d2h_intr_method -> %s\n",
7505 		dhdp->bus->d2h_intr_method ? "PCIE_MSI" : "PCIE_INTX");
7506 }
7507 
7508 #ifdef DNGL_AXI_ERROR_LOGGING
7509 bool
7510 dhd_axi_sig_match(dhd_pub_t *dhdp)
7511 {
7512 	uint32 axi_tcm_addr = dhdpcie_bus_rtcm32(dhdp->bus, dhdp->axierror_logbuf_addr);
7513 
7514 	if (dhdp->dhd_induce_error == DHD_INDUCE_DROP_AXI_SIG) {
7515 		DHD_ERROR(("%s: Induce AXI signature drop\n", __FUNCTION__));
7516 		return FALSE;
7517 	}
7518 
7519 	DHD_ERROR(("%s: axi_tcm_addr: 0x%x, tcm range: 0x%x ~ 0x%x\n",
7520 		__FUNCTION__, axi_tcm_addr, dhdp->bus->dongle_ram_base,
7521 		dhdp->bus->dongle_ram_base + dhdp->bus->ramsize));
7522 	if (axi_tcm_addr >= dhdp->bus->dongle_ram_base &&
7523 	    axi_tcm_addr < dhdp->bus->dongle_ram_base + dhdp->bus->ramsize) {
7524 		uint32 axi_signature = dhdpcie_bus_rtcm32(dhdp->bus, (axi_tcm_addr +
7525 			OFFSETOF(hnd_ext_trap_axi_error_v1_t, signature)));
7526 		if (axi_signature == HND_EXT_TRAP_AXIERROR_SIGNATURE) {
7527 			return TRUE;
7528 		} else {
7529 			DHD_ERROR(("%s: No AXI signature: 0x%x\n",
7530 				__FUNCTION__, axi_signature));
7531 			return FALSE;
7532 		}
7533 	} else {
7534 		DHD_ERROR(("%s: No AXI shared tcm address debug info.\n", __FUNCTION__));
7535 		return FALSE;
7536 	}
7537 }
7538 
7539 void
7540 dhd_axi_error(dhd_pub_t *dhdp)
7541 {
7542 	dhd_axi_error_dump_t *axi_err_dump;
7543 	uint8 *axi_err_buf = NULL;
7544 	uint8 *p_axi_err = NULL;
7545 	uint32 axi_logbuf_addr;
7546 	uint32 axi_tcm_addr;
7547 	int err, size;
7548 
7549 	OSL_DELAY(75000);
7550 
7551 	axi_logbuf_addr = dhdp->axierror_logbuf_addr;
7552 	if (!axi_logbuf_addr) {
7553 		DHD_ERROR(("%s: No AXI TCM address debug info.\n", __FUNCTION__));
7554 		goto sched_axi;
7555 	}
7556 
7557 	axi_err_dump = dhdp->axi_err_dump;
7558 	if (!axi_err_dump) {
7559 		goto sched_axi;
7560 	}
7561 
7562 	if (!dhd_axi_sig_match(dhdp)) {
7563 		goto sched_axi;
7564 	}
7565 
7566 	/* Reading AXI error data for SMMU fault */
7567 	DHD_ERROR(("%s: Read AXI data from TCM address\n", __FUNCTION__));
7568 	axi_tcm_addr = dhdpcie_bus_rtcm32(dhdp->bus, axi_logbuf_addr);
7569 	size = sizeof(hnd_ext_trap_axi_error_v1_t);
7570 	axi_err_buf = MALLOCZ(dhdp->osh, size);
7571 	if (axi_err_buf == NULL) {
7572 		DHD_ERROR(("%s: out of memory !\n", __FUNCTION__));
7573 		goto sched_axi;
7574 	}
7575 
7576 	p_axi_err = axi_err_buf;
7577 	err = dhdpcie_bus_membytes(dhdp->bus, FALSE, axi_tcm_addr, p_axi_err, size);
7578 	if (err) {
7579 		DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
7580 			__FUNCTION__, err, size, axi_tcm_addr));
7581 		goto sched_axi;
7582 	}
7583 
7584 	/* Dump data to Dmesg */
7585 	dhd_log_dump_axi_error(axi_err_buf);
7586 	err = memcpy_s(&axi_err_dump->etd_axi_error_v1, size, axi_err_buf, size);
7587 	if (err) {
7588 		DHD_ERROR(("%s: failed to copy etd axi error info, err=%d\n",
7589 			__FUNCTION__, err));
7590 	}
7591 
7592 sched_axi:
7593 	if (axi_err_buf) {
7594 		MFREE(dhdp->osh, axi_err_buf, size);
7595 	}
7596 	dhd_schedule_axi_error_dump(dhdp, NULL);
7597 }
7598 
7599 static void
7600 dhd_log_dump_axi_error(uint8 *axi_err)
7601 {
7602 	dma_dentry_v1_t dma_dentry;
7603 	dma_fifo_v1_t dma_fifo;
7604 	int i = 0, j = 0;
7605 
7606 	if (*(uint8 *)axi_err == HND_EXT_TRAP_AXIERROR_VERSION_1) {
7607 		hnd_ext_trap_axi_error_v1_t *axi_err_v1 = (hnd_ext_trap_axi_error_v1_t *)axi_err;
7608 		DHD_ERROR(("%s: signature : 0x%x\n", __FUNCTION__, axi_err_v1->signature));
7609 		DHD_ERROR(("%s: version : 0x%x\n", __FUNCTION__, axi_err_v1->version));
7610 		DHD_ERROR(("%s: length : 0x%x\n", __FUNCTION__, axi_err_v1->length));
7611 		DHD_ERROR(("%s: dma_fifo_valid_count : 0x%x\n",
7612 			__FUNCTION__, axi_err_v1->dma_fifo_valid_count));
7613 		DHD_ERROR(("%s: axi_errorlog_status : 0x%x\n",
7614 			__FUNCTION__, axi_err_v1->axi_errorlog_status));
7615 		DHD_ERROR(("%s: axi_errorlog_core : 0x%x\n",
7616 			__FUNCTION__, axi_err_v1->axi_errorlog_core));
7617 		DHD_ERROR(("%s: axi_errorlog_hi : 0x%x\n",
7618 			__FUNCTION__, axi_err_v1->axi_errorlog_hi));
7619 		DHD_ERROR(("%s: axi_errorlog_lo : 0x%x\n",
7620 			__FUNCTION__, axi_err_v1->axi_errorlog_lo));
7621 		DHD_ERROR(("%s: axi_errorlog_id : 0x%x\n",
7622 			__FUNCTION__, axi_err_v1->axi_errorlog_id));
7623 
7624 		for (i = 0; i < MAX_DMAFIFO_ENTRIES_V1; i++) {
7625 			dma_fifo = axi_err_v1->dma_fifo[i];
7626 			DHD_ERROR(("%s: valid:%d : 0x%x\n", __FUNCTION__, i, dma_fifo.valid));
7627 			DHD_ERROR(("%s: direction:%d : 0x%x\n",
7628 				__FUNCTION__, i, dma_fifo.direction));
7629 			DHD_ERROR(("%s: index:%d : 0x%x\n",
7630 				__FUNCTION__, i, dma_fifo.index));
7631 			DHD_ERROR(("%s: dpa:%d : 0x%x\n",
7632 				__FUNCTION__, i, dma_fifo.dpa));
7633 			DHD_ERROR(("%s: desc_lo:%d : 0x%x\n",
7634 				__FUNCTION__, i, dma_fifo.desc_lo));
7635 			DHD_ERROR(("%s: desc_hi:%d : 0x%x\n",
7636 				__FUNCTION__, i, dma_fifo.desc_hi));
7637 			DHD_ERROR(("%s: din:%d : 0x%x\n",
7638 				__FUNCTION__, i, dma_fifo.din));
7639 			DHD_ERROR(("%s: dout:%d : 0x%x\n",
7640 				__FUNCTION__, i, dma_fifo.dout));
7641 			for (j = 0; j < MAX_DMAFIFO_DESC_ENTRIES_V1; j++) {
7642 				dma_dentry = axi_err_v1->dma_fifo[i].dentry[j];
7643 				DHD_ERROR(("%s: ctrl1:%d : 0x%x\n",
7644 					__FUNCTION__, i, dma_dentry.ctrl1));
7645 				DHD_ERROR(("%s: ctrl2:%d : 0x%x\n",
7646 					__FUNCTION__, i, dma_dentry.ctrl2));
7647 				DHD_ERROR(("%s: addrlo:%d : 0x%x\n",
7648 					__FUNCTION__, i, dma_dentry.addrlo));
7649 				DHD_ERROR(("%s: addrhi:%d : 0x%x\n",
7650 					__FUNCTION__, i, dma_dentry.addrhi));
7651 			}
7652 		}
7653 	} else {
7655 		DHD_ERROR(("%s: Invalid AXI version: 0x%x\n", __FUNCTION__, (*(uint8 *)axi_err)));
7656 	}
7657 }
7658 #endif /* DNGL_AXI_ERROR_LOGGING */
7659 
7660 /**
7661  * Brings transmit packets on all flow rings closer to the dongle, by moving (a subset) from their
7662  * flow queue to their flow ring.
7663  */
7664 static void
7665 dhd_update_txflowrings(dhd_pub_t *dhd)
7666 {
7667 	unsigned long flags;
7668 	dll_t *item, *next;
7669 	flow_ring_node_t *flow_ring_node;
7670 	struct dhd_bus *bus = dhd->bus;
7671 
7672 	if (dhd_query_bus_erros(dhd)) {
7673 		return;
7674 	}
7675 
7676 	/* Hold flowring_list_lock to ensure no race condition while accessing the List */
7677 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
7678 	for (item = dll_head_p(&bus->flowring_active_list);
7679 		(!dhd_is_device_removed(dhd) && !dll_end(&bus->flowring_active_list, item));
7680 		item = next) {
7681 		if (dhd->hang_was_sent) {
7682 			break;
7683 		}
7684 
7685 		next = dll_next_p(item);
7686 		flow_ring_node = dhd_constlist_to_flowring(item);
7687 
7688 		/* Ensure that flow_ring_node in the list is Not Null */
7689 		ASSERT(flow_ring_node != NULL);
7690 
7691 		/* Ensure that the flowring node has valid contents */
7692 		ASSERT(flow_ring_node->prot_info != NULL);
7693 
7694 		dhd_prot_update_txflowring(dhd, flow_ring_node->flowid, flow_ring_node->prot_info);
7695 	}
7696 	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
7697 }
7698 
7699 /** Mailbox ringbell Function */
7700 static void
7701 dhd_bus_gen_devmb_intr(struct dhd_bus *bus)
7702 {
7703 	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
7704 		(bus->sih->buscorerev == 4)) {
7705 		DHD_ERROR(("%s: mailbox communication not supported\n", __FUNCTION__));
7706 		return;
7707 	}
7708 	if (bus->db1_for_mb)  {
7709 		/* this is a pcie core register, not the config register */
7710 		DHD_INFO(("%s: writing a mailbox interrupt to the device, through doorbell 1\n", __FUNCTION__));
7711 		if (DAR_PWRREQ(bus)) {
7712 			dhd_bus_pcie_pwr_req(bus);
7713 		}
7714 		si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db1_addr_get(bus),
7715 			~0, 0x12345678);
7716 	} else {
7717 		DHD_INFO(("%s: writing a mailbox interrupt to the device, through config space\n", __FUNCTION__));
7718 		dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
7719 		dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
7720 	}
7721 }
7722 
7723 /* Upon receiving a mailbox interrupt,
7724  * if the H2D_FW_TRAP bit is set in the mailbox location,
7725  * the device traps.
7726  */
7727 static void
7728 dhdpcie_fw_trap(dhd_bus_t *bus)
7729 {
7730 	/* Send the mailbox data and generate mailbox intr. */
7731 	dhdpcie_send_mb_data(bus, H2D_FW_TRAP);
7732 	/* For FWs that cannot interpret H2D_FW_TRAP */
7733 	(void)dhd_wl_ioctl_set_intiovar(bus->dhd, "bus:disconnect", 99, WLC_SET_VAR, TRUE, 0);
7734 }
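/*
 * Annotation (not original): the "bus:disconnect" iovar with value 99
 * appears to serve as a fallback trap request over the IOCTL path for
 * firmware builds that do not understand the H2D_FW_TRAP mailbox bit.
 */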
7735 
7736 /** mailbox doorbell ring function */
7737 void
7738 dhd_bus_ringbell(struct dhd_bus *bus, uint32 value)
7739 {
7740 	/* Skip after sending D3_INFORM */
7741 	if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
7742 		DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
7743 			__FUNCTION__, bus->bus_low_power_state));
7744 		return;
7745 	}
7746 
7747 	/* Skip in the case of link down */
7748 	if (bus->is_linkdown) {
7749 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
7750 		return;
7751 	}
7752 
7753 	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
7754 		(bus->sih->buscorerev == 4)) {
7755 		si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int,
7756 			PCIE_INTB, PCIE_INTB);
7757 	} else {
7758 		/* this is a pcie core register, not the config register */
7759 		DHD_INFO(("%s: writing a door bell to the device\n", __FUNCTION__));
7760 		if (IDMA_ACTIVE(bus->dhd)) {
7761 			if (DAR_PWRREQ(bus)) {
7762 				dhd_bus_pcie_pwr_req(bus);
7763 			}
7764 			si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db0_addr_2_get(bus),
7765 				~0, value);
7766 		} else {
7767 			if (DAR_PWRREQ(bus)) {
7768 				dhd_bus_pcie_pwr_req(bus);
7769 			}
7770 			si_corereg(bus->sih, bus->sih->buscoreidx,
7771 				dhd_bus_db0_addr_get(bus), ~0, 0x12345678);
7772 		}
7773 	}
7774 }
7775 
7776 /** mailbox doorbell ring function for IDMA/IFRM using dma channel2 */
7777 void
7778 dhd_bus_ringbell_2(struct dhd_bus *bus, uint32 value, bool devwake)
7779 {
7780 	/* this is a pcie core register, not the config register */
7781 	/* Skip after sending D3_INFORM */
7782 	if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
7783 		DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
7784 			__FUNCTION__, bus->bus_low_power_state));
7785 		return;
7786 	}
7787 
7788 	/* Skip in the case of link down */
7789 	if (bus->is_linkdown) {
7790 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
7791 		return;
7792 	}
7793 
7794 	DHD_INFO(("writing a door bell 2 to the device\n"));
7795 	if (DAR_PWRREQ(bus)) {
7796 		dhd_bus_pcie_pwr_req(bus);
7797 	}
7798 	si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db0_addr_2_get(bus),
7799 		~0, value);
7800 }
7801 
7802 void
7803 dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value)
7804 {
7805 	/* Skip after sending D3_INFORM */
7806 	if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
7807 		DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
7808 			__FUNCTION__, bus->bus_low_power_state));
7809 		return;
7810 	}
7811 
7812 	/* Skip in the case of link down */
7813 	if (bus->is_linkdown) {
7814 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
7815 		return;
7816 	}
7817 
7818 	if (DAR_PWRREQ(bus)) {
7819 		dhd_bus_pcie_pwr_req(bus);
7820 	}
7821 
7822 #ifdef DHD_DB0TS
7823 	if (bus->dhd->db0ts_capable) {
7824 		uint64 ts;
7825 
7826 		ts = local_clock();
7827 		do_div(ts, 1000);
7828 
7829 		value = htol32(ts & 0xFFFFFFFF);
7830 		DHD_INFO(("%s: usec timer = 0x%x\n", __FUNCTION__, value));
7831 	}
7832 #endif /* DHD_DB0TS */
7833 	W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, value);
7834 }
7835 
7836 void
7837 dhdpcie_bus_ringbell_2_fast(struct dhd_bus *bus, uint32 value, bool devwake)
7838 {
7839 	/* Skip after sending D3_INFORM */
7840 	if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
7841 		DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
7842 			__FUNCTION__, bus->bus_low_power_state));
7843 		return;
7844 	}
7845 
7846 	/* Skip in the case of link down */
7847 	if (bus->is_linkdown) {
7848 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
7849 		return;
7850 	}
7851 
7852 	if (DAR_PWRREQ(bus)) {
7853 		dhd_bus_pcie_pwr_req(bus);
7854 	}
7855 	W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_2_addr, value);
7856 }
7857 
7858 static void
7859 dhd_bus_ringbell_oldpcie(struct dhd_bus *bus, uint32 value)
7860 {
7861 	uint32 w;
7862 	/* Skip after sending D3_INFORM */
7863 	if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
7864 		DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
7865 			__FUNCTION__, bus->bus_low_power_state));
7866 		return;
7867 	}
7868 
7869 	/* Skip in the case of link down */
7870 	if (bus->is_linkdown) {
7871 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
7872 		return;
7873 	}
7874 
7875 	w = (R_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr) & ~PCIE_INTB) | PCIE_INTB;
7876 	W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, w);
7877 }
7878 
7879 dhd_mb_ring_t
7880 dhd_bus_get_mbintr_fn(struct dhd_bus *bus)
7881 {
7882 	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
7883 		(bus->sih->buscorerev == 4)) {
7884 		bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
7885 			bus->pcie_mailbox_int);
7886 		if (bus->pcie_mb_intr_addr) {
7887 			bus->pcie_mb_intr_osh = si_osh(bus->sih);
7888 			return dhd_bus_ringbell_oldpcie;
7889 		}
7890 	} else {
7891 		bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
7892 			dhd_bus_db0_addr_get(bus));
7893 		if (bus->pcie_mb_intr_addr) {
7894 			bus->pcie_mb_intr_osh = si_osh(bus->sih);
7895 			return dhdpcie_bus_ringbell_fast;
7896 		}
7897 	}
7898 	return dhd_bus_ringbell;
7899 }
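/*
 * Annotation (not original): dhd_mb_ring_t is the doorbell function
 * pointer used by the protocol layer. When the doorbell register can be
 * mapped up front, the "fast" variants write it directly with W_REG(),
 * avoiding the per-ring si_corereg() lookup that the generic
 * dhd_bus_ringbell() fallback performs.
 */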
7900 
7901 dhd_mb_ring_2_t
7902 dhd_bus_get_mbintr_2_fn(struct dhd_bus *bus)
7903 {
7904 	bus->pcie_mb_intr_2_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
7905 		dhd_bus_db0_addr_2_get(bus));
7906 	if (bus->pcie_mb_intr_2_addr) {
7907 		bus->pcie_mb_intr_osh = si_osh(bus->sih);
7908 		return dhdpcie_bus_ringbell_2_fast;
7909 	}
7910 	return dhd_bus_ringbell_2;
7911 }
7912 
7913 bool BCMFASTPATH
7914 dhd_bus_dpc(struct dhd_bus *bus)
7915 {
7916 	bool resched = FALSE;	  /* Flag indicating resched wanted */
7917 	unsigned long flags;
7918 
7919 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7920 
7921 	bus->dpc_entry_time = OSL_LOCALTIME_NS();
7922 
7923 	DHD_GENERAL_LOCK(bus->dhd, flags);
7924 	/* Check only for DHD_BUS_DOWN, not DHD_BUS_DOWN_IN_PROGRESS, to avoid
7925 	 * an IOCTL "Resumed On timeout" when an ioctl is waiting for a response
7926 	 * while rmmod runs in parallel: rmmod sets DHD_BUS_DOWN_IN_PROGRESS,
7927 	 * and returning here would leave the IOCTL response unhandled.
7928 	 */
7929 	if (bus->dhd->busstate == DHD_BUS_DOWN) {
7930 		DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__));
7931 		bus->intstatus = 0;
7932 		DHD_GENERAL_UNLOCK(bus->dhd, flags);
7933 		bus->dpc_return_busdown_count++;
7934 		return FALSE;
7935 	}
7936 	DHD_BUS_BUSY_SET_IN_DPC(bus->dhd);
7937 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
7938 
7939 #ifdef DHD_READ_INTSTATUS_IN_DPC
7940 	if (bus->ipend) {
7941 		bus->ipend = FALSE;
7942 		bus->intstatus = dhdpcie_bus_intstatus(bus);
7943 		/* Check if the interrupt is ours or not */
7944 		if (bus->intstatus == 0) {
7945 			goto INTR_ON;
7946 		}
7947 		bus->intrcount++;
7948 	}
7949 #endif /* DHD_READ_INTSTATUS_IN_DPC */
7950 
7951 	resched = dhdpcie_bus_process_mailbox_intr(bus, bus->intstatus);
7952 	if (!resched) {
7953 		bus->intstatus = 0;
7954 #ifdef DHD_READ_INTSTATUS_IN_DPC
7955 INTR_ON:
7956 #endif /* DHD_READ_INTSTATUS_IN_DPC */
7957 		bus->dpc_intr_enable_count++;
7958 		/* For Linux, MacOS etc. (other than NDIS), re-enable the host
7959 		 * interrupts which were disabled in dhdpcie_bus_isr()
7960 		 */
7961 		dhdpcie_enable_irq(bus); /* Enable back interrupt!! */
7962 		bus->dpc_exit_time = OSL_LOCALTIME_NS();
7963 	} else {
7964 		bus->resched_dpc_time = OSL_LOCALTIME_NS();
7965 	}
7966 
7967 	bus->dpc_sched = resched;
7968 
7969 	DHD_GENERAL_LOCK(bus->dhd, flags);
7970 	DHD_BUS_BUSY_CLEAR_IN_DPC(bus->dhd);
7971 	dhd_os_busbusy_wake(bus->dhd);
7972 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
7973 
7974 	return resched;
7975 
7976 }
7977 
7978 int
7979 dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data)
7980 {
7981 	uint32 cur_h2d_mb_data = 0;
7982 
7983 	DHD_INFO_HW4(("%s: H2D_MB_DATA: 0x%08X\n", __FUNCTION__, h2d_mb_data));
7984 
7985 	if (bus->is_linkdown) {
7986 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
7987 		return BCME_ERROR;
7988 	}
7989 
7990 	if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !bus->use_mailbox) {
7991 		DHD_INFO(("API rev >= 6, sending mb data as H2D Ctrl message to dongle, 0x%04x\n",
7992 			h2d_mb_data));
7993 		/* Prevent asserting device_wake during doorbell ring for mb data to avoid loop. */
7994 		{
7995 			if (dhd_prot_h2d_mbdata_send_ctrlmsg(bus->dhd, h2d_mb_data)) {
7996 				DHD_ERROR(("failure sending the H2D Mailbox message "
7997 					"to firmware\n"));
7998 				goto fail;
7999 			}
8000 		}
8001 		goto done;
8002 	}
8003 
8004 	dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0);
8005 
8006 	if (cur_h2d_mb_data != 0) {
8007 		uint32 i = 0;
8008 		DHD_INFO(("%s: GRRRRRRR: MB transaction is already pending 0x%04x\n", __FUNCTION__, cur_h2d_mb_data));
8009 		while ((i++ < 100) && cur_h2d_mb_data) {
8010 			OSL_DELAY(10);
8011 			dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0);
8012 		}
8013 		if (i >= 100) {
8014 			DHD_ERROR(("%s : waited 1ms for the dngl "
8015 				"to ack the previous mb transaction\n", __FUNCTION__));
8016 			DHD_ERROR(("%s : MB transaction is still pending 0x%04x\n",
8017 				__FUNCTION__, cur_h2d_mb_data));
8018 		}
8019 	}
8020 
8021 	dhd_bus_cmn_writeshared(bus, &h2d_mb_data, sizeof(uint32), H2D_MB_DATA, 0);
8022 	dhd_bus_gen_devmb_intr(bus);
8023 
8024 done:
8025 	if (h2d_mb_data == H2D_HOST_D3_INFORM) {
8026 		DHD_INFO_HW4(("%s: send H2D_HOST_D3_INFORM to dongle\n", __FUNCTION__));
8027 		bus->last_d3_inform_time = OSL_LOCALTIME_NS();
8028 		bus->d3_inform_cnt++;
8029 	}
8030 	if (h2d_mb_data == H2D_HOST_D0_INFORM_IN_USE) {
8031 		DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM_IN_USE to dongle\n", __FUNCTION__));
8032 		bus->d0_inform_in_use_cnt++;
8033 	}
8034 	if (h2d_mb_data == H2D_HOST_D0_INFORM) {
8035 		DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM to dongle\n", __FUNCTION__));
8036 		bus->d0_inform_cnt++;
8037 	}
8038 	return BCME_OK;
8039 fail:
8040 	return BCME_ERROR;
8041 }
8042 
8043 static void
8044 dhd_bus_handle_d3_ack(dhd_bus_t *bus)
8045 {
8046 	unsigned long flags_bus;
8047 	DHD_BUS_LOCK(bus->bus_lock, flags_bus);
8048 	bus->suspend_intr_disable_count++;
8049 	/* Disable dongle Interrupts Immediately after D3 */
8050 
8051 	/* For Linux, MacOS etc. (other than NDIS), along with disabling the
8052 	 * dongle interrupt by clearing the IntMask, directly disable the
8053 	 * interrupt from the host side as well. Also clear the intstatus
8054 	 * if it is set, to avoid unnecessary interrupts after the D3 ACK.
8055 	 */
8056 	dhdpcie_bus_intr_disable(bus); /* Disable interrupt using IntMask!! */
8057 	dhdpcie_bus_clear_intstatus(bus);
8058 	dhdpcie_disable_irq_nosync(bus); /* Disable host interrupt!! */
8059 
8060 	if (bus->dhd->dhd_induce_error != DHD_INDUCE_D3_ACK_TIMEOUT) {
8061 		/* Set bus_low_power_state to DHD_BUS_D3_ACK_RECIEVED */
8062 		bus->bus_low_power_state = DHD_BUS_D3_ACK_RECIEVED;
8063 		DHD_ERROR(("%s: D3_ACK Received\n", __FUNCTION__));
8064 	}
8065 	DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
8066 	/* Check the D3 ACK induce flag, which is set by firing a dhd iovar to induce a D3 Ack timeout.
8067 	 * If the flag is set, the D3 wake is skipped, which results in a D3 Ack timeout.
8068 	 */
8069 	if (bus->dhd->dhd_induce_error != DHD_INDUCE_D3_ACK_TIMEOUT) {
8070 		bus->wait_for_d3_ack = 1;
8071 		dhd_os_d3ack_wake(bus->dhd);
8072 	} else {
8073 		DHD_ERROR(("%s: Inducing D3 ACK timeout\n", __FUNCTION__));
8074 	}
8075 }
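/*
 * Annotation (not original): D3 handshake summary. The host sends
 * H2D_HOST_D3_INFORM via dhdpcie_send_mb_data(); the dongle replies with
 * D2H_DEV_D3_ACK, which reaches this handler from the mailbox paths.
 * Interrupts are masked immediately so no further ring processing occurs,
 * and dhd_os_d3ack_wake() releases the suspend path waiting on
 * wait_for_d3_ack.
 */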
8076 void
8077 dhd_bus_handle_mb_data(dhd_bus_t *bus, uint32 d2h_mb_data)
8078 {
8079 	if (MULTIBP_ENAB(bus->sih)) {
8080 		dhd_bus_pcie_pwr_req(bus);
8081 	}
8082 
8083 	DHD_INFO(("D2H_MB_DATA: 0x%04x\n", d2h_mb_data));
8084 
8085 	if (d2h_mb_data & D2H_DEV_FWHALT) {
8086 		DHD_ERROR(("FW trap has happened\n"));
8087 		dhdpcie_checkdied(bus, NULL, 0);
8088 		dhd_os_check_hang(bus->dhd, 0, -EREMOTEIO);
8089 		goto exit;
8090 	}
8091 	if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ)  {
8092 		bool ds_acked = FALSE;
8093 		BCM_REFERENCE(ds_acked);
8094 		if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
8095 			DHD_ERROR(("DS-ENTRY AFTER D3-ACK!!!!! QUITTING\n"));
8096 			DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
8097 			bus->dhd->busstate = DHD_BUS_DOWN;
8098 			goto exit;
8099 		}
8100 		/* what should we do */
8101 		DHD_INFO(("D2H_MB_DATA: DEEP SLEEP REQ\n"));
8102 		{
8103 			dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
8104 			DHD_INFO(("D2H_MB_DATA: sent DEEP SLEEP ACK\n"));
8105 		}
8106 	}
8107 	if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE)  {
8108 		/* what should we do */
8109 		DHD_INFO(("D2H_MB_DATA: DEEP SLEEP EXIT\n"));
8110 	}
8111 	if (d2h_mb_data & D2HMB_DS_HOST_SLEEP_EXIT_ACK)  {
8112 		/* what should we do */
8113 		DHD_INFO(("D2H_MB_DATA: D0 ACK\n"));
8114 	}
8115 	if (d2h_mb_data & D2H_DEV_D3_ACK)  {
8116 		/* what should we do */
8117 		DHD_INFO_HW4(("D2H_MB_DATA: D3 ACK\n"));
8118 		if (!bus->wait_for_d3_ack) {
8119 			dhd_bus_handle_d3_ack(bus);
8120 		}
8121 	}
8122 
8123 exit:
8124 	if (MULTIBP_ENAB(bus->sih)) {
8125 		dhd_bus_pcie_pwr_req_clear(bus);
8126 	}
8127 }
8128 
8129 static void
8130 dhdpcie_handle_mb_data(dhd_bus_t *bus)
8131 {
8132 	uint32 d2h_mb_data = 0;
8133 	uint32 zero = 0;
8134 
8135 	if (MULTIBP_ENAB(bus->sih)) {
8136 		dhd_bus_pcie_pwr_req(bus);
8137 	}
8138 
8139 	dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0);
8140 	if (D2H_DEV_MB_INVALIDATED(d2h_mb_data)) {
8141 		DHD_ERROR(("%s: Invalid D2H_MB_DATA: 0x%08x\n",
8142 			__FUNCTION__, d2h_mb_data));
8143 		goto exit;
8144 	}
8145 
8146 	dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0);
8147 
8148 	DHD_INFO_HW4(("%s: D2H_MB_DATA: 0x%04x\n", __FUNCTION__, d2h_mb_data));
8149 	if (d2h_mb_data & D2H_DEV_FWHALT)  {
8150 		DHD_ERROR(("FW trap has happened\n"));
8151 		dhdpcie_checkdied(bus, NULL, 0);
8152 		/* not ready yet dhd_os_ind_firmware_stall(bus->dhd); */
8153 		goto exit;
8154 	}
8155 	if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ)  {
8156 		/* what should we do */
8157 		DHD_INFO(("%s: D2H_MB_DATA: DEEP SLEEP REQ\n", __FUNCTION__));
8158 		dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
8159 		DHD_INFO(("%s: D2H_MB_DATA: sent DEEP SLEEP ACK\n", __FUNCTION__));
8160 	}
8161 	if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE)  {
8162 		/* what should we do */
8163 		DHD_INFO(("%s: D2H_MB_DATA: DEEP SLEEP EXIT\n", __FUNCTION__));
8164 	}
8165 	if (d2h_mb_data & D2H_DEV_D3_ACK)  {
8166 		/* what should we do */
8167 		DHD_INFO_HW4(("%s: D2H_MB_DATA: D3 ACK\n", __FUNCTION__));
8168 		if (!bus->wait_for_d3_ack) {
8169 			dhd_bus_handle_d3_ack(bus);
8170 		}
8171 	}
8172 
8173 exit:
8174 	if (MULTIBP_ENAB(bus->sih)) {
8175 		dhd_bus_pcie_pwr_req_clear(bus);
8176 	}
8177 }
8178 
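/**
 * Mailbox handler for shared interface rev >= 6: read and clear the D2H mailbox word,
 * then hand it to dhd_bus_handle_mb_data() for processing.
 */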
8179 static void
8180 dhdpcie_read_handle_mb_data(dhd_bus_t *bus)
8181 {
8182 	uint32 d2h_mb_data = 0;
8183 	uint32 zero = 0;
8184 
8185 	if (bus->is_linkdown) {
8186 		DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
8187 		return;
8188 	}
8189 
8190 	if (MULTIBP_ENAB(bus->sih)) {
8191 		dhd_bus_pcie_pwr_req(bus);
8192 	}
8193 
8194 	dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0);
8195 	if (!d2h_mb_data) {
8196 		goto exit;
8197 	}
8198 
8199 	dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0);
8200 
8201 	dhd_bus_handle_mb_data(bus, d2h_mb_data);
8202 
8203 exit:
8204 	if (MULTIBP_ENAB(bus->sih)) {
8205 		dhd_bus_pcie_pwr_req_clear(bus);
8206 	}
8207 }
8208 
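/**
 * Service a mailbox interrupt. Old bus core revs (2/4/6) service the message stream
 * interrupt directly; newer revs handle mailbox data first and, unless a D3 ack has been
 * received, drain the message rings. Returns TRUE if the caller should reschedule.
 */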
8209 static bool
8210 dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus)
8211 {
8212 	bool resched = FALSE;
8213 	unsigned long flags_bus;
8214 
8215 	if (MULTIBP_ENAB(bus->sih)) {
8216 		dhd_bus_pcie_pwr_req(bus);
8217 	}
8218 	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
8219 		(bus->sih->buscorerev == 4)) {
8220 		/* Msg stream interrupt */
8221 		if (intstatus & I_BIT1) {
8222 			resched = dhdpci_bus_read_frames(bus);
8223 		} else if (intstatus & I_BIT0) {
8224 			/* do nothing for now */
8225 		}
8226 	} else {
8227 		if (intstatus & (PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1))
8228 			bus->api.handle_mb_data(bus);
8229 
8230 		/* Do not process any rings after receiving D3_ACK */
8231 		DHD_BUS_LOCK(bus->bus_lock, flags_bus);
8232 		if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
8233 			DHD_ERROR(("%s: D3 Ack Received. "
8234 				"Skip processing rest of ring buffers.\n", __FUNCTION__));
8235 			DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
8236 			goto exit;
8237 		}
8238 		DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
8239 
8240 		/* Validate intstatus only for INTX case */
8241 		if ((bus->d2h_intr_method == PCIE_MSI) ||
8242 			((bus->d2h_intr_method == PCIE_INTX) && (intstatus & bus->d2h_mb_mask))) {
8243 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
8244 			if (pm_runtime_get(dhd_bus_to_dev(bus)) >= 0) {
8245 				resched = dhdpci_bus_read_frames(bus);
8246 				pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
8247 				pm_runtime_put_autosuspend(dhd_bus_to_dev(bus));
8248 			}
8249 #else
8250 			resched = dhdpci_bus_read_frames(bus);
8251 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
8252 		}
8253 	}
8254 
8255 exit:
8256 	if (MULTIBP_ENAB(bus->sih)) {
8257 		dhd_bus_pcie_pwr_req_clear(bus);
8258 	}
8259 	return resched;
8260 }
8261 
8262 #if defined(DHD_H2D_LOG_TIME_SYNC)
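/** Periodically schedule the deferred work that sends the RTE log time-sync IOVAR. */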
8263 static void
8264 dhdpci_bus_rte_log_time_sync_poll(dhd_bus_t *bus)
8265 {
8266 	unsigned long time_elapsed;
8267 
8268 	/* Poll for timeout value periodically */
8269 	if ((bus->dhd->busstate == DHD_BUS_DATA) &&
8270 		(bus->dhd->dhd_rte_time_sync_ms != 0) &&
8271 		(bus->bus_low_power_state == DHD_BUS_NO_LOW_POWER_STATE)) {
8272 		time_elapsed = OSL_SYSUPTIME_US() - bus->dhd_rte_time_sync_count;
8273 		/* Comparison time is in milliseconds */
8274 		if ((time_elapsed / 1000) >= bus->dhd->dhd_rte_time_sync_ms) {
8275 			/*
8276 			 * It's fine if it has crossed the timeout value; no need to adjust the
8277 			 * elapsed time.
8278 			 */
8279 			bus->dhd_rte_time_sync_count += time_elapsed;
8280 
8281 			/* Schedule deferred work; the work function will send the IOVAR. */
8282 			dhd_h2d_log_time_sync_deferred_wq_schedule(bus->dhd);
8283 		}
8284 	}
8285 }
8286 #endif /* DHD_H2D_LOG_TIME_SYNC */
8287 
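/**
 * Drain the message rings: trap buffer first, then control completions, tx/rx completions
 * (bounded) and info/EDL rings. Returns TRUE if more work is pending.
 */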
8288 static bool
8289 dhdpci_bus_read_frames(dhd_bus_t *bus)
8290 {
8291 	bool more = FALSE;
8292 	unsigned long flags_bus;
8293 
8294 	/* First check if there is a FW trap */
8295 	if ((bus->api.fw_rev >= PCIE_SHARED_VERSION_6) &&
8296 		(bus->dhd->dongle_trap_data = dhd_prot_process_trapbuf(bus->dhd))) {
8297 #ifdef DNGL_AXI_ERROR_LOGGING
8298 		if (bus->dhd->axi_error) {
8299 			DHD_ERROR(("AXI Error happened\n"));
8300 			return FALSE;
8301 		}
8302 #endif /* DNGL_AXI_ERROR_LOGGING */
8303 		dhd_bus_handle_mb_data(bus, D2H_DEV_FWHALT);
8304 		return FALSE;
8305 	}
8306 
8307 	/* There may be frames in both ctrl buf and data buf; check ctrl buf first */
8308 	DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
8309 
8310 	dhd_prot_process_ctrlbuf(bus->dhd);
8311 	bus->last_process_ctrlbuf_time = OSL_LOCALTIME_NS();
8312 	/* Unlock to give chance for resp to be handled */
8313 	DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
8314 
8315 	/* Do not process rest of ring buf once bus enters low power state (D3_INFORM/D3_ACK) */
8316 	DHD_BUS_LOCK(bus->bus_lock, flags_bus);
8317 	if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
8318 		DHD_ERROR(("%s: Bus is in power save state (%d). "
8319 			"Skip processing rest of ring buffers.\n",
8320 			__FUNCTION__, bus->bus_low_power_state));
8321 		DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
8322 		return FALSE;
8323 	}
8324 	DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
8325 
8326 	DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
8327 	/* update the flow ring cpls */
8328 	dhd_update_txflowrings(bus->dhd);
8329 	bus->last_process_flowring_time = OSL_LOCALTIME_NS();
8330 
8331 	/* With heavy TX traffic, we could get a lot of TxStatus
8332 	 * so add bound
8333 	 */
8334 #ifdef DHD_HP2P
8335 	more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound, DHD_HP2P_RING);
8336 #endif /* DHD_HP2P */
8337 	more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound, DHD_REGULAR_RING);
8338 	bus->last_process_txcpl_time = OSL_LOCALTIME_NS();
8339 
8340 	/* With heavy RX traffic, this routine potentially could spend some time
8341 	 * processing RX frames without RX bound
8342 	 */
8343 #ifdef DHD_HP2P
8344 	more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound, DHD_HP2P_RING);
8345 #endif /* DHD_HP2P */
8346 	more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound, DHD_REGULAR_RING);
8347 	bus->last_process_rxcpl_time = OSL_LOCALTIME_NS();
8348 
8349 	/* Process info ring completion messages */
8350 #ifdef EWP_EDL
8351 	if (!bus->dhd->dongle_edl_support)
8352 #endif // endif
8353 	{
8354 		more |= dhd_prot_process_msgbuf_infocpl(bus->dhd, DHD_INFORING_BOUND);
8355 		bus->last_process_infocpl_time = OSL_LOCALTIME_NS();
8356 	}
8357 #ifdef EWP_EDL
8358 	else {
8359 		more |= dhd_prot_process_msgbuf_edl(bus->dhd);
8360 		bus->last_process_edl_time = OSL_LOCALTIME_NS();
8361 	}
8362 #endif /* EWP_EDL */
8363 
8364 #ifdef IDLE_TX_FLOW_MGMT
8365 	if (bus->enable_idle_flowring_mgmt) {
8366 		/* Look for idle flow rings */
8367 		dhd_bus_check_idle_scan(bus);
8368 	}
8369 #endif /* IDLE_TX_FLOW_MGMT */
8370 
8371 	/* don't talk to the dongle if fw is about to be reloaded */
8372 	if (bus->dhd->hang_was_sent) {
8373 		more = FALSE;
8374 	}
8375 	DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
8376 
8377 #if defined(DHD_H2D_LOG_TIME_SYNC)
8378 	dhdpci_bus_rte_log_time_sync_poll(bus);
8379 #endif /* DHD_H2D_LOG_TIME_SYNC */
8380 	return more;
8381 }
8382 
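/**
 * Sanity check dongle TCM: re-read the shared-area pointer from the last word of RAM and
 * compare a field of pciedev_shared_t against the host's cached copy.
 */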
8383 bool
8384 dhdpcie_tcm_valid(dhd_bus_t *bus)
8385 {
8386 	uint32 addr = 0;
8387 	int rv;
8388 	uint32 shaddr = 0;
8389 	pciedev_shared_t sh;
8390 
8391 	shaddr = bus->dongle_ram_base + bus->ramsize - 4;
8392 
8393 	/* Read last word in memory to determine address of pciedev_shared structure */
8394 	addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
8395 
8396 	if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
8397 		(addr > shaddr)) {
8398 		DHD_ERROR(("%s: address (0x%08x) of pciedev_shared is invalid\n",
8399 			__FUNCTION__, addr));
8400 		return FALSE;
8401 	}
8402 
8403 	/* Read hndrte_shared structure */
8404 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&sh,
8405 		sizeof(pciedev_shared_t))) < 0) {
8406 		DHD_ERROR(("Failed to read PCIe shared struct with %d\n", rv));
8407 		return FALSE;
8408 	}
8409 
8410 	/* Compare any field in pciedev_shared_t */
8411 	if (sh.console_addr != bus->pcie_sh->console_addr) {
8412 		DHD_ERROR(("Contents of the pciedev_shared_t structure do not match.\n"));
8413 		return FALSE;
8414 	}
8415 
8416 	return TRUE;
8417 }
8418 
8419 static void
8420 dhdpcie_update_bus_api_revisions(uint32 firmware_api_version, uint32 host_api_version)
8421 {
8422 	snprintf(bus_api_revision, BUS_API_REV_STR_LEN, "\nBus API revisions:(FW rev%d)(DHD rev%d)",
8423 			firmware_api_version, host_api_version);
8424 	return;
8425 }
8426 
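/** Returns TRUE if the host can interoperate with the given firmware shared-interface revision. */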
8427 static bool
8428 dhdpcie_check_firmware_compatible(uint32 firmware_api_version, uint32 host_api_version)
8429 {
8430 	bool retcode = FALSE;
8431 
8432 	DHD_INFO(("firmware api revision %d, host api revision %d\n",
8433 		firmware_api_version, host_api_version));
8434 
8435 	switch (firmware_api_version) {
8436 	case PCIE_SHARED_VERSION_7:
8437 	case PCIE_SHARED_VERSION_6:
8438 	case PCIE_SHARED_VERSION_5:
8439 		retcode = TRUE;
8440 		break;
8441 	default:
8442 		if (firmware_api_version <= host_api_version)
8443 			retcode = TRUE;
8444 	}
8445 	return retcode;
8446 }
8447 
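/**
 * Poll the last word of dongle RAM for the pciedev_shared_t pointer written by firmware,
 * validate and copy the structure, negotiate capabilities (index DMA, iDMA/IFRM, EDL,
 * HP2P, ...) and read the ring info.
 */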
8448 static int
8449 dhdpcie_readshared(dhd_bus_t *bus)
8450 {
8451 	uint32 addr = 0;
8452 	int rv, dma_indx_wr_buf, dma_indx_rd_buf;
8453 	uint32 shaddr = 0;
8454 	pciedev_shared_t *sh = bus->pcie_sh;
8455 	dhd_timeout_t tmo;
8456 	bool idma_en = FALSE;
8457 
8458 	if (MULTIBP_ENAB(bus->sih)) {
8459 		dhd_bus_pcie_pwr_req(bus);
8460 	}
8461 
8462 	shaddr = bus->dongle_ram_base + bus->ramsize - 4;
8463 	/* start a timer for 5 seconds */
8464 	dhd_timeout_start(&tmo, MAX_READ_TIMEOUT);
8465 
8466 	while (((addr == 0) || (addr == bus->nvram_csm)) && !dhd_timeout_expired(&tmo)) {
8467 		/* Read last word in memory to determine address of pciedev_shared structure */
8468 		addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
8469 	}
8470 
8471 	if (addr == (uint32)-1) {
8472 		DHD_ERROR(("%s: PCIe link might be down\n", __FUNCTION__));
8473 		bus->is_linkdown = 1;
8474 		return BCME_ERROR;
8475 	}
8476 
8477 	if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
8478 		(addr > shaddr)) {
8479 		DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid\n",
8480 			__FUNCTION__, addr));
8481 		DHD_ERROR(("%s: Waited %u usec, dongle is not ready\n", __FUNCTION__, tmo.elapsed));
8482 #ifdef DEBUG_DNGL_INIT_FAIL
8483 		if (addr != (uint32)-1) {	/* skip further PCIe reads if this addr read back as all 1s */
8484 			if (bus->dhd->memdump_enabled) {
8485 				bus->dhd->memdump_type = DUMP_TYPE_DONGLE_INIT_FAILURE;
8486 				dhdpcie_mem_dump(bus);
8487 			}
8488 		}
8489 #endif /* DEBUG_DNGL_INIT_FAIL */
8490 		return BCME_ERROR;
8491 	} else {
8492 		bus->shared_addr = (ulong)addr;
8493 		DHD_ERROR(("%s: PCIe shared addr (0x%08x) read took %u usec "
8494 			"before dongle is ready\n", __FUNCTION__, addr, tmo.elapsed));
8495 	}
8496 
8497 	/* Read hndrte_shared structure */
8498 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)sh,
8499 		sizeof(pciedev_shared_t))) < 0) {
8500 		DHD_ERROR(("%s: Failed to read PCIe shared struct with %d\n", __FUNCTION__, rv));
8501 		return rv;
8502 	}
8503 
8504 	/* Endianness */
8505 	sh->flags = ltoh32(sh->flags);
8506 	sh->trap_addr = ltoh32(sh->trap_addr);
8507 	sh->assert_exp_addr = ltoh32(sh->assert_exp_addr);
8508 	sh->assert_file_addr = ltoh32(sh->assert_file_addr);
8509 	sh->assert_line = ltoh32(sh->assert_line);
8510 	sh->console_addr = ltoh32(sh->console_addr);
8511 	sh->msgtrace_addr = ltoh32(sh->msgtrace_addr);
8512 	sh->dma_rxoffset = ltoh32(sh->dma_rxoffset);
8513 	sh->rings_info_ptr = ltoh32(sh->rings_info_ptr);
8514 	sh->flags2 = ltoh32(sh->flags2);
8515 
8516 	/* load bus console address */
8517 	bus->console_addr = sh->console_addr;
8518 
8519 	/* Read the dma rx offset */
8520 	bus->dma_rxoffset = bus->pcie_sh->dma_rxoffset;
8521 	dhd_prot_rx_dataoffset(bus->dhd, bus->dma_rxoffset);
8522 
8523 	DHD_INFO(("%s: DMA RX offset from shared Area %d\n", __FUNCTION__, bus->dma_rxoffset));
8524 
8525 	bus->api.fw_rev = sh->flags & PCIE_SHARED_VERSION_MASK;
8526 	if (!(dhdpcie_check_firmware_compatible(bus->api.fw_rev, PCIE_SHARED_VERSION)))
8527 	{
8528 		DHD_ERROR(("%s: pcie_shared version %d in dhd "
8529 		           "is older than pciedev_shared version %d in dongle\n",
8530 		           __FUNCTION__, PCIE_SHARED_VERSION,
8531 		           bus->api.fw_rev));
8532 		return BCME_ERROR;
8533 	}
8534 	dhdpcie_update_bus_api_revisions(bus->api.fw_rev, PCIE_SHARED_VERSION);
8535 
8536 	bus->rw_index_sz = (sh->flags & PCIE_SHARED_2BYTE_INDICES) ?
8537 		sizeof(uint16) : sizeof(uint32);
8538 	DHD_INFO(("%s: Dongle advertises %d-byte indices\n",
8539 		__FUNCTION__, bus->rw_index_sz));
8540 
8541 #ifdef IDLE_TX_FLOW_MGMT
8542 	if (sh->flags & PCIE_SHARED_IDLE_FLOW_RING) {
8543 		DHD_ERROR(("%s: FW Supports IdleFlow ring management!\n",
8544 			__FUNCTION__));
8545 		bus->enable_idle_flowring_mgmt = TRUE;
8546 	}
8547 #endif /* IDLE_TX_FLOW_MGMT */
8548 
8549 	if (IDMA_CAPABLE(bus)) {
8550 		if (bus->sih->buscorerev == 23) {
8551 		} else {
8552 			idma_en = TRUE;
8553 		}
8554 	}
8555 
8556 	/* TODO: This needs to be selected based on IPC instead of at compile time */
8557 	bus->dhd->hwa_enable = TRUE;
8558 
8559 	if (idma_en) {
8560 		bus->dhd->idma_enable = (sh->flags & PCIE_SHARED_IDMA) ? TRUE : FALSE;
8561 		bus->dhd->ifrm_enable = (sh->flags & PCIE_SHARED_IFRM) ? TRUE : FALSE;
8562 	}
8563 
8564 	bus->dhd->d2h_sync_mode = sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK;
8565 
8566 	bus->dhd->dar_enable = (sh->flags & PCIE_SHARED_DAR) ? TRUE : FALSE;
8567 
8568 	/* Does the FW support DMA'ing r/w indices */
8569 	if (sh->flags & PCIE_SHARED_DMA_INDEX) {
8570 		if (!bus->dhd->dma_ring_upd_overwrite) {
8571 			{
8572 				if (!IFRM_ENAB(bus->dhd)) {
8573 					bus->dhd->dma_h2d_ring_upd_support = TRUE;
8574 				}
8575 				bus->dhd->dma_d2h_ring_upd_support = TRUE;
8576 			}
8577 		}
8578 
8579 		if (bus->dhd->dma_d2h_ring_upd_support)
8580 			bus->dhd->d2h_sync_mode = 0;
8581 
8582 		DHD_INFO(("%s: Host supports DMAing indices: H2D:%d - D2H:%d. FW supports it\n",
8583 			__FUNCTION__,
8584 			(bus->dhd->dma_h2d_ring_upd_support ? 1 : 0),
8585 			(bus->dhd->dma_d2h_ring_upd_support ? 1 : 0)));
8586 	} else if (!(sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK)) {
8587 		DHD_ERROR(("%s FW has to support either dma indices or d2h sync\n",
8588 			__FUNCTION__));
8589 		return BCME_UNSUPPORTED;
8590 	} else {
8591 		bus->dhd->dma_h2d_ring_upd_support = FALSE;
8592 		bus->dhd->dma_d2h_ring_upd_support = FALSE;
8593 	}
8594 
8595 	/* Does the firmware support fast delete ring? */
8596 	if (sh->flags2 & PCIE_SHARED2_FAST_DELETE_RING) {
8597 		DHD_INFO(("%s: Firmware supports fast delete ring\n",
8598 			__FUNCTION__));
8599 		bus->dhd->fast_delete_ring_support = TRUE;
8600 	} else {
8601 		DHD_INFO(("%s: Firmware does not support fast delete ring\n",
8602 			__FUNCTION__));
8603 		bus->dhd->fast_delete_ring_support = FALSE;
8604 	}
8605 
8606 	/* get ring_info, ring_state and mb data ptrs and store the addresses in bus structure */
8607 	{
8608 		ring_info_t  ring_info;
8609 
8610 		/* boundary check */
8611 		if (sh->rings_info_ptr > shaddr) {
8612 			DHD_ERROR(("%s: rings_info_ptr is invalid 0x%x, skip reading ring info\n",
8613 				__FUNCTION__, sh->rings_info_ptr));
8614 			return BCME_ERROR;
8615 		}
8616 
8617 		if ((rv = dhdpcie_bus_membytes(bus, FALSE, sh->rings_info_ptr,
8618 			(uint8 *)&ring_info, sizeof(ring_info_t))) < 0)
8619 			return rv;
8620 
8621 		bus->h2d_mb_data_ptr_addr = ltoh32(sh->h2d_mb_data_ptr);
8622 		bus->d2h_mb_data_ptr_addr = ltoh32(sh->d2h_mb_data_ptr);
8623 
8624 		if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
8625 			bus->max_tx_flowrings = ltoh16(ring_info.max_tx_flowrings);
8626 			bus->max_submission_rings = ltoh16(ring_info.max_submission_queues);
8627 			bus->max_completion_rings = ltoh16(ring_info.max_completion_rings);
8628 			bus->max_cmn_rings = bus->max_submission_rings - bus->max_tx_flowrings;
8629 			bus->api.handle_mb_data = dhdpcie_read_handle_mb_data;
8630 			bus->use_mailbox = sh->flags & PCIE_SHARED_USE_MAILBOX;
8631 		}
8632 		else {
8633 			bus->max_tx_flowrings = ltoh16(ring_info.max_tx_flowrings);
8634 			bus->max_submission_rings = bus->max_tx_flowrings;
8635 			bus->max_completion_rings = BCMPCIE_D2H_COMMON_MSGRINGS;
8636 			bus->max_cmn_rings = BCMPCIE_H2D_COMMON_MSGRINGS;
8637 			bus->api.handle_mb_data = dhdpcie_handle_mb_data;
8638 			bus->use_mailbox = TRUE;
8639 		}
8640 		if (bus->max_completion_rings == 0) {
8641 			DHD_ERROR(("dongle completion rings are invalid %d\n",
8642 				bus->max_completion_rings));
8643 			return BCME_ERROR;
8644 		}
8645 		if (bus->max_submission_rings == 0) {
8646 			DHD_ERROR(("dongle submission rings are invalid %d\n",
8647 				bus->max_submission_rings));
8648 			return BCME_ERROR;
8649 		}
8650 		if (bus->max_tx_flowrings == 0) {
8651 			DHD_ERROR(("dongle txflow rings are invalid %d\n", bus->max_tx_flowrings));
8652 			return BCME_ERROR;
8653 		}
8654 
8655 		/* If both FW and host support DMA'ing indices, allocate memory and notify FW.
8656 		 * The max submission queue count is read from the FW-initialized ring_info.
8657 		 */
8658 		if (bus->dhd->dma_h2d_ring_upd_support || IDMA_ENAB(bus->dhd)) {
8659 			dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
8660 				H2D_DMA_INDX_WR_BUF, bus->max_submission_rings);
8661 			dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
8662 				D2H_DMA_INDX_RD_BUF, bus->max_completion_rings);
8663 
8664 			if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) {
8665 				DHD_ERROR(("%s: Failed to allocate memory for dma'ing h2d indices. "
8666 						"Host will use w/r indices in TCM\n",
8667 						__FUNCTION__));
8668 				bus->dhd->dma_h2d_ring_upd_support = FALSE;
8669 				bus->dhd->idma_enable = FALSE;
8670 			}
8671 		}
8672 
8673 		if (bus->dhd->dma_d2h_ring_upd_support) {
8674 			dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
8675 				D2H_DMA_INDX_WR_BUF, bus->max_completion_rings);
8676 			dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
8677 				H2D_DMA_INDX_RD_BUF, bus->max_submission_rings);
8678 
8679 			if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) {
8680 				DHD_ERROR(("%s: Failed to allocate memory for dma'ing d2h indices. "
8681 						"Host will use w/r indices in TCM\n",
8682 						__FUNCTION__));
8683 				bus->dhd->dma_d2h_ring_upd_support = FALSE;
8684 			}
8685 		}
8686 
8687 		if (IFRM_ENAB(bus->dhd)) {
8688 			dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
8689 				H2D_IFRM_INDX_WR_BUF, bus->max_tx_flowrings);
8690 
8691 			if (dma_indx_wr_buf != BCME_OK) {
8692 				DHD_ERROR(("%s: Failed to alloc memory for Implicit DMA\n",
8693 						__FUNCTION__));
8694 				bus->dhd->ifrm_enable = FALSE;
8695 			}
8696 		}
8697 
8698 		/* read ringmem and ringstate ptrs from shared area and store in host variables */
8699 		dhd_fillup_ring_sharedptr_info(bus, &ring_info);
8700 		if (dhd_msg_level & DHD_INFO_VAL) {
8701 			bcm_print_bytes("ring_info_raw", (uchar *)&ring_info, sizeof(ring_info_t));
8702 		}
8703 		DHD_INFO(("%s: ring_info\n", __FUNCTION__));
8704 
8705 		DHD_ERROR(("%s: max H2D queues %d\n",
8706 			__FUNCTION__, ltoh16(ring_info.max_tx_flowrings)));
8707 
8708 		DHD_INFO(("mailbox addresses\n"));
8709 		DHD_INFO(("%s: h2d_mb_data_ptr_addr 0x%04x\n",
8710 			__FUNCTION__, bus->h2d_mb_data_ptr_addr));
8711 		DHD_INFO(("%s: d2h_mb_data_ptr_addr 0x%04x\n",
8712 			__FUNCTION__, bus->d2h_mb_data_ptr_addr));
8713 	}
8714 
8715 	DHD_INFO(("%s: d2h_sync_mode 0x%08x\n",
8716 		__FUNCTION__, bus->dhd->d2h_sync_mode));
8717 
8718 	bus->dhd->d2h_hostrdy_supported =
8719 		((sh->flags & PCIE_SHARED_HOSTRDY_SUPPORT) == PCIE_SHARED_HOSTRDY_SUPPORT);
8720 
8721 	bus->dhd->ext_trap_data_supported =
8722 		((sh->flags2 & PCIE_SHARED2_EXTENDED_TRAP_DATA) == PCIE_SHARED2_EXTENDED_TRAP_DATA);
8723 
8724 	if ((sh->flags2 & PCIE_SHARED2_TXSTATUS_METADATA) == 0)
8725 		bus->dhd->pcie_txs_metadata_enable = 0;
8726 
8727 	bus->dhd->hscb_enable =
8728 		(sh->flags2 & PCIE_SHARED2_HSCB) == PCIE_SHARED2_HSCB;
8729 
8730 #ifdef EWP_EDL
8731 	if (host_edl_support) {
8732 		bus->dhd->dongle_edl_support = (sh->flags2 & PCIE_SHARED2_EDL_RING) ? TRUE : FALSE;
8733 		DHD_ERROR(("Dongle EDL support: %u\n", bus->dhd->dongle_edl_support));
8734 	}
8735 #endif /* EWP_EDL */
8736 
8737 	bus->dhd->debug_buf_dest_support =
8738 		(sh->flags2 & PCIE_SHARED2_DEBUG_BUF_DEST) ? TRUE : FALSE;
8739 	DHD_ERROR(("FW supports debug buf dest ? %s \n",
8740 		bus->dhd->debug_buf_dest_support ? "Y" : "N"));
8741 
8742 #ifdef DHD_HP2P
8743 	if (bus->dhd->hp2p_enable) {
8744 		bus->dhd->hp2p_ts_capable =
8745 			(sh->flags2 & PCIE_SHARED2_PKT_TIMESTAMP) == PCIE_SHARED2_PKT_TIMESTAMP;
8746 		bus->dhd->hp2p_capable =
8747 			(sh->flags2 & PCIE_SHARED2_HP2P) == PCIE_SHARED2_HP2P;
8748 		bus->dhd->hp2p_capable &= bus->dhd->hp2p_ts_capable;
8749 
8750 		DHD_ERROR(("FW supports HP2P ? %s \n",
8751 			bus->dhd->hp2p_capable ? "Y" : "N"));
8752 
8753 		if (bus->dhd->hp2p_capable) {
8754 			bus->dhd->pkt_thresh = HP2P_PKT_THRESH;
8755 			bus->dhd->pkt_expiry = HP2P_PKT_EXPIRY;
8756 			bus->dhd->time_thresh = HP2P_TIME_THRESH;
8757 			for (addr = 0; addr < MAX_HP2P_FLOWS; addr++) {
8758 				hp2p_info_t *hp2p_info = &bus->dhd->hp2p_info[addr];
8759 
8760 				hp2p_info->hrtimer_init = FALSE;
8761 				tasklet_hrtimer_init(&hp2p_info->timer,
8762 					dhd_hp2p_write, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
8763 			}
8764 		}
8765 	}
8766 #endif /* DHD_HP2P */
8767 
8768 #ifdef DHD_DB0TS
8769 	bus->dhd->db0ts_capable =
8770 		(sh->flags & PCIE_SHARED_TIMESTAMP_DB0) == PCIE_SHARED_TIMESTAMP_DB0;
8771 #endif /* DHD_DB0TS */
8772 
8773 	if (MULTIBP_ENAB(bus->sih)) {
8774 		dhd_bus_pcie_pwr_req_clear(bus);
8775 
8776 		/*
8777 		 * WAR to fix ARM cold boot;
8778 		 * De-assert WL domain in DAR
8779 		 */
8780 		if (bus->sih->buscorerev >= 68) {
8781 			dhd_bus_pcie_pwr_req_wl_domain(bus,
8782 				DAR_PCIE_PWR_CTRL((bus->sih)->buscorerev), FALSE);
8783 		}
8784 	}
8785 	return BCME_OK;
8786 } /* dhdpcie_readshared */
8787 
8788 /** Read ring mem and ring state ptr info from shared memory area in device memory */
8789 static void
8790 dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info)
8791 {
8792 	uint16 i = 0;
8793 	uint16 j = 0;
8794 	uint32 tcm_memloc;
8795 	uint32	d2h_w_idx_ptr, d2h_r_idx_ptr, h2d_w_idx_ptr, h2d_r_idx_ptr;
8796 	uint16  max_tx_flowrings = bus->max_tx_flowrings;
8797 
8798 	/* Ring mem ptr info */
8799 	/* Allocated in the order
8800 		H2D_MSGRING_CONTROL_SUBMIT              0
8801 		H2D_MSGRING_RXPOST_SUBMIT               1
8802 		D2H_MSGRING_CONTROL_COMPLETE            2
8803 		D2H_MSGRING_TX_COMPLETE                 3
8804 		D2H_MSGRING_RX_COMPLETE                 4
8805 	*/
8806 
8807 	{
8808 		/* ringmemptr holds start of the mem block address space */
8809 		tcm_memloc = ltoh32(ring_info->ringmem_ptr);
8810 
8811 		/* Find out the ringmem ptr for each common ring */
8812 		for (i = 0; i <= BCMPCIE_COMMON_MSGRING_MAX_ID; i++) {
8813 			bus->ring_sh[i].ring_mem_addr = tcm_memloc;
8814 			/* Update mem block */
8815 			tcm_memloc = tcm_memloc + sizeof(ring_mem_t);
8816 			DHD_INFO(("%s: ring id %d ring mem addr 0x%04x \n", __FUNCTION__,
8817 				i, bus->ring_sh[i].ring_mem_addr));
8818 		}
8819 	}
8820 
8821 	/* Ring state mem ptr info */
8822 	{
8823 		d2h_w_idx_ptr = ltoh32(ring_info->d2h_w_idx_ptr);
8824 		d2h_r_idx_ptr = ltoh32(ring_info->d2h_r_idx_ptr);
8825 		h2d_w_idx_ptr = ltoh32(ring_info->h2d_w_idx_ptr);
8826 		h2d_r_idx_ptr = ltoh32(ring_info->h2d_r_idx_ptr);
8827 
8828 		/* Store h2d common ring write/read pointers */
8829 		for (i = 0; i < BCMPCIE_H2D_COMMON_MSGRINGS; i++) {
8830 			bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
8831 			bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
8832 
8833 			/* update mem block */
8834 			h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz;
8835 			h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz;
8836 
8837 			DHD_INFO(("%s: h2d w/r : idx %d write %x read %x \n", __FUNCTION__, i,
8838 				bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
8839 		}
8840 
8841 		/* Store d2h common ring write/read pointers */
8842 		for (j = 0; j < BCMPCIE_D2H_COMMON_MSGRINGS; j++, i++) {
8843 			bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
8844 			bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;
8845 
8846 			/* update mem block */
8847 			d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz;
8848 			d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz;
8849 
8850 			DHD_INFO(("%s: d2h w/r : idx %d write %x read %x \n", __FUNCTION__, i,
8851 				bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
8852 		}
8853 
8854 		/* Store txflow ring write/read pointers */
8855 		if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) {
8856 			max_tx_flowrings -= BCMPCIE_H2D_COMMON_MSGRINGS;
8857 		} else {
8858 			/* Account for Debug info h2d ring located after the last tx flow ring */
8859 			max_tx_flowrings = max_tx_flowrings + 1;
8860 		}
8861 		for (j = 0; j < max_tx_flowrings; i++, j++)
8862 		{
8863 			bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
8864 			bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
8865 
8866 			/* update mem block */
8867 			h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz;
8868 			h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz;
8869 
8870 			DHD_INFO(("%s: FLOW Rings h2d w/r : idx %d write %x read %x \n",
8871 				__FUNCTION__, i,
8872 				bus->ring_sh[i].ring_state_w,
8873 				bus->ring_sh[i].ring_state_r));
8874 		}
8875 		/* store wr/rd pointers for  debug info completion ring */
8876 		bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
8877 		bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;
8878 		d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz;
8879 		d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz;
8880 		DHD_INFO(("d2h w/r : idx %d write %x read %x \n", i,
8881 			bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
8882 	}
8883 } /* dhd_fillup_ring_sharedptr_info */
8884 
8885 /**
8886  * Initialize bus module: prepare for communication with the dongle. Called after downloading
8887  * firmware into the dongle.
8888  */
8889 int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
8890 {
8891 	dhd_bus_t *bus = dhdp->bus;
8892 	int  ret = 0;
8893 
8894 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
8895 
8896 	ASSERT(bus->dhd);
8897 	if (!bus->dhd)
8898 		return 0;
8899 
8900 	if (PCIE_RELOAD_WAR_ENAB(bus->sih->buscorerev)) {
8901 		dhd_bus_pcie_pwr_req_clear_reload_war(bus);
8902 	}
8903 
8904 	if (MULTIBP_ENAB(bus->sih)) {
8905 		dhd_bus_pcie_pwr_req(bus);
8906 	}
8907 
8908 	/* Configure AER registers to log the TLP header */
8909 	dhd_bus_aer_config(bus);
8910 
8911 	/* Make sure we're talking to the core. */
8912 	bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
8913 	ASSERT(bus->reg != NULL);
8914 
8915 	/* before opening up the bus for data transfer, check if the shared area is intact */
8916 	ret = dhdpcie_readshared(bus);
8917 	if (ret < 0) {
8918 		DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__));
8919 		goto exit;
8920 	}
8921 
8922 	/* Make sure we're talking to the core. */
8923 	bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
8924 	ASSERT(bus->reg != NULL);
8925 
8926 	dhd_init_bus_lock(bus);
8927 
8928 	dhd_init_backplane_access_lock(bus);
8929 
8930 	/* Set bus state according to enable result */
8931 	dhdp->busstate = DHD_BUS_DATA;
8932 	bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
8933 	dhdp->dhd_bus_busy_state = 0;
8934 
8935 	/* D11 status via PCIe completion header */
8936 	if ((ret = dhdpcie_init_d11status(bus)) < 0) {
8937 		goto exit;
8938 	}
8939 
8940 	if (!dhd_download_fw_on_driverload)
8941 		dhd_dpc_enable(bus->dhd);
8942 	/* Enable the interrupt after device is up */
8943 	dhdpcie_bus_intr_enable(bus);
8944 
8945 	bus->intr_enabled = TRUE;
8946 
8947 	/* bcmsdh_intr_unmask(bus->sdh); */
8948 	bus->idletime = 0;
8949 
8950 	/* Make use_d0_inform TRUE for Rev 5 for backward compatibility */
8951 	if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) {
8952 		bus->use_d0_inform = TRUE;
8953 	} else {
8954 		bus->use_d0_inform = FALSE;
8955 	}
8956 
8957 exit:
8958 	if (MULTIBP_ENAB(bus->sih)) {
8959 		dhd_bus_pcie_pwr_req_clear(bus);
8960 	}
8961 	return ret;
8962 }
8963 
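/** Clear the last word of dongle RAM so a stale shared-area pointer is not picked up later. */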
8964 static void
8965 dhdpcie_init_shared_addr(dhd_bus_t *bus)
8966 {
8967 	uint32 addr = 0;
8968 	uint32 val = 0;
8969 
8970 	addr = bus->dongle_ram_base + bus->ramsize - 4;
8971 	dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val));
8972 }
8973 
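/** Returns 0 if (vendor, device) identifies a supported Broadcom PCIe chip, -ENODEV otherwise. */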
8974 bool
8975 dhdpcie_chipmatch(uint16 vendor, uint16 device)
8976 {
8977 	if (vendor != PCI_VENDOR_ID_BROADCOM) {
8978 		DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__,
8979 			vendor, device));
8980 		return (-ENODEV);
8981 	}
8982 
8983 	if ((device == BCM4350_D11AC_ID) || (device == BCM4350_D11AC2G_ID) ||
8984 		(device == BCM4350_D11AC5G_ID) || (device == BCM4350_CHIP_ID) ||
8985 		(device == BCM43569_CHIP_ID)) {
8986 		return 0;
8987 	}
8988 
8989 	if ((device == BCM4354_D11AC_ID) || (device == BCM4354_D11AC2G_ID) ||
8990 		(device == BCM4354_D11AC5G_ID) || (device == BCM4354_CHIP_ID)) {
8991 		return 0;
8992 	}
8993 
8994 	if ((device == BCM4356_D11AC_ID) || (device == BCM4356_D11AC2G_ID) ||
8995 		(device == BCM4356_D11AC5G_ID) || (device == BCM4356_CHIP_ID)) {
8996 		return 0;
8997 	}
8998 
8999 	if ((device == BCM4371_D11AC_ID) || (device == BCM4371_D11AC2G_ID) ||
9000 		(device == BCM4371_D11AC5G_ID) || (device == BCM4371_CHIP_ID)) {
9001 		return 0;
9002 	}
9003 
9004 	if ((device == BCM4345_D11AC_ID) || (device == BCM4345_D11AC2G_ID) ||
9005 		(device == BCM4345_D11AC5G_ID) || BCM4345_CHIP(device)) {
9006 		return 0;
9007 	}
9008 
9009 	if ((device == BCM43452_D11AC_ID) || (device == BCM43452_D11AC2G_ID) ||
9010 		(device == BCM43452_D11AC5G_ID)) {
9011 		return 0;
9012 	}
9013 
9014 	if ((device == BCM4335_D11AC_ID) || (device == BCM4335_D11AC2G_ID) ||
9015 		(device == BCM4335_D11AC5G_ID) || (device == BCM4335_CHIP_ID)) {
9016 		return 0;
9017 	}
9018 
9019 	if ((device == BCM43602_D11AC_ID) || (device == BCM43602_D11AC2G_ID) ||
9020 		(device == BCM43602_D11AC5G_ID) || (device == BCM43602_CHIP_ID)) {
9021 		return 0;
9022 	}
9023 
9024 	if ((device == BCM43569_D11AC_ID) || (device == BCM43569_D11AC2G_ID) ||
9025 		(device == BCM43569_D11AC5G_ID) || (device == BCM43569_CHIP_ID)) {
9026 		return 0;
9027 	}
9028 
9029 	if ((device == BCM4358_D11AC_ID) || (device == BCM4358_D11AC2G_ID) ||
9030 		(device == BCM4358_D11AC5G_ID)) {
9031 		return 0;
9032 	}
9033 
9034 	if ((device == BCM4349_D11AC_ID) || (device == BCM4349_D11AC2G_ID) ||
9035 		(device == BCM4349_D11AC5G_ID) || (device == BCM4349_CHIP_ID)) {
9036 		return 0;
9037 	}
9038 
9039 	if ((device == BCM4355_D11AC_ID) || (device == BCM4355_D11AC2G_ID) ||
9040 		(device == BCM4355_D11AC5G_ID) || (device == BCM4355_CHIP_ID)) {
9041 		return 0;
9042 	}
9043 
9044 	if ((device == BCM4359_D11AC_ID) || (device == BCM4359_D11AC2G_ID) ||
9045 		(device == BCM4359_D11AC5G_ID)) {
9046 		return 0;
9047 	}
9048 
9049 	if ((device == BCM43596_D11AC_ID) || (device == BCM43596_D11AC2G_ID) ||
9050 		(device == BCM43596_D11AC5G_ID)) {
9051 		return 0;
9052 	}
9053 
9054 	if ((device == BCM43597_D11AC_ID) || (device == BCM43597_D11AC2G_ID) ||
9055 		(device == BCM43597_D11AC5G_ID)) {
9056 		return 0;
9057 	}
9058 
9059 	if ((device == BCM4364_D11AC_ID) || (device == BCM4364_D11AC2G_ID) ||
9060 		(device == BCM4364_D11AC5G_ID) || (device == BCM4364_CHIP_ID)) {
9061 		return 0;
9062 	}
9063 
9064 	if ((device == BCM4361_D11AC_ID) || (device == BCM4361_D11AC2G_ID) ||
9065 		(device == BCM4361_D11AC5G_ID) || (device == BCM4361_CHIP_ID)) {
9066 		return 0;
9067 	}
9068 	if ((device == BCM4362_D11AX_ID) || (device == BCM4362_D11AX2G_ID) ||
9069 		(device == BCM4362_D11AX5G_ID) || (device == BCM4362_CHIP_ID)) {
9070 		return 0;
9071 	}
9072 	if ((device == BCM43751_D11AX_ID) || (device == BCM43751_D11AX2G_ID) ||
9073 		(device == BCM43751_D11AX5G_ID) || (device == BCM43751_CHIP_ID)) {
9074 		return 0;
9075 	}
9076 	if ((device == BCM43752_D11AX_ID) || (device == BCM43752_D11AX2G_ID) ||
9077 		(device == BCM43752_D11AX5G_ID) || (device == BCM43752_CHIP_ID)) {
9078 		return 0;
9079 	}
9080 	if ((device == BCM4347_D11AC_ID) || (device == BCM4347_D11AC2G_ID) ||
9081 		(device == BCM4347_D11AC5G_ID) || (device == BCM4347_CHIP_ID)) {
9082 		return 0;
9083 	}
9084 
9085 	if ((device == BCM4365_D11AC_ID) || (device == BCM4365_D11AC2G_ID) ||
9086 		(device == BCM4365_D11AC5G_ID) || (device == BCM4365_CHIP_ID)) {
9087 		return 0;
9088 	}
9089 
9090 	if ((device == BCM4366_D11AC_ID) || (device == BCM4366_D11AC2G_ID) ||
9091 		(device == BCM4366_D11AC5G_ID) || (device == BCM4366_CHIP_ID) ||
9092 		(device == BCM43664_CHIP_ID) || (device == BCM43666_CHIP_ID)) {
9093 		return 0;
9094 	}
9095 
9096 	if ((device == BCM4369_D11AX_ID) || (device == BCM4369_D11AX2G_ID) ||
9097 		(device == BCM4369_D11AX5G_ID) || (device == BCM4369_CHIP_ID)) {
9098 		return 0;
9099 	}
9100 
9101 	if ((device == BCM4375_D11AX_ID) || (device == BCM4375_D11AX2G_ID) ||
9102 		(device == BCM4375_D11AX5G_ID) || (device == BCM4375_CHIP_ID)) {
9103 		return 0;
9104 	}
9105 
9106 	DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__, vendor, device));
9107 	return (-ENODEV);
9108 } /* dhdpcie_chipmatch */
9109 
9110 /**
9111  * Name:  dhdpcie_cc_nvmshadow
9112  *
9113  * Description:
9114  * A shadow of OTP/SPROM exists in ChipCommon Region
9115  * betw. 0x800 and 0xBFF (Backplane Addr. 0x1800_0800 and 0x1800_0BFF).
9116  * Strapping option (SPROM vs. OTP), presence of OTP/SPROM and its size
9117  * can also be read from ChipCommon Registers.
9118  */
9119 static int
9120 dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b)
9121 {
9122 	uint16 dump_offset = 0;
9123 	uint32 dump_size = 0, otp_size = 0, sprom_size = 0;
9124 
9125 	/* Table for 65nm OTP Size (in bits) */
9126 	int  otp_size_65nm[8] = {0, 2048, 4096, 8192, 4096, 6144, 512, 1024};
9127 
9128 	volatile uint16 *nvm_shadow;
9129 
9130 	uint cur_coreid;
9131 	uint chipc_corerev;
9132 	chipcregs_t *chipcregs;
9133 
9134 	/* Save the current core */
9135 	cur_coreid = si_coreid(bus->sih);
9136 	/* Switch to ChipC */
9137 	chipcregs = (chipcregs_t *)si_setcore(bus->sih, CC_CORE_ID, 0);
9138 	ASSERT(chipcregs != NULL);
9139 
9140 	chipc_corerev = si_corerev(bus->sih);
9141 
9142 	/* Check ChipcommonCore Rev */
9143 	if (chipc_corerev < 44) {
9144 		DHD_ERROR(("%s: ChipcommonCore Rev %d < 44\n", __FUNCTION__, chipc_corerev));
9145 		return BCME_UNSUPPORTED;
9146 	}
9147 
9148 	/* Check ChipID */
9149 	if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) && !BCM4345_CHIP((uint16)bus->sih->chip) &&
9150 	        ((uint16)bus->sih->chip != BCM4355_CHIP_ID) &&
9151 	        ((uint16)bus->sih->chip != BCM4364_CHIP_ID)) {
9152 		DHD_ERROR(("%s: cc_nvmdump cmd supported for Olympic chips "
9153 					"4350/4345/4355/4364 only\n", __FUNCTION__));
9154 		return BCME_UNSUPPORTED;
9155 	}
9156 
9157 	/* Check if SRC_PRESENT in SpromCtrl(0x190 in ChipCommon Regs) is set */
9158 	if (chipcregs->sromcontrol & SRC_PRESENT) {
9159 		/* SPROM Size: 1Kbits (0x0), 4Kbits (0x1), 16Kbits(0x2) */
9160 		sprom_size = (1 << (2 * ((chipcregs->sromcontrol & SRC_SIZE_MASK)
9161 					>> SRC_SIZE_SHIFT))) * 1024;
9162 		bcm_bprintf(b, "\nSPROM Present (Size %d bits)\n", sprom_size);
9163 	}
9164 
9165 	if (chipcregs->sromcontrol & SRC_OTPPRESENT) {
9166 		bcm_bprintf(b, "\nOTP Present");
9167 
9168 		if (((chipcregs->otplayout & OTPL_WRAP_TYPE_MASK) >> OTPL_WRAP_TYPE_SHIFT)
9169 			== OTPL_WRAP_TYPE_40NM) {
9170 			/* 40nm OTP: Size = (OtpSize + 1) * 1024 bits */
9171 			/* Chipcommon rev51 is a variation on rev45 and does not support
9172 			 * the latest OTP configuration.
9173 			 */
9174 			if (chipc_corerev != 51 && chipc_corerev >= 49) {
9175 				otp_size = (((chipcregs->otplayout & OTPL_ROW_SIZE_MASK)
9176 					>> OTPL_ROW_SIZE_SHIFT) + 1) * 1024;
9177 				bcm_bprintf(b, "(Size %d bits)\n", otp_size);
9178 			} else {
9179 				otp_size =  (((chipcregs->capabilities & CC_CAP_OTPSIZE)
9180 				        >> CC_CAP_OTPSIZE_SHIFT) + 1) * 1024;
9181 				bcm_bprintf(b, "(Size %d bits)\n", otp_size);
9182 			}
9183 		} else {
9184 			/* This part is untested since newer chips have 40nm OTP */
9185 			/* Chipcommon rev51 is a variation on rev45 and does not support
9186 			 * the latest OTP configuration.
9187 			 */
9188 			if (chipc_corerev != 51 && chipc_corerev >= 49) {
9189 				otp_size = otp_size_65nm[(chipcregs->otplayout & OTPL_ROW_SIZE_MASK)
9190 						>> OTPL_ROW_SIZE_SHIFT];
9191 				bcm_bprintf(b, "(Size %d bits)\n", otp_size);
9192 			} else {
9193 				otp_size = otp_size_65nm[(chipcregs->capabilities & CC_CAP_OTPSIZE)
9194 					        >> CC_CAP_OTPSIZE_SHIFT];
9195 				bcm_bprintf(b, "(Size %d bits)\n", otp_size);
9196 				DHD_INFO(("%s: 65nm/130nm OTP Size not tested. \n",
9197 					__FUNCTION__));
9198 			}
9199 		}
9200 	}
9201 
9202 	/* Chipcommon rev51 is a variation on rev45 and does not support
9203 	 * the latest OTP configuration.
9204 	 */
9205 	if (chipc_corerev != 51 && chipc_corerev >= 49) {
9206 		if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
9207 			((chipcregs->otplayout & OTPL_ROW_SIZE_MASK) == 0)) {
9208 			DHD_ERROR(("%s: SPROM and OTP could not be found "
9209 				"sromcontrol = %x, otplayout = %x \n",
9210 				__FUNCTION__, chipcregs->sromcontrol, chipcregs->otplayout));
9211 			return BCME_NOTFOUND;
9212 		}
9213 	} else {
9214 		if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
9215 			((chipcregs->capabilities & CC_CAP_OTPSIZE) == 0)) {
9216 			DHD_ERROR(("%s: SPROM and OTP could not be found "
9217 				"sromcontrol = %x, capablities = %x \n",
9218 				__FUNCTION__, chipcregs->sromcontrol, chipcregs->capabilities));
9219 			return BCME_NOTFOUND;
9220 		}
9221 	}
9222 
9223 	/* Check the strapping option in SpromCtrl: Set = OTP otherwise SPROM */
9224 	if ((!(chipcregs->sromcontrol & SRC_PRESENT) || (chipcregs->sromcontrol & SRC_OTPSEL)) &&
9225 		(chipcregs->sromcontrol & SRC_OTPPRESENT)) {
9226 
9227 		bcm_bprintf(b, "OTP Strap selected.\n"
9228 		               "\nOTP Shadow in ChipCommon:\n");
9229 
9230 		dump_size = otp_size / 16 ; /* 16bit words */
9231 
9232 	} else if (((chipcregs->sromcontrol & SRC_OTPSEL) == 0) &&
9233 		(chipcregs->sromcontrol & SRC_PRESENT)) {
9234 
9235 		bcm_bprintf(b, "SPROM Strap selected\n"
9236 				"\nSPROM Shadow in ChipCommon:\n");
9237 
9238 		/* If SPROM > 8K only 8Kbits is mapped to ChipCommon (0x800 - 0xBFF) */
9239 		/* dump_size in 16bit words */
9240 		dump_size = sprom_size > 8 ? (8 * 1024) / 16 : sprom_size / 16;
9241 	} else {
9242 		DHD_ERROR(("%s: NVM Shadow does not exist in ChipCommon\n",
9243 			__FUNCTION__));
9244 		return BCME_NOTFOUND;
9245 	}
9246 
9247 	if (bus->regs == NULL) {
9248 		DHD_ERROR(("ChipCommon Regs. not initialized\n"));
9249 		return BCME_NOTREADY;
9250 	} else {
9251 		bcm_bprintf(b, "\n Offset:");
9252 
9253 		/* Chipcommon rev51 is a variation on rev45 and does not support
9254 		 * the latest OTP configuration.
9255 		 */
9256 		if (chipc_corerev != 51 && chipc_corerev >= 49) {
9257 			/* ChipCommon can read only 8 Kbits; for ccrev >= 49 the OTP
9258 			 * size is around 12 Kbits, so use the GCI core.
9259 			 */
9260 			nvm_shadow = (volatile uint16 *)si_setcore(bus->sih, GCI_CORE_ID, 0);
9261 		} else {
9262 			/* Point to the SPROM/OTP shadow in ChipCommon */
9263 			nvm_shadow = chipcregs->sromotp;
9264 		}
9265 
9266 		if (nvm_shadow == NULL) {
9267 			DHD_ERROR(("%s: NVM Shadow is not initialized\n", __FUNCTION__));
9268 			return BCME_NOTFOUND;
9269 		}
9270 
9271 		/*
9272 		 * Read 16 bits per iteration.
9273 		 * dump_size & dump_offset are in 16-bit words.
9274 		 */
9275 		while (dump_offset < dump_size) {
9276 			if (dump_offset % 2 == 0)
9277 				/* Print the offset in the shadow space in Bytes */
9278 				bcm_bprintf(b, "\n 0x%04x", dump_offset * 2);
9279 
9280 			bcm_bprintf(b, "\t0x%04x", *(nvm_shadow + dump_offset));
9281 			dump_offset += 0x1;
9282 		}
9283 	}
9284 
9285 	/* Switch back to the original core */
9286 	si_setcore(bus->sih, cur_coreid, 0);
9287 
9288 	return BCME_OK;
9289 } /* dhdpcie_cc_nvmshadow */
9290 
9291 /** Flow rings are dynamically created and destroyed */
9292 void dhd_bus_clean_flow_ring(dhd_bus_t *bus, void *node)
9293 {
9294 	void *pkt;
9295 	flow_queue_t *queue;
9296 	flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)node;
9297 	unsigned long flags;
9298 
9299 	queue = &flow_ring_node->queue;
9300 
9301 #ifdef DHDTCPACK_SUPPRESS
9302 	/* Clean tcp_ack_info_tbl in order to prevent access to a flushed pkt
9303 	 * when a new packet comes in from the network stack.
9304 	 */
9305 	dhd_tcpack_info_tbl_clean(bus->dhd);
9306 #endif /* DHDTCPACK_SUPPRESS */
9307 
9308 #ifdef DHD_HP2P
9309 	if (flow_ring_node->hp2p_ring) {
9310 		bus->dhd->hp2p_ring_active = FALSE;
9311 		flow_ring_node->hp2p_ring = FALSE;
9312 	}
9313 #endif /* DHD_HP2P */
9314 
9315 	/* clean up BUS level info */
9316 	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
9317 
9318 	/* Flush all pending packets in the queue, if any */
9319 	while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
9320 		PKTFREE(bus->dhd->osh, pkt, TRUE);
9321 	}
9322 	ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
9323 
9324 	/* Reinitialise flowring's queue */
9325 	dhd_flow_queue_reinit(bus->dhd, queue, bus->dhd->conf->flow_ring_queue_threshold);
9326 	flow_ring_node->status = FLOW_RING_STATUS_CLOSED;
9327 	flow_ring_node->active = FALSE;
9328 
9329 	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
9330 
9331 	/* Hold flowring_list_lock to ensure no race condition while accessing the List */
9332 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
9333 	dll_delete(&flow_ring_node->list);
9334 	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
9335 
9336 	/* Release the flowring object back into the pool */
9337 	dhd_prot_flowrings_pool_release(bus->dhd,
9338 		flow_ring_node->flowid, flow_ring_node->prot_info);
9339 
9340 	/* Free the flowid back to the flowid allocator */
9341 	dhd_flowid_free(bus->dhd, flow_ring_node->flow_info.ifindex,
9342 	                flow_ring_node->flowid);
9343 }
9344 
9345 /**
9346  * Allocate a Flow ring buffer,
9347  * Init Ring buffer, send Msg to device about flow ring creation
9348 */
9349 int
9350 dhd_bus_flow_ring_create_request(dhd_bus_t *bus, void *arg)
9351 {
9352 	flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg;
9353 
9354 	DHD_INFO(("%s :Flow create\n", __FUNCTION__));
9355 
9356 	/* Send Msg to device about flow ring creation */
9357 	if (dhd_prot_flow_ring_create(bus->dhd, flow_ring_node) != BCME_OK)
9358 		return BCME_NOMEM;
9359 
9360 	return BCME_OK;
9361 }
9362 
9363 /** Handle response from dongle on a 'flow ring create' request */
9364 void
9365 dhd_bus_flow_ring_create_response(dhd_bus_t *bus, uint16 flowid, int32 status)
9366 {
9367 	flow_ring_node_t *flow_ring_node;
9368 	unsigned long flags;
9369 
9370 	DHD_INFO(("%s :Flow Response %d \n", __FUNCTION__, flowid));
9371 
9372 	/* Boundary check of the flowid */
9373 	if (flowid >= bus->dhd->num_flow_rings) {
9374 		DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__,
9375 			flowid, bus->dhd->num_flow_rings));
9376 		return;
9377 	}
9378 
9379 	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
9380 	if (!flow_ring_node) {
9381 		DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
9382 		return;
9383 	}
9384 
9385 	ASSERT(flow_ring_node->flowid == flowid);
9386 	if (flow_ring_node->flowid != flowid) {
9387 		DHD_ERROR(("%s: flowid %d is different from the flowid "
9388 			"of the flow_ring_node %d\n", __FUNCTION__, flowid,
9389 			flow_ring_node->flowid));
9390 		return;
9391 	}
9392 
9393 	if (status != BCME_OK) {
9394 		DHD_ERROR(("%s: Flow create response failure, error status = %d\n",
9395 		     __FUNCTION__, status));
9396 		/* Call Flow clean up */
9397 		dhd_bus_clean_flow_ring(bus, flow_ring_node);
9398 		return;
9399 	}
9400 
9401 	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
9402 	flow_ring_node->status = FLOW_RING_STATUS_OPEN;
9403 	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
9404 
9405 	/* Now add the flow ring node to the active list.
9406 	 * Note that the code that adds the newly created node to the active
9407 	 * list used to live in dhd_flowid_lookup. But after the node is
9408 	 * added to the active list, its contents are still being
9409 	 * filled in by dhd_prot_flow_ring_create.
9410 	 * If there is a D2H interrupt after the node is added to the
9411 	 * active list and before the node is populated with values,
9412 	 * dhd_update_txflowrings would be called from the bottom half;
9413 	 * it would then walk the active flow ring list, pick up the
9414 	 * nodes and operate on them. Since
9415 	 * dhd_prot_flow_ring_create has not finished yet,
9416 	 * the contents of flow_ring_node can still be NULL, leading to
9417 	 * crashes. Hence the flow_ring_node should be added to the
9418 	 * active list only after it is truly created, which is after
9419 	 * receiving the create response message from the dongle.
9420 	 */
9421 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
9422 	dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
9423 	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
9424 
9425 	dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */
9426 
9427 	return;
9428 }
9429 
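/** Mark a flow ring DELETE_PENDING, flush its queued packets and send the delete request to the dongle. */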
9430 int
9431 dhd_bus_flow_ring_delete_request(dhd_bus_t *bus, void *arg)
9432 {
9433 	void * pkt;
9434 	flow_queue_t *queue;
9435 	flow_ring_node_t *flow_ring_node;
9436 	unsigned long flags;
9437 
9438 	DHD_INFO(("%s :Flow Delete\n", __FUNCTION__));
9439 
9440 	flow_ring_node = (flow_ring_node_t *)arg;
9441 
9442 #ifdef DHDTCPACK_SUPPRESS
9443 	/* Clean tcp_ack_info_tbl in order to prevent access to a flushed pkt
9444 	 * when a new packet comes in from the network stack.
9445 	 */
9446 	dhd_tcpack_info_tbl_clean(bus->dhd);
9447 #endif /* DHDTCPACK_SUPPRESS */
9448 	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
9449 	if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) {
9450 		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
9451 		DHD_ERROR(("%s :Delete Pending flowid %u\n", __FUNCTION__, flow_ring_node->flowid));
9452 		return BCME_ERROR;
9453 	}
9454 	flow_ring_node->status = FLOW_RING_STATUS_DELETE_PENDING;
9455 
9456 	queue = &flow_ring_node->queue; /* queue associated with flow ring */
9457 
9458 	/* Flush all pending packets in the queue, if any */
9459 	while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
9460 		PKTFREE(bus->dhd->osh, pkt, TRUE);
9461 	}
9462 	ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
9463 
9464 	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
9465 
9466 	/* Send Msg to device about flow ring deletion */
9467 	dhd_prot_flow_ring_delete(bus->dhd, flow_ring_node);
9468 
9469 	return BCME_OK;
9470 }
9471 
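/** Handle response from dongle on a 'flow ring delete' request; cleans up the ring on success. */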
9472 void
9473 dhd_bus_flow_ring_delete_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
9474 {
9475 	flow_ring_node_t *flow_ring_node;
9476 
9477 	DHD_INFO(("%s :Flow Delete Response %d \n", __FUNCTION__, flowid));
9478 
9479 	/* Boundary check of the flowid */
9480 	if (flowid >= bus->dhd->num_flow_rings) {
9481 		DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__,
9482 			flowid, bus->dhd->num_flow_rings));
9483 		return;
9484 	}
9485 
9486 	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
9487 	if (!flow_ring_node) {
9488 		DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
9489 		return;
9490 	}
9491 
9492 	ASSERT(flow_ring_node->flowid == flowid);
9493 	if (flow_ring_node->flowid != flowid) {
9494 		DHD_ERROR(("%s: flowid %d is different from the flowid "
9495 			"of the flow_ring_node %d\n", __FUNCTION__, flowid,
9496 			flow_ring_node->flowid));
9497 		return;
9498 	}
9499 
9500 	if (status != BCME_OK) {
9501 		DHD_ERROR(("%s: Flow delete response failure, error status = %d\n",
9502 		    __FUNCTION__, status));
9503 		return;
9504 	}
9505 	/* Call Flow clean up */
9506 	dhd_bus_clean_flow_ring(bus, flow_ring_node);
9507 
9508 	return;
9509 
9510 }
9511 
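/** Mark a flow ring FLUSH_PENDING, flush its queued packets and send the flush request to the dongle. */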
9512 int dhd_bus_flow_ring_flush_request(dhd_bus_t *bus, void *arg)
9513 {
9514 	void *pkt;
9515 	flow_queue_t *queue;
9516 	flow_ring_node_t *flow_ring_node;
9517 	unsigned long flags;
9518 
9519 	DHD_INFO(("%s :Flow Flush\n", __FUNCTION__));
9520 
9521 	flow_ring_node = (flow_ring_node_t *)arg;
9522 
9523 	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
9524 	queue = &flow_ring_node->queue; /* queue associated with flow ring */
9525 	/* Flow ring status will be set back to FLOW_RING_STATUS_OPEN
9526 	 * once flow ring flush response is received for this flowring node.
9527 	 */
9528 	flow_ring_node->status = FLOW_RING_STATUS_FLUSH_PENDING;
9529 
9530 #ifdef DHDTCPACK_SUPPRESS
9531 	/* Clean tcp_ack_info_tbl in order to prevent access to a flushed pkt
9532 	 * when a new packet comes in from the network stack.
9533 	 */
9534 	dhd_tcpack_info_tbl_clean(bus->dhd);
9535 #endif /* DHDTCPACK_SUPPRESS */
9536 
9537 	/* Flush all pending packets in the queue, if any */
9538 	while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
9539 		PKTFREE(bus->dhd->osh, pkt, TRUE);
9540 	}
9541 	ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
9542 
9543 	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
9544 
9545 	/* Send Msg to device about flow ring flush */
9546 	dhd_prot_flow_ring_flush(bus->dhd, flow_ring_node);
9547 
9548 	return BCME_OK;
9549 }
9550 
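/** Handle response from dongle on a 'flow ring flush' request; reopens the ring on success. */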
9551 void
9552 dhd_bus_flow_ring_flush_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
9553 {
9554 	flow_ring_node_t *flow_ring_node;
9555 
9556 	if (status != BCME_OK) {
9557 		DHD_ERROR(("%s: Flow flush response failure, error status = %d\n",
9558 		    __FUNCTION__, status));
9559 		return;
9560 	}
9561 
9562 	/* Boundary check of the flowid */
9563 	if (flowid >= bus->dhd->num_flow_rings) {
9564 		DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__,
9565 			flowid, bus->dhd->num_flow_rings));
9566 		return;
9567 	}
9568 
9569 	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
9570 	if (!flow_ring_node) {
9571 		DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
9572 		return;
9573 	}
9574 
9575 	ASSERT(flow_ring_node->flowid == flowid);
9576 	if (flow_ring_node->flowid != flowid) {
9577 		DHD_ERROR(("%s: flowid %d is different from the flowid "
9578 			"of the flow_ring_node %d\n", __FUNCTION__, flowid,
9579 			flow_ring_node->flowid));
9580 		return;
9581 	}
9582 
9583 	flow_ring_node->status = FLOW_RING_STATUS_OPEN;
9584 	return;
9585 }
9586 
9587 uint32
9588 dhd_bus_max_h2d_queues(struct dhd_bus *bus)
9589 {
9590 	return bus->max_submission_rings;
9591 }
9592 
9593 /* To be symmetric with SDIO */
9594 void
9595 dhd_bus_pktq_flush(dhd_pub_t *dhdp)
9596 {
9597 	return;
9598 }
9599 
9600 void
9601 dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val)
9602 {
9603 	dhdp->bus->is_linkdown = val;
9604 }
9605 
9606 int
9607 dhd_bus_get_linkdown(dhd_pub_t *dhdp)
9608 {
9609 	return dhdp->bus->is_linkdown;
9610 }
9611 
9612 int
9613 dhd_bus_get_cto(dhd_pub_t *dhdp)
9614 {
9615 	return dhdp->bus->cto_triggered;
9616 }
9617 
9618 #ifdef IDLE_TX_FLOW_MGMT
9619 /* resume request */
9620 int
9621 dhd_bus_flow_ring_resume_request(dhd_bus_t *bus, void *arg)
9622 {
9623 	flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg;
9624 
9625 	DHD_ERROR(("%s :Flow Resume Request flow id %u\n", __FUNCTION__, flow_ring_node->flowid));
9626 
9627 	flow_ring_node->status = FLOW_RING_STATUS_RESUME_PENDING;
9628 
9629 	/* Send Msg to device about flow ring resume */
9630 	dhd_prot_flow_ring_resume(bus->dhd, flow_ring_node);
9631 
9632 	return BCME_OK;
9633 }
9634 
9635 /* add the node back to active flowring */
9636 void
9637 dhd_bus_flow_ring_resume_response(dhd_bus_t *bus, uint16 flowid, int32 status)
9638 {
9639 
9640 	flow_ring_node_t *flow_ring_node;
9641 
9642 	DHD_TRACE(("%s :flowid %d \n", __FUNCTION__, flowid));
9643 
9644 	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
9645 	ASSERT(flow_ring_node->flowid == flowid);
9646 
9647 	if (status != BCME_OK) {
9648 		DHD_ERROR(("%s Error Status = %d \n",
9649 			__FUNCTION__, status));
9650 		return;
9651 	}
9652 
9653 	DHD_TRACE(("%s :Number of pkts queued in FlowId:%d is -> %u!!\n",
9654 		__FUNCTION__, flow_ring_node->flowid,  flow_ring_node->queue.len));
9655 
9656 	flow_ring_node->status = FLOW_RING_STATUS_OPEN;
9657 
9658 	dhd_bus_schedule_queue(bus, flowid, FALSE);
9659 	return;
9660 }
9661 
9662 /* scan the flow rings in the active list for idle timeout */
9663 void
9664 dhd_bus_check_idle_scan(dhd_bus_t *bus)
9665 {
9666 	uint64 time_stamp; /* in millisec */
9667 	uint64 diff;
9668 
9669 	time_stamp = OSL_SYSUPTIME();
9670 	diff = time_stamp - bus->active_list_last_process_ts;
9671 
9672 	if (diff > IDLE_FLOW_LIST_TIMEOUT) {
9673 		dhd_bus_idle_scan(bus);
9674 		bus->active_list_last_process_ts = OSL_SYSUPTIME();
9675 	}
9676 
9677 	return;
9678 }
9679 
9680 /* scan the nodes in the active list until a non-idle node is found */
9681 void
9682 dhd_bus_idle_scan(dhd_bus_t *bus)
9683 {
9684 	dll_t *item, *prev;
9685 	flow_ring_node_t *flow_ring_node;
9686 	uint64 time_stamp, diff;
9687 	unsigned long flags;
9688 	uint16 ringid[MAX_SUSPEND_REQ];
9689 	uint16 count = 0;
9690 
9691 	time_stamp = OSL_SYSUPTIME();
9692 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
9693 
9694 	for (item = dll_tail_p(&bus->flowring_active_list);
9695 	         !dll_end(&bus->flowring_active_list, item); item = prev) {
9696 		prev = dll_prev_p(item);
9697 
9698 		flow_ring_node = dhd_constlist_to_flowring(item);
9699 
9700 		if (flow_ring_node->flowid == (bus->max_submission_rings - 1))
9701 			continue;
9702 
9703 		if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) {
9704 			/* Takes care of deleting zombie rings */
9705 			/* delete from the active list */
9706 			DHD_INFO(("deleting flow id %u from active list\n",
9707 				flow_ring_node->flowid));
9708 			__dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
9709 			continue;
9710 		}
9711 
9712 		diff = time_stamp - flow_ring_node->last_active_ts;
9713 
9714 		if ((diff > IDLE_FLOW_RING_TIMEOUT) && !(flow_ring_node->queue.len))  {
9715 			DHD_ERROR(("\nSuspending flowid %d\n", flow_ring_node->flowid));
9716 			/* delete from the active list */
9717 			__dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
9718 			flow_ring_node->status = FLOW_RING_STATUS_SUSPENDED;
9719 			ringid[count] = flow_ring_node->flowid;
9720 			count++;
9721 			if (count == MAX_SUSPEND_REQ) {
9722 				/* create a batch message now!! */
9723 				dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count);
9724 				count = 0;
9725 			}
9726 
9727 		} else {
9728 
9729 			/* No more scanning, break from here! */
9730 			break;
9731 		}
9732 	}
9733 
9734 	if (count) {
9735 		dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count);
9736 	}
9737 
9738 	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
9739 
9740 	return;
9741 }
9742 
dhd_flow_ring_move_to_active_list_head(struct dhd_bus * bus,flow_ring_node_t * flow_ring_node)9743 void dhd_flow_ring_move_to_active_list_head(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
9744 {
9745 	unsigned long flags;
9746 	dll_t* list;
9747 
9748 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
9749 	/* check if the node is already at head, otherwise delete it and prepend */
9750 	list = dll_head_p(&bus->flowring_active_list);
9751 	if (&flow_ring_node->list != list) {
9752 		dll_delete(&flow_ring_node->list);
9753 		dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
9754 	}
9755 
9756 	/* update flow ring timestamp */
9757 	flow_ring_node->last_active_ts = OSL_SYSUPTIME();
9758 
9759 	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
9760 
9761 	return;
9762 }
9763 
dhd_flow_ring_add_to_active_list(struct dhd_bus * bus,flow_ring_node_t * flow_ring_node)9764 void dhd_flow_ring_add_to_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
9765 {
9766 	unsigned long flags;
9767 
9768 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
9769 
9770 	dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
9771 	/* update flow ring timestamp */
9772 	flow_ring_node->last_active_ts = OSL_SYSUPTIME();
9773 
9774 	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
9775 
9776 	return;
9777 }
__dhd_flow_ring_delete_from_active_list(struct dhd_bus * bus,flow_ring_node_t * flow_ring_node)9778 void __dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
9779 {
9780 	dll_delete(&flow_ring_node->list);
9781 }
9782 
dhd_flow_ring_delete_from_active_list(struct dhd_bus * bus,flow_ring_node_t * flow_ring_node)9783 void dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
9784 {
9785 	unsigned long flags;
9786 
9787 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
9788 
9789 	__dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
9790 
9791 	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
9792 
9793 	return;
9794 }
9795 #endif /* IDLE_TX_FLOW_MGMT */
9796 
9797 int
dhdpcie_bus_clock_start(struct dhd_bus * bus)9798 dhdpcie_bus_clock_start(struct dhd_bus *bus)
9799 {
9800 	return dhdpcie_start_host_pcieclock(bus);
9801 }
9802 
9803 int
dhdpcie_bus_clock_stop(struct dhd_bus * bus)9804 dhdpcie_bus_clock_stop(struct dhd_bus *bus)
9805 {
9806 	return dhdpcie_stop_host_pcieclock(bus);
9807 }
9808 
9809 int
dhdpcie_bus_disable_device(struct dhd_bus * bus)9810 dhdpcie_bus_disable_device(struct dhd_bus *bus)
9811 {
9812 	return dhdpcie_disable_device(bus);
9813 }
9814 
9815 int
dhdpcie_bus_enable_device(struct dhd_bus * bus)9816 dhdpcie_bus_enable_device(struct dhd_bus *bus)
9817 {
9818 	return dhdpcie_enable_device(bus);
9819 }
9820 
9821 int
dhdpcie_bus_alloc_resource(struct dhd_bus * bus)9822 dhdpcie_bus_alloc_resource(struct dhd_bus *bus)
9823 {
9824 	return dhdpcie_alloc_resource(bus);
9825 }
9826 
9827 void
dhdpcie_bus_free_resource(struct dhd_bus * bus)9828 dhdpcie_bus_free_resource(struct dhd_bus *bus)
9829 {
9830 	dhdpcie_free_resource(bus);
9831 }
9832 
9833 int
dhd_bus_request_irq(struct dhd_bus * bus)9834 dhd_bus_request_irq(struct dhd_bus *bus)
9835 {
9836 	return dhdpcie_bus_request_irq(bus);
9837 }
9838 
9839 bool
dhdpcie_bus_dongle_attach(struct dhd_bus * bus)9840 dhdpcie_bus_dongle_attach(struct dhd_bus *bus)
9841 {
9842 	return dhdpcie_dongle_attach(bus);
9843 }
9844 
9845 int
dhd_bus_release_dongle(struct dhd_bus * bus)9846 dhd_bus_release_dongle(struct dhd_bus *bus)
9847 {
9848 	bool dongle_isolation;
9849 	osl_t *osh;
9850 
9851 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
9852 
9853 	if (bus) {
9854 		osh = bus->osh;
9855 		ASSERT(osh);
9856 
9857 		if (bus->dhd) {
9858 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
9859 			debugger_close();
9860 #endif /* DEBUGGER || DHD_DSCOPE */
9861 
9862 			dongle_isolation = bus->dhd->dongle_isolation;
9863 			dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
9864 		}
9865 	}
9866 
9867 	return 0;
9868 }
9869 
9870 int
dhdpcie_cto_cfg_init(struct dhd_bus * bus,bool enable)9871 dhdpcie_cto_cfg_init(struct dhd_bus *bus, bool enable)
9872 {
9873 	uint32 val;
9874 	if (enable) {
9875 		dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4,
9876 			PCI_CTO_INT_MASK | PCI_SBIM_MASK_SERR);
9877 		val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
9878 		dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val | SPROM_BACKPLANE_EN);
9879 	} else {
9880 		dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, 0);
9881 		val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
9882 		dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val & ~SPROM_BACKPLANE_EN);
9883 	}
9884 	return 0;
9885 }
9886 
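/*
 * Configure PCIe completion timeout (CTO) prevention and recovery.
 * Unsupported below buscorerev 19; on rev 19 it is additionally skipped
 * for Gen1 links. When enabling, the CTO threshold and clock check count
 * are programmed into the ctoctrl core register.
 */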
int
dhdpcie_cto_init(struct dhd_bus *bus, bool enable)
{
	if (bus->sih->buscorerev < 19) {
		DHD_INFO(("%s: Unsupported CTO, buscorerev=%d\n",
			__FUNCTION__, bus->sih->buscorerev));
		return BCME_UNSUPPORTED;
	}

	if (bus->sih->buscorerev == 19) {
		uint32 pcie_lnkst;
		si_corereg(bus->sih, bus->sih->buscoreidx,
			OFFSETOF(sbpcieregs_t, configaddr), ~0, PCI_LINK_STATUS);

		pcie_lnkst = si_corereg(bus->sih, bus->sih->buscoreidx,
			OFFSETOF(sbpcieregs_t, configdata), 0, 0);

		if (((pcie_lnkst >> PCI_LINK_SPEED_SHIFT) &
			PCI_LINK_SPEED_MASK) == PCIE_LNK_SPEED_GEN1) {
			return BCME_UNSUPPORTED;
		}
	}

	bus->cto_enable = enable;

	dhdpcie_cto_cfg_init(bus, enable);

	if (enable) {
		if (bus->cto_threshold == 0) {
			bus->cto_threshold = PCIE_CTO_TO_THRESH_DEFAULT;
		}
		si_corereg(bus->sih, bus->sih->buscoreidx,
			OFFSETOF(sbpcieregs_t, ctoctrl), ~0,
			((bus->cto_threshold << PCIE_CTO_TO_THRESHOLD_SHIFT) &
			PCIE_CTO_TO_THRESHHOLD_MASK) |
			((PCIE_CTO_CLKCHKCNT_VAL << PCIE_CTO_CLKCHKCNT_SHIFT) &
			PCIE_CTO_CLKCHKCNT_MASK) |
			PCIE_CTO_ENAB_MASK);
	} else {
		si_corereg(bus->sih, bus->sih->buscoreidx,
			OFFSETOF(sbpcieregs_t, ctoctrl), ~0, 0);
	}

	DHD_ERROR(("%s: set CTO prevention and recovery enable/disable %d\n",
		__FUNCTION__, bus->cto_enable));

	return 0;
}

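/*
 * Recover from a completion timeout: mask further CTO interrupts, dump the
 * (still accessible) DAR registers, pulse the backplane reset via SPROM
 * control, and poll the DAR error log until the CTO error bit clears,
 * bounded by CTO_TO_CLEAR_WAIT_MAX_CNT attempts.
 */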
static int
dhdpcie_cto_error_recovery(struct dhd_bus *bus)
{
	uint32 pci_intmask, err_status;
	uint8 i = 0;
	uint32 val;

	pci_intmask = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_MASK, 4);
	dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, pci_intmask & ~PCI_CTO_INT_MASK);

	DHD_OS_WAKE_LOCK(bus->dhd);

	DHD_ERROR(("--- CTO Triggered --- %d\n", bus->pwr_req_ref));

	/*
	 * DAR still accessible
	 */
	dhd_bus_dump_dar_registers(bus);

	/* reset backplane */
	val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
	dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val | SPROM_CFG_TO_SB_RST);

	/* clear timeout error */
	while (1) {
		err_status = si_corereg(bus->sih, bus->sih->buscoreidx,
			DAR_ERRLOG(bus->sih->buscorerev),
			0, 0);
		if (err_status & PCIE_CTO_ERR_MASK) {
			si_corereg(bus->sih, bus->sih->buscoreidx,
					DAR_ERRLOG(bus->sih->buscorerev),
					~0, PCIE_CTO_ERR_MASK);
		} else {
			break;
		}
		OSL_DELAY(CTO_TO_CLEAR_WAIT_MS * 1000);
		i++;
		if (i > CTO_TO_CLEAR_WAIT_MAX_CNT) {
			DHD_ERROR(("cto recovery fail\n"));

			DHD_OS_WAKE_UNLOCK(bus->dhd);
			return BCME_ERROR;
		}
	}

	/* clear interrupt status */
	dhdpcie_bus_cfg_write_dword(bus, PCI_INT_STATUS, 4, PCI_CTO_INT_MASK);

	/* Halt ARM & remove reset */
	/* TBD : we can add ARM Halt here in case */

	/* reset SPROM_CFG_TO_SB_RST */
	val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);

	DHD_ERROR(("cto recovery reset 0x%x:SPROM_CFG_TO_SB_RST(0x%x) 0x%x\n",
		PCI_SPROM_CONTROL, SPROM_CFG_TO_SB_RST, val));
	dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val & ~SPROM_CFG_TO_SB_RST);

	val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
	DHD_ERROR(("cto recovery success 0x%x:SPROM_CFG_TO_SB_RST(0x%x) 0x%x\n",
		PCI_SPROM_CONTROL, SPROM_CFG_TO_SB_RST, val));

	DHD_OS_WAKE_UNLOCK(bus->dhd);

	return BCME_OK;
}

void
dhdpcie_ssreset_dis_enum_rst(struct dhd_bus *bus)
{
	uint32 val;

	val = dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4);
	dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4,
		val | (0x1 << PCIE_SSRESET_DIS_ENUM_RST_BIT));
}

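/*
 * If the dongle advertises D2H D11 TX status (PCIE_SHARED2_D2H_D11_TX_STATUS),
 * acknowledge it by setting the corresponding H2D flag in the shared area so
 * that the firmware starts reporting per-packet TX status.
 */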
#if defined(DBG_PKT_MON)
static int
dhdpcie_init_d11status(struct dhd_bus *bus)
{
	uint32 addr;
	uint32 flags2;
	int ret = 0;

	if (bus->pcie_sh->flags2 & PCIE_SHARED2_D2H_D11_TX_STATUS) {
		flags2 = bus->pcie_sh->flags2;
		addr = bus->shared_addr + OFFSETOF(pciedev_shared_t, flags2);
		flags2 |= PCIE_SHARED2_H2D_D11_TX_STATUS;
		ret = dhdpcie_bus_membytes(bus, TRUE, addr,
			(uint8 *)&flags2, sizeof(flags2));
		if (ret < 0) {
			DHD_ERROR(("%s: update flag bit (H2D_D11_TX_STATUS) failed\n",
				__FUNCTION__));
			return ret;
		}
		bus->pcie_sh->flags2 = flags2;
		bus->dhd->d11_tx_status = TRUE;
	}
	return ret;
}

#else
static int
dhdpcie_init_d11status(struct dhd_bus *bus)
{
	return 0;
}
#endif /* DBG_PKT_MON */

#ifdef BCMPCIE_OOB_HOST_WAKE
int
dhd_bus_oob_intr_register(dhd_pub_t *dhdp)
{
	return dhdpcie_oob_intr_register(dhdp->bus);
}

void
dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp)
{
	dhdpcie_oob_intr_unregister(dhdp->bus);
}

void
dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable)
{
	dhdpcie_oob_intr_set(dhdp->bus, enable);
}
#endif /* BCMPCIE_OOB_HOST_WAKE */

bool
dhdpcie_bus_get_pcie_hostready_supported(dhd_bus_t *bus)
{
	return bus->dhd->d2h_hostrdy_supported;
}

void
dhd_pcie_dump_core_regs(dhd_pub_t *pub, uint32 index, uint32 first_addr, uint32 last_addr)
{
	dhd_bus_t *bus = pub->bus;
	uint32 coreoffset = index << 12;
	uint32 core_addr = SI_ENUM_BASE(bus->sih) + coreoffset;
	uint32 value;

	while (first_addr <= last_addr) {
		core_addr = SI_ENUM_BASE(bus->sih) + coreoffset + first_addr;
		if (serialized_backplane_access(bus, core_addr, 4, &value, TRUE) != BCME_OK) {
			DHD_ERROR(("Invalid size/addr combination \n"));
		}
		DHD_ERROR(("[0x%08x]: 0x%08x\n", core_addr, value));
		first_addr = first_addr + 4;
	}
}

bool
dhdpcie_bus_get_pcie_hwa_supported(dhd_bus_t *bus)
{
	if (!bus->dhd)
		return FALSE;
	else if (bus->hwa_enab_bmap) {
		return bus->dhd->hwa_enable;
	} else {
		return FALSE;
	}
}

bool
dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t *bus)
{
	if (!bus->dhd)
		return FALSE;
	else if (bus->idma_enabled) {
		return bus->dhd->idma_enable;
	} else {
		return FALSE;
	}
}

bool
dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t *bus)
{
	if (!bus->dhd)
		return FALSE;
	else if (bus->ifrm_enabled) {
		return bus->dhd->ifrm_enable;
	} else {
		return FALSE;
	}
}

bool
dhdpcie_bus_get_pcie_dar_supported(dhd_bus_t *bus)
{
	if (!bus->dhd) {
		return FALSE;
	} else if (bus->dar_enabled) {
		return bus->dhd->dar_enable;
	} else {
		return FALSE;
	}
}

void
dhdpcie_bus_enab_pcie_dw(dhd_bus_t *bus, uint8 dw_option)
{
	DHD_ERROR(("ENABLING DW:%d\n", dw_option));
	bus->dw_option = dw_option;
}

void
dhd_bus_dump_trap_info(dhd_bus_t *bus, struct bcmstrbuf *strbuf)
{
	trap_t *tr = &bus->dhd->last_trap_info;
	bcm_bprintf(strbuf,
		"\nTRAP type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
		" lp 0x%x, rpc 0x%x"
		"\nTrap offset 0x%x, r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, "
		"r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x, r8 0x%x, r9 0x%x, "
		"r10 0x%x, r11 0x%x, r12 0x%x\n\n",
		ltoh32(tr->type), ltoh32(tr->epc), ltoh32(tr->cpsr), ltoh32(tr->spsr),
		ltoh32(tr->r13), ltoh32(tr->r14), ltoh32(tr->pc),
		ltoh32(bus->pcie_sh->trap_addr),
		ltoh32(tr->r0), ltoh32(tr->r1), ltoh32(tr->r2), ltoh32(tr->r3),
		ltoh32(tr->r4), ltoh32(tr->r5), ltoh32(tr->r6), ltoh32(tr->r7),
		ltoh32(tr->r8), ltoh32(tr->r9), ltoh32(tr->r10),
		ltoh32(tr->r11), ltoh32(tr->r12));
}

int
dhd_bus_readwrite_bp_addr(dhd_pub_t *dhdp, uint addr, uint size, uint *data, bool read)
{
	int bcmerror = 0;
	struct dhd_bus *bus = dhdp->bus;

	if (serialized_backplane_access(bus, addr, size, data, read) != BCME_OK) {
		DHD_ERROR(("Invalid size/addr combination \n"));
		bcmerror = BCME_ERROR;
	}

	return bcmerror;
}

int
dhd_get_idletime(dhd_pub_t *dhd)
{
	return dhd->bus->idletime;
}

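/* Single serialized backplane register read/write helper; logs every access */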
static INLINE void
dhd_sbreg_op(dhd_pub_t *dhd, uint addr, uint *val, bool read)
{
	OSL_DELAY(1);
	if (serialized_backplane_access(dhd->bus, addr, sizeof(uint), val, read) != BCME_OK) {
		DHD_ERROR(("sbreg: Invalid uint addr: 0x%x \n", addr));
	} else {
		DHD_ERROR(("sbreg: addr:0x%x val:0x%x read:%d\n", addr, *val, read));
	}
	return;
}

#ifdef DHD_SSSR_DUMP
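/*
 * Read an SR (save/restore) FIFO out of the dongle: reset the base address
 * offset through addr_reg, then repeatedly read 32-bit words from data_reg
 * into buf (fifo_size bytes in total).
 */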
static int
dhdpcie_get_sssr_fifo_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size,
	uint addr_reg, uint data_reg)
{
	uint addr;
	uint val = 0;
	int i;

	DHD_ERROR(("%s\n", __FUNCTION__));

	if (!buf) {
		DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__));
		return BCME_ERROR;
	}

	if (!fifo_size) {
		DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__));
		return BCME_ERROR;
	}

	/* Set the base address offset to 0 */
	addr = addr_reg;
	val = 0;
	dhd_sbreg_op(dhd, addr, &val, FALSE);

	addr = data_reg;
	/* Read 4 bytes at once and loop for fifo_size / 4 */
	for (i = 0; i < fifo_size / 4; i++) {
		if (serialized_backplane_access(dhd->bus, addr,
				sizeof(uint), &val, TRUE) != BCME_OK) {
			DHD_ERROR(("%s: error in serialized_backplane_access\n", __FUNCTION__));
			return BCME_ERROR;
		}
		buf[i] = val;
		OSL_DELAY(1);
	}
	return BCME_OK;
}

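/*
 * Dump the digital core SR memory. Depending on what the firmware reported
 * in sssr_reg_info this is read either directly from dongle memory, through
 * the VASIP address space (enabling the VASIP clock first if needed), or via
 * the ChipCommon sr_memrw registers on chipcommon corerev 64/65.
 */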
static int
dhdpcie_get_sssr_dig_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size,
	uint addr_reg)
{
	uint addr;
	uint val = 0;
	int i;
	si_t *sih = dhd->bus->sih;

	DHD_ERROR(("%s\n", __FUNCTION__));

	if (!buf) {
		DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__));
		return BCME_ERROR;
	}

	if (!fifo_size) {
		DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__));
		return BCME_ERROR;
	}

	if (addr_reg) {
		if ((!dhd->sssr_reg_info.vasip_regs.vasip_sr_size) &&
			dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) {
			int err = dhdpcie_bus_membytes(dhd->bus, FALSE, addr_reg, (uint8 *)buf,
				fifo_size);
			if (err != BCME_OK) {
				DHD_ERROR(("%s: Error reading dig dump from dongle !\n",
					__FUNCTION__));
			}
		} else {
			/* Check if vasip clk is disabled, if yes enable it */
			addr = dhd->sssr_reg_info.vasip_regs.wrapper_regs.ioctrl;
			dhd_sbreg_op(dhd, addr, &val, TRUE);
			if (!val) {
				val = 1;
				dhd_sbreg_op(dhd, addr, &val, FALSE);
			}

			addr = addr_reg;
			/* Read 4 bytes at once and loop for fifo_size / 4 */
			for (i = 0; i < fifo_size / 4; i++, addr += 4) {
				if (serialized_backplane_access(dhd->bus, addr, sizeof(uint),
					&val, TRUE) != BCME_OK) {
					DHD_ERROR(("%s: Invalid uint addr: 0x%x \n", __FUNCTION__,
						addr));
					return BCME_ERROR;
				}
				buf[i] = val;
				OSL_DELAY(1);
			}
		}
	} else {
		uint cur_coreid;
		uint chipc_corerev;
		chipcregs_t *chipcregs;

		/* Save the current core */
		cur_coreid = si_coreid(sih);

		/* Switch to ChipC */
		chipcregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);

		chipc_corerev = si_corerev(sih);

		if ((chipc_corerev == 64) || (chipc_corerev == 65)) {
			W_REG(si_osh(sih), &chipcregs->sr_memrw_addr, 0);

			/* Read 4 bytes at once and loop for fifo_size / 4 */
			for (i = 0; i < fifo_size / 4; i++) {
				buf[i] = R_REG(si_osh(sih), &chipcregs->sr_memrw_data);
				OSL_DELAY(1);
			}
		}

		/* Switch back to the original core */
		si_setcore(sih, cur_coreid, 0);
	}

	return BCME_OK;
}

#if defined(EWP_ETD_PRSRV_LOGS)
void
dhdpcie_get_etd_preserve_logs(dhd_pub_t *dhd,
		uint8 *ext_trap_data, void *event_decode_data)
{
	hnd_ext_trap_hdr_t *hdr = NULL;
	bcm_tlv_t *tlv;
	eventlog_trapdata_info_t *etd_evtlog = NULL;
	eventlog_trap_buf_info_t *evtlog_buf_arr = NULL;
	uint arr_size = 0;
	int i = 0;
	int err = 0;
	uint32 seqnum = 0;

	if (!ext_trap_data || !event_decode_data || !dhd)
		return;

	if (!dhd->concise_dbg_buf)
		return;

	/* First word is original trap_data, skip */
	ext_trap_data += sizeof(uint32);

	hdr = (hnd_ext_trap_hdr_t *)ext_trap_data;
	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_LOG_DATA);
	if (tlv) {
		uint32 baseaddr = 0;
		uint32 endaddr = dhd->bus->dongle_ram_base + dhd->bus->ramsize - 4;

		etd_evtlog = (eventlog_trapdata_info_t *)tlv->data;
		DHD_ERROR(("%s: etd_evtlog tlv found, num_elements=%x; "
			"seq_num=%x; log_arr_addr=%x\n", __FUNCTION__,
			(etd_evtlog->num_elements),
			ntoh32(etd_evtlog->seq_num), (etd_evtlog->log_arr_addr)));
		arr_size = (uint32)sizeof(*evtlog_buf_arr) * (etd_evtlog->num_elements);
		if (!arr_size) {
			DHD_ERROR(("%s: num event logs is zero! \n", __FUNCTION__));
			return;
		}
		evtlog_buf_arr = MALLOCZ(dhd->osh, arr_size);
		if (!evtlog_buf_arr) {
			DHD_ERROR(("%s: out of memory !\n", __FUNCTION__));
			return;
		}

		/* boundary check */
		baseaddr = etd_evtlog->log_arr_addr;
		if ((baseaddr < dhd->bus->dongle_ram_base) ||
			((baseaddr + arr_size) > endaddr)) {
			DHD_ERROR(("%s: Error reading invalid address\n",
				__FUNCTION__));
			goto err;
		}

		/* read the eventlog_trap_buf_info_t array from dongle memory */
		err = dhdpcie_bus_membytes(dhd->bus, FALSE,
				(ulong)(etd_evtlog->log_arr_addr),
				(uint8 *)evtlog_buf_arr, arr_size);
		if (err != BCME_OK) {
			DHD_ERROR(("%s: Error reading event log array from dongle !\n",
				__FUNCTION__));
			goto err;
		}
		/*
		 * ntoh is required only for seq_num: event logs from the info
		 * ring are sent by the dongle in that byte order, and ETD
		 * follows the same convention.
		 */
		seqnum = ntoh32(etd_evtlog->seq_num);
		memset(dhd->concise_dbg_buf, 0, CONCISE_DUMP_BUFLEN);
		for (i = 0; i < (etd_evtlog->num_elements); ++i) {
			/* boundary check */
			baseaddr = evtlog_buf_arr[i].buf_addr;
			if ((baseaddr < dhd->bus->dongle_ram_base) ||
				((baseaddr + evtlog_buf_arr[i].len) > endaddr)) {
				DHD_ERROR(("%s: Error reading invalid address\n",
					__FUNCTION__));
				goto err;
			}
			/* read each individual event log buf from dongle memory */
			err = dhdpcie_bus_membytes(dhd->bus, FALSE,
					((ulong)evtlog_buf_arr[i].buf_addr),
					dhd->concise_dbg_buf, (evtlog_buf_arr[i].len));
			if (err != BCME_OK) {
				DHD_ERROR(("%s: Error reading event log buffer from dongle !\n",
					__FUNCTION__));
				goto err;
			}
			dhd_dbg_msgtrace_log_parser(dhd, dhd->concise_dbg_buf,
				event_decode_data, (evtlog_buf_arr[i].len),
				FALSE, hton32(seqnum));
			++seqnum;
		}
err:
		MFREE(dhd->osh, evtlog_buf_arr, arr_size);
	} else {
		DHD_ERROR(("%s: Error getting trap log data in ETD !\n", __FUNCTION__));
	}
}
#endif /* EWP_ETD_PRSRV_LOGS */

static uint32
dhdpcie_resume_chipcommon_powerctrl(dhd_pub_t *dhd, uint32 reg_val)
{
	uint addr;
	uint val = 0;

	DHD_ERROR(("%s\n", __FUNCTION__));

	/* conditionally restore bits [11:8] of PowerCtrl */
	addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
	dhd_sbreg_op(dhd, addr, &val, TRUE);
	if (!(val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask)) {
		addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
		dhd_sbreg_op(dhd, addr, &reg_val, FALSE);
	}
	return BCME_OK;
}

static uint32
dhdpcie_suspend_chipcommon_powerctrl(dhd_pub_t *dhd)
{
	uint addr;
	uint val = 0, reg_val = 0;

	DHD_ERROR(("%s\n", __FUNCTION__));

	/* conditionally clear bits [11:8] of PowerCtrl */
	addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
	dhd_sbreg_op(dhd, addr, &reg_val, TRUE);
	if (reg_val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask) {
		addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
		val = 0;
		dhd_sbreg_op(dhd, addr, &val, FALSE);
	}
	return reg_val;
}

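/*
 * Quiesce interrupt sources before SR: clear the ChipCommon and PMU
 * interrupt masks, the PMU resource request timers, and (if present) the
 * VASIP clock enable.
 */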
static int
dhdpcie_clear_intmask_and_timer(dhd_pub_t *dhd)
{
	uint addr;
	uint val;

	DHD_ERROR(("%s\n", __FUNCTION__));

	/* clear chipcommon intmask */
	addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.intmask;
	val = 0x0;
	dhd_sbreg_op(dhd, addr, &val, FALSE);

	/* clear PMUIntMask0 */
	addr = dhd->sssr_reg_info.pmu_regs.base_regs.pmuintmask0;
	val = 0x0;
	dhd_sbreg_op(dhd, addr, &val, FALSE);

	/* clear PMUIntMask1 */
	addr = dhd->sssr_reg_info.pmu_regs.base_regs.pmuintmask1;
	val = 0x0;
	dhd_sbreg_op(dhd, addr, &val, FALSE);

	/* clear res_req_timer */
	addr = dhd->sssr_reg_info.pmu_regs.base_regs.resreqtimer;
	val = 0x0;
	dhd_sbreg_op(dhd, addr, &val, FALSE);

	/* clear macresreqtimer */
	addr = dhd->sssr_reg_info.pmu_regs.base_regs.macresreqtimer;
	val = 0x0;
	dhd_sbreg_op(dhd, addr, &val, FALSE);

	/* clear macresreqtimer1 */
	addr = dhd->sssr_reg_info.pmu_regs.base_regs.macresreqtimer1;
	val = 0x0;
	dhd_sbreg_op(dhd, addr, &val, FALSE);

	/* clear VasipClkEn */
	if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
		addr = dhd->sssr_reg_info.vasip_regs.wrapper_regs.ioctrl;
		val = 0x0;
		dhd_sbreg_op(dhd, addr, &val, FALSE);
	}

	return BCME_OK;
}

static void
dhdpcie_update_d11_status_from_trapdata(dhd_pub_t *dhd)
{
#define TRAP_DATA_MAIN_CORE_BIT_MASK	(1 << 1)
#define TRAP_DATA_AUX_CORE_BIT_MASK	(1 << 4)
	uint trap_data_mask[MAX_NUM_D11CORES] =
		{TRAP_DATA_MAIN_CORE_BIT_MASK, TRAP_DATA_AUX_CORE_BIT_MASK};
	int i;
	/* Apply only for 4375 chip */
	if (dhd_bus_chip_id(dhd) == BCM4375_CHIP_ID) {
		for (i = 0; i < MAX_NUM_D11CORES; i++) {
			if (dhd->sssr_d11_outofreset[i] &&
				(dhd->dongle_trap_data & trap_data_mask[i])) {
				dhd->sssr_d11_outofreset[i] = TRUE;
			} else {
				dhd->sssr_d11_outofreset[i] = FALSE;
			}
			DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d after AND with "
				"trap_data:0x%x-0x%x\n",
				__FUNCTION__, i, dhd->sssr_d11_outofreset[i],
				dhd->dongle_trap_data, trap_data_mask[i]));
		}
	}
}

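/*
 * Record which D11 (MAC) cores are out of reset by checking bit 0 of each
 * core's wrapper resetctrl register; the result is cached in
 * sssr_d11_outofreset[] and then refined with the trap data for 4375.
 */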
static int
dhdpcie_d11_check_outofreset(dhd_pub_t *dhd)
{
	int i;
	uint addr;
	uint val = 0;

	DHD_ERROR(("%s\n", __FUNCTION__));

	for (i = 0; i < MAX_NUM_D11CORES; i++) {
		/* Check if bit 0 of resetctrl is cleared */
		addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl;
		if (!addr) {
			DHD_ERROR(("%s: skipping for core[%d] as 'addr' is NULL\n",
				__FUNCTION__, i));
			continue;
		}
		dhd_sbreg_op(dhd, addr, &val, TRUE);
		if (!(val & 1)) {
			dhd->sssr_d11_outofreset[i] = TRUE;
		} else {
			dhd->sssr_d11_outofreset[i] = FALSE;
		}
		DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d\n",
			__FUNCTION__, i, dhd->sssr_d11_outofreset[i]));
	}
	dhdpcie_update_d11_status_from_trapdata(dhd);

	return BCME_OK;
}

static int
dhdpcie_d11_clear_clk_req(dhd_pub_t *dhd)
{
	int i;
	uint addr;
	uint val = 0;

	DHD_ERROR(("%s\n", __FUNCTION__));

	for (i = 0; i < MAX_NUM_D11CORES; i++) {
		if (dhd->sssr_d11_outofreset[i]) {
			/* clear request clk only if itopoobb is non-zero */
			addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.itopoobb;
			dhd_sbreg_op(dhd, addr, &val, TRUE);
			if (val != 0) {
				/* clear clockcontrolstatus */
				addr = dhd->sssr_reg_info.mac_regs[i].base_regs.clockcontrolstatus;
				val =
				dhd->sssr_reg_info.mac_regs[i].base_regs.clockcontrolstatus_val;
				dhd_sbreg_op(dhd, addr, &val, FALSE);
			}
		}
	}
	return BCME_OK;
}

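/*
 * If the ARM core is out of reset and still requesting a backplane clock
 * (itopoobb non-zero), clear the request via clockcontrolstatus; on
 * multi-backplane chips the CPU is additionally halted (SICF_CPUHALT)
 * without resetting the core.
 */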
static int
dhdpcie_arm_clear_clk_req(dhd_pub_t *dhd)
{
	uint addr;
	uint val = 0;

	DHD_ERROR(("%s\n", __FUNCTION__));

	/* Check if bit 0 of resetctrl is cleared */
	addr = dhd->sssr_reg_info.arm_regs.wrapper_regs.resetctrl;
	dhd_sbreg_op(dhd, addr, &val, TRUE);
	if (!(val & 1)) {
		/* clear request clk only if itopoobb is non-zero */
		addr = dhd->sssr_reg_info.arm_regs.wrapper_regs.itopoobb;
		dhd_sbreg_op(dhd, addr, &val, TRUE);
		if (val != 0) {
			/* clear clockcontrolstatus */
			addr = dhd->sssr_reg_info.arm_regs.base_regs.clockcontrolstatus;
			val = dhd->sssr_reg_info.arm_regs.base_regs.clockcontrolstatus_val;
			dhd_sbreg_op(dhd, addr, &val, FALSE);
		}

		if (MULTIBP_ENAB(dhd->bus->sih)) {
			uint32 resetctrl = dhd->sssr_reg_info.arm_regs.wrapper_regs.resetctrl;

			/* Just halt ARM but do not reset the core */
			resetctrl &= ~(SI_CORE_SIZE - 1);
			resetctrl += OFFSETOF(aidmp_t, ioctrl);

			dhd_sbreg_op(dhd, resetctrl, &val, TRUE);
			val |= SICF_CPUHALT;
			dhd_sbreg_op(dhd, resetctrl, &val, FALSE);
		}
	}
	return BCME_OK;
}

static int
dhdpcie_arm_resume_clk_req(dhd_pub_t *dhd)
{
	uint addr;
	uint val = 0;

	DHD_ERROR(("%s\n", __FUNCTION__));

	/* Check if bit 0 of resetctrl is cleared */
	addr = dhd->sssr_reg_info.arm_regs.wrapper_regs.resetctrl;
	dhd_sbreg_op(dhd, addr, &val, TRUE);
	if (!(val & 1)) {
		if (MULTIBP_ENAB(dhd->bus->sih)) {
			uint32 resetctrl = dhd->sssr_reg_info.arm_regs.wrapper_regs.resetctrl;

			/* Take ARM out of halt but do not reset core */
			resetctrl &= ~(SI_CORE_SIZE - 1);
			resetctrl += OFFSETOF(aidmp_t, ioctrl);

			dhd_sbreg_op(dhd, resetctrl, &val, TRUE);
			val &= ~SICF_CPUHALT;
			dhd_sbreg_op(dhd, resetctrl, &val, FALSE);
			dhd_sbreg_op(dhd, resetctrl, &val, TRUE);
		}
	}

	return BCME_OK;
}

static int
dhdpcie_pcie_clear_clk_req(dhd_pub_t *dhd)
{
	uint addr;
	uint val = 0;

	DHD_ERROR(("%s\n", __FUNCTION__));

	/* clear request clk only if itopoobb is non-zero */
	addr = dhd->sssr_reg_info.pcie_regs.wrapper_regs.itopoobb;
	dhd_sbreg_op(dhd, addr, &val, TRUE);
	if (val) {
		/* clear clockcontrolstatus */
		addr = dhd->sssr_reg_info.pcie_regs.base_regs.clockcontrolstatus;
		val = dhd->sssr_reg_info.pcie_regs.base_regs.clockcontrolstatus_val;
		dhd_sbreg_op(dhd, addr, &val, FALSE);
	}
	return BCME_OK;
}

static int
dhdpcie_pcie_send_ltrsleep(dhd_pub_t *dhd)
{
	uint addr;
	uint val = 0;

	DHD_ERROR(("%s\n", __FUNCTION__));

	addr = dhd->sssr_reg_info.pcie_regs.base_regs.ltrstate;
	val = LTR_ACTIVE;
	dhd_sbreg_op(dhd, addr, &val, FALSE);

	val = LTR_SLEEP;
	dhd_sbreg_op(dhd, addr, &val, FALSE);

	return BCME_OK;
}

static int
dhdpcie_clear_clk_req(dhd_pub_t *dhd)
{
	DHD_ERROR(("%s\n", __FUNCTION__));

	dhdpcie_arm_clear_clk_req(dhd);

	dhdpcie_d11_clear_clk_req(dhd);

	dhdpcie_pcie_clear_clk_req(dhd);

	return BCME_OK;
}

static int
dhdpcie_bring_d11_outofreset(dhd_pub_t *dhd)
{
	int i;
	uint addr;
	uint val = 0;

	DHD_ERROR(("%s\n", __FUNCTION__));

	for (i = 0; i < MAX_NUM_D11CORES; i++) {
		if (dhd->sssr_d11_outofreset[i]) {
			/* disable core by setting bit 0 */
			addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl;
			val = 1;
			dhd_sbreg_op(dhd, addr, &val, FALSE);
			OSL_DELAY(6000);

			addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.ioctrl;
			val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[0];
			dhd_sbreg_op(dhd, addr, &val, FALSE);

			val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[1];
			dhd_sbreg_op(dhd, addr, &val, FALSE);

			/* enable core by clearing bit 0 */
			addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl;
			val = 0;
			dhd_sbreg_op(dhd, addr, &val, FALSE);

			addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.ioctrl;
			val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[2];
			dhd_sbreg_op(dhd, addr, &val, FALSE);

			val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[3];
			dhd_sbreg_op(dhd, addr, &val, FALSE);

			val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[4];
			dhd_sbreg_op(dhd, addr, &val, FALSE);
		}
	}
	return BCME_OK;
}

static int
dhdpcie_sssr_dump_get_before_sr(dhd_pub_t *dhd)
{
	int i;

	DHD_ERROR(("%s\n", __FUNCTION__));

	for (i = 0; i < MAX_NUM_D11CORES; i++) {
		if (dhd->sssr_d11_outofreset[i]) {
			dhdpcie_get_sssr_fifo_dump(dhd, dhd->sssr_d11_before[i],
				dhd->sssr_reg_info.mac_regs[i].sr_size,
				dhd->sssr_reg_info.mac_regs[i].base_regs.xmtaddress,
				dhd->sssr_reg_info.mac_regs[i].base_regs.xmtdata);
		}
	}

	if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
		dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_before,
			dhd->sssr_reg_info.vasip_regs.vasip_sr_size,
			dhd->sssr_reg_info.vasip_regs.vasip_sr_addr);
	} else if ((dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
		dhd->sssr_reg_info.dig_mem_info.dig_sr_addr) {
		dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_before,
			dhd->sssr_reg_info.dig_mem_info.dig_sr_size,
			dhd->sssr_reg_info.dig_mem_info.dig_sr_addr);
	}

	return BCME_OK;
}

static int
dhdpcie_sssr_dump_get_after_sr(dhd_pub_t *dhd)
{
	int i;

	DHD_ERROR(("%s\n", __FUNCTION__));

	for (i = 0; i < MAX_NUM_D11CORES; i++) {
		if (dhd->sssr_d11_outofreset[i]) {
			dhdpcie_get_sssr_fifo_dump(dhd, dhd->sssr_d11_after[i],
				dhd->sssr_reg_info.mac_regs[i].sr_size,
				dhd->sssr_reg_info.mac_regs[i].base_regs.xmtaddress,
				dhd->sssr_reg_info.mac_regs[i].base_regs.xmtdata);
		}
	}

	if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
		dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_after,
			dhd->sssr_reg_info.vasip_regs.vasip_sr_size,
			dhd->sssr_reg_info.vasip_regs.vasip_sr_addr);
	} else if ((dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
		dhd->sssr_reg_info.dig_mem_info.dig_sr_addr) {
		dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_after,
			dhd->sssr_reg_info.dig_mem_info.dig_sr_size,
			dhd->sssr_reg_info.dig_mem_info.dig_sr_addr);
	}

	return BCME_OK;
}

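/*
 * Top-level SSSR (silicon save/restore) dump sequence:
 *   1. capture the pre-SR state of all D11 cores that are out of reset,
 *   2. quiesce interrupts/timers, drop ChipCommon powerctrl, clear clock
 *      requests and send LTR sleep so the chip enters save/restore,
 *   3. power the WL domain back up, restore powerctrl and bring the D11
 *      cores out of reset,
 *   4. capture the post-SR state and hand both snapshots to
 *      dhd_write_sssr_dump().
 */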
int
dhdpcie_sssr_dump(dhd_pub_t *dhd)
{
	uint32 powerctrl_val;

	if (!dhd->sssr_inited) {
		DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
		return BCME_ERROR;
	}

	if (dhd->bus->is_linkdown) {
		DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
		return BCME_ERROR;
	}

	DHD_ERROR(("%s: Before WL down (powerctl: pcie:0x%x chipc:0x%x) "
		"PMU rctl:0x%x res_state:0x%x\n", __FUNCTION__,
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
			OFFSETOF(chipcregs_t, powerctl), 0, 0),
		si_corereg(dhd->bus->sih, 0, OFFSETOF(chipcregs_t, powerctl), 0, 0),
		PMU_REG(dhd->bus->sih, retention_ctl, 0, 0),
		PMU_REG(dhd->bus->sih, res_state, 0, 0)));

	dhdpcie_d11_check_outofreset(dhd);

	DHD_ERROR(("%s: Collecting Dump before SR\n", __FUNCTION__));
	if (dhdpcie_sssr_dump_get_before_sr(dhd) != BCME_OK) {
		DHD_ERROR(("%s: dhdpcie_sssr_dump_get_before_sr failed\n", __FUNCTION__));
		return BCME_ERROR;
	}

	dhdpcie_clear_intmask_and_timer(dhd);
	powerctrl_val = dhdpcie_suspend_chipcommon_powerctrl(dhd);
	dhdpcie_clear_clk_req(dhd);
	dhdpcie_pcie_send_ltrsleep(dhd);

	if (MULTIBP_ENAB(dhd->bus->sih)) {
		dhd_bus_pcie_pwr_req_wl_domain(dhd->bus, OFFSETOF(chipcregs_t, powerctl), FALSE);
	}

	/* Wait for some time before Restore */
	OSL_DELAY(6000);

	DHD_ERROR(("%s: After WL down (powerctl: pcie:0x%x chipc:0x%x) "
		"PMU rctl:0x%x res_state:0x%x\n", __FUNCTION__,
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
			OFFSETOF(chipcregs_t, powerctl), 0, 0),
		si_corereg(dhd->bus->sih, 0, OFFSETOF(chipcregs_t, powerctl), 0, 0),
		PMU_REG(dhd->bus->sih, retention_ctl, 0, 0),
		PMU_REG(dhd->bus->sih, res_state, 0, 0)));

	if (MULTIBP_ENAB(dhd->bus->sih)) {
		dhd_bus_pcie_pwr_req_wl_domain(dhd->bus, OFFSETOF(chipcregs_t, powerctl), TRUE);
		/* Add delay for WL domain to power up */
		OSL_DELAY(15000);

		DHD_ERROR(("%s: After WL up again (powerctl: pcie:0x%x chipc:0x%x) "
			"PMU rctl:0x%x res_state:0x%x\n", __FUNCTION__,
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				OFFSETOF(chipcregs_t, powerctl), 0, 0),
			si_corereg(dhd->bus->sih, 0, OFFSETOF(chipcregs_t, powerctl), 0, 0),
			PMU_REG(dhd->bus->sih, retention_ctl, 0, 0),
			PMU_REG(dhd->bus->sih, res_state, 0, 0)));
	}

	dhdpcie_arm_resume_clk_req(dhd);
	dhdpcie_resume_chipcommon_powerctrl(dhd, powerctrl_val);
	dhdpcie_bring_d11_outofreset(dhd);

	DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__));
	if (dhdpcie_sssr_dump_get_after_sr(dhd) != BCME_OK) {
		DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__));
		return BCME_ERROR;
	}
	dhd->sssr_dump_collected = TRUE;
	dhd_write_sssr_dump(dhd, SSSR_DUMP_MODE_SSSR);

	return BCME_OK;
}

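/* Trigger a FIS capture via the DAR FIS control register and give it time
 * to complete before the dump is collected.
 */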
static int
dhdpcie_fis_trigger(dhd_pub_t *dhd)
{
	if (!dhd->sssr_inited) {
		DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
		return BCME_ERROR;
	}

	if (dhd->bus->is_linkdown) {
		DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
		return BCME_ERROR;
	}

	/* Trigger FIS */
	si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
		DAR_FIS_CTRL(dhd->bus->sih->buscorerev), ~0, DAR_FIS_START_MASK);
	OSL_DELAY(100 * 1000);

	return BCME_OK;
}

int
dhd_bus_fis_trigger(dhd_pub_t *dhd)
{
	return dhdpcie_fis_trigger(dhd);
}

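/*
 * Collect the dump left behind by a FIS capture: raise all PMU resources,
 * force the D11 cores out of reset, clear the FIS-done status in the PMU,
 * then reuse the regular after-SR collection path.
 */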
static int
dhdpcie_fis_dump(dhd_pub_t *dhd)
{
	int i;

	if (!dhd->sssr_inited) {
		DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
		return BCME_ERROR;
	}

	if (dhd->bus->is_linkdown) {
		DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
		return BCME_ERROR;
	}

	/* bring up all pmu resources */
	PMU_REG(dhd->bus->sih, min_res_mask, ~0,
		PMU_REG(dhd->bus->sih, max_res_mask, 0, 0));
	OSL_DELAY(10 * 1000);

	for (i = 0; i < MAX_NUM_D11CORES; i++) {
		dhd->sssr_d11_outofreset[i] = TRUE;
	}

	dhdpcie_bring_d11_outofreset(dhd);
	OSL_DELAY(6000);

	/* clear FIS Done */
	PMU_REG(dhd->bus->sih, fis_ctrl_status, PMU_CLEAR_FIS_DONE_MASK, PMU_CLEAR_FIS_DONE_MASK);

	dhdpcie_d11_check_outofreset(dhd);

	DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__));
	if (dhdpcie_sssr_dump_get_after_sr(dhd) != BCME_OK) {
		DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__));
		return BCME_ERROR;
	}

	dhd_write_sssr_dump(dhd, SSSR_DUMP_MODE_FIS);

	return BCME_OK;
}

int
dhd_bus_fis_dump(dhd_pub_t *dhd)
{
	return dhdpcie_fis_dump(dhd);
}
#endif /* DHD_SSSR_DUMP */

#ifdef DHD_WAKE_STATUS
wake_counts_t*
dhd_bus_get_wakecount(dhd_pub_t *dhd)
{
	return &dhd->bus->wake_counts;
}

int
dhd_bus_get_bus_wake(dhd_pub_t *dhd)
{
	return bcmpcie_set_get_wake(dhd->bus, 0);
}
#endif /* DHD_WAKE_STATUS */

/* Write random number(s) to the TCM. On initialization the firmware reads
 * this memory to fetch the random number(s), and uses it to randomize the
 * heap address space layout.
 */
static int
dhdpcie_wrt_rnd(struct dhd_bus *bus)
{
	bcm_rand_metadata_t rnd_data;
	uint8 rand_buf[BCM_ENTROPY_HOST_NBYTES];
	uint32 count = BCM_ENTROPY_HOST_NBYTES;
	int ret = 0;
	uint32 addr = bus->dongle_ram_base + (bus->ramsize - BCM_NVRAM_OFFSET_TCM) -
		((bus->nvram_csm & 0xffff) * BCM_NVRAM_IMG_COMPRS_FACTOR + sizeof(rnd_data));

	memset(rand_buf, 0, BCM_ENTROPY_HOST_NBYTES);
	rnd_data.signature = htol32(BCM_NVRAM_RNG_SIGNATURE);
	rnd_data.count = htol32(count);
	/* write the metadata about random number */
	dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&rnd_data, sizeof(rnd_data));
	/* scale back by number of random number counts */
	addr -= count;

	/* Now get & write the random number(s) */
	ret = dhd_get_random_bytes(rand_buf, count);
	if (ret != BCME_OK) {
		return ret;
	}
	dhdpcie_bus_membytes(bus, TRUE, addr, rand_buf, count);

	return BCME_OK;
}

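/*
 * Dump interrupt enable/disable counters and the timestamps of the most
 * recent ISR/DPC/suspend/resume events; useful when debugging missed
 * interrupts or wake failures.
 */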
void
dhd_pcie_intr_count_dump(dhd_pub_t *dhd)
{
	struct dhd_bus *bus = dhd->bus;
	uint64 current_time;

	DHD_ERROR(("\n ------- DUMPING INTR enable/disable counters  ------- \r\n"));
	DHD_ERROR(("resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n",
		bus->resume_intr_enable_count, bus->dpc_intr_enable_count));
	DHD_ERROR(("isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n",
		bus->isr_intr_disable_count, bus->suspend_intr_disable_count));
#ifdef BCMPCIE_OOB_HOST_WAKE
	DHD_ERROR(("oob_intr_count=%lu oob_intr_enable_count=%lu oob_intr_disable_count=%lu\n",
		bus->oob_intr_count, bus->oob_intr_enable_count,
		bus->oob_intr_disable_count));
	DHD_ERROR(("oob_irq_num=%d last_oob_irq_time="SEC_USEC_FMT"\n",
		dhdpcie_get_oob_irq_num(bus),
		GET_SEC_USEC(bus->last_oob_irq_time)));
	DHD_ERROR(("last_oob_irq_enable_time="SEC_USEC_FMT
		" last_oob_irq_disable_time="SEC_USEC_FMT"\n",
		GET_SEC_USEC(bus->last_oob_irq_enable_time),
		GET_SEC_USEC(bus->last_oob_irq_disable_time)));
	DHD_ERROR(("oob_irq_enabled=%d oob_gpio_level=%d\n",
		dhdpcie_get_oob_irq_status(bus),
		dhdpcie_get_oob_irq_level()));
#endif /* BCMPCIE_OOB_HOST_WAKE */
	DHD_ERROR(("dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n",
		bus->dpc_return_busdown_count, bus->non_ours_irq_count));

	current_time = OSL_LOCALTIME_NS();
	DHD_ERROR(("\ncurrent_time="SEC_USEC_FMT"\n",
		GET_SEC_USEC(current_time)));
	DHD_ERROR(("isr_entry_time="SEC_USEC_FMT
		" isr_exit_time="SEC_USEC_FMT"\n",
		GET_SEC_USEC(bus->isr_entry_time),
		GET_SEC_USEC(bus->isr_exit_time)));
	DHD_ERROR(("dpc_sched_time="SEC_USEC_FMT
		" last_non_ours_irq_time="SEC_USEC_FMT"\n",
		GET_SEC_USEC(bus->dpc_sched_time),
		GET_SEC_USEC(bus->last_non_ours_irq_time)));
	DHD_ERROR(("dpc_entry_time="SEC_USEC_FMT
		" last_process_ctrlbuf_time="SEC_USEC_FMT"\n",
		GET_SEC_USEC(bus->dpc_entry_time),
		GET_SEC_USEC(bus->last_process_ctrlbuf_time)));
	DHD_ERROR(("last_process_flowring_time="SEC_USEC_FMT
		" last_process_txcpl_time="SEC_USEC_FMT"\n",
		GET_SEC_USEC(bus->last_process_flowring_time),
		GET_SEC_USEC(bus->last_process_txcpl_time)));
	DHD_ERROR(("last_process_rxcpl_time="SEC_USEC_FMT
		" last_process_infocpl_time="SEC_USEC_FMT
		" last_process_edl_time="SEC_USEC_FMT"\n",
		GET_SEC_USEC(bus->last_process_rxcpl_time),
		GET_SEC_USEC(bus->last_process_infocpl_time),
		GET_SEC_USEC(bus->last_process_edl_time)));
	DHD_ERROR(("dpc_exit_time="SEC_USEC_FMT
		" resched_dpc_time="SEC_USEC_FMT"\n",
		GET_SEC_USEC(bus->dpc_exit_time),
		GET_SEC_USEC(bus->resched_dpc_time)));
	DHD_ERROR(("last_d3_inform_time="SEC_USEC_FMT"\n",
		GET_SEC_USEC(bus->last_d3_inform_time)));

	DHD_ERROR(("\nlast_suspend_start_time="SEC_USEC_FMT
		" last_suspend_end_time="SEC_USEC_FMT"\n",
		GET_SEC_USEC(bus->last_suspend_start_time),
		GET_SEC_USEC(bus->last_suspend_end_time)));
	DHD_ERROR(("last_resume_start_time="SEC_USEC_FMT
		" last_resume_end_time="SEC_USEC_FMT"\n",
		GET_SEC_USEC(bus->last_resume_start_time),
		GET_SEC_USEC(bus->last_resume_end_time)));

#if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE)
	DHD_ERROR(("logtrace_thread_entry_time="SEC_USEC_FMT
		" logtrace_thread_sem_down_time="SEC_USEC_FMT
		"\nlogtrace_thread_flush_time="SEC_USEC_FMT
		" logtrace_thread_unexpected_break_time="SEC_USEC_FMT
		"\nlogtrace_thread_complete_time="SEC_USEC_FMT"\n",
		GET_SEC_USEC(dhd->logtrace_thr_ts.entry_time),
		GET_SEC_USEC(dhd->logtrace_thr_ts.sem_down_time),
		GET_SEC_USEC(dhd->logtrace_thr_ts.flush_time),
		GET_SEC_USEC(dhd->logtrace_thr_ts.unexpected_break_time),
		GET_SEC_USEC(dhd->logtrace_thr_ts.complete_time)));
#endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */
}

void
dhd_bus_intr_count_dump(dhd_pub_t *dhd)
{
	dhd_pcie_intr_count_dump(dhd);
}

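/*
 * Dump the AI wrapper registers of the PCIe and ARM cores plus the OOB
 * router status registers; the current core index is saved and restored
 * around the walk.
 */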
int
dhd_pcie_dump_wrapper_regs(dhd_pub_t *dhd)
{
	uint32 save_idx, val;
	si_t *sih = dhd->bus->sih;
	uint32 oob_base, oob_base1;
	uint32 wrapper_dump_list[] = {
		AI_OOBSELOUTA30, AI_OOBSELOUTA74, AI_OOBSELOUTB30, AI_OOBSELOUTB74,
		AI_OOBSELOUTC30, AI_OOBSELOUTC74, AI_OOBSELOUTD30, AI_OOBSELOUTD74,
		AI_RESETSTATUS, AI_RESETCTRL,
		AI_ITIPOOBA, AI_ITIPOOBB, AI_ITIPOOBC, AI_ITIPOOBD,
		AI_ITIPOOBAOUT, AI_ITIPOOBBOUT, AI_ITIPOOBCOUT, AI_ITIPOOBDOUT
	};
	uint32 i;
	hndoobr_reg_t *reg;
	cr4regs_t *cr4regs;
	ca7regs_t *ca7regs;

	save_idx = si_coreidx(sih);

	DHD_ERROR(("%s: Master wrapper Reg\n", __FUNCTION__));

	if (si_setcore(sih, PCIE2_CORE_ID, 0) != NULL) {
		for (i = 0; i < sizeof(wrapper_dump_list) / 4; i++) {
			val = si_wrapperreg(sih, wrapper_dump_list[i], 0, 0);
			DHD_ERROR(("sbreg: addr:0x%x val:0x%x\n", wrapper_dump_list[i], val));
		}
	}

	if ((cr4regs = si_setcore(sih, ARMCR4_CORE_ID, 0)) != NULL) {
		DHD_ERROR(("%s: ARM CR4 wrapper Reg\n", __FUNCTION__));
		for (i = 0; i < sizeof(wrapper_dump_list) / 4; i++) {
			val = si_wrapperreg(sih, wrapper_dump_list[i], 0, 0);
			DHD_ERROR(("sbreg: addr:0x%x val:0x%x\n", wrapper_dump_list[i], val));
		}
		DHD_ERROR(("%s: ARM CR4 core Reg\n", __FUNCTION__));
		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corecontrol));
		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, corecontrol), val));
		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corecapabilities));
		DHD_ERROR(("reg:0x%x val:0x%x\n",
			(uint)OFFSETOF(cr4regs_t, corecapabilities), val));
		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corestatus));
		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, corestatus), val));
		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, nmiisrst));
		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, nmiisrst), val));
		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, nmimask));
		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, nmimask), val));
		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, isrmask));
		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, isrmask), val));
		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, swintreg));
		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, swintreg), val));
		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, intstatus));
		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, intstatus), val));
		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, cyclecnt));
		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, cyclecnt), val));
		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, inttimer));
		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, inttimer), val));
		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, clk_ctl_st));
		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, clk_ctl_st), val));
		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, powerctl));
		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, powerctl), val));
	}

	if ((ca7regs = si_setcore(sih, ARMCA7_CORE_ID, 0)) != NULL) {
		DHD_ERROR(("%s: ARM CA7 core Reg\n", __FUNCTION__));
		val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corecontrol));
		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, corecontrol), val));
		val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corecapabilities));
		DHD_ERROR(("reg:0x%x val:0x%x\n",
			(uint)OFFSETOF(ca7regs_t, corecapabilities), val));
		val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corestatus));
		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, corestatus), val));
		val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, tracecontrol));
		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, tracecontrol), val));
		val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, clk_ctl_st));
		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, clk_ctl_st), val));
		val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, powerctl));
		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, powerctl), val));
	}

	DHD_ERROR(("%s: OOBR Reg\n", __FUNCTION__));

	oob_base = si_oobr_baseaddr(sih, FALSE);
	oob_base1 = si_oobr_baseaddr(sih, TRUE);
	if (oob_base) {
		dhd_sbreg_op(dhd, oob_base + OOB_STATUSA, &val, TRUE);
		dhd_sbreg_op(dhd, oob_base + OOB_STATUSB, &val, TRUE);
		dhd_sbreg_op(dhd, oob_base + OOB_STATUSC, &val, TRUE);
		dhd_sbreg_op(dhd, oob_base + OOB_STATUSD, &val, TRUE);
	} else if ((reg = si_setcore(sih, HND_OOBR_CORE_ID, 0)) != NULL) {
		/* log the address of each intstatus word, not just the core base */
		val = R_REG(dhd->osh, &reg->intstatus[0]);
		DHD_ERROR(("reg: addr:%p val:0x%x\n", &reg->intstatus[0], val));
		val = R_REG(dhd->osh, &reg->intstatus[1]);
		DHD_ERROR(("reg: addr:%p val:0x%x\n", &reg->intstatus[1], val));
		val = R_REG(dhd->osh, &reg->intstatus[2]);
		DHD_ERROR(("reg: addr:%p val:0x%x\n", &reg->intstatus[2], val));
		val = R_REG(dhd->osh, &reg->intstatus[3]);
		DHD_ERROR(("reg: addr:%p val:0x%x\n", &reg->intstatus[3], val));
	}

	if (oob_base1) {
		DHD_ERROR(("%s: Second OOBR Reg\n", __FUNCTION__));

		dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSA, &val, TRUE);
		dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSB, &val, TRUE);
		dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSC, &val, TRUE);
		dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSD, &val, TRUE);
	}

	si_setcoreidx(dhd->bus->sih, save_idx);

	return 0;
}

static void
dhdpcie_hw_war_regdump(dhd_bus_t *bus)
{
	uint32 save_idx, val;
	volatile uint32 *reg;

	save_idx = si_coreidx(bus->sih);
	if ((reg = si_setcore(bus->sih, CC_CORE_ID, 0)) != NULL) {
		val = R_REG(bus->osh, reg + REG_WORK_AROUND);
		DHD_ERROR(("CC HW_WAR :0x%x\n", val));
	}

	if ((reg = si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) != NULL) {
		val = R_REG(bus->osh, reg + REG_WORK_AROUND);
		DHD_ERROR(("ARM HW_WAR:0x%x\n", val));
	}

	if ((reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0)) != NULL) {
		val = R_REG(bus->osh, reg + REG_WORK_AROUND);
		DHD_ERROR(("PCIE HW_WAR :0x%x\n", val));
	}
	si_setcoreidx(bus->sih, save_idx);

	val = PMU_REG_NEW(bus->sih, min_res_mask, 0, 0);
	DHD_ERROR(("MINRESMASK :0x%x\n", val));
}

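/*
 * Dump the DMA engine registers of the PCIe core. The hardcoded
 * 0x200/0x220/0x240/0x260 core offsets index the TX/RX descriptor register
 * blocks of the host-to-device and device-to-host engines, as labelled in
 * the log strings below.
 */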
int
dhd_pcie_dma_info_dump(dhd_pub_t *dhd)
{
	if (dhd->bus->is_linkdown) {
		DHD_ERROR(("\n ------- SKIP DUMPING DMA Registers "
			"due to PCIe link down ------- \r\n"));
		return 0;
	}

	DHD_ERROR(("\n ------- DUMPING DMA Registers ------- \r\n"));

	/* HostToDev */
	DHD_ERROR(("HostToDev TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x200, 0, 0),
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x204, 0, 0)));
	DHD_ERROR(("            : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x208, 0, 0),
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x20C, 0, 0)));
	DHD_ERROR(("            : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x210, 0, 0),
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x214, 0, 0)));

	DHD_ERROR(("HostToDev RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x220, 0, 0),
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x224, 0, 0)));
	DHD_ERROR(("            : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x228, 0, 0),
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x22C, 0, 0)));
	DHD_ERROR(("            : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x230, 0, 0),
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x234, 0, 0)));

	/* DevToHost */
	DHD_ERROR(("DevToHost TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x240, 0, 0),
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x244, 0, 0)));
	DHD_ERROR(("            : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x248, 0, 0),
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x24C, 0, 0)));
	DHD_ERROR(("            : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x250, 0, 0),
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x254, 0, 0)));

	DHD_ERROR(("DevToHost RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x260, 0, 0),
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x264, 0, 0)));
	DHD_ERROR(("            : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x268, 0, 0),
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x26C, 0, 0)));
	DHD_ERROR(("            : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x270, 0, 0),
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x274, 0, 0)));

	return 0;
}

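/*
 * Dump the PCIe mailbox interrupt status/mask, the D2H doorbell 0 register
 * and the shared-memory D2H mailbox data. Returns FALSE as soon as any
 * register reads back as 0xFFFFFFFF, which typically means the device is
 * no longer reachable over the bus.
 */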
bool
dhd_pcie_dump_int_regs(dhd_pub_t *dhd)
{
	uint32 intstatus = 0;
	uint32 intmask = 0;
	uint32 d2h_db0 = 0;
	uint32 d2h_mb_data = 0;

	DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n"));
	intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
		dhd->bus->pcie_mailbox_int, 0, 0);
	if (intstatus == (uint32)-1) {
		DHD_ERROR(("intstatus=0x%x\n", intstatus));
		return FALSE;
	}

	intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
		dhd->bus->pcie_mailbox_mask, 0, 0);
	if (intmask == (uint32)-1) {
		DHD_ERROR(("intstatus=0x%x intmask=0x%x\n", intstatus, intmask));
		return FALSE;
	}

	d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
		PCID2H_MailBox, 0, 0);
	if (d2h_db0 == (uint32)-1) {
		DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
			intstatus, intmask, d2h_db0));
		return FALSE;
	}

	DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
		intstatus, intmask, d2h_db0));
	dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
	DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data,
		dhd->bus->def_intmask));

	return TRUE;
}

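/*
 * Dump the AER Uncorrectable Error Status register from the root complex's
 * extended capability space; with EXTENDED_PCIE_DEBUG_DUMP enabled, also
 * dump the four AER header log registers.
 */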
void
dhd_pcie_dump_rc_conf_space_cap(dhd_pub_t *dhd)
{
	DHD_ERROR(("\n ------- DUMPING PCIE RC config space Registers ------- \r\n"));
	DHD_ERROR(("Pcie RC Uncorrectable Error Status Val=0x%x\n",
		dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
		PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0)));
#ifdef EXTENDED_PCIE_DEBUG_DUMP
	DHD_ERROR(("hdrlog0=0x%08x hdrlog1=0x%08x hdrlog2=0x%08x hdrlog3=0x%08x\n",
		dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
		PCIE_EXTCAP_ERR_HEADER_LOG_0, TRUE, FALSE, 0),
		dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
		PCIE_EXTCAP_ERR_HEADER_LOG_1, TRUE, FALSE, 0),
		dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
		PCIE_EXTCAP_ERR_HEADER_LOG_2, TRUE, FALSE, 0),
		dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
		PCIE_EXTCAP_ERR_HEADER_LOG_3, TRUE, FALSE, 0)));
#endif /* EXTENDED_PCIE_DEBUG_DUMP */
}

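/*
 * Top-level PCIe debug dump, typically invoked from error paths: bus low
 * power state, host IRQ/tasklet state, interrupt counters, EP resources,
 * RC and EP config space, and, unless the link is already down, the PCIe
 * core, wrapper, HW_WAR and DMA registers. Always returns 0 so it can be
 * called unconditionally.
 */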
int
dhd_pcie_debug_info_dump(dhd_pub_t *dhd)
{
	int host_irq_disabled;

	DHD_ERROR(("bus->bus_low_power_state = %d\n", dhd->bus->bus_low_power_state));
	host_irq_disabled = dhdpcie_irq_disabled(dhd->bus);
	DHD_ERROR(("host pcie_irq disabled = %d\n", host_irq_disabled));
	dhd_print_tasklet_status(dhd);
	dhd_pcie_intr_count_dump(dhd);

	DHD_ERROR(("\n ------- DUMPING PCIE EP Resource Info ------- \r\n"));
	dhdpcie_dump_resource(dhd->bus);

	dhd_pcie_dump_rc_conf_space_cap(dhd);

	DHD_ERROR(("RootPort PCIe linkcap=0x%08x\n",
		dhd_debug_get_rc_linkcap(dhd->bus)));
	DHD_ERROR(("\n ------- DUMPING PCIE EP config space Registers ------- \r\n"));
	DHD_ERROR(("Status Command(0x%x)=0x%x, BaseAddress0(0x%x)=0x%x BaseAddress1(0x%x)=0x%x "
		"PCIE_CFG_PMCSR(0x%x)=0x%x\n",
		PCIECFGREG_STATUS_CMD,
		dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_STATUS_CMD, sizeof(uint32)),
		PCIECFGREG_BASEADDR0,
		dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR0, sizeof(uint32)),
		PCIECFGREG_BASEADDR1,
		dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR1, sizeof(uint32)),
		PCIE_CFG_PMCSR,
		dhd_pcie_config_read(dhd->bus->osh, PCIE_CFG_PMCSR, sizeof(uint32))));
	DHD_ERROR(("LinkCtl(0x%x)=0x%x DeviceStatusControl2(0x%x)=0x%x "
		"L1SSControl(0x%x)=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL,
		dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_LINK_STATUS_CTRL,
		sizeof(uint32)), PCIECFGGEN_DEV_STATUS_CTRL2,
		dhd_pcie_config_read(dhd->bus->osh, PCIECFGGEN_DEV_STATUS_CTRL2,
		sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL1,
		dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL1,
		sizeof(uint32))));
#ifdef EXTENDED_PCIE_DEBUG_DUMP
	DHD_ERROR(("Pcie EP Uncorrectable Error Status Val=0x%x\n",
		dhdpcie_ep_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
		PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0)));
	DHD_ERROR(("hdrlog0(0x%x)=0x%08x hdrlog1(0x%x)=0x%08x hdrlog2(0x%x)=0x%08x "
		"hdrlog3(0x%x)=0x%08x\n", PCI_TLP_HDR_LOG1,
		dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG1, sizeof(uint32)),
		PCI_TLP_HDR_LOG2,
		dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG2, sizeof(uint32)),
		PCI_TLP_HDR_LOG3,
		dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG3, sizeof(uint32)),
		PCI_TLP_HDR_LOG4,
		dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG4, sizeof(uint32))));
	if (dhd->bus->sih->buscorerev >= 24) {
		DHD_ERROR(("DeviceStatusControl(0x%x)=0x%x SubsystemControl(0x%x)=0x%x "
			"L1SSControl2(0x%x)=0x%x\n", PCIECFGREG_DEV_STATUS_CTRL,
			dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_DEV_STATUS_CTRL,
			sizeof(uint32)), PCIE_CFG_SUBSYSTEM_CONTROL,
			dhd_pcie_config_read(dhd->bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL,
			sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL2,
			dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL2,
			sizeof(uint32))));
		dhd_bus_dump_dar_registers(dhd->bus);
	}
#endif /* EXTENDED_PCIE_DEBUG_DUMP */

	if (dhd->bus->is_linkdown) {
		DHD_ERROR(("Skip dumping the PCIe Core registers. link may be DOWN\n"));
		return 0;
	}

	DHD_ERROR(("\n ------- DUMPING PCIE core Registers ------- \r\n"));

	DHD_ERROR(("ClkReq0(0x%x)=0x%x ClkReq1(0x%x)=0x%x ClkReq2(0x%x)=0x%x "
		"ClkReq3(0x%x)=0x%x\n", PCIECFGREG_PHY_DBG_CLKREQ0,
		dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ0),
		PCIECFGREG_PHY_DBG_CLKREQ1,
		dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ1),
		PCIECFGREG_PHY_DBG_CLKREQ2,
		dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ2),
		PCIECFGREG_PHY_DBG_CLKREQ3,
		dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ3)));

#ifdef EXTENDED_PCIE_DEBUG_DUMP
	if (dhd->bus->sih->buscorerev >= 24) {
		DHD_ERROR(("ltssm_hist_0(0x%x)=0x%x ltssm_hist_1(0x%x)=0x%x "
			"ltssm_hist_2(0x%x)=0x%x "
			"ltssm_hist_3(0x%x)=0x%x\n", PCIECFGREG_PHY_LTSSM_HIST_0,
			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_0),
			PCIECFGREG_PHY_LTSSM_HIST_1,
			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_1),
			PCIECFGREG_PHY_LTSSM_HIST_2,
			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_2),
			PCIECFGREG_PHY_LTSSM_HIST_3,
			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_3)));

		DHD_ERROR(("trefup(0x%x)=0x%x trefup_ext(0x%x)=0x%x\n",
			PCIECFGREG_TREFUP,
			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP),
			PCIECFGREG_TREFUP_EXT,
			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP_EXT)));
		DHD_ERROR(("errlog(0x%x)=0x%x errlog_addr(0x%x)=0x%x "
			"Function_Intstatus(0x%x)=0x%x "
			"Function_Intmask(0x%x)=0x%x Power_Intstatus(0x%x)=0x%x "
			"Power_Intmask(0x%x)=0x%x\n",
			PCIE_CORE_REG_ERRLOG,
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				PCIE_CORE_REG_ERRLOG, 0, 0),
			PCIE_CORE_REG_ERR_ADDR,
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				PCIE_CORE_REG_ERR_ADDR, 0, 0),
			PCIFunctionIntstatus(dhd->bus->sih->buscorerev),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				PCIFunctionIntstatus(dhd->bus->sih->buscorerev), 0, 0),
			PCIFunctionIntmask(dhd->bus->sih->buscorerev),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				PCIFunctionIntmask(dhd->bus->sih->buscorerev), 0, 0),
			PCIPowerIntstatus(dhd->bus->sih->buscorerev),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				PCIPowerIntstatus(dhd->bus->sih->buscorerev), 0, 0),
			PCIPowerIntmask(dhd->bus->sih->buscorerev),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				PCIPowerIntmask(dhd->bus->sih->buscorerev), 0, 0)));
		DHD_ERROR(("err_hdrlog1(0x%x)=0x%x err_hdrlog2(0x%x)=0x%x "
			"err_hdrlog3(0x%x)=0x%x err_hdrlog4(0x%x)=0x%x\n",
			(uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1), 0, 0),
			(uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2), 0, 0),
			(uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3), 0, 0),
			(uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4), 0, 0)));
		DHD_ERROR(("err_code(0x%x)=0x%x\n",
			(uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg), 0, 0)));

		dhd_pcie_dump_wrapper_regs(dhd);
		dhdpcie_hw_war_regdump(dhd->bus);
	}
#endif /* EXTENDED_PCIE_DEBUG_DUMP */

	dhd_pcie_dma_info_dump(dhd);

	return 0;
}

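/* Accessor for the bus force_bt_quiesce flag. */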
bool
dhd_bus_force_bt_quiesce_enabled(struct dhd_bus *bus)
{
	return bus->force_bt_quiesce;
}

#ifdef DHD_HP2P
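/*
 * Get/set the maximum number of items in the HP2P TX/RX completion rings;
 * the setter echoes back the value it stored.
 */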
uint16
dhd_bus_get_hp2p_ring_max_size(struct dhd_bus *bus, bool tx)
{
	if (tx)
		return bus->hp2p_txcpl_max_items;
	else
		return bus->hp2p_rxcpl_max_items;
}

static uint16
dhd_bus_set_hp2p_ring_max_size(struct dhd_bus *bus, bool tx, uint16 val)
{
	if (tx)
		bus->hp2p_txcpl_max_items = val;
	else
		bus->hp2p_rxcpl_max_items = val;
	return val;
}
#endif /* DHD_HP2P */

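/*
 * Sanity test of the dongle TCM (RAM): for each fill pattern (all ones,
 * then all zeros) walk the whole RAM in MEMBLOCK-sized chunks, writing the
 * pattern, reading it back and comparing. On a mismatch both buffers are
 * hex-dumped and the test fails; TRUE means every chunk verified for every
 * pattern.
 */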
static bool
dhd_bus_tcm_test(struct dhd_bus *bus)
{
	int ret = 0;
	int size; /* Full mem size */
	int start; /* Start address */
	int read_size = 0; /* Read size of each iteration */
	int num = 0;
	uint8 *read_buf, *write_buf;
	uint8 init_val[NUM_PATTERNS] = {
		0xFFu, /* 11111111 */
		0x00u, /* 00000000 */
	};

	if (!bus) {
		DHD_ERROR(("%s: bus is NULL!\n", __FUNCTION__));
		return FALSE;
	}

	read_buf = MALLOCZ(bus->dhd->osh, MEMBLOCK);
	if (!read_buf) {
		DHD_ERROR(("%s: MALLOC of read_buf failed\n", __FUNCTION__));
		return FALSE;
	}

	write_buf = MALLOCZ(bus->dhd->osh, MEMBLOCK);
	if (!write_buf) {
		MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
		DHD_ERROR(("%s: MALLOC of write_buf failed\n", __FUNCTION__));
		return FALSE;
	}

	DHD_ERROR(("%s: start %x, size: %x\n", __FUNCTION__, bus->dongle_ram_base, bus->ramsize));
	DHD_ERROR(("%s: memblock size %d, #pattern %d\n", __FUNCTION__, MEMBLOCK, NUM_PATTERNS));

	while (num < NUM_PATTERNS) {
		start = bus->dongle_ram_base;
		/* Get full mem size */
		size = bus->ramsize;

		memset(write_buf, init_val[num], MEMBLOCK);
		while (size > 0) {
			read_size = MIN(MEMBLOCK, size);
			memset(read_buf, 0, read_size);

			/* Write */
			if ((ret = dhdpcie_bus_membytes(bus, TRUE, start, write_buf, read_size))) {
				DHD_ERROR(("%s: Write Error membytes %d\n", __FUNCTION__, ret));
				MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
				MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
				return FALSE;
			}

			/* Read */
			if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, read_buf, read_size))) {
				DHD_ERROR(("%s: Read Error membytes %d\n", __FUNCTION__, ret));
				MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
				MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
				return FALSE;
			}

			/* Compare */
			if (memcmp(read_buf, write_buf, read_size)) {
				DHD_ERROR(("%s: Mismatch at %x, iter : %d\n",
					__FUNCTION__, start, num));
				prhex("Readbuf", read_buf, read_size);
				prhex("Writebuf", write_buf, read_size);
				MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
				MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
				return FALSE;
			}

			/* Decrement size and increment start address */
			size -= read_size;
			start += read_size;
		}
		num++;
	}

	MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
	MFREE(bus->dhd->osh, write_buf, MEMBLOCK);

	DHD_ERROR(("%s: Success iter : %d\n", __FUNCTION__, num));
	return TRUE;
}