/*
 * DHD Bus Module for PCIE
 *
 * Copyright (C) 2020, Broadcom.
 *
 *      Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 *      As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module.  An independent module is a module which is not
 * derived from this software.  The special exception does not apply to any
 * modifications of the software.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id$
 */

/** XXX Twiki: [PCIeFullDongleArchitecture] */

/* include files */
#include <typedefs.h>
#include <bcmutils.h>
#include <bcmrand.h>
#include <bcmdevs.h>
#include <bcmdevs_legacy.h>    /* need to still support chips no longer in trunk firmware */
#include <siutils.h>
#include <hndoobr.h>
#include <hndsoc.h>
#include <hndpmu.h>
#include <etd.h>
#include <hnd_debug.h>
#include <sbchipc.h>
#include <sbhndarm.h>
#include <hnd_armtrap.h>
#if defined(DHD_DEBUG)
#include <hnd_cons.h>
#endif /* defined(DHD_DEBUG) */
#include <dngl_stats.h>
#include <pcie_core.h>
#include <dhd.h>
#include <dhd_bus.h>
#include <dhd_flowring.h>
#include <dhd_proto.h>
#include <dhd_dbg.h>
#include <dhd_debug.h>
#if defined(LINUX) || defined(linux)
#include <dhd_daemon.h>
#endif /* LINUX || linux */
#include <dhdioctl.h>
#include <sdiovar.h>
#include <bcmmsgbuf.h>
#include <pcicfg.h>
#include <dhd_pcie.h>
#include <bcmpcie.h>
#include <bcmendian.h>
#include <bcmstdlib_s.h>
#ifdef DHDTCPACK_SUPPRESS
#include <dhd_ip.h>
#endif /* DHDTCPACK_SUPPRESS */
#include <bcmevent.h>
#include <dhd_config.h>

#ifdef DHD_TIMESYNC
#include <dhd_timesync.h>
#endif /* DHD_TIMESYNC */

#ifdef BCM_ROUTER_DHD
#include <bcmnvram.h>
#define STR_END		"END\0\0"
#define BOARDREV_PROMOTABLE_STR	"0xff"
#endif
#if defined(BCMEMBEDIMAGE)
#include BCMEMBEDIMAGE
#endif /* BCMEMBEDIMAGE */

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
#include <linux/pm_runtime.h>
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

#if defined(DEBUGGER) || defined (DHD_DSCOPE)
#include <debugger.h>
#endif /* DEBUGGER || DHD_DSCOPE */

#if defined(FW_SIGNATURE)
#include <dngl_rtlv.h>
#include <bcm_fwsign.h>
#endif /* FW_SIGNATURE */

#ifdef DNGL_AXI_ERROR_LOGGING
#include <dhd_linux_wq.h>
#include <dhd_linux.h>
#endif /* DNGL_AXI_ERROR_LOGGING */

#if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
#include <dhd_linux_priv.h>
#endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */

#define EXTENDED_PCIE_DEBUG_DUMP 1	/* Enable extended PCIe registers dump */

#define MEMBLOCK	2048		/* Block size used for downloading of dongle image */
#ifdef LINUX
#define MAX_WKLK_IDLE_CHECK	3 /* times dhd_wake_lock is checked before deciding not to suspend */
#endif /* LINUX */

#define	DHD_MAX_ITEMS_HPP_TXCPL_RING	512
#define	DHD_MAX_ITEMS_HPP_RXCPL_RING	512
#define MAX_HP2P_CMPL_RINGS		2u

/* XXX defines for 4378 */
#define ARMCR4REG_CORECAP	(0x4/sizeof(uint32))
#define ARMCR4REG_MPUCTRL	(0x90/sizeof(uint32))
#define ACC_MPU_SHIFT		25
#define ACC_MPU_MASK		(0x1u << ACC_MPU_SHIFT)

/* XXX Offset for 4375 workaround register */
#define REG_WORK_AROUND		(0x1e4/sizeof(uint32))

/* XXX defines for 43602a0 workaround JIRA CRWLARMCR4-53 */
#define ARMCR4REG_BANKIDX	(0x40/sizeof(uint32))
#define ARMCR4REG_BANKPDA	(0x4C/sizeof(uint32))
/* Temporary WAR to fix precommit until the sync issue between trunk & precommit branch is resolved */

/* CTO Prevention Recovery */
#define CTO_TO_CLEAR_WAIT_MS 50
#define CTO_TO_CLEAR_WAIT_MAX_CNT 200

/* FLR setting */
#define PCIE_FLR_CAPAB_BIT		28
#define PCIE_FUNCTION_LEVEL_RESET_BIT	15

#ifdef BCMQT_HW
extern int qt_flr_reset;
/* FLR takes longer on QT Z boards, so increase the delay by 30% */
#define DHD_FUNCTION_LEVEL_RESET_DELAY	70u
#define DHD_SSRESET_STATUS_RETRY_DELAY	55u
#else
#define DHD_FUNCTION_LEVEL_RESET_DELAY	70u	/* 70 msec delay */
#define DHD_SSRESET_STATUS_RETRY_DELAY	40u
#endif /* BCMQT_HW */
/*
 * Increase the SSReset de-assert time to 8 ms,
 * since re-scan takes longer on 4378B0.
 */
#define DHD_SSRESET_STATUS_RETRIES	200u

/* Fetch address of a member in the pciedev_shared structure in dongle memory */
#define DHD_PCIE_SHARED_MEMBER_ADDR(bus, member) \
	(bus)->shared_addr + OFFSETOF(pciedev_shared_t, member)

/* Fetch address of a member in rings_info_ptr structure in dongle memory */
#define DHD_RING_INFO_MEMBER_ADDR(bus, member) \
	(bus)->pcie_sh->rings_info_ptr + OFFSETOF(ring_info_t, member)

/* Fetch address of a member in the ring_mem structure in dongle memory */
#define DHD_RING_MEM_MEMBER_ADDR(bus, ringid, member) \
	(bus)->ring_sh[ringid].ring_mem_addr + OFFSETOF(ring_mem_t, member)
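
/*
 * Illustrative sketch (hypothetical, not code from this file): the macros
 * above only compute a dongle-side address; the value is then fetched via
 * the TCM accessors (dhdpcie_bus_rtcm16() etc.), along the lines of:
 *
 *   ulong addr = DHD_RING_INFO_MEMBER_ADDR(bus, max_host_rxbufs);
 *   uint16 max_rxbufs = dhdpcie_bus_rtcm16(bus, addr);
 *
 * (The member name is assumed for illustration; the real pciedev_shared_t /
 * ring_info_t layouts live in the msgbuf protocol headers.)
 */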

#ifdef DHD_REPLACE_LOG_INFO_TO_TRACE
#define DHD_PCIE_INFO DHD_TRACE
#else
#define DHD_PCIE_INFO DHD_INFO
#endif /* DHD_REPLACE_LOG_INFO_TO_TRACE */

#if defined(SUPPORT_MULTIPLE_BOARD_REV)
	extern unsigned int system_rev;
#endif /* SUPPORT_MULTIPLE_BOARD_REV */

#ifdef EWP_EDL
extern int host_edl_support;
#endif

#ifdef BCMQT_HW
extern int qt_dngl_timeout;
#endif /* BCMQT_HW */

/* This can be overridden by the module parameter (dma_ring_indices) defined in dhd_linux.c */
uint dma_ring_indices = 0;
/* This can be overridden by the module parameter (h2d_phase) defined in dhd_linux.c */
bool h2d_phase = 0;
/* This can be overridden by the module parameter (force_trap_bad_h2d_phase)
 * defined in dhd_linux.c
 */
bool force_trap_bad_h2d_phase = 0;

int dhd_dongle_ramsize;
struct dhd_bus *g_dhd_bus = NULL;
#ifdef DNGL_AXI_ERROR_LOGGING
static void dhd_log_dump_axi_error(uint8 *axi_err);
#endif /* DNGL_AXI_ERROR_LOGGING */

static int dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size);
static int dhdpcie_bus_readconsole(dhd_bus_t *bus);
#if defined(DHD_FW_COREDUMP)
static int dhdpcie_mem_dump(dhd_bus_t *bus);
static int dhdpcie_get_mem_dump(dhd_bus_t *bus);
#endif /* DHD_FW_COREDUMP */

static int dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size);
static int dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid,
	const char *name, void *params,
	uint plen, void *arg, uint len, int val_size);
static int dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 intval);
static int dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus,
	uint32 len, uint32 srcdelay, uint32 destdelay,
	uint32 d11_lpbk, uint32 core_num, uint32 wait,
	uint32 mem_addr);
#ifdef BCMINTERNAL
static int dhdpcie_bus_set_tx_lpback(struct dhd_bus *bus, bool enable);
static int dhdpcie_bus_get_tx_lpback(struct dhd_bus *bus);
static uint64 serialized_backplane_access_64(dhd_bus_t* bus, uint addr, uint size, uint64* val,
	bool read);
#endif /* BCMINTERNAL */
static uint serialized_backplane_access(dhd_bus_t* bus, uint addr, uint size, uint* val, bool read);
static int dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter);
static int _dhdpcie_download_firmware(struct dhd_bus *bus);
static int dhdpcie_download_firmware(dhd_bus_t *bus, osl_t *osh);

#if defined(FW_SIGNATURE)
static int dhdpcie_bus_download_fw_signature(dhd_bus_t *bus, bool *do_write);
static int dhdpcie_bus_download_ram_bootloader(dhd_bus_t *bus);
static int dhdpcie_bus_write_fws_status(dhd_bus_t *bus);
static int dhdpcie_bus_write_fws_mem_info(dhd_bus_t *bus);
static int dhdpcie_bus_write_fwsig(dhd_bus_t *bus, char *fwsig_path, char *nvsig_path);
static int dhdpcie_download_rtlv_end(dhd_bus_t *bus);
static int dhdpcie_bus_save_download_info(dhd_bus_t *bus, uint32 download_addr,
	uint32 download_size, const char *signature_fname,
	const char *bloader_fname, uint32 bloader_download_addr);
#endif /* FW_SIGNATURE */

static int dhdpcie_bus_write_vars(dhd_bus_t *bus);
static bool dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus);
static bool dhdpci_bus_read_frames(dhd_bus_t *bus);
static int dhdpcie_readshared(dhd_bus_t *bus);
static void dhdpcie_init_shared_addr(dhd_bus_t *bus);
static bool dhdpcie_dongle_attach(dhd_bus_t *bus);
static void dhdpcie_bus_dongle_setmemsize(dhd_bus_t *bus, int mem_size);
static void dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh,
	bool dongle_isolation, bool reset_flag);
static void dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh);
static int dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len);
static void dhdpcie_setbar1win(dhd_bus_t *bus, uint32 addr);
static void dhd_init_bar1_switch_lock(dhd_bus_t *bus);
static void dhd_deinit_bar1_switch_lock(dhd_bus_t *bus);
static void dhd_init_pwr_req_lock(dhd_bus_t *bus);
static void dhd_deinit_pwr_req_lock(dhd_bus_t *bus);
static void dhd_init_bus_lp_state_lock(dhd_bus_t *bus);
static void dhd_deinit_bus_lp_state_lock(dhd_bus_t *bus);
static void dhd_init_backplane_access_lock(dhd_bus_t *bus);
static void dhd_deinit_backplane_access_lock(dhd_bus_t *bus);
static uint8 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset);
static void dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data);
static void dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data);
static uint16 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset);
static void dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data);
static uint32 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset);
#ifdef DHD_SUPPORT_64BIT
static void dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data) __attribute__ ((used));
static uint64 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset) __attribute__ ((used));
#endif /* DHD_SUPPORT_64BIT */
static void dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data);
static void dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size);
static int dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b);
static void dhdpcie_fw_trap(dhd_bus_t *bus);
static void dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info);
static void dhdpcie_handle_mb_data(dhd_bus_t *bus);
extern void dhd_dpc_enable(dhd_pub_t *dhdp);
#ifdef PCIE_INB_DW
static void dhd_bus_ds_trace(dhd_bus_t *bus, uint32 dsval,
	bool d2h, enum dhd_bus_ds_state inbstate);
#else
static void dhd_bus_ds_trace(dhd_bus_t *bus, uint32 dsval, bool d2h);
#endif /* PCIE_INB_DW */
#ifdef DHD_MMIO_TRACE
static void dhd_bus_mmio_trace(dhd_bus_t *bus, uint32 addr, uint32 value, bool set);
#endif /* defined(DHD_MMIO_TRACE) */
#if defined(LINUX) || defined(linux)
extern void dhd_dpc_kill(dhd_pub_t *dhdp);
#endif /* LINUX || linux */

#ifdef IDLE_TX_FLOW_MGMT
static void dhd_bus_check_idle_scan(dhd_bus_t *bus);
static void dhd_bus_idle_scan(dhd_bus_t *bus);
#endif /* IDLE_TX_FLOW_MGMT */

#ifdef BCMEMBEDIMAGE
static int dhdpcie_download_code_array(dhd_bus_t *bus);
#endif /* BCMEMBEDIMAGE */
#ifdef BCM_ROUTER_DHD
extern char * nvram_get(const char *name);
#endif
#if defined(BCMEMBEDIMAGE) && defined (BCM_ROUTER_DHD)
static void  select_fd_image(
		struct dhd_bus *bus, unsigned char **p_dlarray,
		char **p_dlimagename, char **p_dlimagever,
		char **p_dlimagedate, int *image_size);
#endif /* defined(BCMEMBEDIMAGE) && defined (BCM_ROUTER_DHD) */

#ifdef BCM_ROUTER_DHD
int dbushost_initvars_flash(si_t *sih, osl_t *osh, char **base, uint len);
#endif

#ifdef EXYNOS_PCIE_DEBUG
extern void exynos_pcie_register_dump(int ch_num);
#endif /* EXYNOS_PCIE_DEBUG */

#if defined(DHD_H2D_LOG_TIME_SYNC)
static void dhdpci_bus_rte_log_time_sync_poll(dhd_bus_t *bus);
#endif /* DHD_H2D_LOG_TIME_SYNC */

#define     PCI_VENDOR_ID_BROADCOM          0x14e4

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
#define MAX_D3_ACK_TIMEOUT	100
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

#ifdef BCMQT
#define DHD_DEFAULT_DOORBELL_TIMEOUT 40	/* ms */
#else
#define DHD_DEFAULT_DOORBELL_TIMEOUT 200	/* ms */
#endif
#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
static uint dhd_doorbell_timeout = DHD_DEFAULT_DOORBELL_TIMEOUT;
#endif /* PCIE_OOB || PCIE_INB_DW */

static bool dhdpcie_check_firmware_compatible(uint32 f_api_version, uint32 h_api_version);
static int dhdpcie_cto_error_recovery(struct dhd_bus *bus);

static int dhdpcie_init_d11status(struct dhd_bus *bus);

static int dhdpcie_wrt_rnd(struct dhd_bus *bus);

#ifdef BCMINTERNAL
#ifdef DHD_FWTRACE
#include <dhd_fwtrace.h>
#endif /* DHD_FWTRACE */
#endif /* BCMINTERNAL */

#ifdef DHD_HP2P
extern enum hrtimer_restart dhd_hp2p_write(struct hrtimer *timer);
static uint16 dhd_bus_set_hp2p_ring_max_size(struct dhd_bus *bus, bool tx, uint16 val);
#endif
#if defined(linux) || defined(LINUX)
#ifdef DHD_FW_MEM_CORRUPTION
#define NUM_PATTERNS 2
#else
#define NUM_PATTERNS 6
#endif /* DHD_FW_MEM_CORRUPTION */
static bool dhd_bus_tcm_test(struct dhd_bus *bus);
#endif /* LINUX || linux */

#if defined(FW_SIGNATURE)
static int dhd_bus_dump_fws(dhd_bus_t *bus, struct bcmstrbuf *strbuf);
#endif
static void dhdpcie_pme_stat_clear(dhd_bus_t *bus);

/* IOVar table */
enum {
	IOV_INTR = 1,
#ifdef DHD_BUS_MEM_ACCESS
	IOV_MEMBYTES,
#endif /* DHD_BUS_MEM_ACCESS */
	IOV_MEMSIZE,
	IOV_SET_DOWNLOAD_STATE,
	IOV_SET_DOWNLOAD_INFO,
	IOV_DEVRESET,
	IOV_VARS,
	IOV_MSI_SIM,
	IOV_PCIE_LPBK,
	IOV_CC_NVMSHADOW,
	IOV_RAMSIZE,
	IOV_RAMSTART,
	IOV_SLEEP_ALLOWED,
#ifdef BCMINTERNAL
	IOV_PCIE_TX_LPBK,
#endif /* BCMINTERNAL */
	IOV_PCIE_DMAXFER,
	IOV_PCIE_SUSPEND,
#ifdef DHD_PCIE_REG_ACCESS
	IOV_PCIEREG,
	IOV_PCIECFGREG,
	IOV_PCIECOREREG,
	IOV_PCIESERDESREG,
	IOV_PCIEASPM,
	IOV_BAR0_SECWIN_REG,
	IOV_SBREG,
#endif /* DHD_PCIE_REG_ACCESS */
	IOV_DONGLEISOLATION,
	IOV_LTRSLEEPON_UNLOOAD,
	IOV_METADATA_DBG,
	IOV_RX_METADATALEN,
	IOV_TX_METADATALEN,
	IOV_TXP_THRESHOLD,
	IOV_BUZZZ_DUMP,
	IOV_DUMP_RINGUPD_BLOCK,
	IOV_DMA_RINGINDICES,
	IOV_FORCE_FW_TRAP,
	IOV_DB1_FOR_MB,
	IOV_FLOW_PRIO_MAP,
#ifdef DHD_PCIE_RUNTIMEPM
	IOV_IDLETIME,
#endif /* DHD_PCIE_RUNTIMEPM */
	IOV_RXBOUND,
	IOV_TXBOUND,
	IOV_HANGREPORT,
	IOV_H2D_MAILBOXDATA,
	IOV_INFORINGS,
	IOV_H2D_PHASE,
	IOV_H2D_ENABLE_TRAP_BADPHASE,
	IOV_H2D_TXPOST_MAX_ITEM,
#if defined(DHD_HTPUT_TUNABLES)
	IOV_H2D_HTPUT_TXPOST_MAX_ITEM,
#endif /* DHD_HTPUT_TUNABLES */
	IOV_TRAPDATA,
	IOV_TRAPDATA_RAW,
	IOV_CTO_PREVENTION,
#ifdef PCIE_OOB
	IOV_OOB_BT_REG_ON,
	IOV_OOB_ENABLE,
#endif /* PCIE_OOB */
#ifdef DEVICE_TX_STUCK_DETECT
	IOV_DEVICE_TX_STUCK_DETECT,
#endif /* DEVICE_TX_STUCK_DETECT */
	IOV_PCIE_WD_RESET,
	IOV_DUMP_DONGLE,
#ifdef DHD_EFI
	IOV_WIFI_PROPERTIES,
	IOV_CONTROL_SIGNAL,
	IOV_OTP_DUMP,
#ifdef BT_OVER_PCIE
	IOV_BTOP_TEST,
#endif
#endif /* DHD_EFI */
	IOV_IDMA_ENABLE,
	IOV_IFRM_ENABLE,
	IOV_CLEAR_RING,
	IOV_DAR_ENABLE,
	IOV_DHD_CAPS,   /**< returns string with dhd capabilities */
#if defined(DEBUGGER) || defined(DHD_DSCOPE)
	IOV_GDB_SERVER,  /**< starts gdb server on given interface */
#endif /* DEBUGGER || DHD_DSCOPE */
#if defined(GDB_PROXY)
	IOV_GDB_PROXY_PROBE, /**< gdb proxy support presence check */
	IOV_GDB_PROXY_STOP_COUNT, /**< gdb proxy firmware stop count */
#endif /* GDB_PROXY */
	IOV_INB_DW_ENABLE,
#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
	IOV_DEEP_SLEEP,
#endif /* PCIE_OOB || PCIE_INB_DW */
	IOV_CTO_THRESHOLD,
#ifdef D2H_MINIDUMP
	IOV_MINIDUMP_OVERRIDE,
#endif /* D2H_MINIDUMP */
#ifdef BCMINTERNAL
	IOV_DMA_CHAN,
	IOV_HYBRIDFW,
#endif /* BCMINTERNAL */
	IOV_HSCBSIZE, /* get HSCB buffer size */
#ifdef DHD_BUS_MEM_ACCESS
	IOV_HSCBBYTES, /* copy HSCB buffer */
#endif
#ifdef BCMINTERNAL
#ifdef DHD_FWTRACE
	IOV_FWTRACE,   /* Enable/disable firmware tracing */
#endif /* DHD_FWTRACE */
#endif /* BCMINTERNAL */
	IOV_HP2P_ENABLE,
	IOV_HP2P_PKT_THRESHOLD,
	IOV_HP2P_TIME_THRESHOLD,
	IOV_HP2P_PKT_EXPIRY,
	IOV_HP2P_TXCPL_MAXITEMS,
	IOV_HP2P_RXCPL_MAXITEMS,
	IOV_EXTDTXS_IN_TXCPL,
	IOV_HOSTRDY_AFTER_INIT,
#ifdef BCMINTERNAL
	IOV_SBREG_64,
#endif	/* BCMINTERNAL */
	IOV_HP2P_MF_ENABLE,
	IOV_PCIE_LAST /**< unused IOVAR */
};
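
/*
 * Each IOVar ID above is bound to its user-visible name, type, and minimum
 * length by a matching entry in the dhdpcie_iovars[] table below;
 * dhdpcie_bus_doiovar() dispatches on the action ID at runtime.
 */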

const bcm_iovar_t dhdpcie_iovars[] = {
	{"intr",	IOV_INTR,	0,	0, IOVT_BOOL,	0 },
#ifdef DHD_BUS_MEM_ACCESS
	{"membytes",	IOV_MEMBYTES,	0,	0, IOVT_BUFFER,	2 * sizeof(int) },
#endif /* DHD_BUS_MEM_ACCESS */
	{"memsize",	IOV_MEMSIZE,	0,	0, IOVT_UINT32,	0 },
	{"dwnldstate",	IOV_SET_DOWNLOAD_STATE,	0,	0, IOVT_BOOL,	0 },
	{"dwnldinfo",	IOV_SET_DOWNLOAD_INFO,	0,	0, IOVT_BUFFER,
	sizeof(fw_download_info_t) },
	{"vars",	IOV_VARS,	0,	0, IOVT_BUFFER,	0 },
	{"devreset",	IOV_DEVRESET,	0,	0, IOVT_UINT8,	0 },
	{"pcie_device_trap", IOV_FORCE_FW_TRAP, 0,	0, 0,	0 },
	{"pcie_lpbk",	IOV_PCIE_LPBK,	0,	0, IOVT_UINT32,	0 },
#ifdef BCMINTERNAL
	{"msi_sim",     IOV_MSI_SIM,    0,	0, IOVT_BOOL,      0 },
#endif /* BCMINTERNAL */
	{"cc_nvmshadow", IOV_CC_NVMSHADOW, 0,	0, IOVT_BUFFER, 0 },
	{"ramsize",	IOV_RAMSIZE,	0,	0, IOVT_UINT32,	0 },
	{"ramstart",	IOV_RAMSTART,	0,	0, IOVT_UINT32,	0 },
#ifdef DHD_PCIE_REG_ACCESS
	{"pciereg",	IOV_PCIEREG,	0,	0, IOVT_BUFFER,	2 * sizeof(int32) },
	{"pciecfgreg",	IOV_PCIECFGREG,	DHD_IOVF_PWRREQ_BYPASS,	0, IOVT_BUFFER,	2 * sizeof(int32) },
	{"pciecorereg",	IOV_PCIECOREREG,	0,	0, IOVT_BUFFER,	2 * sizeof(int32) },
	{"pcieserdesreg",	IOV_PCIESERDESREG,	0,	0, IOVT_BUFFER,	3 * sizeof(int32) },
	{"bar0secwinreg",	IOV_BAR0_SECWIN_REG,	0,	0, IOVT_BUFFER,	sizeof(sdreg_t) },
	{"sbreg",	IOV_SBREG,	0,	0, IOVT_BUFFER,	sizeof(uint8) },
#endif /* DHD_PCIE_REG_ACCESS */
#ifdef BCMINTERNAL
	{"pcie_tx_lpbk",	IOV_PCIE_TX_LPBK,	0,	0, IOVT_UINT32,	0 },
#endif /* BCMINTERNAL */
	{"pcie_dmaxfer", IOV_PCIE_DMAXFER, 0, 0, IOVT_BUFFER, sizeof(dma_xfer_info_t)},
	{"pcie_suspend", IOV_PCIE_SUSPEND,	DHD_IOVF_PWRREQ_BYPASS,	0, IOVT_UINT32,	0 },
#ifdef PCIE_OOB
	{"oob_bt_reg_on", IOV_OOB_BT_REG_ON,    0,	0, IOVT_UINT32,    0 },
	{"oob_enable",   IOV_OOB_ENABLE,    0,	0, IOVT_UINT32,    0 },
#endif /* PCIE_OOB */
	{"sleep_allowed",	IOV_SLEEP_ALLOWED,	0,	0, IOVT_BOOL,	0 },
	{"dngl_isolation", IOV_DONGLEISOLATION,	0,	0, IOVT_UINT32,	0 },
	{"ltrsleep_on_unload", IOV_LTRSLEEPON_UNLOOAD,	0,	0, IOVT_UINT32,	0 },
	{"dump_ringupdblk", IOV_DUMP_RINGUPD_BLOCK,	0,	0, IOVT_BUFFER,	0 },
	{"dma_ring_indices", IOV_DMA_RINGINDICES,	0,	0, IOVT_UINT32,	0},
	{"metadata_dbg", IOV_METADATA_DBG,	0,	0, IOVT_BOOL,	0 },
	{"rx_metadata_len", IOV_RX_METADATALEN,	0,	0, IOVT_UINT32,	0 },
	{"tx_metadata_len", IOV_TX_METADATALEN,	0,	0, IOVT_UINT32,	0 },
	{"db1_for_mb", IOV_DB1_FOR_MB,	0,	0, IOVT_UINT32,	0 },
	{"txp_thresh", IOV_TXP_THRESHOLD,	0,	0, IOVT_UINT32,	0 },
	{"buzzz_dump", IOV_BUZZZ_DUMP,		0,	0, IOVT_UINT32,	0 },
	{"flow_prio_map", IOV_FLOW_PRIO_MAP,	0,	0, IOVT_UINT32,	0 },
#ifdef DHD_PCIE_RUNTIMEPM
	{"idletime",    IOV_IDLETIME,   0,	0, IOVT_INT32,     0 },
#endif /* DHD_PCIE_RUNTIMEPM */
	{"rxbound",     IOV_RXBOUND,    0, 0,	IOVT_UINT32,    0 },
	{"txbound",     IOV_TXBOUND,    0, 0,	IOVT_UINT32,    0 },
#ifdef DHD_PCIE_REG_ACCESS
	{"aspm", IOV_PCIEASPM, 0, 0, IOVT_INT32, 0 },
#endif /* DHD_PCIE_REG_ACCESS */
	{"fw_hang_report", IOV_HANGREPORT,	0, 0,	IOVT_BOOL,	0 },
	{"h2d_mb_data",     IOV_H2D_MAILBOXDATA,    0, 0,      IOVT_UINT32,    0 },
	{"inforings",   IOV_INFORINGS,    0, 0,      IOVT_UINT32,    0 },
	{"h2d_phase",   IOV_H2D_PHASE,    0, 0,      IOVT_UINT32,    0 },
	{"force_trap_bad_h2d_phase", IOV_H2D_ENABLE_TRAP_BADPHASE,    0, 0,
	IOVT_UINT32,    0 },
	{"h2d_max_txpost",   IOV_H2D_TXPOST_MAX_ITEM,    0, 0,      IOVT_UINT32,    0 },
#if defined(DHD_HTPUT_TUNABLES)
	{"h2d_htput_max_txpost", IOV_H2D_HTPUT_TXPOST_MAX_ITEM,    0, 0,      IOVT_UINT32,    0 },
#endif /* DHD_HTPUT_TUNABLES */
	{"trap_data",	IOV_TRAPDATA,	0, 0,	IOVT_BUFFER,	0 },
	{"trap_data_raw",	IOV_TRAPDATA_RAW,	0, 0,	IOVT_BUFFER,	0 },
	{"cto_prevention",	IOV_CTO_PREVENTION,	0, 0,	IOVT_UINT32,	0 },
	{"pcie_wd_reset",	IOV_PCIE_WD_RESET,	0,	0, IOVT_BOOL,	0 },
#ifdef DEVICE_TX_STUCK_DETECT
	{"dev_tx_stuck_monitor", IOV_DEVICE_TX_STUCK_DETECT, 0, 0, IOVT_UINT32, 0 },
#endif /* DEVICE_TX_STUCK_DETECT */
	{"dump_dongle", IOV_DUMP_DONGLE, 0, 0, IOVT_BUFFER,
	MAX(sizeof(dump_dongle_in_t), sizeof(dump_dongle_out_t))},
	{"clear_ring",   IOV_CLEAR_RING,    0, 0,  IOVT_UINT32,    0 },
#ifdef DHD_EFI
	{"properties", IOV_WIFI_PROPERTIES,	0, 0, IOVT_BUFFER, 0},
	{"otp_dump", IOV_OTP_DUMP,	0, 0, IOVT_BUFFER, 0},
	{"control_signal", IOV_CONTROL_SIGNAL,	0, 0, IOVT_UINT32, 0},
#ifdef BT_OVER_PCIE
	{"btop_test", IOV_BTOP_TEST,	0, 0, IOVT_UINT32, 0},
#endif
#endif /* DHD_EFI */
	{"idma_enable",   IOV_IDMA_ENABLE,    0, 0,  IOVT_UINT32,    0 },
	{"ifrm_enable",   IOV_IFRM_ENABLE,    0, 0,  IOVT_UINT32,    0 },
	{"dar_enable",   IOV_DAR_ENABLE,    0, 0,  IOVT_UINT32,    0 },
	{"cap", IOV_DHD_CAPS,	0, 0, IOVT_BUFFER,	0},
#if defined(DEBUGGER) || defined(DHD_DSCOPE)
	{"gdb_server", IOV_GDB_SERVER,    0, 0,      IOVT_UINT32,    0 },
#endif /* DEBUGGER || DHD_DSCOPE */
#if defined(GDB_PROXY)
	{"gdb_proxy_probe", IOV_GDB_PROXY_PROBE, 0, 0, IOVT_BUFFER, 2 * sizeof(int32) },
	{"gdb_proxy_stop_count", IOV_GDB_PROXY_STOP_COUNT, 0, 0, IOVT_UINT32, 0 },
#endif /* GDB_PROXY */
	{"inb_dw_enable",   IOV_INB_DW_ENABLE,    0, 0,  IOVT_UINT32,    0 },
#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
	{"deep_sleep", IOV_DEEP_SLEEP, 0, 0, IOVT_UINT32,    0},
#endif /* PCIE_OOB || PCIE_INB_DW */
	{"cto_threshold",	IOV_CTO_THRESHOLD,	0,	0, IOVT_UINT32,	0 },
#ifdef D2H_MINIDUMP
	{"minidump_override", IOV_MINIDUMP_OVERRIDE, 0, 0, IOVT_UINT32, 0 },
#endif /* D2H_MINIDUMP */
#ifdef BCMINTERNAL
	{"dma_chan_db0",   IOV_DMA_CHAN,    0, 0,  IOVT_UINT32,    0 },
	{"hybridfw",   IOV_HYBRIDFW,    0, 0,  IOVT_BUFFER,    0 },
#endif /* BCMINTERNAL */
	{"hscbsize",	IOV_HSCBSIZE,	0,	0,	IOVT_UINT32,	0 },
#ifdef DHD_BUS_MEM_ACCESS
	{"hscbbytes",	IOV_HSCBBYTES,	0,	0,	IOVT_BUFFER,	2 * sizeof(int32) },
#endif

#ifdef BCMINTERNAL
#ifdef DHD_FWTRACE
	{"fwtrace",	IOV_FWTRACE,	0,      0,	IOVT_UINT32,	0 },
#endif	/* DHD_FWTRACE */
#endif /* BCMINTERNAL */

#ifdef DHD_HP2P
	{"hp2p_enable", IOV_HP2P_ENABLE,	0,	0, IOVT_UINT32,	0 },
	{"hp2p_pkt_thresh", IOV_HP2P_PKT_THRESHOLD,	0,	0, IOVT_UINT32,	0 },
	{"hp2p_time_thresh", IOV_HP2P_TIME_THRESHOLD,	0,	0, IOVT_UINT32,	0 },
	{"hp2p_pkt_expiry", IOV_HP2P_PKT_EXPIRY,	0,	0, IOVT_UINT32,	0 },
	{"hp2p_txcpl_maxitems", IOV_HP2P_TXCPL_MAXITEMS,	0,	0, IOVT_UINT32,	0 },
	{"hp2p_rxcpl_maxitems", IOV_HP2P_RXCPL_MAXITEMS,	0,	0, IOVT_UINT32,	0 },
#endif /* DHD_HP2P */
	{"extdtxs_in_txcpl", IOV_EXTDTXS_IN_TXCPL,	0,	0, IOVT_UINT32,	0 },
	{"hostrdy_after_init", IOV_HOSTRDY_AFTER_INIT,	0,	0, IOVT_UINT32,	0 },
#ifdef BCMINTERNAL
	{"sbreg_64",	IOV_SBREG_64,	0,		0, IOVT_BUFFER, sizeof(uint8) },
#endif	/* BCMINTERNAL */
	{"hp2p_mf_enable", IOV_HP2P_MF_ENABLE,	0,	0, IOVT_UINT32,	0 },
	{NULL, 0, 0, 0, 0, 0 }
};
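
/*
 * Usage sketch (hypothetical caller): bus IOVars from the table above are
 * driven through dhd_bus_iovar_op(), as dhd_bus_dmaxfer_lpbk() below does
 * for "pcie_dmaxfer". A simple scalar IOVar would look roughly like:
 *
 *   uint32 val = 1;
 *   (void)dhd_bus_iovar_op(dhdp, "h2d_phase", NULL, 0,
 *           (char *)&val, sizeof(val), IOV_SET);
 */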

#ifdef BCMINTERNAL
#define MSI_SIM_BUFSIZE                 64
#define PCIE_CFG_MSICAP_OFFSET          0x58
#define PCIE_CFG_MSIADDR_LOW_OFFSET     0x5C
#define PCIE_CFG_MSIDATA_OFFSET         0x64
#define PCIE_CFG_MSI_GENDATA            0x5678
#define PCIE_CFG_MSICAP_ENABLE_MSI      0x816805
#define PCIE_CFG_MSICAP_DISABLE_MSI     0x806805
#endif

#ifdef BCMQT_HW
#define MAX_READ_TIMEOUT	(100 * 1000)	/* 100 ms in dongle time */
#elif defined(NDIS)
#define MAX_READ_TIMEOUT	(5 * 1000 * 1000)
#else
#define MAX_READ_TIMEOUT	(2 * 1000 * 1000)
#endif
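
/*
 * Note: judging by the "100 ms in dongle time" annotation above, the
 * MAX_READ_TIMEOUT values appear to be expressed in microseconds
 * (100 * 1000 us = 100 ms); this is an inference from the comment,
 * not a spec statement.
 */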

#ifndef DHD_RXBOUND
#define DHD_RXBOUND		64
#endif
#ifndef DHD_TXBOUND
#define DHD_TXBOUND		64
#endif

#define DHD_INFORING_BOUND	32
#define DHD_BTLOGRING_BOUND	32

uint dhd_rxbound = DHD_RXBOUND;
uint dhd_txbound = DHD_TXBOUND;

#if defined(DEBUGGER) || defined(DHD_DSCOPE)
/** the GDB debugger layer will call back into this (bus) layer to read/write dongle memory */
static struct dhd_gdb_bus_ops_s  bus_ops = {
	.read_u16 = dhdpcie_bus_rtcm16,
	.read_u32 = dhdpcie_bus_rtcm32,
	.write_u32 = dhdpcie_bus_wtcm32,
};
#endif /* DEBUGGER || DHD_DSCOPE */

bool
dhd_bus_get_flr_force_fail(struct dhd_bus *bus)
{
	return bus->flr_force_fail;
}

/**
 * Register/Unregister functions are called by the main DHD entry point (e.g. module insertion) to
 * link with the bus driver, in order to look for or await the device.
 */
int
dhd_bus_register(void)
{
	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	return dhdpcie_bus_register();
}

void
dhd_bus_unregister(void)
{
	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	dhdpcie_bus_unregister();
	return;
}

/** returns a host virtual address */
uint32 *
dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size)
{
	return (uint32 *)REG_MAP(addr, size);
}

void
dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size)
{
	REG_UNMAP(addr);
	return;
}

/**
 * Return the H2D doorbell register address.
 * Use DAR registers instead of enum registers for corerev >= 23 (4347B0).
 */
static INLINE uint
dhd_bus_db0_addr_get(struct dhd_bus *bus)
{
	uint addr = PCIH2D_MailBox;
	uint dar_addr = DAR_PCIH2D_DB0_0(bus->sih->buscorerev);

#ifdef BCMINTERNAL
	if (bus->dma_chan == 1) {
		addr = PCIH2D_MailBox_1;
		dar_addr = DAR_PCIH2D_DB1_0(bus->sih->buscorerev);
	} else if (bus->dma_chan == 2) {
		addr = PCIH2D_MailBox_2;
		dar_addr = DAR_PCIH2D_DB2_0(bus->sih->buscorerev);
	}
#endif /* BCMINTERNAL */

	return ((DAR_ACTIVE(bus->dhd)) ? dar_addr : addr);
}

static INLINE uint
dhd_bus_db0_addr_2_get(struct dhd_bus *bus)
{
	return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB2_0(bus->sih->buscorerev) : PCIH2D_MailBox_2);
}

static INLINE uint
dhd_bus_db1_addr_get(struct dhd_bus *bus)
{
	return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB0_1(bus->sih->buscorerev) : PCIH2D_DB1);
}

static INLINE uint
dhd_bus_db1_addr_3_get(struct dhd_bus *bus)
{
	return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB3_1(bus->sih->buscorerev) : PCIH2D_DB1_3);
}
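
/*
 * Note on the four getters above: each returns the DAR flavor of a doorbell
 * register when DAR_ACTIVE() is true and the legacy enum register otherwise,
 * so callers never need to know which register set the chip in use expects.
 */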

static void
dhd_init_pwr_req_lock(dhd_bus_t *bus)
{
	if (!bus->pwr_req_lock) {
		bus->pwr_req_lock = osl_spin_lock_init(bus->osh);
	}
}

static void
dhd_deinit_pwr_req_lock(dhd_bus_t *bus)
{
	if (bus->pwr_req_lock) {
		osl_spin_lock_deinit(bus->osh, bus->pwr_req_lock);
		bus->pwr_req_lock = NULL;
	}
}

#ifdef PCIE_INB_DW
void
dhdpcie_set_dongle_deepsleep(dhd_bus_t *bus, bool val)
{
	ulong flags_ds;
	if (INBAND_DW_ENAB(bus)) {
		DHD_BUS_DONGLE_DS_LOCK(bus->dongle_ds_lock, flags_ds);
		bus->dongle_in_deepsleep = val;
		DHD_BUS_DONGLE_DS_UNLOCK(bus->dongle_ds_lock, flags_ds);
	}
}

void
dhd_init_dongle_ds_lock(dhd_bus_t *bus)
{
	if (!bus->dongle_ds_lock) {
		bus->dongle_ds_lock = osl_spin_lock_init(bus->osh);
	}
}

void
dhd_deinit_dongle_ds_lock(dhd_bus_t *bus)
{
	if (bus->dongle_ds_lock) {
		osl_spin_lock_deinit(bus->osh, bus->dongle_ds_lock);
		bus->dongle_ds_lock = NULL;
	}
}
#endif /* PCIE_INB_DW */

/*
 * WAR for SWWLAN-215055 - [4378B0] ARM fails to boot without DAR WL domain request
 */
static INLINE void
dhd_bus_pcie_pwr_req_wl_domain(struct dhd_bus *bus, uint offset, bool enable)
{
	if (enable) {
		si_corereg(bus->sih, bus->sih->buscoreidx, offset,
			SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT,
			SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT);
	} else {
		si_corereg(bus->sih, bus->sih->buscoreidx, offset,
			SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT, 0);
	}
}

static INLINE void
_dhd_bus_pcie_pwr_req_clear_cmn(struct dhd_bus *bus)
{
	uint mask;

	/*
	 * If there are multiple outstanding de-asserts, just decrement the
	 * reference count and return. The power request is only cleared when
	 * a single request is pending, so an earlier request is not removed
	 * unexpectedly.
	 */
	if (bus->pwr_req_ref > 1) {
		bus->pwr_req_ref--;
		return;
	}

	ASSERT(bus->pwr_req_ref == 1);

	if (MULTIBP_ENAB(bus->sih)) {
		/* Common BP is controlled by HW, so only the WL/ARM backplane needs toggling */
		mask = SRPWR_DMN1_ARMBPSD_MASK;
	} else {
		mask = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
	}

	si_srpwr_request(bus->sih, mask, 0);
	bus->pwr_req_ref = 0;
}

static INLINE void
dhd_bus_pcie_pwr_req_clear(struct dhd_bus *bus)
{
	unsigned long flags = 0;

	DHD_BUS_PWR_REQ_LOCK(bus->pwr_req_lock, flags);
	_dhd_bus_pcie_pwr_req_clear_cmn(bus);
	DHD_BUS_PWR_REQ_UNLOCK(bus->pwr_req_lock, flags);
}

static INLINE void
dhd_bus_pcie_pwr_req_clear_nolock(struct dhd_bus *bus)
{
	_dhd_bus_pcie_pwr_req_clear_cmn(bus);
}

static INLINE void
_dhd_bus_pcie_pwr_req_cmn(struct dhd_bus *bus)
{
	uint mask, val;

	/* If a request is already outstanding, just increment the reference and return */
	if (bus->pwr_req_ref > 0) {
		bus->pwr_req_ref++;
		return;
	}

	ASSERT(bus->pwr_req_ref == 0);

	if (MULTIBP_ENAB(bus->sih)) {
		/* Common BP is controlled by HW, so only the WL/ARM backplane needs toggling */
		mask = SRPWR_DMN1_ARMBPSD_MASK;
		val = SRPWR_DMN1_ARMBPSD_MASK;
	} else {
		mask = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
		val = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
	}

	si_srpwr_request(bus->sih, mask, val);

	bus->pwr_req_ref = 1;
}
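
/*
 * Usage sketch: power requests are reference counted. Every
 * dhd_bus_pcie_pwr_req() (or its _nolock variant) must be balanced by a
 * matching dhd_bus_pcie_pwr_req_clear(); only the 0 -> 1 transition issues
 * si_srpwr_request() and only the 1 -> 0 transition clears it:
 *
 *   dhd_bus_pcie_pwr_req(bus);
 *   ... access ARM/WL backplane registers ...
 *   dhd_bus_pcie_pwr_req_clear(bus);
 */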

static INLINE void
dhd_bus_pcie_pwr_req(struct dhd_bus *bus)
{
	unsigned long flags = 0;

	DHD_BUS_PWR_REQ_LOCK(bus->pwr_req_lock, flags);
	_dhd_bus_pcie_pwr_req_cmn(bus);
	DHD_BUS_PWR_REQ_UNLOCK(bus->pwr_req_lock, flags);
}

static INLINE void
_dhd_bus_pcie_pwr_req_pd0123_cmn(struct dhd_bus *bus)
{
	uint mask, val;

	mask = SRPWR_DMN_ALL_MASK(bus->sih);
	val = SRPWR_DMN_ALL_MASK(bus->sih);

	si_srpwr_request(bus->sih, mask, val);
}

void
dhd_bus_pcie_pwr_req_reload_war(struct dhd_bus *bus)
{
	unsigned long flags = 0;

	/*
	 * A few corerevs need the power domain to be active for FLR.
	 * Return if the power request is not applicable for this corerev.
	 */
	if (!(PCIE_PWR_REQ_RELOAD_WAR_ENAB(bus->sih->buscorerev))) {
		return;
	}

	DHD_BUS_PWR_REQ_LOCK(bus->pwr_req_lock, flags);
	_dhd_bus_pcie_pwr_req_pd0123_cmn(bus);
	DHD_BUS_PWR_REQ_UNLOCK(bus->pwr_req_lock, flags);
}

static INLINE void
_dhd_bus_pcie_pwr_req_clear_pd0123_cmn(struct dhd_bus *bus)
{
	uint mask;

	mask = SRPWR_DMN_ALL_MASK(bus->sih);

	si_srpwr_request(bus->sih, mask, 0);
}

void
dhd_bus_pcie_pwr_req_clear_reload_war(struct dhd_bus *bus)
{
	unsigned long flags = 0;

	/* Return if the power request clear is not applicable for this corerev */
	if (!(PCIE_PWR_REQ_RELOAD_WAR_ENAB(bus->sih->buscorerev))) {
		return;
	}
	DHD_BUS_PWR_REQ_LOCK(bus->pwr_req_lock, flags);
	_dhd_bus_pcie_pwr_req_clear_pd0123_cmn(bus);
	DHD_BUS_PWR_REQ_UNLOCK(bus->pwr_req_lock, flags);
}

static INLINE void
dhd_bus_pcie_pwr_req_nolock(struct dhd_bus *bus)
{
	_dhd_bus_pcie_pwr_req_cmn(bus);
}

bool
dhdpcie_chip_support_msi(dhd_bus_t *bus)
{
	/* XXX For chips with buscorerev <= 14, intstatus
	 * does not get cleared by these firmwares.
	 * Either the host can read and clear intstatus for these chips,
	 * or MSI is not enabled at all.
	 * Here option 2, not enabling MSI, is chosen.
	 * MSI is also not enabled for hw4 chips.
	 */
	DHD_INFO(("%s: buscorerev=%d chipid=0x%x\n",
		__FUNCTION__, bus->sih->buscorerev, si_chipid(bus->sih)));
	if (bus->sih->buscorerev <= 14 ||
		si_chipid(bus->sih) == BCM4389_CHIP_ID ||
		si_chipid(bus->sih) == BCM4385_CHIP_ID ||
		si_chipid(bus->sih) == BCM4375_CHIP_ID ||
		si_chipid(bus->sih) == BCM4376_CHIP_ID ||
		si_chipid(bus->sih) == BCM4362_CHIP_ID ||
		si_chipid(bus->sih) == BCM43751_CHIP_ID ||
		si_chipid(bus->sih) == BCM43752_CHIP_ID ||
		si_chipid(bus->sih) == BCM4361_CHIP_ID ||
		si_chipid(bus->sih) == BCM4359_CHIP_ID) {
		return FALSE;
	} else {
		return TRUE;
	}
}
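
/*
 * Cross-reference: this choice matters in dhdpcie_bus_isr() below. With
 * INTx the host reads and clears intstatus itself (dhdpcie_bus_intstatus()),
 * whereas with MSI the firmware clears it, so the ISR skips the read.
 */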

/**
 * Called once for each hardware (dongle) instance that this DHD manages.
 *
 * 'regs' is the host virtual address that maps to the start of the PCIe BAR0 window. The first 4096
 * bytes in this window are mapped to the backplane address in the PCIEBAR0Window register. The
 * precondition is that the PCIEBAR0Window register 'points' at the PCIe core.
 *
 * 'tcm' is the *host* virtual address at which tcm is mapped.
 */
int dhdpcie_bus_attach(osl_t *osh, dhd_bus_t **bus_ptr,
	volatile char *regs, volatile char *tcm, void *pci_dev, wifi_adapter_info_t *adapter)
{
	dhd_bus_t *bus = NULL;
	int ret = BCME_OK;

	DHD_TRACE(("%s: ENTER\n", __FUNCTION__));

	do {
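		/*
		 * The do { ... } while (0) wrapper exists purely for error
		 * handling: any failure below breaks out to the common
		 * cleanup/exit path at the bottom instead of using goto.
		 */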
		if (!(bus = MALLOCZ(osh, sizeof(dhd_bus_t)))) {
			DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
			ret = BCME_NORESOURCE;
			break;
		}
		bus->bus = adapter->bus_type;
		bus->bus_num = adapter->bus_num;
		bus->slot_num = adapter->slot_num;

		bus->regs = regs;
		bus->tcm = tcm;
		bus->osh = osh;
#ifndef NDIS
		/* Save pci_dev into dhd_bus, as it may be needed in dhd_attach */
		bus->dev = (struct pci_dev *)pci_dev;
#endif
#ifdef DHD_EFI
		bus->pcie_dev = pci_dev;
#endif

		dll_init(&bus->flowring_active_list);
#ifdef IDLE_TX_FLOW_MGMT
		bus->active_list_last_process_ts = OSL_SYSUPTIME();
#endif /* IDLE_TX_FLOW_MGMT */

#ifdef DEVICE_TX_STUCK_DETECT
		/* Enable the device stuck detection feature by default */
		bus->dev_tx_stuck_monitor = TRUE;
		bus->device_tx_stuck_check = OSL_SYSUPTIME();
#endif /* DEVICE_TX_STUCK_DETECT */

		/* Attach pcie shared structure */
		if (!(bus->pcie_sh = MALLOCZ(osh, sizeof(pciedev_shared_t)))) {
			DHD_ERROR(("%s: MALLOC of bus->pcie_sh failed\n", __FUNCTION__));
			ret = BCME_NORESOURCE;
			break;
		}

		/* dhd_common_init(osh); */

		if (dhdpcie_dongle_attach(bus)) {
			DHD_ERROR(("%s: dhdpcie_probe_attach failed\n", __FUNCTION__));
			ret = BCME_NOTREADY;
			break;
		}

		/* software resources */
		if (!(bus->dhd = dhd_attach(osh, bus, PCMSGBUF_HDRLEN))) {
			DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));
			ret = BCME_NORESOURCE;
			break;
		}
#if defined(GET_OTP_MAC_ENABLE) || defined(GET_OTP_MODULE_NAME)
		dhd_conf_get_otp(bus->dhd, bus->sih);
#endif
		DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
		bus->dhd->busstate = DHD_BUS_DOWN;
		bus->dhd->hostrdy_after_init = TRUE;
		bus->db1_for_mb = TRUE;
		bus->dhd->hang_report = TRUE;
		bus->use_mailbox = FALSE;
		bus->use_d0_inform = FALSE;
		bus->intr_enabled = FALSE;
		bus->flr_force_fail = FALSE;
		/* Update the DMA indices if set through the module parameter */
		if (dma_ring_indices != 0) {
			dhdpcie_set_dma_ring_indices(bus->dhd, dma_ring_indices);
		}
		/* Update h2d phase support if set through the module parameter */
		bus->dhd->h2d_phase_supported = h2d_phase ? TRUE : FALSE;
		/* Update force trap on bad phase if set through the module parameter */
		bus->dhd->force_dongletrap_on_bad_h2d_phase =
			force_trap_bad_h2d_phase ? TRUE : FALSE;
#ifdef BTLOG
		bus->dhd->bt_logging_enabled = TRUE;
#endif
#ifdef IDLE_TX_FLOW_MGMT
		bus->enable_idle_flowring_mgmt = FALSE;
#endif /* IDLE_TX_FLOW_MGMT */
		bus->irq_registered = FALSE;

#ifdef DHD_MSI_SUPPORT
		bus->d2h_intr_method = enable_msi && dhdpcie_chip_support_msi(bus) ?
			PCIE_MSI : PCIE_INTX;
		if (bus->dhd->conf->d2h_intr_method >= 0)
			bus->d2h_intr_method = bus->dhd->conf->d2h_intr_method;
#else
		bus->d2h_intr_method = PCIE_INTX;
#endif /* DHD_MSI_SUPPORT */

#ifdef DHD_HP2P
		bus->hp2p_txcpl_max_items = DHD_MAX_ITEMS_HPP_TXCPL_RING;
		bus->hp2p_rxcpl_max_items = DHD_MAX_ITEMS_HPP_RXCPL_RING;
#endif /* DHD_HP2P */

		DHD_TRACE(("%s: EXIT SUCCESS\n",
			__FUNCTION__));
		g_dhd_bus = bus;
		*bus_ptr = bus;
		return ret;
	} while (0);

	DHD_TRACE(("%s: EXIT FAILURE\n", __FUNCTION__));
#ifdef DHD_EFI
	/* For EFI, the load still succeeds even if there is an error,
	 * so 'bus' should not be freed here; it is freed during unload.
	 */
	if (bus) {
		*bus_ptr = bus;
	}
#else
	if (bus && bus->pcie_sh) {
		MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
	}

	if (bus) {
		MFREE(osh, bus, sizeof(dhd_bus_t));
	}
#endif /* DHD_EFI */

	return ret;
}

bool
dhd_bus_skip_clm(dhd_pub_t *dhdp)
{
	switch (dhd_bus_chip_id(dhdp)) {
		case BCM4369_CHIP_ID:
			return TRUE;
		default:
			return FALSE;
	}
}

uint
dhd_bus_chip(struct dhd_bus *bus)
{
	ASSERT(bus->sih != NULL);
	return bus->sih->chip;
}

uint
dhd_bus_chiprev(struct dhd_bus *bus)
{
	ASSERT(bus);
	ASSERT(bus->sih != NULL);
	return bus->sih->chiprev;
}

void *
dhd_bus_pub(struct dhd_bus *bus)
{
	return bus->dhd;
}

void *
dhd_bus_sih(struct dhd_bus *bus)
{
	return (void *)bus->sih;
}

void *
dhd_bus_txq(struct dhd_bus *bus)
{
	return &bus->txq;
}

/** Get Chip ID version */
uint dhd_bus_chip_id(dhd_pub_t *dhdp)
{
	dhd_bus_t *bus = dhdp->bus;
	return bus->sih->chip;
}

/** Get Chip Rev ID version */
uint dhd_bus_chiprev_id(dhd_pub_t *dhdp)
{
	dhd_bus_t *bus = dhdp->bus;
	return bus->sih->chiprev;
}

/** Get Chip Pkg ID version */
uint dhd_bus_chippkg_id(dhd_pub_t *dhdp)
{
	dhd_bus_t *bus = dhdp->bus;
	return bus->sih->chippkg;
}

int dhd_bus_get_ids(struct dhd_bus *bus, uint32 *bus_type, uint32 *bus_num, uint32 *slot_num)
{
	*bus_type = bus->bus;
	*bus_num = bus->bus_num;
	*slot_num = bus->slot_num;
	return 0;
}

/** Conduct loopback test */
int
dhd_bus_dmaxfer_lpbk(dhd_pub_t *dhdp, uint32 type)
{
	dma_xfer_info_t dmaxfer_lpbk;
	int ret = BCME_OK;

#define PCIE_DMAXFER_LPBK_LENGTH	4096
	memset(&dmaxfer_lpbk, 0, sizeof(dma_xfer_info_t));
	dmaxfer_lpbk.version = DHD_DMAXFER_VERSION;
	dmaxfer_lpbk.length = (uint16)sizeof(dma_xfer_info_t);
	dmaxfer_lpbk.num_bytes = PCIE_DMAXFER_LPBK_LENGTH;
	dmaxfer_lpbk.type = type;
	dmaxfer_lpbk.should_wait = TRUE;

	ret = dhd_bus_iovar_op(dhdp, "pcie_dmaxfer", NULL, 0,
		(char *)&dmaxfer_lpbk, sizeof(dma_xfer_info_t), IOV_SET);
	if (ret < 0) {
		DHD_ERROR(("failed to start PCIe loopback test! "
			"Type:%d Reason:%d\n", type, ret));
		return ret;
	}

	if (dmaxfer_lpbk.status != DMA_XFER_SUCCESS) {
		DHD_ERROR(("PCIe loopback test failed! "
			"Type:%d Status:%d Error code:%d\n", type,
			dmaxfer_lpbk.status, dmaxfer_lpbk.error_code));
		ret = BCME_ERROR;
	} else {
		DHD_ERROR(("PCIe loopback test passed."
			" Type:%d\n", type));
	}
#undef PCIE_DMAXFER_LPBK_LENGTH

	return ret;
}

/* Check if there are DPC scheduling errors */
bool
dhd_bus_query_dpc_sched_errors(dhd_pub_t *dhdp)
{
	dhd_bus_t *bus = dhdp->bus;
	bool sched_err;

	if (bus->dpc_entry_time < bus->isr_exit_time) {
		/* Kernel didn't schedule the DPC after processing the PCIe IRQ */
		sched_err = TRUE;
	} else if (bus->dpc_entry_time < bus->resched_dpc_time) {
		/* Kernel didn't schedule the DPC after DHD tried to reschedule
		 * it due to pending work items to be processed.
		 */
		sched_err = TRUE;
	} else {
		sched_err = FALSE;
	}

	if (sched_err) {
		/* print out minimum timestamp info */
		DHD_ERROR(("isr_entry_time="SEC_USEC_FMT
			" isr_exit_time="SEC_USEC_FMT
			" dpc_entry_time="SEC_USEC_FMT
			"\ndpc_exit_time="SEC_USEC_FMT
			" isr_sched_dpc_time="SEC_USEC_FMT
			" resched_dpc_time="SEC_USEC_FMT"\n",
			GET_SEC_USEC(bus->isr_entry_time),
			GET_SEC_USEC(bus->isr_exit_time),
			GET_SEC_USEC(bus->dpc_entry_time),
			GET_SEC_USEC(bus->dpc_exit_time),
			GET_SEC_USEC(bus->isr_sched_dpc_time),
			GET_SEC_USEC(bus->resched_dpc_time)));
	}

	return sched_err;
}
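
/*
 * Note on the checks above: in a healthy run the timestamps satisfy
 * isr_exit_time <= dpc_entry_time and resched_dpc_time <= dpc_entry_time,
 * i.e. every IRQ exit and every reschedule request is eventually followed by
 * a DPC entry; either inequality failing means the kernel never ran the DPC
 * that was queued.
 */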

/** Read and clear intstatus. This should be called with interrupts disabled or inside the isr */
uint32
dhdpcie_bus_intstatus(dhd_bus_t *bus)
{
	uint32 intstatus = 0;
	uint32 intmask = 0;

	if (__DHD_CHK_BUS_LPS_D3_ACKED(bus)) {
#ifdef DHD_EFI
		DHD_INFO(("%s: trying to clear intstatus after D3 Ack\n", __FUNCTION__));
#else
		DHD_ERROR(("%s: trying to clear intstatus after D3 Ack\n", __FUNCTION__));
#endif /* !DHD_EFI */
		return intstatus;
	}
	/* XXX: check for PCIE Gen2 also */
	if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) ||
		(bus->sih->buscorerev == 2)) {
		intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
		dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus);
		intstatus &= I_MB;
	} else {
		/* this is a PCIE core register..not a config register... */
		intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, 0, 0);

#ifdef DHD_MMIO_TRACE
		dhd_bus_mmio_trace(bus, bus->pcie_mailbox_int, intstatus, FALSE);
#endif /* defined(DHD_MMIO_TRACE) */

		/* this is a PCIE core register..not a config register... */
		intmask = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask, 0, 0);
		/* If the device has been removed, intstatus & intmask read 0xffffffff */
		if (intstatus == (uint32)-1 || intmask == (uint32)-1) {
			DHD_ERROR(("%s: Device is removed or Link is down.\n", __FUNCTION__));
			DHD_ERROR(("%s: INTSTAT : 0x%x INTMASK : 0x%x.\n",
			    __FUNCTION__, intstatus, intmask));
			bus->is_linkdown = TRUE;
			dhd_pcie_debug_info_dump(bus->dhd);
#ifdef CUSTOMER_HW4_DEBUG
#if defined(OEM_ANDROID)
#ifdef SUPPORT_LINKDOWN_RECOVERY
#ifdef CONFIG_ARCH_MSM
			bus->no_cfg_restore = 1;
#endif /* CONFIG_ARCH_MSM */
#endif /* SUPPORT_LINKDOWN_RECOVERY */
			bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN_EP_DETECT;
#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
			copy_hang_info_linkdown(bus->dhd);
#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
			dhd_os_send_hang_message(bus->dhd);
#endif /* OEM_ANDROID */
#endif /* CUSTOMER_HW4_DEBUG */
			return intstatus;
		}

#ifndef DHD_READ_INTSTATUS_IN_DPC
		intstatus &= intmask;
#endif /* DHD_READ_INTSTATUS_IN_DPC */

#ifdef DHD_MMIO_TRACE
		dhd_bus_mmio_trace(bus, bus->pcie_mailbox_mask, intmask, FALSE);
#endif /* defined(DHD_MMIO_TRACE) */

		/* XXX: define the mask in a .h file  */
		/*
		 * The fourth argument to si_corereg is the "mask" of the register fields to update
		 * and the fifth is the "value" to update them with. If we are interested in only
		 * a few fields of the "mask" bit map, we should not write back what we read,
		 * because doing so might clear/ack interrupts that have not been handled yet.
		 */
#ifdef DHD_MMIO_TRACE
		dhd_bus_mmio_trace(bus, bus->pcie_mailbox_int, intstatus, TRUE);
#endif /* defined(DHD_MMIO_TRACE) */
		si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, bus->def_intmask,
			intstatus);

		intstatus &= bus->def_intmask;
	}

	return intstatus;
}

void
dhdpcie_cto_recovery_handler(dhd_pub_t *dhd)
{
	dhd_bus_t *bus = dhd->bus;
	int ret;

	/* Disable PCIe Runtime PM to avoid a D3_ACK timeout */
	DHD_DISABLE_RUNTIME_PM(dhd);

	/* Sleep for 1 second so that any AXI timeout
	 * is captured even when running on the ALP clock
	 */
	OSL_SLEEP(1000);

	/* Reset the backplane and clear CTO,
	 * after which access through PCIe is recovered.
	 */
	ret = dhdpcie_cto_error_recovery(bus);
	if (!ret) {
		/* Wait for the backplane reset */
		OSL_SLEEP(10);
		/* Dump debug info */
		dhd_prot_debug_info_print(bus->dhd);
		/* Dump console buffer */
		dhd_bus_dump_console_buffer(bus);
#if defined(DHD_FW_COREDUMP)
		/* save core dump or write to a file */
		if (!bus->is_linkdown && bus->dhd->memdump_enabled) {
#ifdef DHD_SSSR_DUMP
			DHD_ERROR(("%s : Set collect_sssr as TRUE\n", __FUNCTION__));
			bus->dhd->collect_sssr = TRUE;
#endif /* DHD_SSSR_DUMP */
			bus->dhd->memdump_type = DUMP_TYPE_CTO_RECOVERY;
			dhdpcie_mem_dump(bus);
		}
#endif /* DHD_FW_COREDUMP */
	}
#ifdef OEM_ANDROID
#ifdef SUPPORT_LINKDOWN_RECOVERY
#ifdef CONFIG_ARCH_MSM
	bus->no_cfg_restore = 1;
#endif /* CONFIG_ARCH_MSM */
#endif /* SUPPORT_LINKDOWN_RECOVERY */
	bus->is_linkdown = TRUE;
	bus->dhd->hang_reason = HANG_REASON_PCIE_CTO_DETECT;
	/* Send HANG event */
	dhd_os_send_hang_message(bus->dhd);
#endif /* OEM_ANDROID */
}

void
dhd_bus_dump_imp_cfg_registers(struct dhd_bus *bus)
{
	uint32 status_cmd = dhd_pcie_config_read(bus, PCIECFGREG_STATUS_CMD, sizeof(uint32));
	uint32 pmcsr = dhd_pcie_config_read(bus, PCIE_CFG_PMCSR, sizeof(uint32));
	uint32 base_addr0 = dhd_pcie_config_read(bus, PCIECFGREG_BASEADDR0, sizeof(uint32));
	uint32 base_addr1 = dhd_pcie_config_read(bus, PCIECFGREG_BASEADDR1, sizeof(uint32));
	uint32 linkctl = dhd_pcie_config_read(bus, PCIECFGREG_LINK_STATUS_CTRL, sizeof(uint32));
	uint32 l1ssctrl =
		dhd_pcie_config_read(bus, PCIECFGREG_PML1_SUB_CTRL1, sizeof(uint32));
	uint32 devctl = dhd_pcie_config_read(bus, PCIECFGREG_DEV_STATUS_CTRL, sizeof(uint32));
	uint32 devctl2 = dhd_pcie_config_read(bus, PCIECFGGEN_DEV_STATUS_CTRL2, sizeof(uint32));

	DHD_ERROR(("status_cmd(0x%x)=0x%x, pmcsr(0x%x)=0x%x "
		"base_addr0(0x%x)=0x%x base_addr1(0x%x)=0x%x "
		"linkctl(0x%x)=0x%x l1ssctrl(0x%x)=0x%x "
		"devctl(0x%x)=0x%x devctl2(0x%x)=0x%x \n",
		PCIECFGREG_STATUS_CMD, status_cmd,
		PCIE_CFG_PMCSR, pmcsr,
		PCIECFGREG_BASEADDR0, base_addr0,
		PCIECFGREG_BASEADDR1, base_addr1,
		PCIECFGREG_LINK_STATUS_CTRL, linkctl,
		PCIECFGREG_PML1_SUB_CTRL1, l1ssctrl,
		PCIECFGREG_DEV_STATUS_CTRL, devctl,
		PCIECFGGEN_DEV_STATUS_CTRL2, devctl2));
}

/**
 * Name:  dhdpcie_bus_isr
 * Parameters:
 * 1: IN int irq   -- interrupt vector
 * 2: IN void *arg      -- handle to private data structure
 * Return value:
 * Status (TRUE or FALSE)
 *
 * Description:
 * The interrupt service routine checks the status register,
 * disables interrupts, and queues the DPC if mailbox interrupts are raised.
 */
int32
dhdpcie_bus_isr(dhd_bus_t *bus)
{
	uint32 intstatus = 0;

	do {
		DHD_INTR(("%s: Enter\n", __FUNCTION__));
		/* verify argument */
		if (!bus) {
			DHD_LOG_MEM(("%s : bus is null pointer, exit \n", __FUNCTION__));
			break;
		}

		if (bus->dhd->dongle_reset) {
			DHD_LOG_MEM(("%s : dongle is reset\n", __FUNCTION__));
			break;
		}

		if (bus->dhd->busstate == DHD_BUS_DOWN) {
			DHD_LOG_MEM(("%s : bus is down \n", __FUNCTION__));
			break;
		}

		/* avoid processing of interrupts until msgbuf prot is inited */
		if (!bus->intr_enabled) {
			DHD_INFO(("%s, not ready to receive interrupts\n", __FUNCTION__));
			break;
		}

		if (PCIECTO_ENAB(bus)) {
			/* read pci_intstatus */
			intstatus = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_STATUS, 4);

			if (intstatus == (uint32)-1) {
				DHD_ERROR(("%s : Invalid intstatus for cto recovery\n",
					__FUNCTION__));
				bus->is_linkdown = 1;
				dhdpcie_disable_irq_nosync(bus);
				dhd_prot_debug_info_print(bus->dhd);
				break;
			}

			if (intstatus & PCI_CTO_INT_MASK) {
				DHD_ERROR(("%s: ##### CTO RECOVERY REPORTED BY DONGLE "
					"intstat=0x%x enab=%d\n", __FUNCTION__,
					intstatus, bus->cto_enable));
				bus->cto_triggered = 1;
				dhd_bus_dump_imp_cfg_registers(bus);
				/*
				 * DAR is still accessible
				 */
				dhd_bus_dump_dar_registers(bus);

				/* Disable further PCIe interrupts */
#ifndef NDIS
				dhdpcie_disable_irq_nosync(bus); /* Disable interrupt!! */
#endif
				/* Stop Tx flow */
				dhd_bus_stop_queue(bus);

				/* Schedule CTO recovery */
				dhd_schedule_cto_recovery(bus->dhd);

				return TRUE;
			}
		}

		if (bus->d2h_intr_method == PCIE_MSI &&
				!dhd_conf_legacy_msi_chip(bus->dhd)) {
			/* For MSI, as intstatus is cleared by firmware, no need to read */
			goto skip_intstatus_read;
		}

#ifndef DHD_READ_INTSTATUS_IN_DPC
		intstatus = dhdpcie_bus_intstatus(bus);

		/* Check if the interrupt is ours or not */
		if (intstatus == 0) {
			bus->non_ours_irq_count++;
			bus->last_non_ours_irq_time = OSL_LOCALTIME_NS();
			break;
		}

		/* save the intstatus */
		/* read interrupt status register!! Status bits will be cleared in DPC !! */
		bus->intstatus = intstatus;

		/* return error for 0xFFFFFFFF */
		if (intstatus == (uint32)-1) {
			DHD_LOG_MEM(("%s : wrong interrupt status val : 0x%x\n",
				__FUNCTION__, intstatus));
			bus->is_linkdown = 1;
			dhdpcie_disable_irq_nosync(bus);
			break;
		}

skip_intstatus_read:
		/*  Overall operation:
		 *    - Mask further interrupts
		 *    - Read/ack intstatus
		 *    - Take action based on bits and state
		 *    - Reenable interrupts (as per state)
		 */

		/* Count the interrupt call */
		bus->intrcount++;
#endif /* DHD_READ_INTSTATUS_IN_DPC */

		bus->ipend = TRUE;

		bus->isr_intr_disable_count++;

#ifdef CHIP_INTR_CONTROL
		dhdpcie_bus_intr_disable(bus); /* Disable interrupt using IntMask!! */
#else
		/* For Linux, MacOS, etc. (other than NDIS), instead of disabling
		 * the dongle interrupt by clearing the IntMask, disable the
		 * interrupt directly on the host side, so that the host does not
		 * receive any interrupts at all, even if the dongle raises them.
		 */
		dhdpcie_disable_irq_nosync(bus); /* Disable interrupt!! */
#endif /* CHIP_INTR_CONTROL */

		bus->intdis = TRUE;
#ifdef DHD_FLOW_RING_STATUS_TRACE
		if (bus->dhd->dma_h2d_ring_upd_support && bus->dhd->dma_d2h_ring_upd_support &&
			(bus->dhd->ring_attached == TRUE)) {
			dhd_bus_flow_ring_status_isr_trace(bus->dhd);
		}
#endif /* DHD_FLOW_RING_STATUS_TRACE */
#if defined(PCIE_ISR_THREAD)

		DHD_TRACE(("Calling dhd_bus_dpc() from %s\n", __FUNCTION__));
		DHD_OS_WAKE_LOCK(bus->dhd);
		while (dhd_bus_dpc(bus));
		DHD_OS_WAKE_UNLOCK(bus->dhd);
#else
		bus->dpc_sched = TRUE;
		bus->isr_sched_dpc_time = OSL_LOCALTIME_NS();
#ifndef NDIS
		dhd_sched_dpc(bus->dhd);     /* queue DPC now!! */
#endif /* !NDIS */
#endif /* defined(PCIE_ISR_THREAD) */

		DHD_INTR(("%s: Exit Success DPC Queued\n", __FUNCTION__));
		return TRUE;

	} while (0);

	DHD_INTR(("%s: Exit Failure\n", __FUNCTION__));
	return FALSE;
}
1588 
1589 int
1590 dhdpcie_set_pwr_state(dhd_bus_t *bus, uint state)
1591 {
1592 	uint32 cur_state = 0;
1593 	uint32 pm_csr = 0;
1594 	osl_t *osh = bus->osh;
1595 
1596 	pm_csr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
1597 	cur_state = pm_csr & PCIECFGREG_PM_CSR_STATE_MASK;
1598 
1599 	if (cur_state == state) {
1600 		DHD_ERROR(("%s: Already in state %u \n", __FUNCTION__, cur_state));
1601 		return BCME_OK;
1602 	}
1603 
1604 	if (state > PCIECFGREG_PM_CSR_STATE_D3_HOT)
1605 		return BCME_ERROR;
1606 
1607 	/* Validate the state transition
1608 	* if already in a lower power state, return error
1609 	*/
1610 	if (state != PCIECFGREG_PM_CSR_STATE_D0 &&
1611 			cur_state <= PCIECFGREG_PM_CSR_STATE_D3_COLD &&
1612 			cur_state > state) {
1613 		DHD_ERROR(("%s: Invalid power state transition !\n", __FUNCTION__));
1614 		return BCME_ERROR;
1615 	}
1616 
1617 	pm_csr &= ~PCIECFGREG_PM_CSR_STATE_MASK;
1618 	pm_csr |= state;
1619 
1620 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32), pm_csr);
1621 
1622 	/* wait for the mandatory PCIe power transition delay time (per the PCI PM spec: 10 ms around D3hot, 200 us around D2) */
1623 	if (state == PCIECFGREG_PM_CSR_STATE_D3_HOT ||
1624 			cur_state == PCIECFGREG_PM_CSR_STATE_D3_HOT)
1625 			OSL_DELAY(DHDPCIE_PM_D3_DELAY);
1626 	else if (state == PCIECFGREG_PM_CSR_STATE_D2 ||
1627 			cur_state == PCIECFGREG_PM_CSR_STATE_D2)
1628 			OSL_DELAY(DHDPCIE_PM_D2_DELAY);
1629 
1630 	/* read back the power state and verify */
1631 	pm_csr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
1632 	cur_state = pm_csr & PCIECFGREG_PM_CSR_STATE_MASK;
1633 	if (cur_state != state) {
1634 		DHD_ERROR(("%s: power transition failed ! Current state is %u \n",
1635 				__FUNCTION__, cur_state));
1636 		return BCME_ERROR;
1637 	} else {
1638 		DHD_ERROR(("%s: power transition to %u success \n",
1639 				__FUNCTION__, cur_state));
1640 	}
1641 
1642 	return BCME_OK;
1643 }
1644 
1645 int
1646 dhdpcie_config_check(dhd_bus_t *bus)
1647 {
1648 	uint32 i, val;
1649 	int ret = BCME_ERROR;
1650 
1651 	for (i = 0; i < DHDPCIE_CONFIG_CHECK_RETRY_COUNT; i++) {
1652 		val = OSL_PCI_READ_CONFIG(bus->osh, PCI_CFG_VID, sizeof(uint32));
1653 		if ((val & 0xFFFF) == VENDOR_BROADCOM) {
1654 			ret = BCME_OK;
1655 			break;
1656 		}
1657 		OSL_DELAY(DHDPCIE_CONFIG_CHECK_DELAY_MS * 1000);
1658 	}
1659 
1660 	return ret;
1661 }
1662 
1663 int
1664 dhdpcie_config_restore(dhd_bus_t *bus, bool restore_pmcsr)
1665 {
1666 	uint32 i;
1667 	osl_t *osh = bus->osh;
1668 
1669 	if (BCME_OK != dhdpcie_config_check(bus)) {
1670 		return BCME_ERROR;
1671 	}
1672 
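	/* Restore the header from the revision word onward (PCI_CFG_REV >> 2 == word 2):
	 * word 0 (vendor/device ID) is read-only, and the command word (word 1) is
	 * re-written last, below - the usual PCI restore order, so memory/IO decoding
	 * is re-enabled only after the BARs are back in place.
	 */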
1673 	for (i = PCI_CFG_REV >> 2; i < DHDPCIE_CONFIG_HDR_SIZE; i++) {
1674 		OSL_PCI_WRITE_CONFIG(osh, i << 2, sizeof(uint32), bus->saved_config.header[i]);
1675 	}
1676 	OSL_PCI_WRITE_CONFIG(osh, PCI_CFG_CMD, sizeof(uint32), bus->saved_config.header[1]);
1677 
1678 	if (restore_pmcsr)
1679 		OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PM_CSR,
1680 			sizeof(uint32), bus->saved_config.pmcsr);
1681 
1682 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_CAP, sizeof(uint32), bus->saved_config.msi_cap);
1683 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_ADDR_L, sizeof(uint32),
1684 			bus->saved_config.msi_addr0);
1685 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_ADDR_H,
1686 			sizeof(uint32), bus->saved_config.msi_addr1);
1687 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_DATA,
1688 			sizeof(uint32), bus->saved_config.msi_data);
1689 
1690 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_DEV_STATUS_CTRL,
1691 			sizeof(uint32), bus->saved_config.exp_dev_ctrl_stat);
1692 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGGEN_DEV_STATUS_CTRL2,
1693 			sizeof(uint32), bus->saved_config.exp_dev_ctrl_stat2);
1694 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL,
1695 			sizeof(uint32), bus->saved_config.exp_link_ctrl_stat);
1696 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL2,
1697 			sizeof(uint32), bus->saved_config.exp_link_ctrl_stat2);
1698 
1699 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL1,
1700 			sizeof(uint32), bus->saved_config.l1pm0);
1701 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2,
1702 			sizeof(uint32), bus->saved_config.l1pm1);
1703 
1704 	OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, sizeof(uint32),
1705 			bus->saved_config.bar0_win);
1706 	dhdpcie_setbar1win(bus, bus->saved_config.bar1_win);
1707 
1708 	return BCME_OK;
1709 }
1710 
1711 int
1712 dhdpcie_config_save(dhd_bus_t *bus)
1713 {
1714 	uint32 i;
1715 	osl_t *osh = bus->osh;
1716 
1717 	if (BCME_OK != dhdpcie_config_check(bus)) {
1718 		return BCME_ERROR;
1719 	}
1720 
1721 	for (i = 0; i < DHDPCIE_CONFIG_HDR_SIZE; i++) {
1722 		bus->saved_config.header[i] = OSL_PCI_READ_CONFIG(osh, i << 2, sizeof(uint32));
1723 	}
1724 
1725 	bus->saved_config.pmcsr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
1726 
1727 	bus->saved_config.msi_cap = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_CAP,
1728 			sizeof(uint32));
1729 	bus->saved_config.msi_addr0 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_L,
1730 			sizeof(uint32));
1731 	bus->saved_config.msi_addr1 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_H,
1732 			sizeof(uint32));
1733 	bus->saved_config.msi_data = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_DATA,
1734 			sizeof(uint32));
1735 
1736 	bus->saved_config.exp_dev_ctrl_stat = OSL_PCI_READ_CONFIG(osh,
1737 			PCIECFGREG_DEV_STATUS_CTRL, sizeof(uint32));
1738 	bus->saved_config.exp_dev_ctrl_stat2 = OSL_PCI_READ_CONFIG(osh,
1739 			PCIECFGGEN_DEV_STATUS_CTRL2, sizeof(uint32));
1740 	bus->saved_config.exp_link_ctrl_stat = OSL_PCI_READ_CONFIG(osh,
1741 			PCIECFGREG_LINK_STATUS_CTRL, sizeof(uint32));
1742 	bus->saved_config.exp_link_ctrl_stat2 = OSL_PCI_READ_CONFIG(osh,
1743 			PCIECFGREG_LINK_STATUS_CTRL2, sizeof(uint32));
1744 
1745 	bus->saved_config.l1pm0 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL1,
1746 			sizeof(uint32));
1747 	bus->saved_config.l1pm1 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2,
1748 			sizeof(uint32));
1749 
1750 	bus->saved_config.bar0_win = OSL_PCI_READ_CONFIG(osh, PCI_BAR0_WIN,
1751 			sizeof(uint32));
1752 	bus->saved_config.bar1_win = OSL_PCI_READ_CONFIG(osh, PCI_BAR1_WIN,
1753 			sizeof(uint32));
1754 
1755 	return BCME_OK;
1756 }
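
/*
 * Illustrative pairing of the config/power helpers above - a sketch, not a
 * compiled path (the real suspend/resume flow adds link-state checks and
 * locking around these calls): snapshot config space in D0, drop to D3hot,
 * then reverse the steps on resume.
 */
#if 0
	dhdpcie_config_save(bus);                                   /* snapshot cfg space while in D0 */
	dhdpcie_set_pwr_state(bus, PCIECFGREG_PM_CSR_STATE_D3_HOT); /* enter D3hot */
	/* ... device sleeps ... */
	dhdpcie_set_pwr_state(bus, PCIECFGREG_PM_CSR_STATE_D0);     /* back to D0 */
	dhdpcie_config_restore(bus, FALSE);                         /* re-program the saved cfg space */
#endif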
1757 
1758 #ifdef CONFIG_ARCH_EXYNOS
1759 dhd_pub_t *link_recovery = NULL;
1760 #endif /* CONFIG_ARCH_EXYNOS */
1761 
1762 static void
1763 dhdpcie_bus_intr_init(dhd_bus_t *bus)
1764 {
1765 	uint buscorerev = bus->sih->buscorerev;
1766 	bus->pcie_mailbox_int = PCIMailBoxInt(buscorerev);
1767 	bus->pcie_mailbox_mask = PCIMailBoxMask(buscorerev);
1768 	bus->d2h_mb_mask = PCIE_MB_D2H_MB_MASK(buscorerev);
1769 	bus->def_intmask = PCIE_MB_D2H_MB_MASK(buscorerev);
1770 	if (buscorerev < 64) {
1771 		bus->def_intmask |= PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1;
1772 	}
1773 }
1774 
1775 static void
1776 dhdpcie_cc_watchdog_reset(dhd_bus_t *bus)
1777 {
1778 	uint32 wd_en = (bus->sih->buscorerev >= 66) ? WD_SSRESET_PCIE_F0_EN :
1779 		(WD_SSRESET_PCIE_F0_EN | WD_SSRESET_PCIE_ALL_FN_EN);
1780 	pcie_watchdog_reset(bus->osh, bus->sih, WD_ENABLE_MASK, wd_en);
1781 }
1782 void
1783 dhdpcie_dongle_reset(dhd_bus_t *bus)
1784 {
1785 
1786 	/* if the pcie link is down, watchdog reset
1787 	 * should not be done, as it may hang
1788 	 */
1789 	if (bus->is_linkdown) {
1790 		return;
1791 	}
1792 
1793 	/* Currently BP reset using CFG reg is done only for android platforms */
1794 #ifdef DHD_USE_BP_RESET_SPROM
1795 	/* This is for architectures that do NOT control subsystem reset */
1796 	(void)dhd_bus_cfg_sprom_ctrl_bp_reset(bus);
1797 	return;
1798 #elif defined(DHD_USE_BP_RESET_SS_CTRL)
1799 	/* This is for architectures that support Subsystem Control */
1800 	(void)dhd_bus_cfg_ss_ctrl_bp_reset(bus);
1801 	return;
1802 #else
1803 
1804 #ifdef BCMQT_HW
1805 	/* FLR takes a long time on QT and is only required when testing with a
1806 	 * BT-included database. Fall back to watchdog reset by default and only
1807 	 * perform FLR if enabled through the module parameter
1808 	 */
1809 	if (qt_flr_reset && (dhd_bus_perform_flr(bus, FALSE) != BCME_UNSUPPORTED)) {
1810 		return;
1811 	}
1812 #else
1813 	/* dhd_bus_perform_flr will return BCME_UNSUPPORTED if chip is not FLR capable */
1814 	if (dhd_bus_perform_flr(bus, FALSE) == BCME_UNSUPPORTED)
1815 #endif
1816 	{
1817 		/* Legacy chipcommon watchdog reset */
1818 		dhdpcie_cc_watchdog_reset(bus);
1819 	}
1820 	return;
1821 #endif /* DHD_USE_BP_RESET_SPROM */
1822 }
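
/*
 * Reset-method summary for dhdpcie_dongle_reset() (as compiled above):
 *   DHD_USE_BP_RESET_SPROM   -> backplane reset via the SPROM control cfg reg
 *   DHD_USE_BP_RESET_SS_CTRL -> backplane reset via Subsystem Control
 *   otherwise                -> FLR when the chip supports it, falling back to
 *                              the legacy chipcommon watchdog reset
 *   (on QT, FLR is attempted only when the qt_flr_reset module param is set)
 */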
1823 
1824 #ifdef BCMQT_HW
1825 /* Calculate dongle/host clock ratio for QT so the waiting period in host driver can be scaled
1826  * properly. The dongle uses ALP clock by default which can't be read directly. But ILP and
1827  * ALP clocks are scaled disproportionally in QT. So DHD must know the preset crystal frequency
1828  * for ALP clock in order to calculate the scale ratio. The logic below takes 3 sources of xtal
1829  * frequency in the following priority:
1830  * 1 module parameter
1831  * 2 nvram "xtalfreq" line (not available for the first dongle reset)
1832  * 3 Hard coded 37.4MHz
1833  * If the QT simulation of a chip uses a crystal frequency other than 37.4MHz, it is
1834  * strongly recommended to extend the hard coded value on a per-chip basis or to
1835  * override it with the module parameter.
1836  */
1837 #define XTAL_FREQ_37M4		37400000u
1838 void dhdpcie_htclkratio_cal(dhd_bus_t *bus)
1839 {
1840 	uint cur_coreidx, pmu_idx;
1841 	uint32 ilp_start, ilp_tick, xtal_ratio;
1842 	int xtalfreq = 0;
1843 
1844 	/* If an htclkratio larger than 1 is set through the module parameter, use it directly */
1845 	if (htclkratio > 1) {
1846 		goto exit;
1847 	}
1848 
1849 	/* Store current core id */
1850 	cur_coreidx = si_coreidx(bus->sih);
1851 	if (!si_setcore(bus->sih, PMU_CORE_ID, 0)) {
1852 		htclkratio = 2000;
1853 		goto exit;
1854 	}
1855 
1856 	pmu_idx = si_coreidx(bus->sih);
1857 
1858 	/* Count ILP ticks in 1 second of host domain clock */
1859 	ilp_start = si_corereg(bus->sih, pmu_idx, offsetof(pmuregs_t, pmutimer), 0, 0);
1860 	osl_sleep(1000);
1861 	ilp_tick = si_corereg(bus->sih, pmu_idx, offsetof(pmuregs_t, pmutimer), 0, 0);
1862 	/* -1 to compensate the incomplete cycle at the beginning */
1863 	ilp_tick -= ilp_start - 1;
1864 
1865 	/* Get xtal vs ILP ratio from XtalFreqRatio(0x66c) */
1866 	xtal_ratio = si_corereg(bus->sih, pmu_idx, offsetof(pmuregs_t, pmu_xtalfreq), 0, 0);
1867 	xtal_ratio = (xtal_ratio & PMU_XTALFREQ_REG_ILPCTR_MASK) / 4;
1868 
1869 	/* Go back to original core */
1870 	si_setcoreidx(bus->sih, cur_coreidx);
1871 
1872 	/* Use module parameter if one is provided. Otherwise use default 37.4MHz */
1873 	if (dngl_xtalfreq) {
1874 		xtalfreq = dngl_xtalfreq;
1875 	} else {
1876 		xtalfreq = XTAL_FREQ_37M4;
1877 	}
1878 
1879 	/* htclkratio = xtalfreq / QT_XTAL_FREQ
1880 	 *            = xtalfreq / (ilp_tick * xtal_ratio)
1881 	 */
1882 	htclkratio = xtalfreq / (ilp_tick * xtal_ratio);
1883 	bus->xtalfreq = xtalfreq;
1884 	bus->ilp_tick = ilp_tick;
1885 	bus->xtal_ratio = xtal_ratio;
1886 
1887 exit:
1888 	DHD_ERROR(("Dongle/Host clock ratio %u with %dHz xtal frequency\n", htclkratio, xtalfreq));
1889 }
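
/*
 * Worked example (illustrative numbers): if one host-side second yields
 * ilp_tick = 11 pmutimer ticks and XtalFreqRatio reports xtal_ratio = 100
 * xtal cycles per ILP cycle, the emulated xtal runs at 11 * 100 = 1100 Hz.
 * With the default 37.4MHz crystal:
 *     htclkratio = 37400000 / (11 * 100) = 34000
 * i.e. every dongle-side wait in DHD must be stretched ~34000x on QT.
 */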
1890 
1891 /* Re-calculate htclkratio if nvram provides a different xtalfreq */
1892 void dhdpcie_htclkratio_recal(dhd_bus_t *bus, char *nvram, uint nvram_sz)
1893 {
1894 	char *freq_c = NULL;
1895 	uint len, p;
1896 	int xtalfreq = 0;
1897 
1898 	/* Do not re-calculate if xtalfreq is overridden by module parameter */
1899 	if (dngl_xtalfreq)
1900 		return;
1901 
1902 	/* look for "xtalfreq=xxxx" line in nvram */
1903 	len = strlen("xtalfreq");
1904 	for (p = 0; p < (nvram_sz - len) && nvram[p]; ) {
1905 		if ((bcmp(&nvram[p], "xtalfreq", len) == 0) && (nvram[p + len] == '=')) {
1906 			freq_c = &nvram[p + len + 1u];
1907 			break;
1908 		}
1909 		/* jump to next line */
1910 		while (nvram[p++]);
1911 	}
1912 
1913 	if (freq_c) {
1914 		xtalfreq = bcm_strtoul(freq_c, NULL, 0);
1915 		if (xtalfreq > (INT_MAX / 1000u)) {
1916 			DHD_ERROR(("xtalfreq %d in nvram is too big\n", xtalfreq));
1917 			xtalfreq = 0;
1918 		}
1919 		xtalfreq *= 1000;
1920 	}
1921 
1922 	/* Skip recalculation if:
1923 	 *   nvram doesn't provide "xtalfreq", or
1924 	 *   first calculation was not performed because of a module parameter override, or
1925 	 *   xtalfreq in nvram is the same as the one used in first calculation
1926 	 */
1927 	if (xtalfreq == 0 || bus->xtalfreq == 0 || xtalfreq == bus->xtalfreq) {
1928 		return;
1929 	}
1930 
1931 	/* Print an error message here. Even if the ratio is corrected with the nvram setting, a
1932 	 * dongle reset has already been performed before DHD has access to NVRAM. An insufficient
1933 	 * waiting period for that reset might cause unexpected behavior.
1934 	 */
1935 	DHD_ERROR(("Re-calculating htclkratio because nvram xtalfreq %dHz is different from %dHz\n",
1936 		xtalfreq, bus->xtalfreq));
1937 
1938 	htclkratio = xtalfreq / (bus->ilp_tick * bus->xtal_ratio);
1939 	bus->xtalfreq = xtalfreq;
1940 
1941 	DHD_ERROR(("Corrected dongle/Host clock ratio %u with %dHz xtal frequency\n",
1942 			htclkratio, xtalfreq));
1943 }
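
/*
 * Note on the nvram scan above: the blob is a sequence of NUL-terminated
 * "name=value" strings, so the loop hops to the next entry by skipping past
 * each terminating NUL. A matching entry looks like "xtalfreq=37400" - the
 * value is in kHz, hence the *1000 (and the INT_MAX/1000 overflow guard)
 * before comparing against bus->xtalfreq, which is kept in Hz.
 */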
1944 #endif /* BCMQT_HW */
1945 
1946 static bool
1947 is_bmpu_supported(dhd_bus_t *bus)
1948 {
1949 	if (BCM4378_CHIP(bus->sih->chip) ||
1950 		BCM4376_CHIP(bus->sih->chip) ||
1951 		BCM4387_CHIP(bus->sih->chip) ||
1952 		BCM4385_CHIP(bus->sih->chip)) {
1953 		return TRUE;
1954 	}
1955 	return FALSE;
1956 }
1957 
1958 #define CHIP_COMMON_SCR_DHD_TO_BL_ADDR(sih) (SI_ENUM_BASE(sih) + CC_SCR_DHD_TO_BL)
1959 #define CHIP_COMMON_SCR_BL_TO_DHD_ADDR(sih) (SI_ENUM_BASE(sih) + CC_SCR_BL_TO_DHD)
1960 void
1961 dhdpcie_bus_mpu_disable(dhd_bus_t *bus)
1962 {
1963 	volatile uint32 *cr4_regs;
1964 	uint val = 0;
1965 
1966 	if (is_bmpu_supported(bus) == FALSE) {
1967 		return;
1968 	}
1969 
1970 	/* reset the dhd_to_bl and bl_to_dhd regs to their default values */
1971 	(void)serialized_backplane_access(bus, CHIP_COMMON_SCR_DHD_TO_BL_ADDR(bus->sih),
1972 		sizeof(val), &val, FALSE);
1973 	(void)serialized_backplane_access(bus, CHIP_COMMON_SCR_BL_TO_DHD_ADDR(bus->sih),
1974 		sizeof(val), &val, FALSE);
1975 
1976 	cr4_regs = si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
1977 	if (cr4_regs == NULL) {
1978 		DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
1979 		return;
1980 	}
1981 	if (R_REG(bus->osh, cr4_regs + ARMCR4REG_CORECAP) & ACC_MPU_MASK) {
1982 		/* bus mpu is supported */
1983 		W_REG(bus->osh, cr4_regs + ARMCR4REG_MPUCTRL, 0);
1984 	}
1985 }
1986 
1987 static bool
1988 dhdpcie_dongle_attach(dhd_bus_t *bus)
1989 {
1990 	osl_t *osh = bus->osh;
1991 	volatile void *regsva = (volatile void*)bus->regs;
1992 	uint16 devid;
1993 	uint32 val;
1994 	sbpcieregs_t *sbpcieregs;
1995 	bool dongle_reset_needed;
1996 	uint16 chipid;
1997 
1998 	BCM_REFERENCE(chipid);
1999 
2000 	DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
2001 
2002 	/* Configure CTO Prevention functionality */
2003 #if defined(BCMFPGA_HW) || defined(BCMQT_HW)
2004 	DHD_ERROR(("Disable CTO\n"));
2005 	bus->cto_enable = FALSE;
2006 #else
2007 #if defined(BCMPCIE_CTO_PREVENTION)
2008 	chipid = dhd_get_chipid(bus);
2009 
2010 	if (BCM4349_CHIP(chipid) || BCM4350_CHIP(chipid) || BCM4345_CHIP(chipid)) {
2011 		DHD_ERROR(("Disable CTO\n"));
2012 		bus->cto_enable = FALSE;
2013 	} else {
2014 		DHD_ERROR(("Enable CTO\n"));
2015 		bus->cto_enable = TRUE;
2016 	}
2017 #else
2018 	DHD_ERROR(("Disable CTO\n"));
2019 	bus->cto_enable = FALSE;
2020 #endif /* BCMPCIE_CTO_PREVENTION */
2021 #endif /* BCMFPGA_HW || BCMQT_HW */
2022 
2023 	if (PCIECTO_ENAB(bus)) {
2024 		dhdpcie_cto_init(bus, TRUE);
2025 	}
2026 
2027 #ifdef CONFIG_ARCH_EXYNOS
2028 	link_recovery = bus->dhd;
2029 #endif /* CONFIG_ARCH_EXYNOS */
2030 
2031 	dhd_init_pwr_req_lock(bus);
2032 	dhd_init_bus_lp_state_lock(bus);
2033 	dhd_init_backplane_access_lock(bus);
2034 
2035 	bus->alp_only = TRUE;
2036 	bus->sih = NULL;
2037 
2038 	/* Check the PCIe bus status by reading the configuration space */
2039 	val = OSL_PCI_READ_CONFIG(osh, PCI_CFG_VID, sizeof(uint32));
2040 	if ((val & 0xFFFF) != VENDOR_BROADCOM) {
2041 		DHD_ERROR(("%s : failed to read PCI configuration space!\n", __FUNCTION__));
2042 		goto fail;
2043 	}
2044 	devid = (val >> 16) & 0xFFFF;
2045 	bus->cl_devid = devid;
2046 
2047 	/* Set bar0 window to si_enum_base */
2048 	dhdpcie_bus_cfg_set_bar0_win(bus, si_enum_base(devid));
2049 
2050 	/*
2051 	 * Check the PCI_SPROM_CONTROL register to prevent invalid address access
2052 	 * when switching the address space from PCI_BUS to SI_BUS.
2053 	 */
2054 	val = OSL_PCI_READ_CONFIG(osh, PCI_SPROM_CONTROL, sizeof(uint32));
2055 	if (val == 0xffffffff) {
2056 		DHD_ERROR(("%s : failed to read SPROM control register\n", __FUNCTION__));
2057 		goto fail;
2058 	}
2059 
2060 #if defined(DHD_EFI) || defined(NDIS)
2061 	/* Save good copy of PCIe config space */
2062 	if (BCME_OK != dhdpcie_config_save(bus)) {
2063 		DHD_ERROR(("%s : failed to save PCI configuration space!\n", __FUNCTION__));
2064 		goto fail;
2065 	}
2066 #endif /* DHD_EFI || NDIS */
2067 
2068 	/* si_attach() will provide an SI handle and scan the backplane */
2069 	if (!(bus->sih = si_attach((uint)devid, osh, regsva, PCI_BUS, bus,
2070 	                           &bus->vars, &bus->varsz))) {
2071 		DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__));
2072 		goto fail;
2073 	}
2074 
2075 	if (MULTIBP_ENAB(bus->sih) && (bus->sih->buscorerev >= 66)) {
2076 		/*
2077 		 * HW JIRA - CRWLPCIEGEN2-672
2078 		 * Producer Index Feature which is used by F1 gets reset on F0 FLR
2079 		 * fixed in REV68
2080 		 */
2081 		if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) {
2082 			dhdpcie_ssreset_dis_enum_rst(bus);
2083 		}
2084 
2085 		/* IOV_DEVRESET could exercise si_detach()/si_attach() again, so reset the pwr req refcount:
2086 		*   dhdpcie_bus_release_dongle() --> si_detach()
2087 		*   dhdpcie_dongle_attach() --> si_attach()
2088 		*/
2089 		bus->pwr_req_ref = 0;
2090 	}
2091 
2092 	if (MULTIBP_ENAB(bus->sih)) {
2093 		dhd_bus_pcie_pwr_req_nolock(bus);
2094 	}
2095 
2096 	/* Get info on the ARM and SOCRAM cores... */
2097 	/* Should really be qualified by device id */
2098 	if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
2099 	    (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) ||
2100 	    (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) ||
2101 	    (si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
2102 		bus->armrev = si_corerev(bus->sih);
2103 		bus->coreid = si_coreid(bus->sih);
2104 	} else {
2105 		DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
2106 		goto fail;
2107 	}
2108 
2109 	/* CA7 requires coherent bits on */
2110 	if (bus->coreid == ARMCA7_CORE_ID) {
2111 		val = dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4);
2112 		dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4,
2113 			(val | PCIE_BARCOHERENTACCEN_MASK));
2114 	}
2115 
2116 	/* EFI requirement - stop driver load if FW is already running
2117 	*  need to do this here before pcie_watchdog_reset, because
2118 	*  pcie_watchdog_reset will put the ARM back into halt state
2119 	*/
2120 	if (!dhdpcie_is_arm_halted(bus)) {
2121 		DHD_ERROR(("%s: ARM is not halted, FW is already running! Abort.\n",
2122 				__FUNCTION__));
2123 		goto fail;
2124 	}
2125 
2126 	BCM_REFERENCE(dongle_reset_needed);
2127 
2128 	/* For inbuilt drivers pcie clk req will be done by RC,
2129 	 * so do not do clkreq from dhd
2130 	 */
2131 #if defined(linux) || defined(LINUX)
2132 	if (dhd_download_fw_on_driverload)
2133 #endif /* linux || LINUX */
2134 	{
2135 		/* Enable CLKREQ# */
2136 		dhdpcie_clkreq(bus->osh, 1, 1);
2137 	}
2138 
2139 	/* Calculate htclkratio only for QT, for FPGA it is fixed at 30 */
2140 #ifdef BCMQT_HW
2141 	dhdpcie_htclkratio_cal(bus);
2142 #endif /* BCMQT_HW */
2143 
2144 	/*
2145 	 * bus->dhd will be NULL if it is called from dhd_bus_attach, so need to reset
2146 	 * without checking dongle_isolation flag, but if it is called via some other path
2147 	 * like quiesce FLR, then based on dongle_isolation flag, watchdog_reset should
2148 	 * be called.
2149 	 */
2150 	if (bus->dhd == NULL) {
2151 		/* dhd_attach not yet happened, do dongle reset */
2152 #ifdef DHD_SKIP_DONGLE_RESET_IN_ATTACH
2153 		dongle_reset_needed = FALSE;
2154 #else
2155 		dongle_reset_needed = TRUE;
2156 #endif /* DHD_SKIP_DONGLE_RESET_IN_ATTACH */
2157 	} else {
2158 		/* Based on the dongle_isolation flag, reset the dongle */
2159 		dongle_reset_needed = !(bus->dhd->dongle_isolation);
2160 	}
2161 
2162 	/* Fix for FLR reset specific to 4397a0. Write a value 0x1E in PMU CC reg18 */
2163 	if (BCM4397_CHIP(dhd_get_chipid(bus)) && (bus->sih->chiprev == 0)) {
2164 		uint origidx = 0;
2165 
2166 		origidx = si_coreidx(bus->sih);
2167 		pmu_corereg(bus->sih, SI_CC_IDX, chipcontrol_addr, ~0, PMU_CHIPCTL18);
2168 		pmu_corereg(bus->sih, SI_CC_IDX, chipcontrol_data,
2169 			(PMU_CC18_WL_BOOKER_FORCEPWRDWN_EN | PMU_CC18_WL_P_CHAN_TIMER_SEL_MASK),
2170 			(PMU_CC18_WL_BOOKER_FORCEPWRDWN_EN |
2171 			((PMU_CC18_WL_P_CHAN_TIMER_SEL_8ms << PMU_CC18_WL_P_CHAN_TIMER_SEL_OFF) &
2172 			PMU_CC18_WL_P_CHAN_TIMER_SEL_MASK)));
2173 		si_setcore(bus->sih, origidx, 0);
2174 	}
2175 
2176 	/*
2177 	 * Issue a dongle reset to reset all the cores on the chip - similar to rmmod dhd.
2178 	 * This is required to avoid spurious interrupts to the Host and bring back
2179 	 * dongle to a sane state (on host soft-reboot / watchdog-reboot).
2180 	 */
2181 	if (dongle_reset_needed) {
2182 		dhdpcie_dongle_reset(bus);
2183 	}
2184 
2185 	/* need to set the force_bt_quiesce flag here
2186 	 * before calling dhdpcie_dongle_flr_or_pwr_toggle
2187 	 */
2188 	bus->force_bt_quiesce = TRUE;
2189 	/*
2190 	 * For buscorerev = 66 and after, F0 FLR should be done independent from F1.
2191 	 * So don't need BT quiesce.
2192 	 */
2193 	if (bus->sih->buscorerev >= 66) {
2194 		bus->force_bt_quiesce = FALSE;
2195 	}
2196 
2197 	dhdpcie_dongle_flr_or_pwr_toggle(bus);
2198 
2199 	dhdpcie_bus_mpu_disable(bus);
2200 
2201 	si_setcore(bus->sih, PCIE2_CORE_ID, 0);
2202 	sbpcieregs = (sbpcieregs_t*)(bus->regs);
2203 
2204 	/* WAR for the case where the BAR1 window may not be sized properly: read cfg 0x4e0 and write the value back */
2205 	W_REG(osh, &sbpcieregs->configaddr, 0x4e0);
2206 	val = R_REG(osh, &sbpcieregs->configdata);
2207 	W_REG(osh, &sbpcieregs->configdata, val);
2208 
2209 	/* if chip uses sysmem instead of tcm, typically ARM CA chips */
2210 	if (si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
2211 		if (!(bus->orig_ramsize = si_sysmem_size(bus->sih))) {
2212 			DHD_ERROR(("%s: failed to find SYSMEM memory!\n", __FUNCTION__));
2213 			goto fail;
2214 		}
2215 		/* also populate base address */
2216 		switch ((uint16)bus->sih->chip) {
2217 			case BCM4385_CHIP_ID:
2218 				bus->dongle_ram_base = CA7_4385_RAM_BASE;
2219 				break;
2220 			case BCM4388_CHIP_ID:
2221 			case BCM4389_CHIP_ID:
2222 				bus->dongle_ram_base = CA7_4389_RAM_BASE;
2223 				break;
2224 #ifdef UNRELEASEDCHIP
2225 			case BCM4397_CHIP_ID:
2226 				bus->dongle_ram_base = CA7_4389_RAM_BASE;
2227 				break;
2228 #endif
2229 			default:
2230 				/* also populate base address */
2231 				bus->dongle_ram_base = 0x200000;
2232 				DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n",
2233 					__FUNCTION__, bus->dongle_ram_base));
2234 				break;
2235 		}
2236 	} else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
2237 		if (!(bus->orig_ramsize = si_socram_size(bus->sih))) {
2238 			DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__));
2239 			goto fail;
2240 		}
2241 	} else {
2242 		/* cr4 has a different way to find the RAM size: from the TCM */
2243 		if (!(bus->orig_ramsize = si_tcm_size(bus->sih))) {
2244 			DHD_ERROR(("%s: failed to find CR4-TCM memory!\n", __FUNCTION__));
2245 			goto fail;
2246 		}
2247 		/* also populate base address */
2248 		switch ((uint16)bus->sih->chip) {
2249 		case BCM4339_CHIP_ID:
2250 		case BCM4335_CHIP_ID:
2251 			bus->dongle_ram_base = CR4_4335_RAM_BASE;
2252 			break;
2253 		case BCM4358_CHIP_ID:
2254 		case BCM4354_CHIP_ID:
2255 		case BCM43567_CHIP_ID:
2256 		case BCM43569_CHIP_ID:
2257 		case BCM4350_CHIP_ID:
2258 		case BCM43570_CHIP_ID:
2259 			bus->dongle_ram_base = CR4_4350_RAM_BASE;
2260 			break;
2261 		case BCM4360_CHIP_ID:
2262 			bus->dongle_ram_base = CR4_4360_RAM_BASE;
2263 			break;
2264 
2265 		case BCM4364_CHIP_ID:
2266 			bus->dongle_ram_base = CR4_4364_RAM_BASE;
2267 			break;
2268 
2269 		CASE_BCM4345_CHIP:
2270 			bus->dongle_ram_base = (bus->sih->chiprev < 6)  /* changed at 4345C0 */
2271 				? CR4_4345_LT_C0_RAM_BASE : CR4_4345_GE_C0_RAM_BASE;
2272 			break;
2273 		CASE_BCM43602_CHIP:
2274 			bus->dongle_ram_base = CR4_43602_RAM_BASE;
2275 			break;
2276 		case BCM4349_CHIP_GRPID:
2277 			/* RAM base changed from 4349c0 (revid=9) onwards */
2278 			bus->dongle_ram_base = ((bus->sih->chiprev < 9) ?
2279 				CR4_4349_RAM_BASE : CR4_4349_RAM_BASE_FROM_REV_9);
2280 			break;
2281 		case BCM4347_CHIP_ID:
2282 		case BCM4357_CHIP_ID:
2283 		case BCM4361_CHIP_ID:
2284 			bus->dongle_ram_base = CR4_4347_RAM_BASE;
2285 			break;
2286 		case BCM43751_CHIP_ID:
2287 			bus->dongle_ram_base = CR4_43751_RAM_BASE;
2288 			break;
2289 		case BCM43752_CHIP_ID:
2290 			bus->dongle_ram_base = CR4_43752_RAM_BASE;
2291 			break;
2292 		case BCM4376_CHIP_GRPID:
2293 			bus->dongle_ram_base = CR4_4376_RAM_BASE;
2294 			break;
2295 		case BCM4378_CHIP_GRPID:
2296 			bus->dongle_ram_base = CR4_4378_RAM_BASE;
2297 			break;
2298 		case BCM4362_CHIP_ID:
2299 			bus->dongle_ram_base = CR4_4362_RAM_BASE;
2300 			break;
2301 		case BCM4375_CHIP_ID:
2302 		case BCM4369_CHIP_ID:
2303 			bus->dongle_ram_base = CR4_4369_RAM_BASE;
2304 			break;
2305 		case BCM4377_CHIP_ID:
2306 			bus->dongle_ram_base = CR4_4377_RAM_BASE;
2307 			break;
2308 		case BCM4387_CHIP_GRPID:
2309 			bus->dongle_ram_base = CR4_4387_RAM_BASE;
2310 			break;
2311 		case BCM4385_CHIP_ID:
2312 			bus->dongle_ram_base = CR4_4385_RAM_BASE;
2313 			break;
2314 		default:
2315 			bus->dongle_ram_base = 0;
2316 			DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n",
2317 			           __FUNCTION__, bus->dongle_ram_base));
2318 		}
2319 	}
2320 	bus->ramsize = bus->orig_ramsize;
2321 	if (dhd_dongle_ramsize) {
2322 		dhdpcie_bus_dongle_setmemsize(bus, dhd_dongle_ramsize);
2323 	}
2324 
2325 	if (bus->ramsize > DONGLE_TCM_MAP_SIZE) {
2326 		DHD_ERROR(("%s : invalid ramsize %d(0x%x) is returned from dongle\n",
2327 				__FUNCTION__, bus->ramsize, bus->ramsize));
2328 		goto fail;
2329 	}
2330 
2331 	DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n",
2332 	           bus->ramsize, bus->orig_ramsize, bus->dongle_ram_base));
2333 
2334 	dhdpcie_bar1_window_switch_enab(bus);
2335 
2336 	/* Init bar1_switch_lock only after bar1_switch_enab is inited */
2337 	dhd_init_bar1_switch_lock(bus);
2338 
2339 	bus->srmemsize = si_socram_srmem_size(bus->sih);
2340 
2341 	dhdpcie_bus_intr_init(bus);
2342 
2343 	/* Set the poll and/or interrupt flags */
2344 	bus->intr = (bool)dhd_intr;
2345 	if ((bus->poll = (bool)dhd_poll))
2346 		bus->pollrate = 1;
2347 #ifdef DHD_DISABLE_ASPM
2348 	dhd_bus_aspm_enable_rc_ep(bus, FALSE);
2349 #endif /* DHD_DISABLE_ASPM */
2350 #ifdef PCIE_OOB
2351 	dhdpcie_oob_init(bus);
2352 #endif /* PCIE_OOB */
2353 #ifdef PCIE_INB_DW
2354 	bus->inb_enabled = TRUE;
2355 #endif /* PCIE_INB_DW */
2356 #if defined(PCIE_OOB) || defined(PCIE_INB_DW)
2357 	bus->ds_enabled = TRUE;
2358 	bus->deep_sleep = TRUE;
2359 #endif
2360 
2361 	bus->idma_enabled = TRUE;
2362 	bus->ifrm_enabled = TRUE;
2363 #ifdef BCMINTERNAL
2364 	bus->dma_chan = 0;
2365 #endif /* BCMINTERNAL */
2366 
2367 	dhdpcie_pme_stat_clear(bus);
2368 
2369 	if (MULTIBP_ENAB(bus->sih)) {
2370 		dhd_bus_pcie_pwr_req_clear_nolock(bus);
2371 
2372 		/*
2373 		 * One time clearing of Common Power Domain since HW default is set
2374 		 * Needs to be after FLR because FLR resets PCIe enum back to HW defaults
2375 		 * for 4378B0 (rev 68).
2376 		 * On 4378A0 (rev 66), PCIe enum reset is disabled due to CRWLPCIEGEN2-672
2377 		 */
2378 		si_srpwr_request(bus->sih, SRPWR_DMN0_PCIE_MASK, 0);
2379 
2380 		/*
2381 		 * WAR to fix ARM cold boot;
2382 		 * Assert WL domain in DAR helps but not enum
2383 		 */
2384 		if (bus->sih->buscorerev >= 68) {
2385 			dhd_bus_pcie_pwr_req_wl_domain(bus,
2386 				DAR_PCIE_PWR_CTRL((bus->sih)->buscorerev), TRUE);
2387 		}
2388 	}
2389 
2390 	DHD_TRACE(("%s: EXIT: SUCCESS\n", __FUNCTION__));
2391 
2392 	return 0;
2393 
2394 fail:
2395 /* For EFI, even if there is an error, the load still succeeds,
2396 * so si_detach should not be called here; it is called during unload
2397 */
2398 #ifndef DHD_EFI
2399 	/*
2400 	 * As request_irq is done later, CTO will not be detected until then,
2401 	 * so unconditionally dump cfg and DAR registers.
2402 	 */
2403 	dhd_bus_dump_imp_cfg_registers(bus);
2404 	/* Check if CTO has happened */
2405 	if (PCIECTO_ENAB(bus)) {
2406 		/* read pci_intstatus */
2407 		uint32 pci_intstatus =
2408 			dhdpcie_bus_cfg_read_dword(bus, PCI_INT_STATUS, 4);
2409 		if (pci_intstatus == (uint32)-1) {
2410 			DHD_ERROR(("%s : Invalid pci_intstatus(0x%x)\n",
2411 				__FUNCTION__, pci_intstatus));
2412 		} else if (pci_intstatus & PCI_CTO_INT_MASK) {
2413 			DHD_ERROR(("%s: ##### CTO REPORTED BY DONGLE "
2414 				"intstat=0x%x enab=%d\n", __FUNCTION__,
2415 				pci_intstatus, bus->cto_enable));
2416 		}
2417 	}
2418 	dhd_deinit_pwr_req_lock(bus);
2419 	dhd_deinit_bus_lp_state_lock(bus);
2420 	dhd_deinit_backplane_access_lock(bus);
2421 
2422 	if (bus->sih != NULL) {
2423 		/* Dump DAR registers only if si_attach has succeeded */
2424 		dhd_bus_dump_dar_registers(bus);
2425 		if (MULTIBP_ENAB(bus->sih)) {
2426 			dhd_bus_pcie_pwr_req_clear_nolock(bus);
2427 		}
2428 
2429 		si_detach(bus->sih);
2430 		bus->sih = NULL;
2431 	}
2432 
2433 #endif /* DHD_EFI */
2434 	DHD_TRACE(("%s: EXIT: FAILURE\n", __FUNCTION__));
2435 	return -1;
2436 }
2437 
2438 int
2439 dhpcie_bus_unmask_interrupt(dhd_bus_t *bus)
2440 {
2441 	dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, I_MB);
2442 	return 0;
2443 }
2444 int
2445 dhpcie_bus_mask_interrupt(dhd_bus_t *bus)
2446 {
2447 	dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, 0x0);
2448 	return 0;
2449 }
2450 
2451 /* Non atomic function, caller should hold appropriate lock */
2452 void
2453 dhdpcie_bus_intr_enable(dhd_bus_t *bus)
2454 {
2455 	DHD_TRACE(("%s Enter\n", __FUNCTION__));
2456 	if (bus) {
2457 		if (bus->sih && !bus->is_linkdown) {
2458 			/* Skip after receiving D3 ACK */
2459 			if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
2460 				return;
2461 			}
2462 			if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
2463 				(bus->sih->buscorerev == 4)) {
2464 				dhpcie_bus_unmask_interrupt(bus);
2465 			} else {
2466 #ifdef DHD_MMIO_TRACE
2467 				dhd_bus_mmio_trace(bus, bus->pcie_mailbox_mask,
2468 					bus->def_intmask, TRUE);
2469 #endif /* defined(DHD_MMIO_TRACE) */
2470 				si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask,
2471 					bus->def_intmask, bus->def_intmask);
2472 			}
2473 		}
2474 
2475 #if defined(NDIS)
2476 		dhd_msix_message_set(bus->dhd, 0, 0, TRUE);
2477 #endif
2478 	}
2479 
2480 	DHD_TRACE(("%s Exit\n", __FUNCTION__));
2481 }
2482 
2483 /* Non atomic function, caller should hold appropriate lock */
2484 void
2485 dhdpcie_bus_intr_disable(dhd_bus_t *bus)
2486 {
2487 	DHD_TRACE(("%s Enter\n", __FUNCTION__));
2488 	if (bus && bus->sih && !bus->is_linkdown) {
2489 		/* Skip after receiving D3 ACK */
2490 		if (DHD_CHK_BUS_LPS_D3_ACKED(bus)) {
2491 			return;
2492 		}
2493 
2494 		if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
2495 			(bus->sih->buscorerev == 4)) {
2496 			dhpcie_bus_mask_interrupt(bus);
2497 		} else {
2498 #ifdef DHD_MMIO_TRACE
2499 			dhd_bus_mmio_trace(bus, bus->pcie_mailbox_mask, 0, TRUE);
2500 #endif /* defined(DHD_MMIO_TRACE) */
2501 			si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask,
2502 				bus->def_intmask, 0);
2503 		}
2504 	}
2505 #if defined(NDIS)
2506 	/*
2507 	 * dhdpcie_bus_intr_disable may get called from
2508 	 * dhdpcie_dongle_attach -> dhdpcie_dongle_reset
2509 	 * with dhd = NULL during attach time. So check for bus->dhd NULL before
2510 	 * calling dhd_msix_message_set
2511 	 */
2512 	if (bus && bus->dhd) {
2513 		dhd_msix_message_set(bus->dhd, 0, 0, FALSE);
2514 	}
2515 #endif
2516 
2517 	DHD_TRACE(("%s Exit\n", __FUNCTION__));
2518 }
2519 
2520 /*
2521  *  dhdpcie_advertise_bus_cleanup advertises that cleanup is in progress
2522  * to other bus user contexts like Tx, Rx, IOVAR, WD etc. and waits for those contexts
2523  * to gracefully exit. Before marking busstate as busy, each bus usage context checks
2524  * whether the busstate is DHD_BUS_DOWN or DHD_BUS_DOWN_IN_PROGRESS; if so,
2525  * it exits right there without marking dhd_bus_busy_state as BUSY.
2526  */
2527 void
2528 dhdpcie_advertise_bus_cleanup(dhd_pub_t *dhdp)
2529 {
2530 	unsigned long flags;
2531 	int timeleft;
2532 
2533 #ifdef DHD_PCIE_RUNTIMEPM
2534 	dhdpcie_runtime_bus_wake(dhdp, TRUE, dhdpcie_advertise_bus_cleanup);
2535 #endif /* DHD_PCIE_RUNTIMEPM */
2536 
2537 	dhdp->dhd_watchdog_ms_backup = dhd_watchdog_ms;
2538 	if (dhdp->dhd_watchdog_ms_backup) {
2539 		DHD_ERROR(("%s: Disabling wdtick before dhd deinit\n",
2540 			__FUNCTION__));
2541 		dhd_os_wd_timer(dhdp, 0);
2542 	}
2543 	if (dhdp->busstate != DHD_BUS_DOWN) {
2544 #ifdef DHD_DONGLE_TRAP_IN_DETACH
2545 		/*
2546 		 * On x86 platforms, rmmod/insmod fails because some power
2547 		 * resources are not held high.
2548 		 * Hence induce a DB7 trap during detach; in the FW trap handler all
2549 		 * power resources are held high.
2550 		 */
2551 		if (!dhd_query_bus_erros(dhdp) && dhdp->db7_trap.fw_db7w_trap) {
2552 			dhdp->db7_trap.fw_db7w_trap_inprogress = TRUE;
2553 			dhdpcie_fw_trap(dhdp->bus);
2554 			OSL_DELAY(100 * 1000); // wait 100 msec
2555 			dhdp->db7_trap.fw_db7w_trap_inprogress = FALSE;
2556 		} else {
2557 			DHD_ERROR(("%s: DB7 Not sent!!!\n",
2558 				__FUNCTION__));
2559 		}
2560 #endif /* DHD_DONGLE_TRAP_IN_DETACH */
2561 		DHD_GENERAL_LOCK(dhdp, flags);
2562 		dhdp->busstate = DHD_BUS_DOWN_IN_PROGRESS;
2563 		DHD_GENERAL_UNLOCK(dhdp, flags);
2564 	}
2565 
2566 	timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state);
2567 #ifdef LINUX
2568 	if ((timeleft == 0) || (timeleft == 1)) {
2569 #else
2570 	if (timeleft == 0) {
2571 #endif
2572 		/* XXX This condition ideally should not occur, this means some
2573 		 * bus usage context is not clearing the respective usage bit, print
2574 		 * dhd_bus_busy_state and crash the host for further debugging.
2575 		 */
2576 		DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n",
2577 				__FUNCTION__, dhdp->dhd_bus_busy_state));
2578 		ASSERT(0);
2579 	}
2580 
2581 	return;
2582 }
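
/*
 * Sketch of the counterpart handshake expected in a bus-user context
 * (illustrative only - the actual Tx/Rx/IOVAR paths use their own busy bits
 * and return paths): bail out while cleanup is advertised, otherwise mark
 * busy, do the work, then clear the bit and wake any waiter sitting in
 * dhd_os_busbusy_wait_negation().
 */
#if 0
	DHD_GENERAL_LOCK(dhdp, flags);
	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
		DHD_GENERAL_UNLOCK(dhdp, flags);
		return BCME_BUSY;               /* cleanup in progress - exit here */
	}
	DHD_BUS_BUSY_SET_IN_TX(dhdp);           /* mark this context active */
	DHD_GENERAL_UNLOCK(dhdp, flags);

	/* ... perform the bus access ... */

	DHD_GENERAL_LOCK(dhdp, flags);
	DHD_BUS_BUSY_CLEAR_IN_TX(dhdp);
	dhd_os_busbusy_wake(dhdp);              /* let the cleanup thread proceed */
	DHD_GENERAL_UNLOCK(dhdp, flags);
#endif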
2583 
2584 static void
2585 dhdpcie_advertise_bus_remove(dhd_pub_t *dhdp)
2586 {
2587 	unsigned long flags;
2588 	int timeleft;
2589 
2590 	DHD_GENERAL_LOCK(dhdp, flags);
2591 	dhdp->busstate = DHD_BUS_REMOVE;
2592 	DHD_GENERAL_UNLOCK(dhdp, flags);
2593 
2594 	timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state);
2595 	if ((timeleft == 0) || (timeleft == 1)) {
2596 		DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n",
2597 				__FUNCTION__, dhdp->dhd_bus_busy_state));
2598 		ASSERT(0);
2599 	}
2600 
2601 	return;
2602 }
2603 
2604 static void
2605 dhdpcie_bus_remove_prep(dhd_bus_t *bus)
2606 {
2607 	unsigned long flags;
2608 	DHD_TRACE(("%s Enter\n", __FUNCTION__));
2609 
2610 	DHD_GENERAL_LOCK(bus->dhd, flags);
2611 	DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
2612 	bus->dhd->busstate = DHD_BUS_DOWN;
2613 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
2614 
2615 #ifdef PCIE_INB_DW
2616 	/* De-initialize the lock used to serialize Device Wake Inband activities */
2617 	if (bus->inb_lock) {
2618 		osl_spin_lock_deinit(bus->dhd->osh, bus->inb_lock);
2619 		bus->inb_lock = NULL;
2620 	}
2621 #endif
2622 
2623 	dhd_os_sdlock(bus->dhd);
2624 
2625 	if (bus->sih && !bus->dhd->dongle_isolation) {
2626 
2627 		dhd_bus_pcie_pwr_req_reload_war(bus);
2628 
2629 		/* Skip below WARs for Android as insmod fails after rmmod in Brix Android */
2630 #if !defined(OEM_ANDROID)
2631 		/* HW4347-909, Set PCIE TRefUp time to 100us for 4347/4377 */
2632 		if ((bus->sih->buscorerev == 19) || (bus->sih->buscorerev == 23)) {
2633 			pcie_set_trefup_time_100us(bus->sih);
2634 		}
2635 
2636 		/* disable fast lpo from 4347/4377 */
2637 		/* For 4378/4387/4389, do not disable fast lpo because we always enable fast lpo;
2638 		 * disabling it causes insmod/rmmod reload failure.
2639 		 */
2640 		if ((PMUREV(bus->sih->pmurev) > 31) &&
2641 			!(PCIE_FASTLPO_ENABLED(bus->sih->buscorerev))) {
2642 			si_pmu_fast_lpo_disable(bus->sih);
2643 		}
2644 #endif /* !OEM_ANDROID */
2645 
2646 		/* if the pcie link is down, watchdog reset
2647 		* should not be done, as it may hang
2648 		*/
2649 
2650 		if (!bus->is_linkdown) {
2651 			/* For Non-EFI modular builds, do dongle reset during rmmod */
2652 #ifndef DHD_EFI
2653 			/* For EFI-DHD this compile flag will be defined.
2654 			 * In EFI, depending on bt over pcie mode
2655 			 * we either power toggle or do F0 FLR
2656 			 * from dhdpcie_bus_release dongle. So no need to
2657 			 * do dongle reset from here
2658 			 */
2659 			dhdpcie_dongle_reset(bus);
2660 #endif /* !DHD_EFI */
2661 		}
2662 
2663 		bus->dhd->is_pcie_watchdog_reset = TRUE;
2664 	}
2665 
2666 	dhd_os_sdunlock(bus->dhd);
2667 
2668 	DHD_TRACE(("%s Exit\n", __FUNCTION__));
2669 }
2670 
2671 void
2672 dhd_init_bus_lp_state_lock(dhd_bus_t *bus)
2673 {
2674 	if (!bus->bus_lp_state_lock) {
2675 		bus->bus_lp_state_lock = osl_spin_lock_init(bus->osh);
2676 	}
2677 }
2678 
2679 void
2680 dhd_deinit_bus_lp_state_lock(dhd_bus_t *bus)
2681 {
2682 	if (bus->bus_lp_state_lock) {
2683 		osl_spin_lock_deinit(bus->osh, bus->bus_lp_state_lock);
2684 		bus->bus_lp_state_lock = NULL;
2685 	}
2686 }
2687 
2688 void
2689 dhd_init_backplane_access_lock(dhd_bus_t *bus)
2690 {
2691 	if (!bus->backplane_access_lock) {
2692 		bus->backplane_access_lock = osl_spin_lock_init(bus->osh);
2693 	}
2694 }
2695 
2696 void
2697 dhd_deinit_backplane_access_lock(dhd_bus_t *bus)
2698 {
2699 	if (bus->backplane_access_lock) {
2700 		osl_spin_lock_deinit(bus->osh, bus->backplane_access_lock);
2701 		bus->backplane_access_lock = NULL;
2702 	}
2703 }
2704 
2705 /** Detach and free everything */
2706 void
2707 dhdpcie_bus_release(dhd_bus_t *bus)
2708 {
2709 	bool dongle_isolation = FALSE;
2710 #ifdef BCMQT
2711 	uint buscorerev = 0;
2712 #endif /* BCMQT */
2713 	osl_t *osh = NULL;
2714 
2715 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2716 
2717 	if (bus) {
2718 
2719 		osh = bus->osh;
2720 		ASSERT(osh);
2721 
2722 		if (bus->dhd) {
2723 #if defined(DEBUGGER) || defined (DHD_DSCOPE)
2724 			debugger_close();
2725 #endif /* DEBUGGER || DHD_DSCOPE */
2726 			dhdpcie_advertise_bus_remove(bus->dhd);
2727 			dongle_isolation = bus->dhd->dongle_isolation;
2728 			bus->dhd->is_pcie_watchdog_reset = FALSE;
2729 			dhdpcie_bus_remove_prep(bus);
2730 
2731 			if (bus->intr) {
2732 				dhdpcie_bus_intr_disable(bus);
2733 				dhdpcie_free_irq(bus);
2734 			}
2735 			dhd_deinit_bus_lp_state_lock(bus);
2736 			dhd_deinit_bar1_switch_lock(bus);
2737 			dhd_deinit_backplane_access_lock(bus);
2738 			dhd_deinit_pwr_req_lock(bus);
2739 #ifdef PCIE_INB_DW
2740 			dhd_deinit_dongle_ds_lock(bus);
2741 #endif /* PCIE_INB_DW */
2742 #ifdef BCMQT
2743 			if (IDMA_ACTIVE(bus->dhd)) {
2744 			/**
2745 			 * On FPGA, during the exit path force-set the "IDMA Control Register"
2746 			 * to its default value 0x0. Otherwise host-dongle sync for IDMA fails
2747 			 * during the next IDMA initialization (without a system reboot)
2748 			 */
2749 				buscorerev = bus->sih->buscorerev;
2750 				si_corereg(bus->sih, bus->sih->buscoreidx,
2751 					IDMAControl(buscorerev), ~0, 0);
2752 			}
2753 #endif /* BCMQT */
2754 			/**
2755 			 * dhdpcie_bus_release_dongle frees the bus->sih handle, which is needed to
2756 			 * access dongle registers.
2757 			 * dhd_detach will communicate with the dongle to delete flowrings etc.
2758 			 * So dhdpcie_bus_release_dongle should be called only after dhd_detach.
2759 			 */
2760 			dhd_detach(bus->dhd);
2761 			dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
2762 			dhd_free(bus->dhd);
2763 			bus->dhd = NULL;
2764 		}
2765 #ifdef DHD_EFI
2766 		else {
2767 			if (bus->intr) {
2768 				dhdpcie_bus_intr_disable(bus);
2769 				dhdpcie_free_irq(bus);
2770 			}
2771 			dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
2772 		}
2773 #endif /* DHD_EFI */
2774 		/* unmap the regs and tcm here!! */
2775 		if (bus->regs) {
2776 			dhdpcie_bus_reg_unmap(osh, bus->regs, DONGLE_REG_MAP_SIZE);
2777 			bus->regs = NULL;
2778 		}
2779 		if (bus->tcm) {
2780 			dhdpcie_bus_reg_unmap(osh, bus->tcm, DONGLE_TCM_MAP_SIZE);
2781 			bus->tcm = NULL;
2782 		}
2783 
2784 		dhdpcie_bus_release_malloc(bus, osh);
2785 		/* Detach pcie shared structure */
2786 		if (bus->pcie_sh) {
2787 			MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
2788 		}
2789 
2790 		if (bus->console.buf != NULL) {
2791 			MFREE(osh, bus->console.buf, bus->console.bufsize);
2792 		}
2793 
2794 #ifdef BCMINTERNAL
2795 		if (bus->msi_sim) {
2796 			DMA_UNMAP(osh, bus->msi_sim_phys, MSI_SIM_BUFSIZE, DMA_RX, 0, 0);
2797 			 MFREE(osh, bus->msi_sim_addr, MSI_SIM_BUFSIZE);
2798 		}
2799 
2800 		/* free host fw buffer if there is any */
2801 		if (bus->hostfw_buf.va) {
2802 			DMA_FREE_CONSISTENT(osh, bus->hostfw_buf.va, bus->hostfw_buf._alloced,
2803 				bus->hostfw_buf.pa, bus->hostfw_buf.dmah);
2804 			memset(&bus->hostfw_buf, 0, sizeof(bus->hostfw_buf));
2805 		}
2806 #endif /* BCMINTERNAL */
2807 
2808 		/* Finally free bus info */
2809 		MFREE(osh, bus, sizeof(dhd_bus_t));
2810 
2811 		g_dhd_bus = NULL;
2812 	}
2813 
2814 	DHD_TRACE(("%s: Exit\n", __FUNCTION__));
2815 } /* dhdpcie_bus_release */
2816 
2817 void
2818 dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag)
2819 {
2820 	DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__,
2821 		bus->dhd, bus->dhd->dongle_reset));
2822 
2823 	if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag) {
2824 		goto fail;
2825 	}
2826 
2827 	if (bus->is_linkdown) {
2828 		DHD_ERROR(("%s : Skip release dongle due to linkdown \n", __FUNCTION__));
2829 		goto fail;
2830 	}
2831 
2832 	if (bus->sih) {
2833 #ifdef BCMINTERNAL
2834 		if (bus->msi_sim)  {
2835 			/* disable MSI */
2836 			si_corereg(bus->sih, bus->sih->buscoreidx,
2837 				OFFSETOF(sbpcieregs_t, configaddr), ~0, PCIE_CFG_MSICAP_OFFSET);
2838 			si_corereg(bus->sih, bus->sih->buscoreidx,
2839 				OFFSETOF(sbpcieregs_t, configdata), ~0,
2840 				PCIE_CFG_MSICAP_DISABLE_MSI);
2841 		}
2842 #endif /* BCMINTERNAL */
2843 
2844 		/*
2845 		 * Perform dongle reset only if dongle isolation is not enabled.
2846 		 * In android platforms, dongle isolation will be enabled and
2847 		 * quiescing dongle will be done using DB7 trap.
2848 		 */
2849 		if (!dongle_isolation &&
2850 			bus->dhd && !bus->dhd->is_pcie_watchdog_reset) {
2851 			dhdpcie_dongle_reset(bus);
2852 		}
2853 
2854 		/* This will be effective only for EFI */
2855 		dhdpcie_dongle_flr_or_pwr_toggle(bus);
2856 
2857 		if (bus->ltrsleep_on_unload) {
2858 			si_corereg(bus->sih, bus->sih->buscoreidx,
2859 				OFFSETOF(sbpcieregs_t, u.pcie2.ltr_state), ~0, 0);
2860 		}
2861 
2862 		if (bus->sih->buscorerev == 13)
2863 			 pcie_serdes_iddqdisable(bus->osh, bus->sih,
2864 			                         (sbpcieregs_t *) bus->regs);
2865 
2866 		/* For inbuilt drivers pcie clk req will be done by RC,
2867 		 * so do not do clkreq from dhd
2868 		 */
2869 #if defined(linux) || defined(LINUX)
2870 		if (dhd_download_fw_on_driverload)
2871 #endif /* linux || LINUX */
2872 		{
2873 			/* Disable CLKREQ# */
2874 			dhdpcie_clkreq(bus->osh, 1, 0);
2875 		}
2876 	}
2877 fail:
2878 	/* Resources should be freed */
2879 	if (bus->sih) {
2880 		si_detach(bus->sih);
2881 		bus->sih = NULL;
2882 	}
2883 	if (bus->vars && bus->varsz) {
2884 		MFREE(osh, bus->vars, bus->varsz);
2885 		bus->vars = NULL;
2886 	}
2887 
2888 	DHD_TRACE(("%s Exit\n", __FUNCTION__));
2889 }
2890 
2891 uint32
2892 dhdpcie_bus_cfg_read_dword(dhd_bus_t *bus, uint32 addr, uint32 size)
2893 {
2894 	uint32 data = OSL_PCI_READ_CONFIG(bus->osh, addr, size);
2895 	return data;
2896 }
2897 
2898 /** 32 bit config write */
2899 void
2900 dhdpcie_bus_cfg_write_dword(dhd_bus_t *bus, uint32 addr, uint32 size, uint32 data)
2901 {
2902 	OSL_PCI_WRITE_CONFIG(bus->osh, addr, size, data);
2903 }
2904 
2905 void
2906 dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data)
2907 {
2908 	OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, 4, data);
2909 }
2910 
2911 void
2912 dhdpcie_bus_dongle_setmemsize(struct dhd_bus *bus, int mem_size)
2913 {
2914 	int32 min_size =  DONGLE_MIN_MEMSIZE;
2915 	/* Restrict the memsize to user specified limit */
2916 	DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d max accepted %d\n",
2917 		mem_size, min_size, (int32)bus->orig_ramsize));
2918 	if ((mem_size > min_size) &&
2919 		(mem_size < (int32)bus->orig_ramsize)) {
2920 		bus->ramsize = mem_size;
2921 	} else {
2922 		DHD_ERROR(("%s: Invalid mem_size %d\n", __FUNCTION__, mem_size));
2923 	}
2924 }
2925 
2926 void
2927 dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh)
2928 {
2929 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2930 
2931 	if (bus->dhd && bus->dhd->dongle_reset)
2932 		return;
2933 
2934 	if (bus->vars && bus->varsz) {
2935 		MFREE(osh, bus->vars, bus->varsz);
2936 	}
2937 
2938 	DHD_TRACE(("%s: Exit\n", __FUNCTION__));
2939 	return;
2940 
2941 }
2942 
2943 /** Stop bus module: clear pending frames, disable data flow */
2944 void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
2945 {
2946 	unsigned long flags;
2947 
2948 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2949 
2950 	if (!bus->dhd)
2951 		return;
2952 
2953 	if (bus->dhd->busstate == DHD_BUS_DOWN) {
2954 		DHD_ERROR(("%s: already down by net_dev_reset\n", __FUNCTION__));
2955 		goto done;
2956 	}
2957 
2958 	DHD_STOP_RPM_TIMER(bus->dhd);
2959 
2960 	DHD_GENERAL_LOCK(bus->dhd, flags);
2961 	DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
2962 	bus->dhd->busstate = DHD_BUS_DOWN;
2963 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
2964 
2965 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
2966 	atomic_set(&bus->dhd->block_bus, TRUE);
2967 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
2968 
2969 	dhdpcie_bus_intr_disable(bus);
2970 
2971 	if (!bus->is_linkdown) {
2972 		uint32 status;
2973 		status = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
2974 		dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, status);
2975 	}
2976 
2977 #if defined(linux) || defined(LINUX)
2978 	if (!dhd_download_fw_on_driverload) {
2979 		dhd_dpc_kill(bus->dhd);
2980 	}
2981 #endif /* linux || LINUX */
2982 
2983 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
2984 	pm_runtime_disable(dhd_bus_to_dev(bus));
2985 	pm_runtime_set_suspended(dhd_bus_to_dev(bus));
2986 	pm_runtime_enable(dhd_bus_to_dev(bus));
2987 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
2988 
2989 	/* Clear rx control and wake any waiters */
2990 	/* XXX More important in disconnect, but no context? */
2991 	dhd_os_set_ioctl_resp_timeout(IOCTL_DISABLE_TIMEOUT);
2992 	dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_BUS_STOP);
2993 
2994 done:
2995 	return;
2996 }
2997 
2998 #ifdef DEVICE_TX_STUCK_DETECT
2999 void
3000 dhd_bus_send_msg_to_daemon(int reason)
3001 {
3002 	bcm_to_info_t to_info;
3003 
3004 	to_info.magic = BCM_TO_MAGIC;
3005 	to_info.reason = reason;
3006 
3007 	dhd_send_msg_to_daemon(NULL, (void *)&to_info, sizeof(bcm_to_info_t));
3008 	return;
3009 }
3010 
3011 #define DHD_MEMORY_SET_PATTERN 0xAA
3012 
3013 /**
3014  * Scan the flow rings in the active list to check if any are stuck and notify the application.
3015  * The conditions for warn/stuck detection are
3016  * 1. Flow ring is active
3017  * 2. There are packets to be consumed by the consumer (wr != rd)
3018  * If 1 and 2 are true, then
3019  * 3. Warn, if Tx completion is not received for a duration of DEVICE_TX_STUCK_WARN_DURATION
3020  * 4. Trap FW, if Tx completion is not received for a duration of DEVICE_TX_STUCK_DURATION
3021  */
3022 static void
3023 dhd_bus_device_tx_stuck_scan(dhd_bus_t *bus)
3024 {
3025 	uint32 tx_cmpl;
3026 	unsigned long list_lock_flags;
3027 	unsigned long ring_lock_flags;
3028 	dll_t *item, *prev;
3029 	flow_ring_node_t *flow_ring_node;
3030 	if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)bus->dhd->if_flow_lkup;
3031 	uint8 ifindex;
3032 #ifndef FW_HAS_AGING_LOGIC_ALL_IF
3033 	/**
3034 	 * Since the aging logic is implemented only for INFRA in FW,
3035 	 * DHD should monitor only INFRA for stuck detection.
3036 	 */
3037 	uint8 role;
3038 #endif /* FW_HAS_AGING_LOGIC_ALL_IF */
3039 	bool ring_empty;
3040 	bool active;
3041 	uint8 status;
3042 
3043 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, list_lock_flags);
3044 
3045 	for (item = dll_tail_p(&bus->flowring_active_list);
3046 			!dll_end(&bus->flowring_active_list, item); item = prev) {
3047 
3048 		prev = dll_prev_p(item);
3049 
3050 		flow_ring_node = dhd_constlist_to_flowring(item);
3051 		ifindex = flow_ring_node->flow_info.ifindex;
3052 #ifndef FW_HAS_AGING_LOGIC_ALL_IF
3053 		role = if_flow_lkup[ifindex].role;
3054 		if (role != WLC_E_IF_ROLE_STA) {
3055 			continue;
3056 		}
3057 #endif /* FW_HAS_AGING_LOGIC_ALL_IF */
3058 		DHD_FLOWRING_LOCK(flow_ring_node->lock, ring_lock_flags);
3059 		tx_cmpl = flow_ring_node->tx_cmpl;
3060 		active = flow_ring_node->active;
3061 		status = flow_ring_node->status;
3062 		ring_empty = dhd_prot_is_cmpl_ring_empty(bus->dhd, flow_ring_node->prot_info);
3063 		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, ring_lock_flags);
3064 		/*
3065 		 * Need not monitor the flow ring if,
3066 		 * 1. flow ring is empty
3067 		 * 2. LINK is down
3068 		 * 3. flow ring is not in FLOW_RING_STATUS_OPEN state
3069 		 */
3070 		if ((ring_empty) || !(if_flow_lkup[ifindex].status) ||
3071 			(status != FLOW_RING_STATUS_OPEN)) {
3072 			/* reset counters etc. */
3073 			flow_ring_node->stuck_count = 0;
3074 			flow_ring_node->tx_cmpl_prev = tx_cmpl;
3075 			continue;
3076 		}
3077 		/**
3078 		 * DEVICE_TX_STUCK_WARN_DURATION, DEVICE_TX_STUCK_DURATION are integer
3079 		 * representation of time, to decide if a flow is in warn state or stuck.
3080 		 *
3081 		 * flow_ring_node->stuck_count is an integer counter representing how long
3082 		 * tx_cmpl is not received though there are pending packets in the ring
3083 		 * to be consumed by the dongle for that particular flow.
3084 		 *
3085 		 * This method of determining time elapsed is helpful in sleep/wake scenarios.
3086 		 * If host sleeps and wakes up, that sleep time is not considered into
3087 		 * stuck duration.
3088 		 */
3089 		if ((tx_cmpl == flow_ring_node->tx_cmpl_prev) && active) {
3090 
3091 			flow_ring_node->stuck_count++;
3092 
3093 			DHD_ERROR(("%s: flowid: %d tx_cmpl: %u tx_cmpl_prev: %u stuck_count: %d\n",
3094 				__func__, flow_ring_node->flowid, tx_cmpl,
3095 				flow_ring_node->tx_cmpl_prev, flow_ring_node->stuck_count));
3096 			dhd_prot_dump_ring_ptrs(flow_ring_node->prot_info);
3097 
3098 			switch (flow_ring_node->stuck_count) {
3099 				case DEVICE_TX_STUCK_WARN_DURATION:
3100 					/**
3101 					 * Notify Device Tx Stuck Notification App about the
3102 					 * device Tx stuck warning for this flowid.
3103 					 * App will collect the logs required.
3104 					 */
3105 					DHD_ERROR(("stuck warning for flowid: %d sent to app\n",
3106 						flow_ring_node->flowid));
3107 					dhd_bus_send_msg_to_daemon(REASON_DEVICE_TX_STUCK_WARNING);
3108 					break;
3109 				case DEVICE_TX_STUCK_DURATION:
3110 					/**
3111 					 * Notify Device Tx Stuck Notification App about the
3112 					 * device Tx stuck info for this flowid.
3113 					 * App will collect the logs required.
3114 					 */
3115 					DHD_ERROR(("stuck information for flowid: %d sent to app\n",
3116 						flow_ring_node->flowid));
3117 					dhd_bus_send_msg_to_daemon(REASON_DEVICE_TX_STUCK);
3118 					break;
3119 				default:
3120 					break;
3121 			}
3122 		} else {
3123 			flow_ring_node->tx_cmpl_prev = tx_cmpl;
3124 			flow_ring_node->stuck_count = 0;
3125 		}
3126 	}
3127 	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, list_lock_flags);
3128 }
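
/*
 * Timing example (macro values are illustrative): with the scan running every
 * DEVICE_TX_STUCK_CKECK_TIMEOUT ms and DEVICE_TX_STUCK_WARN_DURATION == 3, a
 * flow whose tx_cmpl stays frozen across three consecutive scans raises the
 * warning; reaching DEVICE_TX_STUCK_DURATION scans raises the stuck
 * notification. Host sleep never advances stuck_count, so suspended time does
 * not count toward the stuck duration.
 */
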
3129 /**
3130  * Schedules dhd_bus_device_tx_stuck_scan every DEVICE_TX_STUCK_CKECK_TIMEOUT ms
3131  * to determine if any flowid is stuck.
3132  */
3133 static void
3134 dhd_bus_device_stuck_scan(dhd_bus_t *bus)
3135 {
3136 	uint32 time_stamp; /* in millisec */
3137 	uint32 diff;
3138 
3139 	/* No need to run the algorithm if the dongle has trapped */
3140 	if (bus->dhd->dongle_trap_occured) {
3141 		return;
3142 	}
3143 	time_stamp = OSL_SYSUPTIME();
3144 	diff = time_stamp - bus->device_tx_stuck_check;
3145 	if (diff > DEVICE_TX_STUCK_CKECK_TIMEOUT) {
3146 		dhd_bus_device_tx_stuck_scan(bus);
3147 		bus->device_tx_stuck_check = OSL_SYSUPTIME();
3148 	}
3149 	return;
3150 }
3151 #endif /* DEVICE_TX_STUCK_DETECT */
3152 
3153 /**
3154  * Watchdog timer function.
3155  * @param dhd   Represents a specific hardware (dongle) instance that this DHD manages
3156  */
3157 bool dhd_bus_watchdog(dhd_pub_t *dhd)
3158 {
3159 	unsigned long flags;
3160 	dhd_bus_t *bus = dhd->bus;
3161 
3162 	if (dhd_query_bus_erros(bus->dhd)) {
3163 		return FALSE;
3164 	}
3165 
3166 	DHD_GENERAL_LOCK(dhd, flags);
3167 	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd) ||
3168 			DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd)) {
3169 		DHD_GENERAL_UNLOCK(dhd, flags);
3170 		return FALSE;
3171 	}
3172 	DHD_BUS_BUSY_SET_IN_WD(dhd);
3173 	DHD_GENERAL_UNLOCK(dhd, flags);
3174 
3175 #ifdef DHD_PCIE_RUNTIMEPM
3176 	dhdpcie_runtime_bus_wake(dhd, TRUE, __builtin_return_address(0));
3177 #endif /* DHD_PCIE_RUNTIMEPM */
3178 
3179 #ifdef BCMINTERNAL
3180 	if ((bus->msi_sim) && (++bus->polltick >= bus->pollrate)) {
3181 			uint32 val;
3182 			bus->polltick = 0;
3183 			val = *(uint32 *)bus->msi_sim_addr;
3184 			*(uint32 *)bus->msi_sim_addr = 0;
3185 			if (val) {
3186 				DHD_INFO(("calling	dhdpcie_bus_isr 0x%04x\n", val));
3187 				dhdpcie_bus_isr(bus);
3188 			}
3189 	}
3190 #endif /* BCMINTERNAL */
3191 
3192 #if defined(DHD_EFI) && defined(DHD_INTR_POLL_PERIOD_DYNAMIC)
3193 	dhd_intr_poll_pkt_thresholds(dhd);
3194 #endif /* DHD_EFI && DHD_INTR_POLL_PERIOD_DYNAMIC */
3195 
3196 	/* Poll for console output periodically */
3197 	if (dhd->busstate == DHD_BUS_DATA &&
3198 		dhd->dhd_console_ms != 0 &&
3199 		DHD_CHK_BUS_NOT_IN_LPS(bus)) {
3200 		bus->console.count += dhd_watchdog_ms;
3201 		if (bus->console.count >= dhd->dhd_console_ms) {
3202 			bus->console.count -= dhd->dhd_console_ms;
3203 
3204 			if (MULTIBP_ENAB(bus->sih)) {
3205 				dhd_bus_pcie_pwr_req(bus);
3206 			}
3207 
3208 			/* Make sure backplane clock is on */
3209 			if (dhd->db7_trap.fw_db7w_trap_inprogress == FALSE) {
3210 				if (dhdpcie_bus_readconsole(bus) < 0) {
3211 					DHD_ERROR(("%s: disable dconpoll\n", __FUNCTION__));
3212 					dhd->dhd_console_ms = 0; /* On error, stop trying */
3213 				}
3214 			}
3215 
3216 			if (MULTIBP_ENAB(bus->sih)) {
3217 				dhd_bus_pcie_pwr_req_clear(bus);
3218 			}
3219 		}
3220 	}
3221 
3222 #ifdef DHD_READ_INTSTATUS_IN_DPC
3223 	if (bus->poll) {
3224 		bus->ipend = TRUE;
3225 		bus->dpc_sched = TRUE;
3226 		dhd_sched_dpc(bus->dhd);     /* queue DPC now!! */
3227 	}
3228 #endif /* DHD_READ_INTSTATUS_IN_DPC */
3229 
3230 #ifdef DEVICE_TX_STUCK_DETECT
3231 	if (dhd->bus->dev_tx_stuck_monitor == TRUE) {
3232 		dhd_bus_device_stuck_scan(bus);
3233 	}
3234 #endif /* DEVICE_TX_STUCK_DETECT */
3235 
3236 	DHD_GENERAL_LOCK(dhd, flags);
3237 	DHD_BUS_BUSY_CLEAR_IN_WD(dhd);
3238 	dhd_os_busbusy_wake(dhd);
3239 	DHD_GENERAL_UNLOCK(dhd, flags);
3240 #if !defined(DHD_PCIE_RUNTIMEPM) && (defined(PCIE_OOB) || defined(PCIE_INB_DW))
3241 	dhd->bus->inb_dw_deassert_cnt += dhd_watchdog_ms;
3242 	if (dhd->bus->inb_dw_deassert_cnt >=
3243 		DHD_INB_DW_DEASSERT_MS) {
3244 		dhd->bus->inb_dw_deassert_cnt = 0;
3245 		/* Inband device wake is deasserted from the DPC context after DS_Exit is
3246 		 * received. If no d2h interrupt arrives, the DPC is never scheduled and
3247 		 * inband DW is never deasserted; hence DW is deasserted from the watchdog
3248 		 * thread every 250ms.
3249 		 */
3250 		dhd_bus_dw_deassert(dhd);
3251 	}
3252 #endif /* !DHD_PCIE_RUNTIMEPM && PCIE_OOB || PCIE_INB_DW */
3253 	return TRUE;
3254 } /* dhd_bus_watchdog */
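/*
 * Console-poll cadence sketch (illustrative values, not from the source):
 * each watchdog tick adds dhd_watchdog_ms to bus->console.count, and the
 * dongle console is read once the accumulator reaches dhd_console_ms.
 * For example, with dhd_watchdog_ms = 10 and dhd_console_ms = 250,
 * dhdpcie_bus_readconsole() runs on every 25th tick. Subtracting
 * dhd_console_ms (instead of resetting the counter to 0) preserves any
 * remainder, so the long-run polling rate matches dhd_console_ms.
 */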
3255 
3256 #if defined(SUPPORT_MULTIPLE_REVISION)
3257 static int concate_revision_bcm4358(dhd_bus_t *bus, char *fw_path, char *nv_path)
3258 {
3259 	uint32 chiprev;
3260 #if defined(SUPPORT_MULTIPLE_CHIPS)
3261 	char chipver_tag[20] = "_4358";
3262 #else
3263 	char chipver_tag[10] = {0, };
3264 #endif /* SUPPORT_MULTIPLE_CHIPS */
3265 
3266 	chiprev = dhd_bus_chiprev(bus);
3267 	if (chiprev == 0) {
3268 		DHD_ERROR(("----- CHIP 4358 A0 -----\n"));
3269 		strcat(chipver_tag, "_a0");
3270 	} else if (chiprev == 1) {
3271 		DHD_ERROR(("----- CHIP 4358 A1 -----\n"));
3272 #if defined(SUPPORT_MULTIPLE_CHIPS) || defined(SUPPORT_MULTIPLE_MODULE_CIS)
3273 		strcat(chipver_tag, "_a1");
3274 #endif /* defined(SUPPORT_MULTIPLE_CHIPS) || defined(SUPPORT_MULTIPLE_MODULE_CIS) */
3275 	} else if (chiprev == 3) {
3276 		DHD_ERROR(("----- CHIP 4358 A3 -----\n"));
3277 #if defined(SUPPORT_MULTIPLE_CHIPS)
3278 		strcat(chipver_tag, "_a3");
3279 #endif /* SUPPORT_MULTIPLE_CHIPS */
3280 	} else {
3281 		DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chiprev));
3282 	}
3283 
3284 	strcat(fw_path, chipver_tag);
3285 
3286 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK)
3287 	if (chiprev == 1 || chiprev == 3) {
3288 		int ret = dhd_check_module_b85a();
3289 		if ((chiprev == 1) && (ret < 0)) {
3290 			memset(chipver_tag, 0x00, sizeof(chipver_tag));
3291 			strcat(chipver_tag, "_b85");
3292 			strcat(chipver_tag, "_a1");
3293 		}
3294 	}
3295 
3296 	DHD_ERROR(("%s: chipver_tag %s \n", __FUNCTION__, chipver_tag));
3297 #endif /* defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) */
3298 
3299 #if defined(SUPPORT_MULTIPLE_BOARD_REV)
3300 	if (system_rev >= 10) {
3301 		DHD_ERROR(("----- Board Rev  [%d]-----\n", system_rev));
3302 		strcat(chipver_tag, "_r10");
3303 	}
3304 #endif /* SUPPORT_MULTIPLE_BOARD_REV */
3305 	strcat(nv_path, chipver_tag);
3306 
3307 	return 0;
3308 }
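/*
 * Illustrative result (hypothetical paths): for a 4358 A0 part, a
 * caller-supplied fw_path of ".../fw_bcm4358" becomes ".../fw_bcm4358_a0";
 * nv_path is suffixed the same way, plus "_r10" on boards where
 * system_rev >= 10. The tag is appended to the end of whatever string is
 * passed in, so callers must leave room in their buffers.
 */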
3309 
3310 static int concate_revision_bcm4359(dhd_bus_t *bus, char *fw_path, char *nv_path)
3311 {
3312 	uint32 chip_ver;
3313 	char chipver_tag[10] = {0, };
3314 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
3315 	defined(SUPPORT_BCM4359_MIXED_MODULES)
3316 	int module_type = -1;
3317 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
3318 
3319 	chip_ver = bus->sih->chiprev;
3320 	if (chip_ver == 4) {
3321 		DHD_ERROR(("----- CHIP 4359 B0 -----\n"));
3322 		strncat(chipver_tag, "_b0", strlen("_b0"));
3323 	} else if (chip_ver == 5) {
3324 		DHD_ERROR(("----- CHIP 4359 B1 -----\n"));
3325 		strncat(chipver_tag, "_b1", strlen("_b1"));
3326 	} else if (chip_ver == 9) {
3327 		DHD_ERROR(("----- CHIP 4359 C0 -----\n"));
3328 		strncat(chipver_tag, "_c0", strlen("_c0"));
3329 	} else {
3330 		DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chip_ver));
3331 		return -1;
3332 	}
3333 
3334 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
3335 	defined(SUPPORT_BCM4359_MIXED_MODULES)
3336 	module_type =  dhd_check_module_b90();
3337 
3338 	switch (module_type) {
3339 		case BCM4359_MODULE_TYPE_B90B:
3340 			strcat(fw_path, chipver_tag);
3341 			break;
3342 		case BCM4359_MODULE_TYPE_B90S:
3343 		default:
3344 			/*
3345 			 * If the .cid.info file does not exist, force-load
3346 			 * the B90S FW for the initial MFG boot-up.
3347 			 */
3348 			if (chip_ver == 5) {
3349 				strncat(fw_path, "_b90s", strlen("_b90s"));
3350 			}
3351 			strcat(fw_path, chipver_tag);
3352 			strcat(nv_path, chipver_tag);
3353 			break;
3354 	}
3355 #else /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
3356 	strcat(fw_path, chipver_tag);
3357 	strcat(nv_path, chipver_tag);
3358 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
3359 
3360 	return 0;
3361 }
3362 
3363 #define NVRAM_FEM_MURATA	"_murata"
3364 static int
3365 concate_revision_from_cisinfo(dhd_bus_t *bus, char *fw_path, char *nv_path)
3366 {
3367 	int ret = BCME_OK;
3368 #if defined(SUPPORT_MIXED_MODULES)
3369 #if defined(USE_CID_CHECK)
3370 	char module_type[MAX_VNAME_LEN];
3371 	naming_info_t *info = NULL;
3372 	bool is_murata_fem = FALSE;
3373 
3374 	memset(module_type, 0, sizeof(module_type));
3375 
3376 	if (dhd_check_module_bcm(module_type,
3377 			MODULE_NAME_INDEX_MAX, &is_murata_fem) == BCME_OK) {
3378 		info = dhd_find_naming_info(bus->dhd, module_type);
3379 	} else {
3380 		/* in case the .cid.info file does not exist */
3381 		info = dhd_find_naming_info_by_chip_rev(bus->dhd, &is_murata_fem);
3382 	}
3383 
3384 #ifdef BCM4361_CHIP
3385 	if (bcmstrnstr(nv_path, PATH_MAX,  "_murata", 7)) {
3386 		is_murata_fem = FALSE;
3387 	}
3388 #endif /* BCM4361_CHIP */
3389 
3390 	if (info) {
3391 #ifdef BCM4361_CHIP
3392 		if (is_murata_fem) {
3393 			strncat(nv_path, NVRAM_FEM_MURATA, strlen(NVRAM_FEM_MURATA));
3394 		}
3395 #endif /* BCM4361_CHIP */
3396 		strncat(nv_path, info->nvram_ext, strlen(info->nvram_ext));
3397 		strncat(fw_path, info->fw_ext, strlen(info->fw_ext));
3398 	} else {
3399 		DHD_ERROR(("%s: failed to find extension for nvram and firmware\n", __FUNCTION__));
3400 		ret = BCME_ERROR;
3401 	}
3402 #endif /* USE_CID_CHECK */
3403 #ifdef USE_DIRECT_VID_TAG
3404 	int revid = bus->sih->chiprev;
3405 	unsigned char chipstr[MAX_VID_LEN];
3406 
3407 	memset(chipstr, 0, sizeof(chipstr));
3408 	snprintf(chipstr, sizeof(chipstr), "_4389");
3409 
3410 	/* write chipstr/vid into nvram tag */
3411 	ret = concate_nvram_by_vid(bus, nv_path, chipstr);
3412 	/* write chiprev into FW tag */
3413 	if (ret == BCME_OK) {
3414 		if (revid == 3) {
3415 			strncat(fw_path, A0_REV, strlen(A0_REV));
3416 			DHD_ERROR(("%s: fw_path : %s\n", __FUNCTION__, fw_path));
3417 		} else if (revid == 1) {
3418 			strncat(fw_path, B0_REV, strlen(B0_REV));
3419 			DHD_ERROR(("%s: fw_path : %s\n", __FUNCTION__, fw_path));
3420 		} else {
3421 			DHD_ERROR(("%s: INVALID CHIPREV %d\n", __FUNCTION__, revid));
3422 		}
3423 	}
3424 #endif /* USE_DIRECT_VID_TAG */
3425 #else /* SUPPORT_MIXED_MODULES */
3426 	char chipver_tag[10] = {0, };
3427 
3428 	strcat(fw_path, chipver_tag);
3429 	strcat(nv_path, chipver_tag);
3430 #endif /* SUPPORT_MIXED_MODULES */
3431 
3432 	return ret;
3433 }
3434 
3435 int
3436 concate_revision(dhd_bus_t *bus, char *fw_path, char *nv_path)
3437 {
3438 	int res = 0;
3439 
3440 	if (!bus || !bus->sih) {
3441 		DHD_ERROR(("%s:Bus is Invalid\n", __FUNCTION__));
3442 		return -1;
3443 	}
3444 
3445 	if (!fw_path || !nv_path) {
3446 		DHD_ERROR(("fw_path or nv_path is null.\n"));
3447 		return res;
3448 	}
3449 
3450 	switch (si_chipid(bus->sih)) {
3451 
3452 	case BCM43569_CHIP_ID:
3453 	case BCM4358_CHIP_ID:
3454 		res = concate_revision_bcm4358(bus, fw_path, nv_path);
3455 		break;
3456 	case BCM4355_CHIP_ID:
3457 	case BCM4359_CHIP_ID:
3458 		res = concate_revision_bcm4359(bus, fw_path, nv_path);
3459 		break;
3460 	case BCM4361_CHIP_ID:
3461 	case BCM4347_CHIP_ID:
3462 	case BCM4375_CHIP_ID:
3463 	case BCM4389_CHIP_ID:
3464 		res = concate_revision_from_cisinfo(bus, fw_path, nv_path);
3465 		break;
3466 	default:
3467 		DHD_ERROR(("REVISION SPECIFIC feature is not required\n"));
3468 		return res;
3469 	}
3470 
3471 	return res;
3472 }
3473 #endif /* SUPPORT_MULTIPLE_REVISION */
3474 
3475 uint16
3476 dhd_get_chipid(struct dhd_bus *bus)
3477 {
3478 	if (bus && bus->sih) {
3479 		return (uint16)si_chipid(bus->sih);
3480 	} else if (bus && bus->regs) {
3481 		chipcregs_t *cc = (chipcregs_t *)bus->regs;
3482 		uint w, chipid;
3483 
3484 		/* Set bar0 window to si_enum_base */
3485 		dhdpcie_bus_cfg_set_bar0_win(bus, si_enum_base(0));
3486 
3487 		w = R_REG(bus->osh, &cc->chipid);
3488 		chipid = w & CID_ID_MASK;
3489 
3490 		return (uint16)chipid;
3491 	} else {
3492 		return 0;
3493 	}
3494 }
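/*
 * The fallback path above reads the chipcommon 'chipid' register through
 * the BAR0 window and keeps only the chip-ID field; the remaining bits of
 * that register carry revision/package information and are discarded.
 * Sketch of the extraction (hypothetical register value):
 *
 *   w = R_REG(bus->osh, &cc->chipid);   // e.g. w = 0x00a94359
 *   chipid = w & CID_ID_MASK;           // -> 0x4359
 */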
3495 
3496 /**
3497  * Loads firmware given by caller supplied path and nvram image into PCIe dongle.
3498  *
3499  * BCM_REQUEST_FW specific:
3500  * Given the chip type, determines the file paths to use within /lib/firmware/brcm/ for the
3501  * firmware and nvm of that chip. If the download fails, retries the download with a different nvm file.
3502  *
3503  * BCMEMBEDIMAGE specific:
3504  * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header
3505  * file will be used instead.
3506  *
3507  * @return BCME_OK on success
3508  */
3509 int
3510 dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
3511                           char *pfw_path, char *pnv_path,
3512                           char *pclm_path, char *pconf_path)
3513 {
3514 	int ret;
3515 
3516 	bus->fw_path = pfw_path;
3517 	bus->nv_path = pnv_path;
3518 	bus->dhd->clm_path = pclm_path;
3519 	bus->dhd->conf_path = pconf_path;
3520 
3521 #if defined(SUPPORT_MULTIPLE_REVISION)
3522 	if (concate_revision(bus, bus->fw_path, bus->nv_path) != 0) {
3523 		DHD_ERROR(("%s: failed to concatenate revision\n",
3524 			__FUNCTION__));
3525 		/* Proceed if SUPPORT_MULTIPLE_CHIPS is enabled */
3526 #ifndef SUPPORT_MULTIPLE_CHIPS
3527 		return BCME_BADARG;
3528 #endif /* !SUPPORT_MULTIPLE_CHIPS */
3529 	}
3530 #endif /* SUPPORT_MULTIPLE_REVISION */
3531 
3532 #if defined(DHD_BLOB_EXISTENCE_CHECK)
3533 	dhd_set_blob_support(bus->dhd, bus->fw_path);
3534 #endif /* DHD_BLOB_EXISTENCE_CHECK */
3535 
3536 	DHD_ERROR(("%s: firmware path=%s, nvram path=%s\n",
3537 		__FUNCTION__, bus->fw_path, bus->nv_path));
3538 #if defined(LINUX) || defined(linux)
3539 	dhdpcie_dump_resource(bus);
3540 #endif /* LINUX || linux */
3541 
3542 	ret = dhdpcie_download_firmware(bus, osh);
3543 
3544 	return ret;
3545 }
3546 
3547 void
3548 dhd_set_bus_params(struct dhd_bus *bus)
3549 {
3550 	if (bus->dhd->conf->dhd_poll >= 0) {
3551 		bus->poll = bus->dhd->conf->dhd_poll;
3552 		if (!bus->pollrate)
3553 			bus->pollrate = 1;
3554 		printf("%s: set polling mode %d\n", __FUNCTION__, bus->dhd->conf->dhd_poll);
3555 	}
3556 }
3557 
3558 /**
3559  * Loads firmware given by 'bus->fw_path' into PCIe dongle.
3560  *
3561  * BCM_REQUEST_FW specific:
3562  * Given the chip type, determines the file paths to use within /lib/firmware/brcm/ for the
3563  * firmware and nvm of that chip. If the download fails, retries the download with a different nvm file.
3564  *
3565  * BCMEMBEDIMAGE specific:
3566  * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header
3567  * file will be used instead.
3568  *
3569  * @return BCME_OK on success
3570  */
3571 static int
3572 dhdpcie_download_firmware(struct dhd_bus *bus, osl_t *osh)
3573 {
3574 	int ret = 0;
3575 #if defined(BCM_REQUEST_FW)
3576 	uint chipid = bus->sih->chip;
3577 	uint revid = bus->sih->chiprev;
3578 	char fw_path[64] = "/lib/firmware/brcm/bcm";	/* path to firmware image */
3579 	char nv_path[64];		/* path to nvram vars file */
3580 	bus->fw_path = fw_path;
3581 	bus->nv_path = nv_path;
3582 	switch (chipid) {
3583 	case BCM43570_CHIP_ID:
3584 		bcmstrncat(fw_path, "43570", 5);
3585 		switch (revid) {
3586 		case 0:
3587 			bcmstrncat(fw_path, "a0", 2);
3588 			break;
3589 		case 2:
3590 			bcmstrncat(fw_path, "a2", 2);
3591 			break;
3592 		default:
3593 			DHD_ERROR(("%s: unsupported revid %x\n", __FUNCTION__,
3594 			revid));
3595 			break;
3596 		}
3597 		break;
3598 	default:
3599 		DHD_ERROR(("%s: unsupported device %x\n", __FUNCTION__,
3600 		chipid));
3601 		return 0;
3602 	}
3603 	/* load board specific nvram file */
3604 	snprintf(bus->nv_path, sizeof(nv_path), "%s.nvm", fw_path);
3605 	/* load firmware (append in place; snprintf src/dst may not overlap) */
3606 	bcmstrncat(fw_path, "-firmware.bin", strlen("-firmware.bin"));
3607 #endif /* BCM_REQUEST_FW */
3608 
3609 	DHD_OS_WAKE_LOCK(bus->dhd);
3610 
3611 	dhd_conf_set_path_params(bus->dhd, bus->fw_path, bus->nv_path);
3612 	dhd_set_bus_params(bus);
3613 
3614 	ret = _dhdpcie_download_firmware(bus);
3615 
3616 	DHD_OS_WAKE_UNLOCK(bus->dhd);
3617 	return ret;
3618 } /* dhdpcie_download_firmware */
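/*
 * Illustrative BCM_REQUEST_FW result for a 43570 rev 2 part: the base
 * path "/lib/firmware/brcm/bcm" grows to "/lib/firmware/brcm/bcm43570a2",
 * yielding nv_path "/lib/firmware/brcm/bcm43570a2.nvm" and fw_path
 * "/lib/firmware/brcm/bcm43570a2-firmware.bin".
 */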
3619 
3620 #ifdef BCMINTERNAL
3621 #define PCIE_HYBRIDFW_MAGICNUM		0x434F464Cu
3622 #define PCIE_HYBRIDFW_HOSTOFFSET_MASK	0xFFC00000u
3623 #define PCIE_HYBRIDFW_TYPE_DNGL		0u
3624 #define PCIE_HYBRIDFW_TYPE_HOST		1u
3625 #define PCIE_HYBRIDFW_TYPE_DNGLTBL	2u
3626 #define PCIE_HYBRIDFW_TYPE_HOSTTBL	3u
3627 #define SBtoPCIETranslation2		0xF0
3628 #define SBtoPCIETranslation2Upper	0xF4
3629 #define SBtoPCIETranslation3		0xF8
3630 #define SBtoPCIETranslation3Upper	0xFC
3631 #define SBtoPCIETranslation0		0x100
3632 #define SBtoPCIETranslation1		0x104
3633 #define SBtoPCIETranslation0Upper	0x10C
3634 #define SBtoPCIETranslation1Upper	0x110
3635 
3636 /* Get length of each portion of hybrid fw binary from the header */
3637 static int
3638 dhdpcie_hybridfw_get_next_block(char * fptr, int *fsize, uint32 *type, uint32 *len)
3639 {
3640 	struct portion_hdr {
3641 		uint32 type;
3642 		uint32 len;
3643 	} hdr;
3644 	int ret;
3645 
3646 	/* read and verify header */
3647 	if (*fsize <= sizeof(hdr)) {
3648 		return BCME_BADLEN;
3649 	}
3650 
3651 	ret = dhd_os_get_image_block((char *)&hdr, sizeof(hdr), fptr);
3652 	if (ret <= 0) {
3653 		return BCME_ERROR;
3654 	}
3655 
3656 	*fsize -= sizeof(hdr);
3657 	*type = ltoh32(hdr.type);
3658 	*len = ltoh32(hdr.len);
3659 
3660 	if ((*len > (uint32)*fsize) || ((int)*len < 0)) {
3661 		return BCME_BADLEN;
3662 	}
3663 
3664 	DHD_INFO(("%s Found section %u with length %u\n", __FUNCTION__, *type, *len));
3665 
3666 	return BCME_OK;
3667 }
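/*
 * On-disk layout implied by the parser above (all fields little-endian):
 *
 *   uint32 magic;              // PCIE_HYBRIDFW_MAGICNUM, checked by caller
 *   repeated {
 *       uint32 type;           // PCIE_HYBRIDFW_TYPE_xxx
 *       uint32 len;            // payload length in bytes
 *       uint8  payload[len];
 *   }
 *
 * Each call consumes one {type, len} header and leaves the file pointer
 * positioned at the start of the corresponding payload.
 */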
3668 
3669 /* Replace host offload functions' pointers */
3670 static int
3671 dhdpcie_hybridfw_ptrrpl(char *fw, uint fw_sz, uint32 *jmptbl, uint jmptbl_sz,
3672                         dmaaddr_t hbuf_pa, uint32 hbuf_len)
3673 {
3674 	uint32 *p_ptr;
3675 	uint32 host_addr;
3676 	int ret = BCME_OK;
3677 
3678 	if (jmptbl_sz % 4) {
3679 		DHD_ERROR(("%s table size %u not 4 bytes aligned\n", __FUNCTION__, jmptbl_sz));
3680 		return BCME_ERROR;
3681 	}
3682 
3683 	host_addr = PCIEDEV_ARM_ADDR(PHYSADDRLO(hbuf_pa), PCIEDEV_TRANS_WIN_HOSTMEM);
3684 	for (; jmptbl_sz > 0; jmptbl_sz -= 4, jmptbl++) {
3685 		if (*jmptbl >= fw_sz) {
3686 			DHD_ERROR(("%s offset %u >= fw size %u\n", __FUNCTION__, *jmptbl, fw_sz));
3687 			ret = BCME_ERROR;
3688 			break;
3689 		}
3690 		p_ptr = (uint32 *)(fw + *jmptbl);
3691 		*p_ptr &= ~(uint32)PCIE_HYBRIDFW_HOSTOFFSET_MASK;
3692 		if (*p_ptr > hbuf_len) {
3693 			DHD_ERROR(("%s function offset %x exceeds host buffer len %x\n",
3694 				__FUNCTION__, *p_ptr, hbuf_len));
3695 			ret = BCME_ERROR;
3696 			break;
3697 		}
3698 		*p_ptr += host_addr;
3699 	}
3700 
3701 	return ret;
3702 }
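/*
 * Worked example with hypothetical values: a jump-table entry of 0x1200
 * points at a uint32 in the dongle image holding 0xffc00040, i.e. a host
 * offset of 0x40 tagged with PCIE_HYBRIDFW_HOSTOFFSET_MASK. The loop above
 * clears the tag (leaving 0x40), range-checks it against hbuf_len and adds
 * host_addr (the ARM-visible translation-window address of the host
 * buffer), leaving the dongle with a directly usable pointer.
 */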
3703 
3704 /* configure a backplane-to-PCIe translation window */
3705 static void
3706 dhdpcie_sbtopcie_translation_config(struct dhd_bus *bus, int bp_window, dmaaddr_t addr)
3707 {
3708 	uint32 trans_reg_offset, trans_u_reg_offset;
3709 
3710 	switch (bp_window) {
3711 		case PCIEDEV_TRANS_WIN_0:
3712 			trans_reg_offset = SBtoPCIETranslation0;
3713 			trans_u_reg_offset = SBtoPCIETranslation0Upper;
3714 			break;
3715 
3716 		case PCIEDEV_TRANS_WIN_1:
3717 			trans_reg_offset = SBtoPCIETranslation1;
3718 			trans_u_reg_offset = SBtoPCIETranslation1Upper;
3719 			break;
3720 
3721 		case PCIEDEV_TRANS_WIN_2:
3722 			trans_reg_offset = SBtoPCIETranslation2;
3723 			trans_u_reg_offset = SBtoPCIETranslation2Upper;
3724 			break;
3725 
3726 		case PCIEDEV_TRANS_WIN_3:
3727 			trans_reg_offset = SBtoPCIETranslation3;
3728 			trans_u_reg_offset = SBtoPCIETranslation3Upper;
3729 			break;
3730 
3731 		default:
3732 			DHD_ERROR(("%s Invalid bp translation window %d\n",
3733 				__FUNCTION__, bp_window));
3734 			return;
3735 	}
3736 
3737 	si_corereg(bus->sih, bus->sih->buscoreidx, trans_reg_offset, ~0,
3738 		((PHYSADDRLO(addr) & PCIEDEV_HOSTADDR_MAP_WIN_MASK) | 0xC));
3739 	si_corereg(bus->sih, bus->sih->buscoreidx, trans_u_reg_offset, ~0, PHYSADDRHI(addr));
3740 }
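/*
 * Each SBtoPCIETranslation window is programmed as a 64-bit pair: the
 * lower register takes the host physical address masked to the window
 * granularity (PCIEDEV_HOSTADDR_MAP_WIN_MASK) OR'ed with low-order control
 * bits (the 0xC above, kept as-is from the original code), and the Upper
 * register takes the high 32 bits of the host address.
 */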
3741 
3742 /**
3743  * hybrid firmware download handler
3744  *
3745  * Parse, prepare and download a hybrid firmware
3746  * - Identify a hybrid firmware
3747  * - Place the host offload portion in an allocated DMA consistent buffer
3748  * - Modify the host portion's function pointers according to the info table
3749  */
3750 static int
3751 dhdpcie_hybridfw_download(struct dhd_bus *bus, char *fp)
3752 {
3753 	uint32 magic_num;
3754 	int ret = BCME_OK;
3755 	dhd_dma_buf_t *hstfw = &bus->hostfw_buf;
3756 	char *dnglfw = NULL, *dngltbl = NULL, *hsttbl = NULL;
3757 	int dnglfw_sz = 0, dngltbl_sz = 0, hsttbl_sz = 0;
3758 	int fsize;
3759 	int offset = 0;
3760 	uint32 type = 0, len = 0;
3761 	void * ptr = NULL;
3762 
3763 	fsize = dhd_os_get_image_size(fp);
3764 
3765 	/* Verify magic number */
3766 	if (fsize < sizeof(magic_num)) {
3767 		return BCME_UNSUPPORTED;
3768 	}
3769 	ret = dhd_os_get_image_block((char *)&magic_num, sizeof(magic_num), fp);
3770 	if (ret <= 0) {
3771 		return BCME_ERROR;
3772 	}
3773 	magic_num = ltoh32(magic_num);
3774 	if (magic_num != PCIE_HYBRIDFW_MAGICNUM) {
3775 		return BCME_UNSUPPORTED;
3776 	}
3777 	fsize -= sizeof(magic_num);
3778 
3779 	do {
3780 		ret = dhdpcie_hybridfw_get_next_block(fp, &fsize, &type, &len);
3781 		if (ret != BCME_OK) {
3782 			break;
3783 		}
3784 
3785 		if (len == 0) {
3786 			continue;
3787 		}
3788 
3789 		if ((ptr = MALLOC(bus->dhd->osh, len)) == NULL) {
3790 			ret = BCME_NOMEM;
3791 			break;
3792 		}
3793 
3794 		len = dhd_os_get_image_block(ptr, len, fp);
3795 		if (len <= 0) {
3796 			MFREE(bus->dhd->osh, ptr, len);
3797 			ret = BCME_ERROR;
3798 			break;
3799 		}
3800 		fsize -= len;
3801 
3802 		switch (type) {
3803 			case PCIE_HYBRIDFW_TYPE_DNGL:
3804 				/* cannot have more than one RAM image block */
3805 				if (dnglfw_sz) {
3806 					MFREE(bus->dhd->osh, ptr, len);
3807 					ret = BCME_ERROR;
3808 					break;
3809 				}
3810 
3811 				/* RAM portion of the FW image */
3812 				dnglfw = ptr;
3813 				dnglfw_sz = len;
3814 
3815 				if ((uint32)len > bus->ramsize) {
3816 					ret = BCME_BADLEN;
3817 					break;
3818 				}
3819 				break;
3820 
3821 			case PCIE_HYBRIDFW_TYPE_HOST:
3822 				/* Host portion of the FW image.
3823 				 * If a -hostmem- fw has already been loaded and its
3824 				 * buffer can accommodate the new firmware host portion,
3825 				 * reuse the allocated buffer. If the buffer is too
3826 				 * small, or on a freshly loaded dhd, allocate a new
3827 				 * coherent buffer.
3828 				 */
3829 				if (hstfw->va) {
3830 					if (hstfw->len >= len) {
3831 						hstfw->len = len;
3832 					} else {
3833 						DMA_FREE_CONSISTENT(bus->dhd->osh, hstfw->va,
3834 							hstfw->_alloced, hstfw->pa, hstfw->dmah);
3835 						memset(hstfw, 0, sizeof(*hstfw));
3836 					}
3837 				}
3838 
3839 				if (hstfw->va == NULL) {
3840 					hstfw->len = len;
3841 					hstfw->va = DMA_ALLOC_CONSISTENT(bus->dhd->osh, hstfw->len,
3842 						4, &hstfw->_alloced, &hstfw->pa, &hstfw->dmah);
3843 					if (hstfw->va == NULL) {
3844 						MFREE(bus->dhd->osh, ptr, len);
3845 						ret = BCME_NOMEM;
3846 						break;
3847 					}
3848 				}
3849 
3850 				ret = memcpy_s(hstfw->va, hstfw->len, ptr, len);
3851 				MFREE(bus->dhd->osh, ptr, len);
3852 				break;
3853 
3854 			case PCIE_HYBRIDFW_TYPE_DNGLTBL:
3855 				/* cannot have more than one RAM image relocation table */
3856 				if (dngltbl) {
3857 					MFREE(bus->dhd->osh, ptr, len);
3858 					ret = BCME_ERROR;
3859 					break;
3860 				}
3861 
3862 				/* RAM image relocation information */
3863 				dngltbl = ptr;
3864 				dngltbl_sz = len;
3865 
3866 				/* RAM image should be included before RAM reloc info */
3867 				if ((dnglfw == NULL) || (hstfw->va == NULL)) {
3868 					ret = BCME_ERROR;
3869 					break;
3870 				}
3871 				/* Store the fw assumed host memory base */
3872 				bus->hostfw_base = *(uint32 *)(dnglfw + *(uint32 *)dngltbl);
3873 				bus->hostfw_base &= PCIE_HYBRIDFW_HOSTOFFSET_MASK;
3874 
3875 				DHD_INFO(("%s FW assumed host base address is %08x\n",
3876 					__FUNCTION__, bus->hostfw_base));
3877 
3878 				ret = dhdpcie_hybridfw_ptrrpl(dnglfw, dnglfw_sz,
3879 					(uint32 *)dngltbl, dngltbl_sz, hstfw->pa, hstfw->len);
3880 				break;
3881 
3882 		case PCIE_HYBRIDFW_TYPE_HOSTTBL:
3883 				/* cannot have more than one host image relocation info */
3884 				if (hsttbl) {
3885 					MFREE(bus->dhd->osh, ptr, len);
3886 					ret = BCME_ERROR;
3887 					break;
3888 				}
3889 				/* Host image relocation information */
3890 				hsttbl = ptr;
3891 				hsttbl_sz = len;
3892 
3893 				/* Host image should be included before host reloc info */
3894 				if (hstfw->va == NULL) {
3895 					ret = BCME_ERROR;
3896 					break;
3897 				}
3898 				ret = dhdpcie_hybridfw_ptrrpl(hstfw->va, hstfw->len,
3899 					(uint32 *)hsttbl, hsttbl_sz, hstfw->pa, hstfw->len);
3900 				break;
3901 
3902 			default:
3903 				ret = BCME_ERROR;
3904 				break;
3905 		}
3906 
3907 	} while (!ret && (fsize > 0));
3908 
3909 	if (ret != BCME_OK) {
3910 		DHD_ERROR(("%s: err:%d, fsize:%d, t:%d, l:%d\n",
3911 			__FUNCTION__, ret, fsize, type, len));
3912 		goto exit;
3913 	}
3914 
3915 	if (dnglfw == NULL) {
3916 		DHD_ERROR(("%s: Dongle image should be present in combo file\n",
3917 			__FUNCTION__));
3918 		ret = BCME_ERROR;
3919 		goto exit;
3920 	}
3921 
3922 	if (hstfw->va) {
3923 		OSL_CACHE_FLUSH((void *)hstfw->va, hstfw->len);
3924 	}
3925 
3926 	/* for CR4/CA7 store the reset instruction to be written at address 0 */
3927 	if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) || si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
3928 		bus->resetinstr = *(((uint32*)dnglfw));
3929 		/* Add start of RAM address to the address given by user */
3930 		offset += bus->dongle_ram_base;
3931 	}
3932 
3933 	ret = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)dnglfw, dnglfw_sz);
3934 	if (ret) {
3935 		DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
3936 			__FUNCTION__, ret, dnglfw_sz, offset));
3937 		goto exit;
3938 	}
3939 
3940 	/* Configure the backplane-to-PCIe register so the ARM can access the host offload area */
3941 	bus->bp_base = PCIEDEV_ARM_ADDR(PHYSADDRLO(hstfw->pa), PCIEDEV_TRANS_WIN_HOSTMEM);
3942 	dhdpcie_sbtopcie_translation_config(bus, PCIEDEV_TRANS_WIN_HOSTMEM, hstfw->pa);
3943 
3944 	/* Check if the buffer is crossing 32MB Window */
3945 	if (((bus->bp_base + hstfw->len) & PCIEDEV_ARM_ADDR_SPACE) <
3946 		(bus->bp_base & PCIEDEV_ARM_ADDR_SPACE)) {
3947 		DHD_ERROR(("Host memory crosses a 32MB window boundary."
3948 			" The entire hostmem block must lie within one contiguous 32MB window"));
3949 		ret = BCME_ERROR;
3950 		goto exit;
3951 	}
3952 
3953 	DHD_ERROR(("%s %d bytes host offload firmware placed at pa %08x %08x\n",
3954 		__FUNCTION__, hstfw->len,
3955 		(uint)PHYSADDRHI(hstfw->pa), (uint)PHYSADDRLO(hstfw->pa)));
3956 
3957 exit:
3958 	if (dnglfw) {
3959 		MFREE(bus->dhd->osh, dnglfw, dnglfw_sz);
3960 	}
3961 
3962 	if (dngltbl) {
3963 		MFREE(bus->dhd->osh, dngltbl, dngltbl_sz);
3964 	}
3965 
3966 	if (hsttbl) {
3967 		MFREE(bus->dhd->osh, hsttbl, hsttbl_sz);
3968 	}
3969 
3970 	if (ret && hstfw->va) {
3971 		DMA_FREE_CONSISTENT(bus->dhd->osh, hstfw->va, hstfw->_alloced,
3972 				hstfw->pa, hstfw->dmah);
3973 		memset(hstfw, 0, sizeof(*hstfw));
3974 	}
3975 
3976 	return ret;
3977 }
3978 #endif /* BCMINTERNAL */
3979 
3980 /**
3981  * Downloads a file containing firmware into dongle memory. In case of a .bea file, the DHD
3982  * is updated with the event logging partitions within that file as well.
3983  *
3984  * @param pfw_path    Path to .bin or .bea file
3985  */
3986 static int
3987 dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path)
3988 {
3989 	int bcmerror = BCME_ERROR;
3990 	int offset = 0;
3991 	int len = 0;
3992 	bool store_reset;
3993 	char *imgbuf = NULL; /**< XXX a file pointer, contradicting its name and type */
3994 	uint8 *memblock = NULL, *memptr = NULL;
3995 #ifdef CHECK_DOWNLOAD_FW
3996 	uint8 *memptr_tmp = NULL; // terence: check downloaded firmware is correct
3997 #endif
3998 	int offset_end = bus->ramsize;
3999 	uint32 file_size = 0, read_len = 0;
4000 
4001 #if defined(CACHE_FW_IMAGES)
4002 	int buf_offset, total_len, residual_len;
4003 	char * dnld_buf;
4004 #endif /* CACHE_FW_IMAGE */
4005 
4006 #if defined(linux) || defined(LINUX)
4007 #if defined(DHD_FW_MEM_CORRUPTION)
4008 	if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
4009 		dhd_tcm_test_enable = TRUE;
4010 	} else {
4011 		dhd_tcm_test_enable = FALSE;
4012 	}
4013 #endif /* DHD_FW_MEM_CORRUPTION */
4014 	DHD_ERROR(("%s: dhd_tcm_test_enable %u\n", __FUNCTION__, dhd_tcm_test_enable));
4015 	/* TCM check */
4016 	if (dhd_tcm_test_enable && !dhd_bus_tcm_test(bus)) {
4017 		DHD_ERROR(("dhd_bus_tcm_test failed\n"));
4018 		bcmerror = BCME_ERROR;
4019 		goto err;
4020 	}
4021 #endif /* LINUX || linux */
4022 #ifndef DHD_EFI
4023 	DHD_ERROR(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
4024 #endif /* DHD_EFI */
4025 
4026 	/* Should succeed in opening image if it is actually given through registry
4027 	 * entry or in module param.
4028 	 */
4029 	imgbuf = dhd_os_open_image1(bus->dhd, pfw_path);
4030 	if (imgbuf == NULL) {
4031 		printf("%s: Open firmware file failed %s\n", __FUNCTION__, pfw_path);
4032 		goto err;
4033 	}
4034 
4035 	file_size = dhd_os_get_image_size(imgbuf);
4036 	if (!file_size) {
4037 		DHD_ERROR(("%s: failed to get file size\n", __FUNCTION__));
4038 		goto err;
4039 	}
4040 
4041 #ifdef BCMINTERNAL
4042 	/* dhdpcie_hybridfw_download() returns BCME_UNSUPPORTED if the binary
4043 	 * does not have a recognizable format; in that case fall through to
4044 	 * the regular download routine. For BCME_OK or any other error,
4045 	 * return and propagate the result.
4046 	 */
4047 	bcmerror = dhdpcie_hybridfw_download(bus, imgbuf);
4048 	if (bcmerror != BCME_UNSUPPORTED) {
4049 		goto err;
4050 	}
4051 
4052 	/* Close and re-open the image file to reset the file pointer.
4053 	 * Needed because dhdpcie_hybridfw_download() already read 4 bytes from the file.
4054 	 */
4055 	dhd_os_close_image1(bus->dhd, imgbuf);
4056 	imgbuf = dhd_os_open_image1(bus->dhd, pfw_path);
4057 	if (imgbuf == NULL) {
4058 		goto err;
4059 	}
4060 #endif /* BCMINTERNAL */
4061 
4062 	memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
4063 	if (memblock == NULL) {
4064 		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
4065 		bcmerror = BCME_NOMEM;
4066 		goto err;
4067 	}
4068 #ifdef CHECK_DOWNLOAD_FW
4069 	if (bus->dhd->conf->fwchk) {
4070 		memptr_tmp = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
4071 		if (memptr_tmp == NULL) {
4072 			DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
4073 			goto err;
4074 		}
4075 	}
4076 #endif
4077 	if ((uint32)(uintptr)memblock % DHD_SDALIGN) {
4078 		memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
4079 	}
4080 
4081 	/* check if CR4/CA7 */
4082 	store_reset = (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
4083 			si_setcore(bus->sih, ARMCA7_CORE_ID, 0));
4084 #if defined(CACHE_FW_IMAGES)
4085 	total_len = bus->ramsize;
4086 	dhd_os_close_image(imgbuf);
4087 	imgbuf = NULL;
4088 	buf_offset = 0;
4089 	bcmerror = dhd_get_download_buffer(bus->dhd, pfw_path, FW, &dnld_buf, &total_len);
4090 	if (bcmerror != BCME_OK) {
4091 		DHD_ERROR(("%s: dhd_get_download_buffer failed (%d)\n", __FUNCTION__, bcmerror));
4092 		goto err;
4093 	}
4094 	residual_len = total_len;
4095 	/* Download image with MEMBLOCK size */
4096 	while (residual_len) {
4097 		len = MIN(residual_len, MEMBLOCK);
4098 		memcpy(memptr, dnld_buf + buf_offset, len);
4099 		residual_len -= len;
4100 		buf_offset += len;
4101 #else
4102 	/* Download image with MEMBLOCK size */
4103 	while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, imgbuf))) {
4104 		if (len < 0) {
4105 			DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len));
4106 			bcmerror = BCME_ERROR;
4107 			goto err;
4108 		}
4109 #endif /* CACHE_FW_IMAGE */
4110 
4111 		read_len += len;
4112 		if (read_len > file_size) {
4113 			DHD_ERROR(("%s: WARNING! reading beyond EOF, len=%d; read_len=%u;"
4114 				" file_size=%u; truncating len to %d\n", __FUNCTION__,
4115 				len, read_len, file_size, (len - (read_len - file_size))));
4116 			len -= (read_len - file_size);
4117 		}
4118 
4119 		/* if address is 0, store the reset instruction to be written at address 0 */
4120 		if (store_reset) {
4121 			ASSERT(offset == 0);
4122 			bus->resetinstr = *(((uint32*)memptr));
4123 			/* Add start of RAM address to the address given by user */
4124 			offset += bus->dongle_ram_base;
4125 			offset_end += offset;
4126 			store_reset = FALSE;
4127 		}
4128 
4129 		bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len);
4130 		if (bcmerror) {
4131 			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
4132 				__FUNCTION__, bcmerror, MEMBLOCK, offset));
4133 			goto err;
4134 		}
4135 
4136 #ifdef CHECK_DOWNLOAD_FW
4137 		if (bus->dhd->conf->fwchk) {
4138 			bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset, memptr_tmp, len);
4139 			if (bcmerror) {
4140 				DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
4141 				        __FUNCTION__, bcmerror, MEMBLOCK, offset));
4142 				goto err;
4143 			}
4144 			if (memcmp(memptr_tmp, memptr, len)) {
4145 				DHD_ERROR(("%s: Downloaded image is corrupted.\n", __FUNCTION__));
4146 				goto err;
4147 			} else
4148 				DHD_INFO(("%s: Download, Upload and compare succeeded.\n", __FUNCTION__));
4149 		}
4150 #endif
4151 		offset += MEMBLOCK;
4152 
4153 		if (offset >= offset_end) {
4154 			DHD_ERROR(("%s: invalid address access to %x (offset end: %x)\n",
4155 				__FUNCTION__, offset, offset_end));
4156 			bcmerror = BCME_ERROR;
4157 			goto err;
4158 		}
4159 
4160 		if (read_len >= file_size) {
4161 			break;
4162 		}
4163 	}
4164 err:
4165 	if (memblock) {
4166 		MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
4167 #ifdef CHECK_DOWNLOAD_FW
4168 		if (memptr_tmp)
4169 			MFREE(bus->dhd->osh, memptr_tmp, MEMBLOCK + DHD_SDALIGN);
4170 #endif
4171 	}
4172 
4173 	if (imgbuf) {
4174 		dhd_os_close_image1(bus->dhd, imgbuf);
4175 	}
4176 
4177 	return bcmerror;
4178 } /* dhdpcie_download_code_file */
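/*
 * Note on the DHD_SDALIGN handling above: the block buffer is
 * over-allocated by DHD_SDALIGN bytes and memptr is rounded up to the next
 * aligned address. Illustrative arithmetic (hypothetical values, with
 * DHD_SDALIGN = 32):
 *
 *   memblock = 0x1014                          // not 32-byte aligned
 *   memptr   = 0x1014 + (32 - (0x1014 % 32))   // -> 0x1020
 */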
4179 
4180 #ifdef CUSTOMER_HW4_DEBUG
4181 #define MIN_NVRAMVARS_SIZE 128
4182 #endif /* CUSTOMER_HW4_DEBUG */
4183 
4184 static int
4185 dhdpcie_download_nvram(struct dhd_bus *bus)
4186 {
4187 	int bcmerror = BCME_ERROR;
4188 	uint len;
4189 	char * memblock = NULL;
4190 	char *bufp;
4191 	char *pnv_path;
4192 	bool nvram_file_exists;
4193 	bool nvram_uefi_exists = FALSE;
4194 	bool local_alloc = FALSE;
4195 	pnv_path = bus->nv_path;
4196 
4197 	nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0'));
4198 
4199 	/* First try UEFI */
4200 	len = MAX_NVRAMBUF_SIZE;
4201 	dhd_get_download_buffer(bus->dhd, NULL, NVRAM, &memblock, (int *)&len);
4202 
4203 	/* If UEFI empty, then read from file system */
4204 	if ((len <= 0) || (memblock == NULL)) {
4205 
4206 		if (nvram_file_exists) {
4207 			len = MAX_NVRAMBUF_SIZE;
4208 			dhd_get_download_buffer(bus->dhd, pnv_path, NVRAM, &memblock, (int *)&len);
4209 			if ((len <= 0 || len > MAX_NVRAMBUF_SIZE)) {
4210 				goto err;
4211 			}
4212 		}
4213 #ifdef BCM_ROUTER_DHD
4214 		else if (bus->nvram_params_len) {
4215 			memblock = MALLOCZ(bus->dhd->osh, MAX_NVRAMBUF_SIZE);
4216 			if (memblock == NULL) {
4217 				DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
4218 					__FUNCTION__, MAX_NVRAMBUF_SIZE));
4219 				goto err;
4220 			}
4221 			local_alloc = TRUE;
4222 			/* nvram is a series of null-terminated strings; cannot use strlen */
4223 			len = bus->nvram_params_len;
4224 			ASSERT(len <= MAX_NVRAMBUF_SIZE);
4225 			memcpy(memblock, bus->nvram_params, len);
4226 		}
4227 #endif /* BCM_ROUTER_DHD */
4228 		else {
4229 			/* For SROM OTP no external file or UEFI required */
4230 			bcmerror = BCME_OK;
4231 		}
4232 	} else {
4233 		nvram_uefi_exists = TRUE;
4234 	}
4235 
4236 	DHD_ERROR(("%s: dhd_get_download_buffer len %d\n", __FUNCTION__, len));
4237 
4238 	if (len > 0 && len <= MAX_NVRAMBUF_SIZE && memblock != NULL) {
4239 		bufp = (char *) memblock;
4240 
4241 #ifdef DHD_EFI
4242 		dhd_insert_random_mac_addr(bus->dhd, bufp, &len);
4243 
4244 #endif /* DHD_EFI */
4245 
4246 #ifdef CACHE_FW_IMAGES
4247 		if (bus->processed_nvram_params_len) {
4248 			len = bus->processed_nvram_params_len;
4249 		}
4250 
4251 		if (!bus->processed_nvram_params_len) {
4252 			bufp[len] = 0;
4253 			if (nvram_uefi_exists || nvram_file_exists) {
4254 				len = process_nvram_vars(bufp, len);
4255 				bus->processed_nvram_params_len = len;
4256 			}
4257 		} else
4258 #else
4259 		{
4260 			bufp[len] = 0;
4261 			if (nvram_uefi_exists || nvram_file_exists) {
4262 				len = process_nvram_vars(bufp, len);
4263 			}
4264 		}
4265 #endif /* CACHE_FW_IMAGES */
4266 
4267 		DHD_ERROR(("%s: process_nvram_vars len %d\n", __FUNCTION__, len));
4268 #ifdef CUSTOMER_HW4_DEBUG
4269 		if (len < MIN_NVRAMVARS_SIZE) {
4270 			DHD_ERROR(("%s: invalid nvram size in process_nvram_vars \n",
4271 				__FUNCTION__));
4272 			bcmerror = BCME_ERROR;
4273 			goto err;
4274 		}
4275 #endif /* CUSTOMER_HW4_DEBUG */
4276 
4277 		if (len % 4) {
4278 			len += 4 - (len % 4);
4279 		}
4280 		bufp += len;
4281 		*bufp++ = 0;
4282 		if (len)
4283 			bcmerror = dhdpcie_downloadvars(bus, memblock, len + 1);
4284 		if (bcmerror) {
4285 			DHD_ERROR(("%s: error downloading vars: %d\n",
4286 				__FUNCTION__, bcmerror));
4287 		}
4288 	}
4289 
4290 err:
4291 	if (memblock) {
4292 		if (local_alloc) {
4293 			MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE);
4294 		} else {
4295 			dhd_free_download_buffer(bus->dhd, memblock, MAX_NVRAMBUF_SIZE);
4296 		}
4297 	}
4298 
4299 	return bcmerror;
4300 }
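/*
 * Downloaded NVRAM image format, as assembled above (illustrative):
 * process_nvram_vars() packs the file into a series of null-terminated
 * "name=value" strings, the length is padded up to a 4-byte boundary, and
 * a final terminating NUL is appended, e.g.
 *
 *   "boardtype=0x062b\0boardrev=0x1203\0...\0\0"
 *
 * dhdpcie_downloadvars() is then handed len + 1 so the closing NUL is
 * included in the download.
 */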
4301 
4302 #if defined(BCMEMBEDIMAGE) && defined (BCM_ROUTER_DHD)
4303 
4304 #ifdef DLIMAGE_43602a1
4305 #define CHIPID_43602            BCM43602_CHIP_ID
4306 #define CHIPID_43462            BCM43462_CHIP_ID
4307 #define CHIPID_43522            BCM43522_CHIP_ID
4308 #define CHIP_43602_CHIPREV_A0   0
4309 #define CHIP_43602_CHIPREV_A1   1
4310 #define CHIP_43602_PKG_OPT      1
4311 #endif
4312 
4313 #define CHIPID_NONE            -1
4314 
4315 struct fd_chip_image
4316 {
4317 	unsigned char *dlarray;
4318 	int dlimagesize;
4319 	char *dlimagename;
4320 	char *dlimagever;
4321 	char *dlimagedate;
4322 } static chip_dl_image_array[] __initdata =
4323 {
4324 #ifdef DLIMAGE_43602a1
4325 	{dlarray_43602a1, sizeof(dlarray_43602a1), dlimagename_43602a1,
4326 	dlimagever_43602a1, dlimagedate_43602a1},
4327 #endif
4328 	/* {image attributes for other chips, only if image is compiled} */
4329 };
4330 
4331 enum chip_image_rev
4332 {
4333 #ifdef DLIMAGE_43602a1
4334 	CHIP_43602_A1_CHIP_IMAGE,
4335 #endif
4336 	/* index in the above array */
4337 };
4338 
4339 struct chip_image_map
4340 {
4341 	uint32 chipid;
4342 	uint32 chiprev;
4343 	uint32 chippkg;
4344 	uint32 image_idx;
4345 } static chip_image_index_map_table [] __initdata  =
4346 {
4347 #ifdef DLIMAGE_43602a1
4348 	{CHIPID_43602, CHIP_43602_CHIPREV_A1, CHIP_43602_PKG_OPT, CHIP_43602_A1_CHIP_IMAGE},
4349 	{CHIPID_43462, CHIP_43602_CHIPREV_A1, CHIP_43602_PKG_OPT, CHIP_43602_A1_CHIP_IMAGE},
4350 	{CHIPID_43522, CHIP_43602_CHIPREV_A1, CHIP_43602_PKG_OPT, CHIP_43602_A1_CHIP_IMAGE},
4351 #endif
4352 	/* {for a given chipid, chiprev, chippkg, what is the index (the above enum)} */
4353 	{CHIPID_NONE, 0, 0, 0} /* CHIPID_NONE is -1, used to mark end of list */
4354 };
4355 
4356 static void  __init select_fd_image(
4357 		struct dhd_bus *bus, unsigned char **p_dlarray,
4358 		char **p_dlimagename, char **p_dlimagever,
4359 		char **p_dlimagedate, int *image_size) {
4360 
4361 	uint32 chipid, chiprev, chippkg_opt;
4362 	int image_index;
4363 	struct chip_image_map *p_image_index;
4364 
4365 	chipid = 0;
4366 	image_index = -1;
4367 	p_image_index = &chip_image_index_map_table[0];
4368 	while (chipid != CHIPID_NONE) {
4369 		chipid = p_image_index->chipid;
4370 		chiprev = p_image_index->chiprev;
4371 		chippkg_opt = p_image_index->chippkg;
4372 
4373 		if ((chipid == bus->sih->chip) && (chiprev == bus->sih->chiprev) &&
4374 			(chippkg_opt == bus->sih->chippkg)) {
4375 			image_index = p_image_index->image_idx;
4376 			break;
4377 		}
4378 		p_image_index++;
4379 	}
4380 
4381 	if (image_index != -1) {
4382 		*p_dlarray     = chip_dl_image_array[image_index].dlarray;
4383 		*p_dlimagename = chip_dl_image_array[image_index].dlimagename;
4384 		*p_dlimagever  = chip_dl_image_array[image_index].dlimagever;
4385 		*p_dlimagedate = chip_dl_image_array[image_index].dlimagedate;
4386 		*image_size    = chip_dl_image_array[image_index].dlimagesize;
4387 	} else {
4388 		*p_dlarray     = 0;
4389 		DHD_ERROR(("####################################################################\n"
4390 			"# %s: Dongle image not available for chipid = 0x%x"
4391 			"  chiprev = %d  chippkg = %d\n"
4392 			"####################################################################\n",
4393 			__FUNCTION__, bus->sih->chip, bus->sih->chiprev, bus->sih->chippkg));
4394 	}
4395 }
4396 #endif /* defined(BCMEMBEDIMAGE) && defined (BCM_ROUTER_DHD) */
4397 
4398 #ifdef BCMEMBEDIMAGE
4399 int
4400 dhdpcie_download_code_array(struct dhd_bus *bus)
4401 {
4402 	int bcmerror = -1;
4403 	int offset = 0;
4404 	unsigned char *p_dlarray  = NULL;
4405 	unsigned int dlarray_size = 0;
4406 	unsigned int downloaded_len, remaining_len, len;
4407 	char *p_dlimagename, *p_dlimagever, *p_dlimagedate;
4408 	uint8 *memblock = NULL, *memptr;
4409 
4410 	downloaded_len = 0;
4411 	remaining_len = 0;
4412 	len = 0;
4413 
4414 #ifdef DHD_EFI
4415 	p_dlarray = rtecdc_fw_arr;
4416 	dlarray_size = sizeof(rtecdc_fw_arr);
4417 #else
4418 #ifndef BCM_ROUTER_DHD
4419 	p_dlarray = dlarray;
4420 	dlarray_size = sizeof(dlarray);
4421 	p_dlimagename = dlimagename;
4422 	p_dlimagever  = dlimagever;
4423 	p_dlimagedate = dlimagedate;
4424 #else
4425 	select_fd_image(bus, &p_dlarray, &p_dlimagename, &p_dlimagever,
4426 		&p_dlimagedate, &dlarray_size);
4427 #endif /* endif for BCM_ROUTER_DHD */
4428 #endif /* DHD_EFI */
4429 
4430 #ifndef DHD_EFI
4431 	if ((p_dlarray == NULL) || (dlarray_size == 0) || (dlarray_size > bus->ramsize) ||
4432 		(p_dlimagename == NULL) || (p_dlimagever == NULL) || (p_dlimagedate == NULL))
4433 		goto err;
4434 #endif /* DHD_EFI */
4435 
4436 	memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
4437 	if (memblock == NULL) {
4438 		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
4439 		goto err;
4440 	}
4441 	if ((uint32)(uintptr)memblock % DHD_SDALIGN)
4442 		memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
4443 
4444 	while (downloaded_len < dlarray_size) {
4445 		remaining_len = dlarray_size - downloaded_len;
4446 		if (remaining_len >= MEMBLOCK)
4447 			len = MEMBLOCK;
4448 		else
4449 			len = remaining_len;
4450 
4451 		memcpy(memptr, (p_dlarray + downloded_len), len);
4452 		/* check if CR4/CA7 */
4453 		if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
4454 			si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
4455 			/* if address is 0, store the reset instruction to be written in 0 */
4456 			if (offset == 0) {
4457 				bus->resetinstr = *(((uint32*)memptr));
4458 				/* Add start of RAM address to the address given by user */
4459 				offset += bus->dongle_ram_base;
4460 			}
4461 		}
4462 		bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len);
4463 		downloaded_len += len;
4464 		if (bcmerror) {
4465 			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
4466 				__FUNCTION__, bcmerror, MEMBLOCK, offset));
4467 			goto err;
4468 		}
4469 		offset += MEMBLOCK;
4470 	}
4471 
4472 #ifdef DHD_DEBUG
4473 	/* Upload and compare the downloaded code */
4474 	{
4475 		unsigned char *ularray = NULL;
4476 		unsigned int uploaded_len;
4477 		uploaded_len = 0;
4478 		bcmerror = -1;
4479 		ularray = MALLOC(bus->dhd->osh, dlarray_size);
4480 		if (ularray == NULL)
4481 			goto upload_err;
4482 		/* Upload image to verify downloaded contents. */
4483 		offset = bus->dongle_ram_base;
4484 		memset(ularray, 0xaa, dlarray_size);
4485 		while (uploaded_len < dlarray_size) {
4486 			remaining_len = dlarray_size - uploaded_len;
4487 			if (remaining_len >= MEMBLOCK)
4488 				len = MEMBLOCK;
4489 			else
4490 				len = remaining_len;
4491 			bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset,
4492 				(uint8 *)(ularray + uploaded_len), len);
4493 			if (bcmerror) {
4494 				DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
4495 					__FUNCTION__, bcmerror, MEMBLOCK, offset));
4496 				goto upload_err;
4497 			}
4498 
4499 			uploaded_len += len;
4500 			offset += MEMBLOCK;
4501 		}
4502 #ifdef DHD_EFI
4503 		if (memcmp(p_dlarray, ularray, dlarray_size)) {
4504 			DHD_ERROR(("%s: Downloaded image is corrupted!\n", __FUNCTION__));
4505 			goto upload_err;
4506 		} else
4507 			DHD_ERROR(("%s: Download, Upload and compare succeeded.\n", __FUNCTION__));
4508 #else
4509 		if (memcmp(p_dlarray, ularray, dlarray_size)) {
4510 			DHD_ERROR(("%s: Downloaded image is corrupted (%s, %s, %s).\n",
4511 				__FUNCTION__, p_dlimagename, p_dlimagever, p_dlimagedate));
4512 			goto upload_err;
4513 
4514 		} else
4515 			DHD_ERROR(("%s: Download, Upload and compare succeeded (%s, %s, %s).\n",
4516 				__FUNCTION__, p_dlimagename, p_dlimagever, p_dlimagedate));
4517 #endif /* DHD_EFI */
4518 
4519 upload_err:
4520 		if (ularray)
4521 			MFREE(bus->dhd->osh, ularray, dlarray_size);
4522 	}
4523 #endif /* DHD_DEBUG */
4524 err:
4525 
4526 	if (memblock)
4527 		MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
4528 
4529 	return bcmerror;
4530 } /* dhdpcie_download_code_array */
4531 #endif /* BCMEMBEDIMAGE */
4532 
4533 #ifdef BCM_ROUTER_DHD
4534 static int
4535 _dhdpcie_get_nvram_params(struct dhd_bus *bus)
4536 {
4537 	int nvram_len = MAX_NVRAMBUF_SIZE;
4538 	int tmp_nvram_len, boardrev_str_len;
4539 	char *boardrev_str;
4540 	char *boardtype_str;
4541 	char *ptr;
4542 
4543 	bus->nvram_params = MALLOC(bus->dhd->osh, nvram_len);
4544 	if (!bus->nvram_params) {
4545 		DHD_ERROR(("%s: fail to get nvram buffer to download.\n", __FUNCTION__));
4546 		return -1;
4547 	}
4548 
4549 	bus->nvram_params[0] = 0;
4550 	ptr = bus->nvram_params;
4551 	/*
4552 	 * For full dongle router platforms, we would have two dhd instances running,
4553 	 * serving two radios, one for 5G and another for 2G. But, both dongle instances
4554 	 * would come up as wl0, as one is not aware of the other. In order to avoid
4555 	 * this situation, we pass the dhd instance number through nvram parameter
4556 	 * wlunit=0 and wlunit=1 to the dongle and make sure the two dongle instances
4557 	 * come up as wl0 and wl1.
4558 	 */
4559 
4560 	tmp_nvram_len = strlen("wlunit=xx\n\n") + 1;
4561 	tmp_nvram_len =
4562 		snprintf(ptr, tmp_nvram_len, "wlunit=%d", dhd_get_instance(bus->dhd));
4563 	ptr += (tmp_nvram_len + 1); /* leave NULL */
4564 	tmp_nvram_len++;
4565 
4566 	if ((boardrev_str = si_getdevpathvar(bus->sih, "boardrev")) == NULL)
4567 		boardrev_str = nvram_get("boardrev");
4568 
4569 	boardrev_str_len = strlen("boardrev=0xXXXX") + 1;
4570 	boardrev_str_len = snprintf(ptr, boardrev_str_len, "boardrev=%s",
4571 		boardrev_str? boardrev_str : BOARDREV_PROMOTABLE_STR);
4572 	ptr += (boardrev_str_len + 1); /* leave NULL */
4573 	tmp_nvram_len += (boardrev_str_len + 1);
4574 
4575 	/* If per device boardtype is not available, use global boardtype */
4576 	if ((boardtype_str = si_getdevpathvar(bus->sih, "boardtype")) == NULL) {
4577 		if ((boardtype_str = nvram_get("boardtype")) != NULL) {
4578 			int boardtype_str_len = 0;
4579 
4580 			boardtype_str_len = strlen("boardtype=0xXXXX") + 1;
4581 			boardtype_str_len = snprintf(ptr, boardtype_str_len,
4582 					"boardtype=%s", boardtype_str);
4583 			ptr += (boardtype_str_len + 1); /* leave NULL */
4584 			tmp_nvram_len += (boardtype_str_len + 1);
4585 		}
4586 	}
4587 
4588 	if (dbushost_initvars_flash(bus->sih,
4589 		bus->osh, &ptr,
4590 		(nvram_len - tmp_nvram_len)) != 0) {
4591 		DHD_ERROR(("%s: fail to read nvram from flash.\n", __FUNCTION__));
4592 	}
4593 
4594 	tmp_nvram_len = (int)(ptr - bus->nvram_params);
4595 
4596 	bcopy(STR_END, ptr, sizeof(STR_END));
4597 	tmp_nvram_len += sizeof(STR_END);
4598 	bus->nvram_params_len  = tmp_nvram_len;
4599 	return 0;
4600 }
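/*
 * Illustrative contents of bus->nvram_params as built above (hypothetical
 * values): "wlunit=0\0boardrev=0x1402\0boardtype=0x0646\0...END\0\0".
 * Each variable is written null-terminated in place, and the blob is
 * closed with the STR_END marker so the dongle can locate the end of the
 * variable list.
 */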
4601 
4602 static void
4603 _dhdpcie_free_nvram_params(struct dhd_bus *bus)
4604 {
4605 	if (bus->nvram_params) {
4606 		MFREE(bus->dhd->osh, bus->nvram_params, MAX_NVRAMBUF_SIZE);
4607 	}
4608 }
4609 
4610 /** Handler to send a signal to the dhdmonitor process to notify of firmware traps */
4611 void
4612 dhdpcie_handle_dongle_trap(struct dhd_bus *bus)
4613 {
4614 	char *failed_if;
4615 
4616 	/* Call the bus module watchdog */
4617 	dhd_bus_watchdog(bus->dhd);
4618 
4619 	/* Get the failed interface name to be later used by
4620 	 * dhd_monitor to capture the required logs
4621 	 */
4622 	failed_if = dhd_ifname(bus->dhd, 0);
4623 	dhd_schedule_trap_log_dump(bus->dhd, (uint8 *)failed_if, strlen(failed_if));
4624 }
4625 
4626 #endif /* BCM_ROUTER_DHD */
4627 
4628 /**
4629  * Downloads firmware file given by 'bus->fw_path' into PCIe dongle
4630  *
4631  * BCMEMBEDIMAGE specific:
4632  * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header
4633  * file will be used instead.
4634  *
4635  */
4636 static int
4637 _dhdpcie_download_firmware(struct dhd_bus *bus)
4638 {
4639 	int bcmerror = -1;
4640 
4641 	bool embed = FALSE;	/* download embedded firmware */
4642 	bool dlok = FALSE;	/* download firmware succeeded */
4643 
4644 	/* Return immediately if there is no image to download */
4645 	if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
4646 #ifdef BCMEMBEDIMAGE
4647 		embed = TRUE;
4648 #else
4649 		DHD_ERROR(("%s: no firmware file\n", __FUNCTION__));
4650 		return 0;
4651 #endif
4652 	}
4653 #ifdef BCM_ROUTER_DHD
4654 	if (_dhdpcie_get_nvram_params(bus) < 0) {
4655 		DHD_ERROR(("%s: fail to get nvram from system.\n", __FUNCTION__));
4656 		return 0;
4657 	}
4658 #endif
4659 	/* Keep arm in reset */
4660 	if (dhdpcie_bus_download_state(bus, TRUE)) {
4661 		DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__));
4662 		goto err;
4663 	}
4664 
4665 	/* External image takes precedence if specified */
4666 	if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
4667 		if (dhdpcie_download_code_file(bus, bus->fw_path)) {
4668 			DHD_ERROR(("%s:%d dongle image file download failed\n", __FUNCTION__,
4669 				__LINE__));
4670 #ifdef BCMEMBEDIMAGE
4671 			embed = TRUE;
4672 #else
4673 			goto err;
4674 #endif
4675 		} else {
4676 			embed = FALSE;
4677 			dlok = TRUE;
4678 		}
4679 	}
4680 
4681 #ifdef BCMEMBEDIMAGE
4682 	if (embed) {
4683 		if (dhdpcie_download_code_array(bus)) {
4684 			DHD_ERROR(("%s: dongle image array download failed\n", __FUNCTION__));
4685 			goto err;
4686 		} else {
4687 			dlok = TRUE;
4688 		}
4689 	}
4690 #else
4691 	BCM_REFERENCE(embed);
4692 #endif
4693 	if (!dlok) {
4694 		DHD_ERROR(("%s:%d dongle image download failed\n", __FUNCTION__, __LINE__));
4695 		goto err;
4696 	}
4697 
4698 	/* EXAMPLE: nvram_array */
4699 	/* If a valid nvram_array is specified as above, it can be passed down to the dongle */
4700 	/* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */
4701 
4702 	/* External nvram takes precedence if specified */
4703 	if (dhdpcie_download_nvram(bus)) {
4704 		DHD_ERROR(("%s:%d dongle nvram file download failed\n", __FUNCTION__, __LINE__));
4705 		goto err;
4706 	}
4707 
4708 	/* Take arm out of reset */
4709 	if (dhdpcie_bus_download_state(bus, FALSE)) {
4710 		DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__));
4711 		goto err;
4712 	}
4713 
4714 	bcmerror = 0;
4715 
4716 err:
4717 #ifdef BCM_ROUTER_DHD
4718 	_dhdpcie_free_nvram_params(bus);
4719 #endif /* BCM_ROUTER_DHD */
4720 	return bcmerror;
4721 } /* _dhdpcie_download_firmware */
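/*
 * Download sequence recap (mirrors the code above):
 *   1. dhdpcie_bus_download_state(bus, TRUE)    - hold the ARM in reset
 *   2. dhdpcie_download_code_file()/..._array() - write the RAM image
 *   3. dhdpcie_download_nvram()                 - write the NVRAM vars
 *   4. dhdpcie_bus_download_state(bus, FALSE)   - release the ARM to run
 * The ARM stays in reset for the whole download so it cannot start
 * executing a partially written image.
 */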
4722 
4723 static int
4724 dhdpcie_bus_readconsole(dhd_bus_t *bus)
4725 {
4726 	dhd_console_t *c = &bus->console;
4727 	uint8 line[CONSOLE_LINE_MAX], ch;
4728 	uint32 n, idx, addr;
4729 	int rv;
4730 	uint readlen = 0;
4731 	uint i = 0;
4732 
4733 	/* Don't do anything until FWREADY updates console address */
4734 	if (bus->console_addr == 0)
4735 		return -1;
4736 
4737 	/* Read console log struct */
4738 	addr = bus->console_addr + OFFSETOF(hnd_cons_t, log);
4739 
4740 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0)
4741 		return rv;
4742 
4743 	/* Allocate console buffer (one time only) */
4744 	if (c->buf == NULL) {
4745 		c->bufsize = ltoh32(c->log.buf_size);
4746 		if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL)
4747 			return BCME_NOMEM;
4748 		DHD_PCIE_INFO(("conlog: bufsize=0x%x\n", c->bufsize));
4749 	}
4750 	idx = ltoh32(c->log.idx);
4751 
4752 	/* Protect against corrupt value */
4753 	if (idx > c->bufsize)
4754 		return BCME_ERROR;
4755 
4756 	/* Skip reading the console buffer if the index pointer has not moved */
4757 	if (idx == c->last)
4758 		return BCME_OK;
4759 
4760 	DHD_PCIE_INFO(("conlog: addr=0x%x, idx=0x%x, last=0x%x \n", c->log.buf,
4761 	   idx, c->last));
4762 
4763 	/* Read the console buffer data into a local buffer. As an
4764 	 * optimization, only the needed portion of the buffer is read,
4765 	 * taking care to handle wrap-around. The read ptr is 'c->last',
4766 	 * the write ptr is 'idx'.
4767 	 */
4768 	addr = ltoh32(c->log.buf);
4769 
4770 	/* wrap around case - write ptr < read ptr */
4771 	if (idx < c->last) {
4772 		/* from read ptr to end of buffer */
4773 		readlen = c->bufsize - c->last;
4774 		if ((rv = dhdpcie_bus_membytes(bus, FALSE,
4775 				addr + c->last, c->buf, readlen)) < 0) {
4776 			DHD_ERROR(("conlog: read error[1] ! \n"));
4777 			return rv;
4778 		}
4779 		/* from beginning of buffer to write ptr */
4780 		if ((rv = dhdpcie_bus_membytes(bus, FALSE,
4781 				addr, c->buf + readlen,
4782 				idx)) < 0) {
4783 			DHD_ERROR(("conlog: read error[2] ! \n"));
4784 			return rv;
4785 		}
4786 		readlen += idx;
4787 	} else {
4788 		/* non-wraparound case, write ptr > read ptr */
4789 		readlen = (uint)idx - c->last;
4790 		if ((rv = dhdpcie_bus_membytes(bus, FALSE,
4791 				addr + c->last, c->buf, readlen)) < 0) {
4792 			DHD_ERROR(("conlog: read error[3] ! \n"));
4793 			return rv;
4794 		}
4795 	}
4796 	/* update read ptr */
4797 	c->last = idx;
4798 
4799 	/* now output the read data from the local buffer to the host console */
4800 	while (i < readlen) {
4801 		for (n = 0; n < CONSOLE_LINE_MAX - 2 && i < readlen; n++) {
4802 			ch = c->buf[i];
4803 			++i;
4804 			if (ch == '\n')
4805 				break;
4806 			line[n] = ch;
4807 		}
4808 
4809 		if (n > 0) {
4810 			if (line[n - 1] == '\r')
4811 				n--;
4812 			line[n] = 0;
4813 			printf("CONSOLE: %s\n", line);
4814 		}
4815 	}
4816 
4817 	return BCME_OK;
4818 
4819 } /* dhdpcie_bus_readconsole */
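/*
 * Wrap-around read example (hypothetical values): with c->bufsize = 0x100,
 * read ptr c->last = 0xf0 and write ptr idx = 0x10, the dongle has
 * produced 0x20 new bytes straddling the end of the ring. The code above
 * copies bufsize - last = 0x10 bytes from the tail, then idx = 0x10 bytes
 * from the head (readlen = 0x20 total), and finally advances c->last to
 * idx.
 */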
4820 
4821 void
4822 dhd_bus_dump_console_buffer(dhd_bus_t *bus)
4823 {
4824 	uint32 n, i;
4825 	uint32 addr;
4826 	char *console_buffer = NULL;
4827 	uint32 console_ptr, console_size, console_index;
4828 	uint8 line[CONSOLE_LINE_MAX], ch;
4829 	int rv;
4830 
4831 	DHD_ERROR(("%s: Dump Complete Console Buffer\n", __FUNCTION__));
4832 
4833 	if (bus->is_linkdown) {
4834 		DHD_ERROR(("%s: Skip dump Console Buffer due to PCIe link down\n", __FUNCTION__));
4835 		return;
4836 	}
4837 
4838 	addr =	bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log);
4839 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
4840 		(uint8 *)&console_ptr, sizeof(console_ptr))) < 0) {
4841 		goto exit;
4842 	}
4843 
4844 	addr =	bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.buf_size);
4845 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
4846 		(uint8 *)&console_size, sizeof(console_size))) < 0) {
4847 		goto exit;
4848 	}
4849 
4850 	addr =	bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.idx);
4851 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
4852 		(uint8 *)&console_index, sizeof(console_index))) < 0) {
4853 		goto exit;
4854 	}
4855 
4856 	console_ptr = ltoh32(console_ptr);
4857 	console_size = ltoh32(console_size);
4858 	console_index = ltoh32(console_index);
4859 
4860 	if (console_size > CONSOLE_BUFFER_MAX ||
4861 		!(console_buffer = MALLOC(bus->dhd->osh, console_size))) {
4862 		goto exit;
4863 	}
4864 
4865 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, console_ptr,
4866 		(uint8 *)console_buffer, console_size)) < 0) {
4867 		goto exit;
4868 	}
4869 
4870 	for (i = 0, n = 0; i < console_size; i += n + 1) {
4871 		for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
4872 			ch = console_buffer[(console_index + i + n) % console_size];
4873 			if (ch == '\n')
4874 				break;
4875 			line[n] = ch;
4876 		}
4877 
4878 		if (n > 0) {
4879 			if (line[n - 1] == '\r')
4880 				n--;
4881 			line[n] = 0;
4882 			/* Don't use DHD_ERROR macro since we print
4883 			 * a lot of information quickly. The macro
4884 			 * will truncate a lot of the printfs
4885 			 */
4886 
4887 			printf("CONSOLE: %s\n", line);
4888 		}
4889 	}
4890 
4891 exit:
4892 	if (console_buffer)
4893 		MFREE(bus->dhd->osh, console_buffer, console_size);
4894 	return;
4895 }
4896 
4897 static void
4898 dhdpcie_schedule_log_dump(dhd_bus_t *bus)
4899 {
4900 #if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL) && defined(DHD_LOG_DUMP)
4901 	log_dump_type_t *flush_type;
4902 
4903 	/* flush_type is freed at do_dhd_log_dump function */
4904 	flush_type = MALLOCZ(bus->dhd->osh, sizeof(log_dump_type_t));
4905 	if (flush_type) {
4906 		*flush_type = DLD_BUF_TYPE_ALL;
4907 		dhd_schedule_log_dump(bus->dhd, flush_type);
4908 	} else {
4909 		DHD_ERROR(("%s Fail to malloc flush_type\n", __FUNCTION__));
4910 	}
4911 #endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL && DHD_LOG_DUMP */
4912 }
4913 
4914 /**
4915  * Opens the file given by bus->fw_path, reads part of the file into a buffer and closes the file.
4916  *
4917  * @return BCME_OK on success
4918  */
4919 static int
4920 dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size)
4921 {
4922 	int bcmerror = 0;
4923 	uint msize = 512;
4924 	char *mbuffer = NULL;
4925 	uint maxstrlen = 256;
4926 	char *str = NULL;
4927 	pciedev_shared_t *local_pciedev_shared = bus->pcie_sh;
4928 	struct bcmstrbuf strbuf;
4929 	unsigned long flags;
4930 	bool dongle_trap_occured = FALSE;
4931 
4932 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4933 
4934 	if (DHD_NOCHECKDIED_ON()) {
4935 		return 0;
4936 	}
4937 
4938 	if (data == NULL) {
4939 		/*
4940 		 * Called after an rx ctrl timeout; "data" is NULL.
4941 		 * Allocate memory to trace the trap or assert.
4942 		 */
4943 		size = msize;
4944 		mbuffer = data = MALLOC(bus->dhd->osh, msize);
4945 
4946 		if (mbuffer == NULL) {
4947 			DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize));
4948 			bcmerror = BCME_NOMEM;
4949 			goto done2;
4950 		}
4951 	}
4952 
4953 	if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) {
4954 		DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen));
4955 		bcmerror = BCME_NOMEM;
4956 		goto done2;
4957 	}
4958 	DHD_GENERAL_LOCK(bus->dhd, flags);
4959 	DHD_BUS_BUSY_SET_IN_CHECKDIED(bus->dhd);
4960 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
4961 
4962 	if (MULTIBP_ENAB(bus->sih)) {
4963 		dhd_bus_pcie_pwr_req(bus);
4964 	}
4965 	if ((bcmerror = dhdpcie_readshared(bus)) < 0) {
4966 		goto done1;
4967 	}
4968 
4969 	bcm_binit(&strbuf, data, size);
4970 
4971 	bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address  : 0x%08X\n",
4972 	            local_pciedev_shared->msgtrace_addr, local_pciedev_shared->console_addr);
4973 
4974 	if ((local_pciedev_shared->flags & PCIE_SHARED_ASSERT_BUILT) == 0) {
4975 		/* NOTE: Misspelled assert is intentional - DO NOT FIX.
4976 		 * (Avoids conflict with real asserts for programmatic parsing of output.)
4977 		 */
4978 		bcm_bprintf(&strbuf, "Assrt not built in dongle\n");
4979 	}
4980 
4981 	if ((bus->pcie_sh->flags & (PCIE_SHARED_ASSERT|PCIE_SHARED_TRAP)) == 0) {
4982 		/* NOTE: Misspelled assert is intentional - DO NOT FIX.
4983 		 * (Avoids conflict with real asserts for programmatic parsing of output.)
4984 		 */
4985 		bcm_bprintf(&strbuf, "No trap%s in dongle",
4986 		          (bus->pcie_sh->flags & PCIE_SHARED_ASSERT_BUILT)
4987 		          ?"/assrt" :"");
4988 	} else {
4989 		if (bus->pcie_sh->flags & PCIE_SHARED_ASSERT) {
4990 			/* Download assert */
4991 			bcm_bprintf(&strbuf, "Dongle assert");
4992 			if (bus->pcie_sh->assert_exp_addr != 0) {
4993 				str[0] = '\0';
4994 				if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
4995 					bus->pcie_sh->assert_exp_addr,
4996 					(uint8 *)str, maxstrlen)) < 0) {
4997 					goto done1;
4998 				}
4999 
5000 				str[maxstrlen - 1] = '\0';
5001 				bcm_bprintf(&strbuf, " expr \"%s\"", str);
5002 			}
5003 
5004 			if (bus->pcie_sh->assert_file_addr != 0) {
5005 				str[0] = '\0';
5006 				if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
5007 					bus->pcie_sh->assert_file_addr,
5008 					(uint8 *)str, maxstrlen)) < 0) {
5009 					goto done1;
5010 				}
5011 
5012 				str[maxstrlen - 1] = '\0';
5013 				bcm_bprintf(&strbuf, " file \"%s\"", str);
5014 			}
5015 
5016 			bcm_bprintf(&strbuf, " line %d ",  bus->pcie_sh->assert_line);
5017 		}
5018 
5019 		if (bus->pcie_sh->flags & PCIE_SHARED_TRAP) {
5020 			trap_t *tr = &bus->dhd->last_trap_info;
5021 			dongle_trap_occured = TRUE;
5022 			if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
5023 				bus->pcie_sh->trap_addr, (uint8*)tr, sizeof(trap_t))) < 0) {
5024 				bus->dhd->dongle_trap_occured = TRUE;
5025 				goto done1;
5026 			}
5027 			dhd_bus_dump_trap_info(bus, &strbuf);
5028 		}
5029 	}
5030 
5031 	if (bus->pcie_sh->flags & (PCIE_SHARED_ASSERT | PCIE_SHARED_TRAP)) {
5032 		printf("%s: %s\n", __FUNCTION__, strbuf.origbuf);
5033 #ifdef REPORT_FATAL_TIMEOUTS
5034 		/**
5035 		 * stop the timers as FW trapped
5036 		 */
5037 		if (dhd_stop_scan_timer(bus->dhd, FALSE, 0)) {
5038 			DHD_ERROR(("dhd_stop_scan_timer failed\n"));
5039 			ASSERT(0);
5040 		}
5041 		if (dhd_stop_bus_timer(bus->dhd)) {
5042 			DHD_ERROR(("dhd_stop_bus_timer failed\n"));
5043 			ASSERT(0);
5044 		}
5045 		if (dhd_stop_cmd_timer(bus->dhd)) {
5046 			DHD_ERROR(("dhd_stop_cmd_timer failed\n"));
5047 			ASSERT(0);
5048 		}
5049 		if (dhd_stop_join_timer(bus->dhd)) {
5050 			DHD_ERROR(("dhd_stop_join_timer failed\n"));
5051 			ASSERT(0);
5052 		}
5053 #endif /* REPORT_FATAL_TIMEOUTS */
5054 
5055 		/* wake up IOCTL wait event */
5056 		dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_TRAP);
5057 
5058 		dhd_bus_dump_console_buffer(bus);
5059 		dhd_prot_debug_info_print(bus->dhd);
5060 
5061 #if defined(DHD_FW_COREDUMP)
5062 		/* save core dump or write to a file */
5063 		if (bus->dhd->memdump_enabled) {
5064 #ifdef DHD_SSSR_DUMP
5065 			DHD_ERROR(("%s : Set collect_sssr as TRUE\n", __FUNCTION__));
5066 			bus->dhd->collect_sssr = TRUE;
5067 #endif /* DHD_SSSR_DUMP */
5068 #ifdef DHD_SDTC_ETB_DUMP
5069 			DHD_ERROR(("%s : Set collect_sdtc as TRUE\n", __FUNCTION__));
5070 			bus->dhd->collect_sdtc = TRUE;
5071 #endif /* DHD_SDTC_ETB_DUMP */
5072 			bus->dhd->memdump_type = DUMP_TYPE_DONGLE_TRAP;
5073 			dhdpcie_mem_dump(bus);
5074 		}
5075 #endif /* DHD_FW_COREDUMP */
5076 
5077 		/* set the trap occured flag only after all the memdump,
5078 		* logdump and sssr dump collection has been scheduled
5079 		*/
5080 		if (dongle_trap_occured) {
5081 			bus->dhd->dongle_trap_occured = TRUE;
5082 			if (bus->dhd->check_trap_rot &&
5083 				bus->dhd->ext_trap_data_supported &&
5084 				bus->pcie_sh->flags2 & PCIE_SHARED2_ETD_ADDR_SUPPORT) {
5085 				uint32 trap_data = *(uint32 *)bus->dhd->extended_trap_data;
5086 				DHD_ERROR(("%s : etd data : %x\n", __FUNCTION__, trap_data));
5087 				if (!(trap_data & D2H_DEV_EXT_TRAP_DATA)) {
5088 					uint32 *ext_data = bus->dhd->extended_trap_data;
5089 					/* Skip the first word which is trap_data */
5090 					ext_data++;
5091 					DHD_ERROR(("Dongle trap but no etd\n"));
5092 					if (dhdpcie_bus_membytes(bus, FALSE,
5093 						local_pciedev_shared->etd_addr,
5094 						(uint8 *)ext_data,
5095 						BCMPCIE_EXT_TRAP_DATA_MAXLEN -
5096 							sizeof(trap_data)) < 0) {
5097 						DHD_ERROR(("Error to read etd from dongle\n"));
5098 					}
5099 				} else {
5100 					DHD_ERROR(("Dongle trap with etd\n"));
5101 				}
5102 			}
5103 
5104 		}
5105 
5106 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
5107 		copy_hang_info_trap(bus->dhd);
5108 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
5109 
5110 		dhd_schedule_reset(bus->dhd);
5111 
5112 #ifdef NDIS
5113 		/* ASSERT only if hang detection/recovery is disabled. If enabled then let
5114 		 * windows HDR mechansim trigger FW download via surprise removal
5115 		 */
5116 		dhd_bus_check_died(bus);
5117 #endif
5118 
5119 	}
5120 
5121 done1:
5122 	if (bcmerror) {
5123 		/* dhdpcie_checkdied is invoked only when dongle has trapped
5124 		 * or after PCIe link down..etc. so set dongle_trap_occured so that
5125 		 * log_dump logic can rely on only one flag dongle_trap_occured.
5126 		 */
5127 		bus->dhd->dongle_trap_occured = TRUE;
5128 		dhdpcie_schedule_log_dump(bus);
5129 	}
5130 	if (MULTIBP_ENAB(bus->sih)) {
5131 		dhd_bus_pcie_pwr_req_clear(bus);
5132 	}
5133 
5134 	DHD_GENERAL_LOCK(bus->dhd, flags);
5135 	DHD_BUS_BUSY_CLEAR_IN_CHECKDIED(bus->dhd);
5136 	dhd_os_busbusy_wake(bus->dhd);
5137 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
5138 done2:
5139 	if (mbuffer)
5140 		MFREE(bus->dhd->osh, mbuffer, msize);
5141 	if (str)
5142 		MFREE(bus->dhd->osh, str, maxstrlen);
5143 
5144 	return bcmerror;
5145 } /* dhdpcie_checkdied */

/* Custom copy of dhdpcie_mem_dump() that can be called at interrupt level */
void dhdpcie_mem_dump_bugcheck(dhd_bus_t *bus, uint8 *buf)
{
	int ret = 0;
	int size; /* Full mem size */
	int start; /* Start address */
	int read_size = 0; /* Read size of each iteration */
	uint8 *databuf = buf;

	if (bus == NULL) {
		return;
	}

	start = bus->dongle_ram_base;
	read_size = 4;
	/* check for dead bus */
	{
		uint test_word = 0;
		ret = dhdpcie_bus_membytes(bus, FALSE, start, (uint8*)&test_word, read_size);
		/* if read error or bus timeout */
		if (ret || (test_word == 0xFFFFFFFF)) {
			return;
		}
	}

	/* Get full mem size */
	size = bus->ramsize;
	/* Read mem content */
	while (size)
	{
		read_size = MIN(MEMBLOCK, size);
		if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size))) {
			return;
		}

		/* Decrement size and increment start address */
		size -= read_size;
		start += read_size;
		databuf += read_size;
	}
	bus->dhd->soc_ram = buf;
	bus->dhd->soc_ram_length = bus->ramsize;
	return;
}

#if defined(DHD_FW_COREDUMP)
static int
dhdpcie_get_mem_dump(dhd_bus_t *bus)
{
	int ret = BCME_OK;
	int size = 0;
	int start = 0;
	int read_size = 0; /* Read size of each iteration */
	uint8 *p_buf = NULL, *databuf = NULL;
#ifdef BOARD_HIKEY
	unsigned long flags_bus;
#endif /* BOARD_HIKEY */

	if (!bus) {
		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
		return BCME_ERROR;
	}

	if (!bus->dhd) {
		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
		return BCME_ERROR;
	}

	size = bus->ramsize; /* Full mem size */
	start = bus->dongle_ram_base; /* Start address */

	/* Get a buffer large enough for the full mem size */
	p_buf = dhd_get_fwdump_buf(bus->dhd, size);
	if (!p_buf) {
		DHD_ERROR(("%s: Out of memory (%d bytes)\n",
			__FUNCTION__, size));
		return BCME_ERROR;
	}

	/* Read mem content */
	DHD_TRACE_HW4(("Dump dongle memory\n"));
	databuf = p_buf;

	while (size > 0) {
		read_size = MIN(MEMBLOCK, size);
#ifdef BOARD_HIKEY
		/* Hold BUS_LP_STATE_LOCK to avoid simultaneous bus access */
		DHD_BUS_LP_STATE_LOCK(bus->bus_lp_state_lock, flags_bus);
#endif /* BOARD_HIKEY */
		ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size);
#ifdef BOARD_HIKEY
		DHD_BUS_LP_STATE_UNLOCK(bus->bus_lp_state_lock, flags_bus);
#endif /* BOARD_HIKEY */
		if (ret) {
			DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__, ret));
#ifdef DHD_DEBUG_UART
			bus->dhd->memdump_success = FALSE;
#endif	/* DHD_DEBUG_UART */
			break;
		}
		DHD_TRACE(("."));

		/* Decrement size and increment start address */
		size -= read_size;
		start += read_size;
		databuf += read_size;
	}
	return ret;
}

static int
dhdpcie_mem_dump(dhd_bus_t *bus)
{
	dhd_pub_t *dhdp;
	int ret;
	uint32 dhd_console_ms_prev = 0;

	dhdp = bus->dhd;
	if (!dhdp) {
		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
		return BCME_ERROR;
	}

	dhd_console_ms_prev = dhdp->dhd_console_ms;
	if (dhd_console_ms_prev) {
		DHD_ERROR(("%s: Disabling console msgs(%d) before mem dump to local buf\n",
			__FUNCTION__, dhd_console_ms_prev));
		dhdp->dhd_console_ms = 0;
	}
#ifdef EXYNOS_PCIE_DEBUG
	exynos_pcie_register_dump(1);
#endif /* EXYNOS_PCIE_DEBUG */

#ifdef SUPPORT_LINKDOWN_RECOVERY
	if (bus->is_linkdown) {
		DHD_ERROR(("%s: PCIe link is down so skip\n", __FUNCTION__));
		/* panic only for DUMP_MEMFILE_BUGON */
		ASSERT(bus->dhd->memdump_enabled != DUMP_MEMFILE_BUGON);
		ret = BCME_ERROR;
		goto exit;
	}
#endif /* SUPPORT_LINKDOWN_RECOVERY */

	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
		DHD_ERROR(("%s: bus is down! can't collect mem dump.\n", __FUNCTION__));
		ret = BCME_ERROR;
		goto exit;
	}

	/* Induce DB7 trap for below non-trap cases */
	switch (dhdp->memdump_type) {
		case DUMP_TYPE_RESUMED_ON_TIMEOUT:
			/* intentional fall through */
		case DUMP_TYPE_D3_ACK_TIMEOUT:
			/* intentional fall through */
		case DUMP_TYPE_PKTID_AUDIT_FAILURE:
			/* intentional fall through */
		case DUMP_TYPE_PKTID_INVALID:
			/* intentional fall through */
		case DUMP_TYPE_SCAN_TIMEOUT:
			/* intentional fall through */
		case DUMP_TYPE_SCAN_BUSY:
			/* intentional fall through */
		case DUMP_TYPE_BY_LIVELOCK:
			/* intentional fall through */
		case DUMP_TYPE_IFACE_OP_FAILURE:
			/* intentional fall through */
		case DUMP_TYPE_PKTID_POOL_DEPLETED:
			/* intentional fall through */
		case DUMP_TYPE_ESCAN_SYNCID_MISMATCH:
			/* intentional fall through */
		case DUMP_TYPE_INVALID_SHINFO_NRFRAGS:
			if (dhdp->db7_trap.fw_db7w_trap) {
				/* Set fw_db7w_trap_inprogress here and clear from DPC */
				dhdp->db7_trap.fw_db7w_trap_inprogress = TRUE;
				dhdpcie_fw_trap(dhdp->bus);
				OSL_DELAY(100 * 1000); /* wait 100 msec */
			} else {
				DHD_ERROR(("%s: DB7 Not supported!!!\n",
					__FUNCTION__));
			}
			break;
		default:
			break;
	}

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
	if (pm_runtime_get_sync(dhd_bus_to_dev(bus)) < 0)
		return BCME_ERROR;
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

	ret = dhdpcie_get_mem_dump(bus);
	if (ret) {
		DHD_ERROR(("%s: failed to get mem dump, err=%d\n",
			__FUNCTION__, ret));
		goto exit;
	}
#ifdef DHD_DEBUG_UART
	bus->dhd->memdump_success = TRUE;
#endif	/* DHD_DEBUG_UART */

#ifdef BCMINTERNAL
	/* TODO: for host offload firmware, need to modify the stack and pc/lr to point it back to
	 * the original offset so gdb can match with symbol files
	 */
#endif

	dhd_schedule_memdump(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
	/* the buf (soc_ram) free is handled in dhd_{free,clear} */

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
	pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
	pm_runtime_put_autosuspend(dhd_bus_to_dev(bus));
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

exit:
	if (dhd_console_ms_prev) {
		DHD_ERROR(("%s: enable console msgs(%d) after collecting memdump to local buf\n",
			__FUNCTION__, dhd_console_ms_prev));
		dhdp->dhd_console_ms = dhd_console_ms_prev;
	}
	return ret;
}

int
dhd_bus_get_mem_dump(dhd_pub_t *dhdp)
{
	if (!dhdp) {
		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
		return BCME_ERROR;
	}

	return dhdpcie_get_mem_dump(dhdp->bus);
}

int
dhd_bus_mem_dump(dhd_pub_t *dhdp)
{
	dhd_bus_t *bus = dhdp->bus;
	int ret = BCME_ERROR;

	if (dhdp->busstate == DHD_BUS_DOWN) {
		DHD_ERROR(("%s: bus is down\n", __FUNCTION__));
		return BCME_ERROR;
	}

	/* Try to resume if already suspended or suspend in progress */
#ifdef DHD_PCIE_RUNTIMEPM
	dhdpcie_runtime_bus_wake(dhdp, CAN_SLEEP(), __builtin_return_address(0));
#endif /* DHD_PCIE_RUNTIMEPM */

	/* Skip if still suspended or a suspend is in progress */
	if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhdp)) {
		DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
			__FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
		return BCME_ERROR;
	}

	DHD_OS_WAKE_LOCK(dhdp);
	ret = dhdpcie_mem_dump(bus);
	DHD_OS_WAKE_UNLOCK(dhdp);
	return ret;
}
#endif	/* DHD_FW_COREDUMP */

int
dhd_socram_dump(dhd_bus_t *bus)
{
#if defined(DHD_FW_COREDUMP)
	DHD_OS_WAKE_LOCK(bus->dhd);
	dhd_bus_mem_dump(bus->dhd);
	DHD_OS_WAKE_UNLOCK(bus->dhd);
	return 0;
#else
	return -1;
#endif
}

/**
 * Transfers bytes between host and dongle using PIO mode; 'write' selects the direction.
 * Parameter 'address' is a backplane address.
 */
static int
dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size)
{
	uint dsize;
	int detect_endian_flag = 0x01;
	bool little_endian;

	if (write && bus->is_linkdown) {
		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
		return BCME_ERROR;
	}

	if (MULTIBP_ENAB(bus->sih)) {
		dhd_bus_pcie_pwr_req(bus);
	}
	/* Detect endianness. */
	little_endian = *(char *)&detect_endian_flag;

	/* In remap mode, adjust address beyond socram and redirect
	 * to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize
	 * is not backplane accessible
	 */

	/* Determine initial transfer parameters */
#ifdef DHD_SUPPORT_64BIT
	dsize = sizeof(uint64);
#else /* !DHD_SUPPORT_64BIT */
	dsize = sizeof(uint32);
#endif /* DHD_SUPPORT_64BIT */

	/* Do the transfer(s) */
	DHD_INFO(("%s: %s %d bytes in window 0x%08lx\n",
	          __FUNCTION__, (write ? "write" : "read"), size, address));
	if (write) {
		while (size) {
#ifdef DHD_SUPPORT_64BIT
			if (size >= sizeof(uint64) && little_endian && !(address % 8)) {
				dhdpcie_bus_wtcm64(bus, address, *((uint64 *)data));
			}
#else /* !DHD_SUPPORT_64BIT */
			if (size >= sizeof(uint32) && little_endian && !(address % 4)) {
				dhdpcie_bus_wtcm32(bus, address, *((uint32*)data));
			}
#endif /* DHD_SUPPORT_64BIT */
			else {
				dsize = sizeof(uint8);
				dhdpcie_bus_wtcm8(bus, address, *data);
			}

			/* Adjust for next transfer (if any) */
			if ((size -= dsize)) {
				data += dsize;
				address += dsize;
			}
		}
	} else {
		while (size) {
#ifdef DHD_SUPPORT_64BIT
			if (size >= sizeof(uint64) && little_endian && !(address % 8))
			{
				*(uint64 *)data = dhdpcie_bus_rtcm64(bus, address);
			}
#else /* !DHD_SUPPORT_64BIT */
			if (size >= sizeof(uint32) && little_endian && !(address % 4))
			{
				*(uint32 *)data = dhdpcie_bus_rtcm32(bus, address);
			}
#endif /* DHD_SUPPORT_64BIT */
			else {
				dsize = sizeof(uint8);
				*data = dhdpcie_bus_rtcm8(bus, address);
			}

			/* Adjust for next transfer (if any) */
			if ((size -= dsize) > 0) {
				data += dsize;
				address += dsize;
			}
		}
	}
	if (MULTIBP_ENAB(bus->sih)) {
		dhd_bus_pcie_pwr_req_clear(bus);
	}
	return BCME_OK;
} /* dhdpcie_bus_membytes */
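
/*
 * Illustrative sketch only, compiled out: the endianness probe used in
 * dhdpcie_bus_membytes() above works because the char cast reads the
 * lowest-addressed byte of the integer, which holds the LSB only on a
 * little-endian host. The helper name below is hypothetical.
 */
#if 0
static bool dhd_host_is_little_endian_example(void)
{
	int probe = 0x01;
	return (*(char *)&probe == 0x01); /* LSB at lowest address => little-endian */
}
#endif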

extern bool agg_h2d_db_enab;
/**
 * Transfers one transmit (ethernet) packet that was queued in the (flow controlled) flow ring queue
 * to the (non flow controlled) flow ring.
 */
int
BCMFASTPATH(dhd_bus_schedule_queue)(struct dhd_bus *bus, uint16 flow_id, bool txs)
/** XXX function name could be more descriptive, e.g. use 'tx' and 'flow ring' in name */
{
	flow_ring_node_t *flow_ring_node;
	int ret = BCME_OK;
#ifdef DHD_LOSSLESS_ROAMING
	dhd_pub_t *dhdp = bus->dhd;
#endif

	DHD_PCIE_INFO(("%s: flow_id is %d\n", __FUNCTION__, flow_id));

	/* ASSERT on flow_id */
	if (flow_id >= bus->max_submission_rings) {
		DHD_ERROR(("%s: flow_id is invalid %d, max %d\n", __FUNCTION__,
			flow_id, bus->max_submission_rings));
		return 0;
	}

	flow_ring_node = DHD_FLOW_RING(bus->dhd, flow_id);

	if (flow_ring_node->prot_info == NULL) {
		DHD_ERROR(("%s: invalid flow_ring_node\n", __FUNCTION__));
		return BCME_NOTREADY;
	}

#ifdef DHD_LOSSLESS_ROAMING
	if ((dhdp->dequeue_prec_map & (1 << flow_ring_node->flow_info.tid)) == 0) {
		DHD_ERROR_RLMT(("%s: roam in progress, tid %d is not in precedence map 0x%x."
			" block scheduling\n",
			__FUNCTION__, flow_ring_node->flow_info.tid, dhdp->dequeue_prec_map));
		return BCME_OK;
	}
#endif /* DHD_LOSSLESS_ROAMING */

	{
		unsigned long flags;
		void *txp = NULL;
		flow_queue_t *queue;

		queue = &flow_ring_node->queue; /* queue associated with flow ring */

		DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);

		if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) {
			DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
			return BCME_NOTREADY;
		}

		while ((txp = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
			if (bus->dhd->conf->orphan_move <= 1)
				PKTORPHAN(txp, bus->dhd->conf->tsq);

			/*
			 * Modifying the packet length caused P2P cert failures.
			 * Specifically on test cases where a packet of size 52 bytes
			 * was injected, the sniffer capture showed 62 bytes because of
			 * which the cert tests failed. So the change below is made
			 * Router specific only.
			 */
#if defined(BCM_ROUTER_DHD)
			if (PKTLEN(bus->dhd->osh, txp) < (ETHER_MIN_LEN - ETHER_CRC_LEN)) {
				PKTSETLEN(bus->dhd->osh, txp, (ETHER_MIN_LEN - ETHER_CRC_LEN));
			}
#endif /* BCM_ROUTER_DHD */

#ifdef DHDTCPACK_SUPPRESS
			if (bus->dhd->tcpack_sup_mode != TCPACK_SUP_HOLD) {
				ret = dhd_tcpack_check_xmit(bus->dhd, txp);
				if (ret != BCME_OK) {
					DHD_ERROR(("%s: dhd_tcpack_check_xmit() error.\n",
						__FUNCTION__));
				}
			}
#endif /* DHDTCPACK_SUPPRESS */
			/* Attempt to transfer packet over flow ring */
			/* XXX: ifidx is wrong */
			ret = dhd_prot_txdata(bus->dhd, txp, flow_ring_node->flow_info.ifindex);
			if (ret != BCME_OK) { /* may not have resources in flow ring */
				DHD_INFO(("%s: Reinsert %d\n", __FUNCTION__, ret));
#ifdef AGG_H2D_DB
				if (agg_h2d_db_enab) {
					dhd_prot_schedule_aggregate_h2d_db(bus->dhd, flow_id);
				} else
#endif /* AGG_H2D_DB */
				{
					dhd_prot_txdata_write_flush(bus->dhd, flow_id);
				}
				/* reinsert at head */
				dhd_flow_queue_reinsert(bus->dhd, queue, txp);
				DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);

				/* If we are able to requeue back, return success */
				return BCME_OK;
			}

#ifdef DHD_MEM_STATS
			DHD_MEM_STATS_LOCK(bus->dhd->mem_stats_lock, flags);
			bus->dhd->txpath_mem += PKTLEN(bus->dhd->osh, txp);
			DHD_PCIE_INFO(("%s txpath_mem: %llu PKTLEN: %d\n",
				__FUNCTION__, bus->dhd->txpath_mem, PKTLEN(bus->dhd->osh, txp)));
			DHD_MEM_STATS_UNLOCK(bus->dhd->mem_stats_lock, flags);
#endif /* DHD_MEM_STATS */
		}

#ifdef DHD_HP2P
		if (!flow_ring_node->hp2p_ring)
#endif /* DHD_HP2P */
		{
#ifdef AGG_H2D_DB
			if (agg_h2d_db_enab) {
				dhd_prot_schedule_aggregate_h2d_db(bus->dhd, flow_id);
			} else
#endif /* AGG_H2D_DB */
			{
				dhd_prot_txdata_write_flush(bus->dhd, flow_id);
			}
		}

		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
	}

	return ret;
} /* dhd_bus_schedule_queue */
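
/*
 * Illustrative sketch only, compiled out: the backpressure handling above
 * reinserts a packet that dhd_prot_txdata() rejected at the *head* of the
 * flow queue, so the next scheduling pass retries it first and the original
 * TX order is preserved. The types and helper below are hypothetical.
 */
#if 0
struct example_pkt { struct example_pkt *next; };
struct example_q { struct example_pkt *head, *tail; };

static void example_q_reinsert_head(struct example_q *q, struct example_pkt *p)
{
	p->next = q->head;
	q->head = p;
	if (q->tail == NULL)
		q->tail = p; /* queue was empty */
}
#endif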

/** Sends an (ethernet) data frame (in 'txp') to the dongle. Callee disposes of txp. */
int
BCMFASTPATH(dhd_bus_txdata)(struct dhd_bus *bus, void *txp, uint8 ifidx)
{
	uint16 flowid;
#ifdef IDLE_TX_FLOW_MGMT
	uint8	node_status;
#endif /* IDLE_TX_FLOW_MGMT */
	flow_queue_t *queue;
	flow_ring_node_t *flow_ring_node;
	unsigned long flags;
	int ret = BCME_OK;
	void *txp_pend = NULL;
#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
	void *ntxp = NULL;
	uint8 prio = PKTPRIO(txp);
#endif

	if (!bus->dhd->flowid_allocator) {
		DHD_ERROR(("%s: Flow ring not initialized yet\n", __FUNCTION__));
		goto toss;
	}

	flowid = DHD_PKT_GET_FLOWID(txp);

	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);

	DHD_TRACE(("%s: pkt flowid %d, status %d active %d\n",
		__FUNCTION__, flowid, flow_ring_node->status, flow_ring_node->active));

	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
	if ((flowid > bus->dhd->max_tx_flowid) ||
#ifdef IDLE_TX_FLOW_MGMT
		(!flow_ring_node->active))
#else
		(!flow_ring_node->active) ||
		(flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) ||
		(flow_ring_node->status == FLOW_RING_STATUS_STA_FREEING))
#endif /* IDLE_TX_FLOW_MGMT */
	{
		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
		DHD_INFO(("%s: Dropping pkt flowid %d, status %d active %d\n",
			__FUNCTION__, flowid, flow_ring_node->status,
			flow_ring_node->active));
		ret = BCME_ERROR;
		goto toss;
	}

#ifdef IDLE_TX_FLOW_MGMT
	node_status = flow_ring_node->status;

	/* handle different status states here!! */
	switch (node_status)
	{
		case FLOW_RING_STATUS_OPEN:

			if (bus->enable_idle_flowring_mgmt) {
				/* Move the node to the head of active list */
				dhd_flow_ring_move_to_active_list_head(bus, flow_ring_node);
			}
			break;

		case FLOW_RING_STATUS_SUSPENDED:
			DHD_INFO(("Need to Initiate TX Flow resume\n"));
			/* Issue resume_ring request */
			dhd_bus_flow_ring_resume_request(bus,
					flow_ring_node);
			break;

		case FLOW_RING_STATUS_CREATE_PENDING:
		case FLOW_RING_STATUS_RESUME_PENDING:
			/* Don't do anything here!! */
			DHD_INFO(("Waiting for Flow create/resume! status is %u\n",
				node_status));
			break;

		case FLOW_RING_STATUS_DELETE_PENDING:
		default:
			DHD_ERROR(("Dropping packet!! flowid %u status is %u\n",
				flowid, node_status));
			/* error here!! */
			ret = BCME_ERROR;
			DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
			goto toss;
	}
	/* Now queue the packet */
#endif /* IDLE_TX_FLOW_MGMT */

	queue = &flow_ring_node->queue; /* queue associated with flow ring */

#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
	FOREACH_CHAINED_PKT(txp, ntxp) {
		/* Tag the packet with flowid - Remember, only the head packet */
		/* of the chain has been tagged with the FlowID in dhd_sendpkt */
		/* Also set the priority */
		DHD_PKT_SET_FLOWID(txp, flowid);
		PKTSETPRIO(txp, prio);

		if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK) {
			txp_pend = txp;
			PKTSETCLINK((txp), ntxp);
			break;
		}
	}
#else  /* !(defined(BCM_ROUTER_DHD) && defined(HNDCTF)) */
	if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK)
		txp_pend = txp;
#endif /* defined(BCM_ROUTER_DHD) && defined(HNDCTF) */

	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);

	if (flow_ring_node->status) {
		DHD_PCIE_INFO(("%s: Enq pkt flowid %d, status %d active %d\n",
		    __FUNCTION__, flowid, flow_ring_node->status,
		    flow_ring_node->active));
		if (txp_pend) {
			txp = txp_pend;
			goto toss;
		}
		return BCME_OK;
	}
	ret = dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */

	/* If we have anything pending, try to push into q */
	if (txp_pend) {
		DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);

#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
		FOREACH_CHAINED_PKT(txp_pend, ntxp) {
			/* Tag the packet with flowid and set packet priority */
			DHD_PKT_SET_FLOWID(txp_pend, flowid);
			PKTSETPRIO(txp_pend, prio);

			if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp_pend))
				     != BCME_OK) {
				DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
				PKTSETCLINK((txp_pend), ntxp);
				txp = txp_pend;
				goto toss;
			}
		}
#else  /* !(defined(BCM_ROUTER_DHD) && defined(HNDCTF)) */
		if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp_pend)) != BCME_OK) {
			DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
			txp = txp_pend;
			goto toss;
		}
#endif /* defined(BCM_ROUTER_DHD) && defined(HNDCTF) */

		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
	}

	return ret;

toss:
	DHD_PCIE_INFO(("%s: Toss %d\n", __FUNCTION__, ret));
#ifdef DHD_EFI
	/* for EFI, pass the 'send' flag as false, to avoid enqueuing the failed tx pkt
	 * into the Tx done queue
	 */
	PKTCFREE(bus->dhd->osh, txp, FALSE);
#else
	PKTCFREE(bus->dhd->osh, txp, TRUE);
#endif /* DHD_EFI */
	return ret;
} /* dhd_bus_txdata */

void
dhd_bus_stop_queue(struct dhd_bus *bus)
{
	dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
}

void
dhd_bus_start_queue(struct dhd_bus *bus)
{
	/*
	 * The Tx queue may have been stopped due to a resource shortage,
	 * or because the bus was not in a state to be turned on.
	 *
	 * Note that the network interface is restarted only when enough
	 * resources are available, so the flag indicating that all the
	 * resources are available has to be changed first.
	 */
	if (dhd_prot_check_tx_resource(bus->dhd)) {
		DHD_ERROR(("%s: Interface NOT started, previously stopped "
			"due to resource shortage\n", __FUNCTION__));
		return;
	}
	dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
}

/* Device console input function */
int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen)
{
	dhd_bus_t *bus = dhd->bus;
	uint32 addr, val;
	int rv;
#ifdef PCIE_INB_DW
	unsigned long flags = 0;
#endif /* PCIE_INB_DW */

	/* Address could be zero if CONSOLE := 0 in dongle Makefile */
	if (bus->console_addr == 0)
		return BCME_UNSUPPORTED;

	/* Don't allow input if dongle is in reset */
	if (bus->dhd->dongle_reset) {
		return BCME_NOTREADY;
	}

	/* Zero cbuf_index */
	addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf_idx);
	/* handle difference in definition of hnd_log_t in certain branches */
	if (dhd->wlc_ver_major < 14) {
		addr -= (uint32)sizeof(uint32);
	}
	val = htol32(0);
	if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
		goto done;

	/* Write message into cbuf */
	addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf);
	/* handle difference in definition of hnd_log_t in certain branches */
	if (dhd->wlc_ver_major < 14) {
		addr -= sizeof(uint32);
	}
	if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0)
		goto done;

	/* Write length into vcons_in */
	addr = bus->console_addr + OFFSETOF(hnd_cons_t, vcons_in);
	val = htol32(msglen);
	if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
		goto done;

#ifdef PCIE_INB_DW
	/* Use a lock to ensure this tx DEVICE_WAKE + tx H2D_HOST_CONS_INT sequence is
	 * mutually exclusive with the rx D2H_DEV_DS_ENTER_REQ + tx H2D_HOST_DS_ACK sequence.
	 */
	DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
#endif /* PCIE_INB_DW */

	/* generate an interrupt to dongle to indicate that it needs to process cons command */
	dhdpcie_send_mb_data(bus, H2D_HOST_CONS_INT);

#ifdef PCIE_INB_DW
	DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
#endif /* PCIE_INB_DW */
done:
	return rv;
} /* dhd_bus_console_in */
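
/*
 * Summary of the host-to-dongle console handshake implemented above
 * (hnd_cons_t layout per the dongle headers):
 *   1. cbuf_idx <- 0           reset the dongle's console buffer index
 *   2. cbuf     <- msg         copy the command text into the buffer
 *   3. vcons_in <- msglen      publish the length ("data ready" marker)
 *   4. H2D_HOST_CONS_INT       ring the mailbox doorbell so the dongle
 *                              picks up and processes the command
 */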

/**
 * Called on frame reception. The frame was received from the dongle on interface 'ifidx' and is
 * contained in 'pkt'. Processes the rx frame and forwards it up to the netif layer.
 */
void
BCMFASTPATH(dhd_bus_rx_frame)(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count)
{
	dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, 0);
}

/* Acquire/Release bar1_switch_lock only if the chip supports bar1 switching */
#define DHD_BUS_BAR1_SWITCH_LOCK(bus, flags) \
	((bus)->bar1_switch_enab) ? DHD_BAR1_SWITCH_LOCK((bus)->bar1_switch_lock, flags) : \
		BCM_REFERENCE(flags)

#define DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags) \
	((bus)->bar1_switch_enab) ? DHD_BAR1_SWITCH_UNLOCK((bus)->bar1_switch_lock, flags) : \
		BCM_REFERENCE(flags)

/* Init/Deinit bar1_switch_lock only if the chip supports bar1 switching */
static void
dhd_init_bar1_switch_lock(dhd_bus_t *bus)
{
	if (bus->bar1_switch_enab && !bus->bar1_switch_lock) {
		bus->bar1_switch_lock = osl_spin_lock_init(bus->osh);
	}
}

static void
dhd_deinit_bar1_switch_lock(dhd_bus_t *bus)
{
	if (bus->bar1_switch_enab && bus->bar1_switch_lock) {
		osl_spin_lock_deinit(bus->osh, bus->bar1_switch_lock);
		bus->bar1_switch_lock = NULL;
	}
}

/*
 * The bpwindow for any address is the largest multiple of bar1_size that is
 * less than or equal to the address. For example, if addr=0x938fff and
 * bar1_size is 0x400000, the address falls in the 0x800000-0xbfffff window,
 * so the bpwindow to select is 0x800000.
 * To achieve this, mask off the low-order bits of the address (those below
 * bar1_size).
 */
#define DHD_BUS_BAR1_BPWIN(addr, bar1_size) \
	(uint32)((addr) & ~((bar1_size) - 1))

/**
 * dhdpcie_bar1_window_switch_enab
 *
 * Checks whether the chip requires BAR1 window switching based on
 * dongle_ram_base, ramsize and the mapped bar1_size, and sets
 * bus->bar1_switch_enab accordingly
 * @bus: dhd bus context
 *
 */
void
dhdpcie_bar1_window_switch_enab(dhd_bus_t *bus)
{
	uint32 ramstart = bus->dongle_ram_base;
	uint32 ramend = bus->dongle_ram_base + bus->ramsize - 1;
	uint32 bpwinstart = DHD_BUS_BAR1_BPWIN(ramstart, bus->bar1_size);
	uint32 bpwinend = DHD_BUS_BAR1_BPWIN(ramend, bus->bar1_size);

	bus->bar1_switch_enab = FALSE;

	/*
	 * Window switch is needed to access complete BAR1
	 * if bpwinstart and bpwinend are different
	 */
	if (bpwinstart != bpwinend) {
		bus->bar1_switch_enab = TRUE;
	}

	DHD_ERROR(("%s: bar1_switch_enab=%d ramstart=0x%x ramend=0x%x bar1_size=0x%x\n",
		__FUNCTION__, bus->bar1_switch_enab, ramstart, ramend, bus->bar1_size));
}

/**
 * dhdpcie_setbar1win
 *
 * OS-independent function for setting the BAR1 window; it also tracks the
 * current window position.
 *
 * @bus: dhd bus context
 * @addr: new backplane window address for BAR1
 */
static void
dhdpcie_setbar1win(dhd_bus_t *bus, uint32 addr)
{
	OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR1_WIN, 4, addr);
	bus->curr_bar1_win = addr;
}

/**
 * dhdpcie_bus_chkandshift_bpoffset
 *
 * Checks whether the provided address is within the current BAR1 window;
 * if not, shifts the window
 *
 * @bus: dhd bus context
 * @offset: backplane address that the caller wants to access
 *
 * Return: new offset for access
 */
static ulong
dhdpcie_bus_chkandshift_bpoffset(dhd_bus_t *bus, ulong offset)
{

	uint32 bpwin;
#ifdef DHD_EFI
	/* TODO: bar1_size is hardcoded for EFI. Below logic should be
	 * revisited. Also EFI platform should find bar1_size from
	 * EFI Kernel APIs
	 */
	if (!bus->bar1_switch_enab) {
		return offset;
	}
#endif /* DHD_EFI */
	/* Determine BAR1 backplane window using window size
	 * Window address mask should be ~(size - 1)
	 */
	bpwin = DHD_BUS_BAR1_BPWIN(offset, bus->bar1_size);

	if (bpwin != bus->curr_bar1_win) {
		DHD_PCIE_INFO(("%s: move BAR1 window curr_bar1_win=0x%x bpwin=0x%x offset=0x%lx\n",
			__FUNCTION__, bus->curr_bar1_win, bpwin, offset));
		/* Move BAR1 window */
		dhdpcie_setbar1win(bus, bpwin);
	}

	return offset - bpwin;
}
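
/*
 * Worked example of the BAR1 window math above, compiled out; the values are
 * hypothetical (a 4 MB BAR1 mapping).
 */
#if 0
static void dhd_bar1_window_example(dhd_bus_t *bus)
{
	uint32 bar1_size = 0x400000;	/* assumed mapped BAR1 size */
	ulong addr = 0x938fff;		/* backplane address to access */
	uint32 bpwin = DHD_BUS_BAR1_BPWIN(addr, bar1_size);	/* -> 0x800000 */
	ulong off = addr - bpwin;	/* -> 0x138fff, offset inside the window */

	dhdpcie_setbar1win(bus, bpwin);	/* select the 0x800000-0xbfffff window */
	(void)off;			/* an access would then use bus->tcm + off */
}
#endif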

/** 'offset' is a backplane address */
void
dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data)
{
	ulong flags = 0;

	if (bus->is_linkdown) {
		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
		return;
	}

	DHD_BUS_BAR1_SWITCH_LOCK(bus, flags);

	offset = dhdpcie_bus_chkandshift_bpoffset(bus, offset);

#if defined(linux) || defined(LINUX)
	W_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset), data);
#else
	*(volatile uint8 *)(bus->tcm + offset) = (uint8)data;
#endif /* linux || LINUX */

	DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags);
}

void
dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data)
{
	ulong flags = 0;

	if (bus->is_linkdown) {
		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
		return;
	}

	DHD_BUS_BAR1_SWITCH_LOCK(bus, flags);

	offset = dhdpcie_bus_chkandshift_bpoffset(bus, offset);

#if defined(linux) || defined(LINUX)
	W_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset), data);
#else
	*(volatile uint16 *)(bus->tcm + offset) = (uint16)data;
#endif /* linux || LINUX */

	DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags);
}

void
dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data)
{
	ulong flags = 0;

	if (bus->is_linkdown) {
		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
		return;
	}

	DHD_BUS_BAR1_SWITCH_LOCK(bus, flags);

	offset = dhdpcie_bus_chkandshift_bpoffset(bus, offset);

#if defined(linux) || defined(LINUX)
	W_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset), data);
#else
	*(volatile uint32 *)(bus->tcm + offset) = (uint32)data;
#endif /* linux || LINUX */

	DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags);
}

#ifdef DHD_SUPPORT_64BIT
void
dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data)
{
	ulong flags = 0;

	if (bus->is_linkdown) {
		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
		return;
	}

	DHD_BUS_BAR1_SWITCH_LOCK(bus, flags);

	offset = dhdpcie_bus_chkandshift_bpoffset(bus, offset);

#if defined(linux) || defined(LINUX)
	W_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset), data);
#else
	*(volatile uint64 *)(bus->tcm + offset) = (uint64)data;
#endif /* linux || LINUX */

	DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags);
}
#endif /* DHD_SUPPORT_64BIT */

uint8
dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset)
{
	volatile uint8 data;
	ulong flags = 0;

	if (bus->is_linkdown) {
		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
		data = (uint8)-1;
		return data;
	}

	DHD_BUS_BAR1_SWITCH_LOCK(bus, flags);

	offset = dhdpcie_bus_chkandshift_bpoffset(bus, offset);

#if defined(linux) || defined(LINUX)
	data = R_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset));
#else
	data = *(volatile uint8 *)(bus->tcm + offset);
#endif /* linux || LINUX */

	DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags);
	return data;
}

uint16
dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset)
{
	volatile uint16 data;
	ulong flags = 0;

	if (bus->is_linkdown) {
		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
		data = (uint16)-1;
		return data;
	}

	DHD_BUS_BAR1_SWITCH_LOCK(bus, flags);

	offset = dhdpcie_bus_chkandshift_bpoffset(bus, offset);

#if defined(linux) || defined(LINUX)
	data = R_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset));
#else
	data = *(volatile uint16 *)(bus->tcm + offset);
#endif /* linux || LINUX */

	DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags);
	return data;
}

uint32
dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset)
{
	volatile uint32 data;
	ulong flags = 0;

	if (bus->is_linkdown) {
		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
		data = (uint32)-1;
		return data;
	}

	DHD_BUS_BAR1_SWITCH_LOCK(bus, flags);

	offset = dhdpcie_bus_chkandshift_bpoffset(bus, offset);

#if defined(linux) || defined(LINUX)
	data = R_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset));
#else
	data = *(volatile uint32 *)(bus->tcm + offset);
#endif /* linux || LINUX */

	DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags);
	return data;
}

#ifdef DHD_SUPPORT_64BIT
uint64
dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset)
{
	volatile uint64 data;
	ulong flags = 0;

	if (bus->is_linkdown) {
		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
		data = (uint64)-1;
		return data;
	}

	DHD_BUS_BAR1_SWITCH_LOCK(bus, flags);

	offset = dhdpcie_bus_chkandshift_bpoffset(bus, offset);

#if defined(linux) || defined(LINUX)
	data = R_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset));
#else
	data = *(volatile uint64 *)(bus->tcm + offset);
#endif /* linux || LINUX */

	DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags);
	return data;
}
#endif /* DHD_SUPPORT_64BIT */

void
dhdpcie_update_ring_ptrs_in_tcm_with_req_pwr(dhd_bus_t *bus, void *data, uint8 type,
	uint16 ringid, bool read, bool req_pwr)
{
	ulong addr;

	if (type == RING_WR_UPD) {
		addr = bus->ring_sh[ringid].ring_state_w;
	} else if (type == RING_RD_UPD) {
		addr = bus->ring_sh[ringid].ring_state_r;
	} else {
		DHD_ERROR(("%s: invalid type:%d\n", __FUNCTION__, type));
		return;
	}

	if (req_pwr && MULTIBP_ENAB(bus->sih)) {
		dhd_bus_pcie_pwr_req(bus);
	}

	if (read) {
		/* Read */
		*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
	} else {
		/* Write */
		dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
	}

	if (req_pwr && MULTIBP_ENAB(bus->sih)) {
		dhd_bus_pcie_pwr_req_clear(bus);
	}
}

void
dhdpcie_update_ring_ptrs_in_tcm(dhd_bus_t *bus, void *data, uint8 type, uint16 ringid,
	bool read)
{
#ifdef PCIE_INB_DW
	if (INBAND_DW_ENAB(bus)) {
		ulong flags_ds;
		DHD_BUS_DONGLE_DS_LOCK(bus->dongle_ds_lock, flags_ds);
		dhdpcie_update_ring_ptrs_in_tcm_with_req_pwr(bus, data, type, ringid, read,
			bus->dongle_in_deepsleep);
		DHD_BUS_DONGLE_DS_UNLOCK(bus->dongle_ds_lock, flags_ds);
	} else
#endif /* PCIE_INB_DW */
	{
		/* Request power explicitly */
		dhdpcie_update_ring_ptrs_in_tcm_with_req_pwr(bus, data, type, ringid, read, TRUE);
	}
}

/** A snippet of dongle memory is shared between host and dongle */
void
dhd_bus_cmn_writeshared(dhd_bus_t *bus, void *data, uint32 len, uint8 type, uint16 ringid)
{
	uint64 long_data;
	ulong addr; /* dongle address */

	DHD_PCIE_INFO(("%s: writing to dongle type %d len %d\n", __FUNCTION__, type, len));

	if (bus->is_linkdown) {
		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
		return;
	}

	/*
	 * Use the explicit TCM ring pointer update functions when DMA indices are
	 * not enabled, as the backplane power request calls cause TPUT drops
	 */
	if (!(bus->dhd->dma_d2h_ring_upd_support || bus->dhd->dma_h2d_ring_upd_support)) {
		if ((type == RING_WR_UPD) || (type == RING_RD_UPD)) {
			dhdpcie_update_ring_ptrs_in_tcm(bus, data, type, ringid, FALSE);
			return;
		}
	}

	if (MULTIBP_ENAB(bus->sih)) {
		dhd_bus_pcie_pwr_req(bus);
	}
	switch (type) {
		case RING_WR_UPD:
			addr = bus->ring_sh[ringid].ring_state_w;
			dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
			break;

		case RING_RD_UPD:
			addr = bus->ring_sh[ringid].ring_state_r;
			dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
			break;

		case D2H_DMA_SCRATCH_BUF:
			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_dma_scratch_buffer);
			long_data = HTOL64(*(uint64 *)data);
			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
			if (dhd_msg_level & DHD_INFO_VAL) {
				prhex(__FUNCTION__, data, len);
			}
			break;

		case D2H_DMA_SCRATCH_BUF_LEN:
			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_dma_scratch_buffer_len);
			dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
			if (dhd_msg_level & DHD_INFO_VAL) {
				prhex(__FUNCTION__, data, len);
			}
			break;

		case H2D_DMA_INDX_WR_BUF:
			long_data = HTOL64(*(uint64 *)data);
			addr = DHD_RING_INFO_MEMBER_ADDR(bus, h2d_w_idx_hostaddr);
			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
			if (dhd_msg_level & DHD_INFO_VAL) {
				prhex(__FUNCTION__, data, len);
			}
			break;

		case H2D_DMA_INDX_RD_BUF:
			long_data = HTOL64(*(uint64 *)data);
			addr = DHD_RING_INFO_MEMBER_ADDR(bus, h2d_r_idx_hostaddr);
			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
			if (dhd_msg_level & DHD_INFO_VAL) {
				prhex(__FUNCTION__, data, len);
			}
			break;

		case D2H_DMA_INDX_WR_BUF:
			long_data = HTOL64(*(uint64 *)data);
			addr = DHD_RING_INFO_MEMBER_ADDR(bus, d2h_w_idx_hostaddr);
			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
			if (dhd_msg_level & DHD_INFO_VAL) {
				prhex(__FUNCTION__, data, len);
			}
			break;

		case D2H_DMA_INDX_RD_BUF:
			long_data = HTOL64(*(uint64 *)data);
			addr = DHD_RING_INFO_MEMBER_ADDR(bus, d2h_r_idx_hostaddr);
			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
			if (dhd_msg_level & DHD_INFO_VAL) {
				prhex(__FUNCTION__, data, len);
			}
			break;

		case H2D_IFRM_INDX_WR_BUF:
			long_data = HTOL64(*(uint64 *)data);
			addr = DHD_RING_INFO_MEMBER_ADDR(bus, ifrm_w_idx_hostaddr);
			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
			if (dhd_msg_level & DHD_INFO_VAL) {
				prhex(__FUNCTION__, data, len);
			}
			break;

		case RING_ITEM_LEN:
			addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, len_items);
			dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
			break;

		case RING_MAX_ITEMS:
			addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, max_item);
			dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
			break;

		case RING_BUF_ADDR:
			long_data = HTOL64(*(uint64 *)data);
			addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, base_addr);
			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *) &long_data, len);
			if (dhd_msg_level & DHD_INFO_VAL) {
				prhex(__FUNCTION__, data, len);
			}
			break;

		case D2H_MB_DATA:
			addr = bus->d2h_mb_data_ptr_addr;
			dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
			break;

		case H2D_MB_DATA:
			addr = bus->h2d_mb_data_ptr_addr;
			dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
			break;

		case HOST_API_VERSION:
			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_cap);
			dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
			break;

		case DNGL_TO_HOST_TRAP_ADDR:
			long_data = HTOL64(*(uint64 *)data);
			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_trap_addr);
			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *) &long_data, len);
			DHD_PCIE_INFO(("Wrote trap addr:0x%x\n", (uint32) HTOL32(*(uint32 *)data)));
			break;

#ifdef D2H_MINIDUMP
		case DNGL_TO_HOST_TRAP_ADDR_LEN:
			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, device_trap_debug_buffer_len);
			dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
			break;
#endif /* D2H_MINIDUMP */

		case HOST_SCB_ADDR:
			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_scb_addr);
#ifdef DHD_SUPPORT_64BIT
			dhdpcie_bus_wtcm64(bus, addr, (uint64) HTOL64(*(uint64 *)data));
#else /* !DHD_SUPPORT_64BIT */
			dhdpcie_bus_wtcm32(bus, addr, *((uint32*)data));
#endif /* DHD_SUPPORT_64BIT */
			DHD_PCIE_INFO(("Wrote host_scb_addr:0x%x\n",
				(uint32) HTOL32(*(uint32 *)data)));
			break;

		default:
			break;
	}
	if (MULTIBP_ENAB(bus->sih)) {
		dhd_bus_pcie_pwr_req_clear(bus);
	}
} /* dhd_bus_cmn_writeshared */
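
/*
 * Illustrative sketch only, compiled out: the H2D_DMA_INDX_WR_BUF case above
 * publishes a 64-bit host buffer address to the shared area by first
 * converting it to the dongle's little-endian layout. The address value is
 * hypothetical.
 */
#if 0
static void dhd_publish_hostaddr_example(dhd_bus_t *bus)
{
	uint64 haddr = 0x00000001234ab000ULL;	/* example DMA-able host address */
	uint64 long_data = HTOL64(haddr);	/* shared area is little-endian */

	dhdpcie_bus_membytes(bus, TRUE,
		DHD_RING_INFO_MEMBER_ADDR(bus, h2d_w_idx_hostaddr),
		(uint8 *)&long_data, sizeof(long_data));
}
#endif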

/** A snippet of dongle memory is shared between host and dongle */
void
dhd_bus_cmn_readshared(dhd_bus_t *bus, void* data, uint8 type, uint16 ringid)
{
	ulong addr; /* dongle address */

	/*
	 * Use the explicit TCM ring pointer update functions when DMA indices are
	 * not enabled, as the backplane power request calls cause TPUT drops
	 */
	if (!(bus->dhd->dma_d2h_ring_upd_support || bus->dhd->dma_h2d_ring_upd_support)) {
		if ((type == RING_WR_UPD) || (type == RING_RD_UPD)) {
			dhdpcie_update_ring_ptrs_in_tcm(bus, data, type, ringid, TRUE);
			return;
		}
	}

	if (MULTIBP_ENAB(bus->sih)) {
		dhd_bus_pcie_pwr_req(bus);
	}
	switch (type) {
		case RING_WR_UPD:
			addr = bus->ring_sh[ringid].ring_state_w;
			*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
			break;

		case RING_RD_UPD:
			addr = bus->ring_sh[ringid].ring_state_r;
			*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
			break;

		case TOTAL_LFRAG_PACKET_CNT:
			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, total_lfrag_pkt_cnt);
			*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
			break;

		case H2D_MB_DATA:
			addr = bus->h2d_mb_data_ptr_addr;
			*(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr));
			break;

		case D2H_MB_DATA:
			addr = bus->d2h_mb_data_ptr_addr;
			*(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr));
			break;

		case MAX_HOST_RXBUFS:
			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, max_host_rxbufs);
			*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
			break;

		case HOST_SCB_ADDR:
			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_scb_size);
			*(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr));
			break;

		default:
			break;
	}
	if (MULTIBP_ENAB(bus->sih)) {
		dhd_bus_pcie_pwr_req_clear(bus);
	}
}

uint32 dhd_bus_get_sharedflags(dhd_bus_t *bus)
{
	return ((pciedev_shared_t*)bus->pcie_sh)->flags;
}

void dhd_prot_clearcounts(dhd_pub_t *dhd);

void
dhd_bus_clearcounts(dhd_pub_t *dhdp)
{
	dhd_prot_clearcounts(dhdp);
}

/**
 * @param params    input buffer, NULL for 'set' operation.
 * @param plen      length of 'params' buffer, 0 for 'set' operation.
 * @param arg       output buffer
 */
int
dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
                 void *params, uint plen, void *arg, uint len, bool set)
{
	dhd_bus_t *bus = dhdp->bus;
	const bcm_iovar_t *vi = NULL;
	int bcmerror = BCME_UNSUPPORTED;
	uint val_size;
	uint32 actionid;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	ASSERT(name);
	if (!name)
		return BCME_BADARG;

	/* Get MUST have return space */
	ASSERT(set || (arg && len));
	if (!(set || (arg && len)))
		return BCME_BADARG;

	/* Set does NOT take qualifiers */
	ASSERT(!set || (!params && !plen));
	if (!(!set || (!params && !plen)))
		return BCME_BADARG;

	DHD_PCIE_INFO(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
	         name, (set ? "set" : "get"), len, plen));

	/* Look up var locally; if not found pass to host driver */
	if ((vi = bcm_iovar_lookup(dhdpcie_iovars, name)) == NULL) {
		goto exit;
	}

	if (MULTIBP_ENAB(bus->sih)) {
		if (vi->flags & DHD_IOVF_PWRREQ_BYPASS) {
			DHD_ERROR(("%s: Bypass pwr request\n", __FUNCTION__));
		} else {
			dhd_bus_pcie_pwr_req(bus);
		}
	}

	/* set up 'params' pointer in case this is a set command so that
	 * the convenience int and bool code can be common to set and get
	 */
	if (params == NULL) {
		params = arg;
		plen = len;
	}

	if (vi->type == IOVT_VOID)
		val_size = 0;
	else if (vi->type == IOVT_BUFFER)
		val_size = len;
	else
		/* all other types are integer sized */
		val_size = sizeof(int);

	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
	bcmerror = dhdpcie_bus_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size);

exit:
	/* In DEVRESET_QUIESCE/DEVRESET_ON, the dongle re-attach initializes the
	 * pwr_req_ref count to 0, which causes a pwr_req_ref count mismatch (and
	 * a hang) in the power request clear function.
	 * In this case, bypass the pwr req clear.
	 */
	if (bcmerror == BCME_DNGL_DEVRESET) {
		bcmerror = BCME_OK;
	} else {
		if (MULTIBP_ENAB(bus->sih)) {
			if (vi != NULL) {
				if (vi->flags & DHD_IOVF_PWRREQ_BYPASS) {
					DHD_ERROR(("%s: Bypass pwr request clear\n", __FUNCTION__));
				} else {
					dhd_bus_pcie_pwr_req_clear(bus);
				}
			}
		}
	}
	return bcmerror;
} /* dhd_bus_iovar_op */

#ifdef BCM_BUZZZ
#include <bcm_buzzz.h>

int
dhd_buzzz_dump_cntrs(char *p, uint32 *core, uint32 *log,
	const int num_counters)
{
	int bytes = 0;
	uint32 ctr;
	uint32 curr[BCM_BUZZZ_COUNTERS_MAX], prev[BCM_BUZZZ_COUNTERS_MAX];
	uint32 delta[BCM_BUZZZ_COUNTERS_MAX];

	/* Compute elapsed counter values per counter event type */
	for (ctr = 0U; ctr < num_counters; ctr++) {
		prev[ctr] = core[ctr];
		curr[ctr] = *log++;
		core[ctr] = curr[ctr];  /* saved for next log */

		if (curr[ctr] < prev[ctr])
			delta[ctr] = curr[ctr] + (~0U - prev[ctr]);
		else
			delta[ctr] = (curr[ctr] - prev[ctr]);

		bytes += sprintf(p + bytes, "%12u ", delta[ctr]);
	}

	return bytes;
}
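
/*
 * Worked example of the unsigned wraparound handling above. If a 32-bit
 * counter wrapped between samples (curr < prev), the elapsed count is taken
 * as the distance from prev to the wrap point plus the new value, e.g.
 * prev = 0xFFFFFFF0, curr = 0x10 gives
 * delta = 0x10 + (~0U - 0xFFFFFFF0) = 0x10 + 0xF = 0x1F.
 */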

typedef union cm3_cnts { /* export this in bcm_buzzz.h */
	uint32 u32;
	uint8  u8[4];
	struct {
		uint8 cpicnt;
		uint8 exccnt;
		uint8 sleepcnt;
		uint8 lsucnt;
	};
} cm3_cnts_t;

int
dhd_bcm_buzzz_dump_cntrs6(char *p, uint32 *core, uint32 *log)
{
	int bytes = 0;

	uint32 cyccnt, instrcnt;
	cm3_cnts_t cm3_cnts;
	uint8 foldcnt;

	{   /* 32bit cyccnt */
		uint32 curr, prev, delta;
		prev = core[0]; curr = *log++; core[0] = curr;
		if (curr < prev)
			delta = curr + (~0U - prev);
		else
			delta = (curr - prev);

		bytes += sprintf(p + bytes, "%12u ", delta);
		cyccnt = delta;
	}

	{	/* Extract the 4 cnts: cpi, exc, sleep and lsu */
		int i;
		uint8 max8 = ~0;
		cm3_cnts_t curr, prev, delta;
		prev.u32 = core[1]; curr.u32 = *log++; core[1] = curr.u32;
		for (i = 0; i < 4; i++) {
			if (curr.u8[i] < prev.u8[i])
				delta.u8[i] = curr.u8[i] + (max8 - prev.u8[i]);
			else
				delta.u8[i] = (curr.u8[i] - prev.u8[i]);
			bytes += sprintf(p + bytes, "%4u ", delta.u8[i]);
		}
		cm3_cnts.u32 = delta.u32;
	}

	{   /* Extract the foldcnt from arg0 */
		uint8 curr, prev, delta, max8 = ~0;
		bcm_buzzz_arg0_t arg0; arg0.u32 = *log;
		prev = core[2]; curr = arg0.klog.cnt; core[2] = curr;
		if (curr < prev)
			delta = curr + (max8 - prev);
		else
			delta = (curr - prev);
		bytes += sprintf(p + bytes, "%4u ", delta);
		foldcnt = delta;
	}

	instrcnt = cyccnt - (cm3_cnts.u8[0] + cm3_cnts.u8[1] + cm3_cnts.u8[2]
		                 + cm3_cnts.u8[3]) + foldcnt;
	if (instrcnt > 0xFFFFFF00)
		bytes += sprintf(p + bytes, "[%10s] ", "~");
	else
		bytes += sprintf(p + bytes, "[%10u] ", instrcnt);
	return bytes;
}

int
dhd_buzzz_dump_log(char *p, uint32 *core, uint32 *log, bcm_buzzz_t *buzzz)
{
	int bytes = 0;
	bcm_buzzz_arg0_t arg0;
	static uint8 *fmt[] = BCM_BUZZZ_FMT_STRINGS;

	if (buzzz->counters == 6) {
		bytes += dhd_bcm_buzzz_dump_cntrs6(p, core, log);
		log += 2; /* 32bit cyccnt + (4 x 8bit) CM3 */
	} else {
		bytes += dhd_buzzz_dump_cntrs(p, core, log, buzzz->counters);
		log += buzzz->counters; /* (N x 32bit) CR4=3, CA7=4 */
	}

	/* Dump the logged arguments using the registered formats */
	arg0.u32 = *log++;

	switch (arg0.klog.args) {
		case 0:
			bytes += sprintf(p + bytes, fmt[arg0.klog.id]);
			break;
		case 1:
		{
			uint32 arg1 = *log++;
			bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1);
			break;
		}
		case 2:
		{
			uint32 arg1, arg2;
			arg1 = *log++; arg2 = *log++;
			bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2);
			break;
		}
		case 3:
		{
			uint32 arg1, arg2, arg3;
			arg1 = *log++; arg2 = *log++; arg3 = *log++;
			bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3);
			break;
		}
		case 4:
		{
			uint32 arg1, arg2, arg3, arg4;
			arg1 = *log++; arg2 = *log++;
			arg3 = *log++; arg4 = *log++;
			bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3, arg4);
			break;
		}
		default:
			printf("%s: Maximum of four arguments supported\n", __FUNCTION__);
			break;
	}

	bytes += sprintf(p + bytes, "\n");

	return bytes;
}
6773 
6774 void dhd_buzzz_dump(bcm_buzzz_t *buzzz_p, void *buffer_p, char *p)
6775 {
6776 	int i;
6777 	uint32 total, part1, part2, log_sz, core[BCM_BUZZZ_COUNTERS_MAX];
6778 	void * log;
6779 
6780 	for (i = 0; i < BCM_BUZZZ_COUNTERS_MAX; i++) {
6781 		core[i] = 0;
6782 	}
6783 
6784 	log_sz = buzzz_p->log_sz;
6785 
6786 	part1 = ((uint32)buzzz_p->cur - (uint32)buzzz_p->log) / log_sz;
6787 
6788 	if (buzzz_p->wrap == TRUE) {
6789 		part2 = ((uint32)buzzz_p->end - (uint32)buzzz_p->cur) / log_sz;
6790 		total = (buzzz_p->buffer_sz - BCM_BUZZZ_LOGENTRY_MAXSZ) / log_sz;
6791 	} else {
6792 		part2 = 0U;
6793 		total = buzzz_p->count;
6794 	}
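
	/* Ring layout implied by the computation above (sketch):
	 *
	 *	log ............ cur ............ end
	 *	|<--- part1 --->|<---- part2 --->|
	 *
	 * Without wrap, only the part1 entries in [log, cur) are valid. With
	 * wrap, the oldest entries are the part2 in [cur, end), so they are
	 * printed first below, followed by part1 from the buffer start.
	 */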
6795 
6796 	if (total == 0U) {
6797 		printf("%s: bcm_buzzz_dump total<%u> done\n", __FUNCTION__, total);
6798 		return;
6799 	} else {
6800 		printf("%s: bcm_buzzz_dump total<%u> : part2<%u> + part1<%u>\n", __FUNCTION__,
6801 		       total, part2, part1);
6802 	}
6803 
6804 	if (part2) {   /* with wrap */
6805 		log = (void*)((size_t)buffer_p + (buzzz_p->cur - buzzz_p->log));
6806 		while (part2--) {   /* from cur to end : part2 */
6807 			p[0] = '\0';
6808 			dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
6809 			printf("%s", p);
6810 			log = (void*)((size_t)log + buzzz_p->log_sz);
6811 		}
6812 	}
6813 
6814 	log = (void*)buffer_p;
6815 	while (part1--) {
6816 		p[0] = '\0';
6817 		dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
6818 		printf("%s", p);
6819 		log = (void*)((size_t)log + buzzz_p->log_sz);
6820 	}
6821 
6822 	printf("%s: bcm_buzzz_dump done.\n", __FUNCTION__);
6823 }
6824 
6825 int dhd_buzzz_dump_dngl(dhd_bus_t *bus)
6826 {
6827 	bcm_buzzz_t * buzzz_p = NULL;
6828 	void * buffer_p = NULL;
6829 	char * page_p = NULL;
6830 	pciedev_shared_t *sh;
6831 	int ret = 0;
6832 
6833 	if (bus->dhd->busstate != DHD_BUS_DATA) {
6834 		return BCME_UNSUPPORTED;
6835 	}
6836 	if ((page_p = (char *)MALLOC(bus->dhd->osh, 4096)) == NULL) {
6837 		printf("%s: Page memory allocation failure\n", __FUNCTION__);
6838 		goto done;
6839 	}
6840 	if ((buzzz_p = MALLOC(bus->dhd->osh, sizeof(bcm_buzzz_t))) == NULL) {
6841 		printf("%s: BCM BUZZZ memory allocation failure\n", __FUNCTION__);
6842 		goto done;
6843 	}
6844 
6845 	ret = dhdpcie_readshared(bus);
6846 	if (ret < 0) {
6847 		DHD_ERROR(("%s: Shared area read failed\n", __FUNCTION__));
6848 		goto done;
6849 	}
6850 
6851 	sh = bus->pcie_sh;
6852 
6853 	DHD_INFO(("%s buzzz:%08x\n", __FUNCTION__, sh->buzz_dbg_ptr));
6854 
6855 	if (sh->buzz_dbg_ptr != 0U) {	/* Fetch and display dongle BUZZZ Trace */
6856 
6857 		dhdpcie_bus_membytes(bus, FALSE, (ulong)sh->buzz_dbg_ptr,
6858 		                     (uint8 *)buzzz_p, sizeof(bcm_buzzz_t));
6859 
6860 		printf("BUZZZ[0x%08x]: log<0x%08x> cur<0x%08x> end<0x%08x> "
6861 			"count<%u> status<%u> wrap<%u>\n"
6862 			"cpu<0x%02X> counters<%u> group<%u> buffer_sz<%u> log_sz<%u>\n",
6863 			(int)sh->buzz_dbg_ptr,
6864 			(int)buzzz_p->log, (int)buzzz_p->cur, (int)buzzz_p->end,
6865 			buzzz_p->count, buzzz_p->status, buzzz_p->wrap,
6866 			buzzz_p->cpu_idcode, buzzz_p->counters, buzzz_p->group,
6867 			buzzz_p->buffer_sz, buzzz_p->log_sz);
6868 
6869 		if (buzzz_p->count == 0) {
6870 			printf("%s: Empty dongle BUZZZ trace\n\n", __FUNCTION__);
6871 			goto done;
6872 		}
6873 
6874 		/* Allocate memory for trace buffer and format strings */
6875 		buffer_p = MALLOC(bus->dhd->osh, buzzz_p->buffer_sz);
6876 		if (buffer_p == NULL) {
6877 			printf("%s: Buffer memory allocation failure\n", __FUNCTION__);
6878 			goto done;
6879 		}
6880 
6881 		/* Fetch the trace. format strings are exported via bcm_buzzz.h */
6882 		dhdpcie_bus_membytes(bus, FALSE, (uint32)buzzz_p->log,   /* Trace */
6883 		                     (uint8 *)buffer_p, buzzz_p->buffer_sz);
6884 
6885 		/* Process and display the trace using formatted output */
6886 
6887 		{
6888 			int ctr;
6889 			for (ctr = 0; ctr < buzzz_p->counters; ctr++) {
6890 				printf("<Evt[%02X]> ", buzzz_p->eventid[ctr]);
6891 			}
6892 			printf("<code execution point>\n");
6893 		}
6894 
6895 		dhd_buzzz_dump(buzzz_p, buffer_p, page_p);
6896 
6897 		printf("%s: ----- End of dongle BCM BUZZZ Trace -----\n\n", __FUNCTION__);
6898 
6899 		MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz); buffer_p = NULL;
6900 	}
6901 
6902 done:
6903 
6904 	if (page_p)   MFREE(bus->dhd->osh, page_p, 4096);
6905 	if (buffer_p) MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz); /* before buzzz_p: its size is read from buzzz_p */
6906 	if (buzzz_p)  MFREE(bus->dhd->osh, buzzz_p, sizeof(bcm_buzzz_t));
6907 
6908 	return BCME_OK;
6909 }
6910 #endif /* BCM_BUZZZ */
6911 
6912 #define PCIE_GEN2(sih) ((BUSTYPE((sih)->bustype) == PCI_BUS) &&	\
6913 	((sih)->buscoretype == PCIE2_CORE_ID))
6914 #ifdef DHD_PCIE_REG_ACCESS
6915 static bool
6916 pcie2_mdiosetblock(dhd_bus_t *bus, uint blk)
6917 {
6918 	uint mdiodata, mdioctrl, i = 0;
6919 	uint pcie_serdes_spinwait = 200;
6920 
6921 	mdioctrl = MDIOCTL2_DIVISOR_VAL | (0x1F << MDIOCTL2_REGADDR_SHF);
6922 	mdiodata = (blk << MDIODATA2_DEVADDR_SHF) | MDIODATA2_DONE;
6923 
6924 	si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_CONTROL, ~0, mdioctrl);
6925 	si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA, ~0, mdiodata);
6926 
6927 	OSL_DELAY(10);
6928 	/* retry till the transaction is complete */
6929 	while (i < pcie_serdes_spinwait) {
6930 		uint mdioctrl_read = si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA,
6931 			0, 0);
6932 		if (!(mdioctrl_read & MDIODATA2_DONE)) {
6933 			break;
6934 		}
6935 		OSL_DELAY(1000);
6936 		i++;
6937 	}
6938 
6939 	if (i >= pcie_serdes_spinwait) {
6940 		DHD_ERROR(("pcie2_mdiosetblock: timed out\n"));
6941 		return FALSE;
6942 	}
6943 
6944 	return TRUE;
6945 }
6946 #endif /* DHD_PCIE_REG_ACCESS */
6947 
6948 static void
6949 dhdpcie_enum_reg_init(dhd_bus_t *bus)
6950 {
6951 	/* initialize Function control register (clear bit 4) to HW init value */
6952 	si_corereg(bus->sih, bus->sih->buscoreidx,
6953 		OFFSETOF(sbpcieregs_t, ftn_ctrl.control), ~0,
6954 		PCIE_CPLCA_ENABLE | PCIE_DLY_PERST_TO_COE);
6955 
6956 	/* clear IntMask */
6957 	si_corereg(bus->sih, bus->sih->buscoreidx,
6958 		OFFSETOF(sbpcieregs_t, ftn_ctrl.intmask), ~0, 0);
6959 	/* clear IntStatus */
6960 	si_corereg(bus->sih, bus->sih->buscoreidx,
6961 		OFFSETOF(sbpcieregs_t, ftn_ctrl.intstatus), ~0,
6962 		si_corereg(bus->sih, bus->sih->buscoreidx,
6963 			OFFSETOF(sbpcieregs_t, ftn_ctrl.intstatus), 0, 0));
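
	/* Note: for the *_intstatus registers above and below, the inner
	 * si_corereg() read feeds the current status back in as the write
	 * value; these registers appear to be write-1-to-clear, so writing
	 * back the value just read acks exactly the bits that were pending.
	 */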
6964 
6965 	/* clear MSIVector */
6966 	si_corereg(bus->sih, bus->sih->buscoreidx,
6967 		OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_vector), ~0, 0);
6968 	/* clear MSIIntMask */
6969 	si_corereg(bus->sih, bus->sih->buscoreidx,
6970 		OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intmask), ~0, 0);
6971 	/* clear MSIIntStatus */
6972 	si_corereg(bus->sih, bus->sih->buscoreidx,
6973 		OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intstatus), ~0,
6974 		si_corereg(bus->sih, bus->sih->buscoreidx,
6975 			OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intstatus), 0, 0));
6976 
6977 	/* clear PowerIntMask */
6978 	si_corereg(bus->sih, bus->sih->buscoreidx,
6979 		OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intmask), ~0, 0);
6980 	/* clear PowerIntStatus */
6981 	si_corereg(bus->sih, bus->sih->buscoreidx,
6982 		OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intstatus), ~0,
6983 		si_corereg(bus->sih, bus->sih->buscoreidx,
6984 			OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intstatus), 0, 0));
6985 
6986 	/* clear MailboxIntMask */
6987 	si_corereg(bus->sih, bus->sih->buscoreidx,
6988 		OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intmask), ~0, 0);
6989 	/* clear MailboxInt */
6990 	si_corereg(bus->sih, bus->sih->buscoreidx,
6991 		OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intstatus), ~0,
6992 		si_corereg(bus->sih, bus->sih->buscoreidx,
6993 			OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intstatus), 0, 0));
6994 }
6995 
6996 int
6997 dhd_bus_perform_flr(dhd_bus_t *bus, bool force_fail)
6998 {
6999 	uint flr_capab;
7000 	uint val;
7001 	int retry = 0;
7002 
7003 	DHD_ERROR(("******** Perform FLR ********\n"));
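
	/* FLR sequence implemented below (summary added for clarity):
	 *  1. check the FLR capability bit (bit 28) of PCIE_CFG_DEVICE_CAPABILITY;
	 *  2. save the PCIe config space;
	 *  3. set the FLR bit (bit 15) of PCIE_CFG_DEVICE_CONTROL and wait
	 *     DHD_FUNCTION_LEVEL_RESET_DELAY msec;
	 *  4. clear the FLR bit, then poll the reset status bit (bit 13) of
	 *     PCIE_CFG_SUBSYSTEM_CONTROL until it clears;
	 *  5. restore the PCIe config space.
	 */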
7004 
7005 	/* Kernel Panic for 4378Ax during traptest/devreset4 reload case:
7006 	 * For 4378Ax, enum registers will not be reset with FLR (producer index WAR).
7007 	 * So, the MailboxIntMask is left as 0xffff during fw boot-up,
7008 	 * and the fw trap handling during fw boot causes Kernel Panic.
7009 	 * Jira: SWWLAN-212578: [4378A0 PCIe DVT] :
7010 	 *       Kernel Panic seen in F0 FLR with BT Idle/Traffic/DMA
7011 	 */
7012 	if (bus->sih && PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) {
7013 		if (bus->pcie_mailbox_mask != 0) {
7014 			dhdpcie_bus_intr_disable(bus);
7015 		}
7016 		/* initialize F0 enum registers before FLR for rev66/67 */
7017 		dhdpcie_enum_reg_init(bus);
7018 	}
7019 
7020 	/* Read PCIE_CFG_DEVICE_CAPABILITY bit 28 to check FLR capability */
7021 	val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CAPABILITY, sizeof(val));
7022 	flr_capab =  val & (1 << PCIE_FLR_CAPAB_BIT);
7023 	DHD_INFO(("Read Device Capability: reg=0x%x read val=0x%x flr_capab=0x%x\n",
7024 		PCIE_CFG_DEVICE_CAPABILITY, val, flr_capab));
7025 	if (!flr_capab) {
7026 		DHD_ERROR(("Chip does not support FLR\n"));
7027 		return BCME_UNSUPPORTED;
7028 	}
7029 
7030 #if defined(NDIS) && defined(BT_OVER_PCIE)
7031 	dhd_bwm_bt_quiesce(bus);
7032 #endif
7033 
7034 	/* Save pcie config space */
7035 	DHD_INFO(("Save Pcie Config Space\n"));
7036 	DHD_PCIE_CONFIG_SAVE(bus);
7037 
7038 	/* Set bit 15 of PCIE_CFG_DEVICE_CONTROL */
7039 	DHD_INFO(("Set PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
7040 		PCIE_FUNCTION_LEVEL_RESET_BIT, PCIE_CFG_DEVICE_CONTROL));
7041 	val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val));
7042 	DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
7043 	val = val | (1 << PCIE_FUNCTION_LEVEL_RESET_BIT);
7044 	DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
7045 	OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val), val);
7046 
7047 	/* wait for DHD_FUNCTION_LEVEL_RESET_DELAY msec */
7048 #ifdef BCMSLTGT
7049 	DHD_ERROR(("Delay of %d msec\n", DHD_FUNCTION_LEVEL_RESET_DELAY * htclkratio));
7050 #else
7051 	DHD_INFO(("Delay of %d msec\n", DHD_FUNCTION_LEVEL_RESET_DELAY));
7052 #endif /* BCMSLTGT */
7053 
7054 	CAN_SLEEP() ? OSL_SLEEP(DHD_FUNCTION_LEVEL_RESET_DELAY) :
7055 		OSL_DELAY(DHD_FUNCTION_LEVEL_RESET_DELAY * USEC_PER_MSEC);
7056 
7057 	if (force_fail) {
7058 		DHD_ERROR(("Set PCIE_SSRESET_DISABLE_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)\n",
7059 			PCIE_SSRESET_DISABLE_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
7060 		val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
7061 		DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL,
7062 			val));
7063 		val = val | (1 << PCIE_SSRESET_DISABLE_BIT);
7064 		DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL,
7065 			val));
7066 		OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val), val);
7067 
7068 		val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
7069 		DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL,
7070 			val));
7071 	}
7072 
7073 	/* Clear bit 15 of PCIE_CFG_DEVICE_CONTROL */
7074 	DHD_INFO(("Clear PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
7075 		PCIE_FUNCTION_LEVEL_RESET_BIT, PCIE_CFG_DEVICE_CONTROL));
7076 	val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val));
7077 	DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
7078 	val = val & ~(1 << PCIE_FUNCTION_LEVEL_RESET_BIT);
7079 	DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
7080 	OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val), val);
7081 
7082 	/* Wait till bit 13 of PCIE_CFG_SUBSYSTEM_CONTROL is cleared */
7083 	DHD_INFO(("Wait till PCIE_SSRESET_STATUS_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)"
7084 		"is cleared\n",	PCIE_SSRESET_STATUS_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
7085 	do {
7086 		val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
7087 		DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n",
7088 			PCIE_CFG_SUBSYSTEM_CONTROL, val));
7089 		val = val & (1 << PCIE_SSRESET_STATUS_BIT);
7090 		OSL_DELAY(DHD_SSRESET_STATUS_RETRY_DELAY);
7091 	} while (val && (retry++ < DHD_SSRESET_STATUS_RETRIES));
7092 
7093 	if (val) {
7094 		DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
7095 			PCIE_CFG_SUBSYSTEM_CONTROL, PCIE_SSRESET_STATUS_BIT));
7096 		/* User has to fire the IOVAR again, if force_fail is needed */
7097 		if (force_fail) {
7098 			bus->flr_force_fail = FALSE;
7099 			DHD_ERROR(("%s cleared flr_force_fail flag\n", __FUNCTION__));
7100 		}
7101 		return BCME_DONGLE_DOWN;
7102 	}
7103 
7104 	/* Restore pcie config space */
7105 	DHD_INFO(("Restore Pcie Config Space\n"));
7106 	DHD_PCIE_CONFIG_RESTORE(bus);
7107 
7108 #if defined(NDIS) && defined(BT_OVER_PCIE)
7109 	dhd_bwm_bt_resume(bus);
7110 #endif
7111 
7112 	DHD_ERROR(("******** FLR Succeeded ********\n"));
7113 
7114 	return BCME_OK;
7115 }
7116 
7117 #define DHD_BP_RESET_ASPM_DISABLE_DELAY		500u	/* usec */
7118 
7119 #define DHD_BP_RESET_STATUS_RETRY_DELAY		40u	/* usec */
7120 #define DHD_BP_RESET_STATUS_RETRIES		50u
7121 
7122 #define PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_BIT		10
7123 #define PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT	12
7124 
7125 int
7126 dhd_bus_cfg_ss_ctrl_bp_reset(struct dhd_bus *bus)
7127 {
7128 	uint val;
7129 	int retry = 0;
7130 	int ret = BCME_OK;
7131 	bool reset_stat_bit;
7132 
7133 	DHD_ERROR(("******** Perform BP reset ********\n"));
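
	/* Backplane reset handshake implemented below (summary added for
	 * clarity): disable ASPM, pulse the BP reset bit (bit 10) of
	 * PCIE_CFG_SUBSYSTEM_CONTROL - set it and wait for the status bit
	 * (bit 12) to assert, then clear it and wait for the status bit to
	 * deassert - and finally re-enable ASPM L1.
	 */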
7134 
7135 	/* Disable ASPM */
7136 	DHD_ERROR(("Disable ASPM: Clear bits(1-0) of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
7137 		PCIECFGREG_LINK_STATUS_CTRL));
7138 	val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val));
7139 	DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
7140 	val = val & (~PCIE_ASPM_ENAB);
7141 	DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
7142 	OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val);
7143 
7144 	/* wait for delay usec */
7145 	DHD_ERROR(("Delay of %d usec\n", DHD_BP_RESET_ASPM_DISABLE_DELAY));
7146 	OSL_DELAY(DHD_BP_RESET_ASPM_DISABLE_DELAY);
7147 
7148 	/* Set bp reset bit 10 of PCIE_CFG_SUBSYSTEM_CONTROL */
7149 	DHD_ERROR(("Set PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_BIT(%d)"
7150 		" of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)\n",
7151 		PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
7152 	val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
7153 	DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL, val));
7154 	val = val | (1 << PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_BIT);
7155 	DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL, val));
7156 	OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val), val);
7157 
7158 	/* Wait till bp reset status bit 12 of PCIE_CFG_SUBSYSTEM_CONTROL is set */
7159 	DHD_ERROR(("Wait till PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT(%d) of "
7160 		"PCIE_CFG_SUBSYSTEM_CONTROL(0x%x) is set\n",
7161 		PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
7162 	do {
7163 		val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
7164 		DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n",
7165 			PCIE_CFG_SUBSYSTEM_CONTROL, val));
7166 		reset_stat_bit = val & (1 << PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT);
7167 		OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY);
7168 	} while (!reset_stat_bit && (retry++ < DHD_BP_RESET_STATUS_RETRIES));
7169 
7170 	if (!reset_stat_bit) {
7171 		DHD_ERROR(("ERROR: reg=0x%x bit %d is not set\n",
7172 			PCIE_CFG_SUBSYSTEM_CONTROL,
7173 			PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT));
7174 		ret = BCME_ERROR;
7175 		goto aspm_enab;
7176 	}
7177 
7178 	/* Clear bp reset bit 10 of PCIE_CFG_SUBSYSTEM_CONTROL */
7179 	DHD_ERROR(("Clear PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_BIT(%d)"
7180 		" of PCIECFGREG_SPROM_CTRL(0x%x)\n",
7181 		PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
7182 	val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
7183 	DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL, val));
7184 	val = val & ~(1 << PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_BIT);
7185 	DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL, val));
7186 	OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val), val);
7187 
7188 	/* Wait till bp reset status bit 12 of PCIE_CFG_SUBSYSTEM_CONTROL is cleared */
7189 	DHD_ERROR(("Wait till PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT(%d) of "
7190 		"PCIE_CFG_SUBSYSTEM_CONTROL(0x%x) is cleared\n",
7191 		PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
7192 	do {
7193 		val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
7194 		DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n",
7195 			PCIE_CFG_SUBSYSTEM_CONTROL, val));
7196 		reset_stat_bit = val & (1 << PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT);
7197 		OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY);
7198 	} while (reset_stat_bit && (retry++ < DHD_BP_RESET_STATUS_RETRIES));
7199 
7200 	if (reset_stat_bit) {
7201 		DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
7202 			PCIE_CFG_SUBSYSTEM_CONTROL,
7203 			PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT));
7204 		ret = BCME_ERROR;
7205 	}
7206 
7207 aspm_enab:
7208 	/* Enable ASPM */
7209 	DHD_ERROR(("Enable ASPM: set bit 1 of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
7210 		PCIECFGREG_LINK_STATUS_CTRL));
7211 	val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val));
7212 	DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
7213 	val = val | (PCIE_ASPM_L1_ENAB);
7214 	DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
7215 	OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val);
7216 
7217 	if (ret) {
7218 		DHD_ERROR(("******** BP reset Failed ********\n"));
7219 	} else {
7220 		DHD_ERROR(("******** BP reset Succeeded ********\n"));
7221 	}
7222 
7223 	return ret;
7224 }
7225 
7226 #define PCIE_CFG_SPROM_CTRL_SB_RESET_BIT	10
7227 #define PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT	21
7228 
7229 int
7230 dhd_bus_cfg_sprom_ctrl_bp_reset(struct dhd_bus *bus)
7231 {
7232 	uint val;
7233 	int retry = 0;
7234 	uint dar_clk_ctrl_status_reg = DAR_CLK_CTRL(bus->sih->buscorerev);
7235 	int ret = BCME_OK;
7236 	bool cond;
7237 
7238 	DHD_ERROR(("******** Perform BP reset ********\n"));
7239 
7240 	/* Disable ASPM */
7241 	DHD_INFO(("Disable ASPM: Clear bits(1-0) of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
7242 		PCIECFGREG_LINK_STATUS_CTRL));
7243 	val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val));
7244 	DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
7245 	val = val & (~PCIE_ASPM_ENAB);
7246 	DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
7247 	OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val);
7248 
7249 	/* wait for delay usec */
7250 	DHD_INFO(("Delay of %d usec\n", DHD_BP_RESET_ASPM_DISABLE_DELAY));
7251 	OSL_DELAY(DHD_BP_RESET_ASPM_DISABLE_DELAY);
7252 
7253 	/* Set bit 10 of PCIECFGREG_SPROM_CTRL */
7254 	DHD_INFO(("Set PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of PCIECFGREG_SPROM_CTRL(0x%x)\n",
7255 		PCIE_CFG_SPROM_CTRL_SB_RESET_BIT, PCIECFGREG_SPROM_CTRL));
7256 	val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val));
7257 	DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
7258 	val = val | (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT);
7259 	DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
7260 	OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val), val);
7261 
7262 	/* Wait till the backplane reset is ASSERTED, i.e.
7263 	 * bit 10 of PCIECFGREG_SPROM_CTRL reads back as cleared.
7264 	 * Only after this is polling bit 21 of DAR reg 0xAE0 valid;
7265 	 * otherwise the DAR register will return the previous, stale value.
7266 	 */
7267 	DHD_INFO(("Wait till PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of "
7268 		"PCIECFGREG_SPROM_CTRL(0x%x) is cleared\n",
7269 		PCIE_CFG_SPROM_CTRL_SB_RESET_BIT, PCIECFGREG_SPROM_CTRL));
7270 	do {
7271 		val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val));
7272 		DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
7273 		cond = val & (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT);
7274 		OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY);
7275 	} while (cond && (retry++ < DHD_BP_RESET_STATUS_RETRIES));
7276 
7277 	if (cond) {
7278 		DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
7279 			PCIECFGREG_SPROM_CTRL, PCIE_CFG_SPROM_CTRL_SB_RESET_BIT));
7280 		ret = BCME_ERROR;
7281 		goto aspm_enab;
7282 	}
7283 
7284 	/* Wait till bit 21 of dar_clk_ctrl_status_reg is cleared */
7285 	DHD_INFO(("Wait till PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT(%d) of "
7286 		"dar_clk_ctrl_status_reg(0x%x) is cleared\n",
7287 		PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT, dar_clk_ctrl_status_reg));
7288 	do {
7289 		val = si_corereg(bus->sih, bus->sih->buscoreidx,
7290 			dar_clk_ctrl_status_reg, 0, 0);
7291 		DHD_INFO(("read_dar si_corereg: reg=0x%x read val=0x%x\n",
7292 			dar_clk_ctrl_status_reg, val));
7293 		cond = val & (1 << PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT);
7294 		OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY);
7295 	} while (cond && (retry++ < DHD_BP_RESET_STATUS_RETRIES));
7296 
7297 	if (cond) {
7298 		DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
7299 			dar_clk_ctrl_status_reg, PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT));
7300 		ret = BCME_ERROR;
7301 	}
7302 
7303 aspm_enab:
7304 	/* Enable ASPM */
7305 	DHD_INFO(("Enable ASPM: set bit 1 of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
7306 		PCIECFGREG_LINK_STATUS_CTRL));
7307 	val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val));
7308 	DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
7309 	val = val | (PCIE_ASPM_L1_ENAB);
7310 	DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
7311 	OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val);
7312 
7313 	DHD_ERROR(("******** BP reset %s ********\n", ret ? "Failed" : "Succeeded"));
7314 
7315 	return ret;
7316 }
7317 
7318 #if defined(LINUX) || defined(linux)
7319 int
7320 dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
7321 {
7322 	dhd_bus_t *bus = dhdp->bus;
7323 	int bcmerror = 0;
7324 	unsigned long flags;
7325 	int retry = POWERUP_MAX_RETRY;
7326 
7327 	if (flag == TRUE) { /* Turn off WLAN */
7328 		/* Removing Power */
7329 		DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__));
7330 
7331 		/* wait for other contexts to finish -- if required a call
7332 		* to OSL_DELAY for 1s can be added to give other contexts
7333 		* a chance to finish
7334 		*/
7335 		dhdpcie_advertise_bus_cleanup(bus->dhd);
7336 
7337 		if (bus->dhd->busstate != DHD_BUS_DOWN) {
7338 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
7339 			atomic_set(&bus->dhd->block_bus, TRUE);
7340 			dhd_flush_rx_tx_wq(bus->dhd);
7341 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
7342 
7343 #ifdef BCMPCIE_OOB_HOST_WAKE
7344 			/* Clean up any pending host wake IRQ */
7345 			dhd_bus_oob_intr_set(bus->dhd, FALSE);
7346 			dhd_bus_oob_intr_unregister(bus->dhd);
7347 #endif /* BCMPCIE_OOB_HOST_WAKE */
7348 			dhd_os_wd_timer(dhdp, 0);
7349 			dhd_bus_stop(bus, TRUE);
7350 			if (bus->intr) {
7351 				dhdpcie_bus_intr_disable(bus);
7352 				dhdpcie_free_irq(bus);
7353 			}
7354 			dhd_deinit_bus_lp_state_lock(bus);
7355 			dhd_deinit_bar1_switch_lock(bus);
7356 			dhd_deinit_backplane_access_lock(bus);
7357 			dhd_deinit_pwr_req_lock(bus);
7358 #ifdef PCIE_INB_DW
7359 			dhd_deinit_dongle_ds_lock(bus);
7360 #endif /* PCIE_INB_DW */
7361 			dhd_bus_release_dongle(bus);
7362 			dhdpcie_bus_free_resource(bus);
7363 			bcmerror = dhdpcie_bus_disable_device(bus);
7364 			if (bcmerror) {
7365 				DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
7366 					__FUNCTION__, bcmerror));
7367 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
7368 				atomic_set(&bus->dhd->block_bus, FALSE);
7369 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
7370 			}
7371 			/* Clean up protocol data after the Bus Master Enable bit is
7372 			 * cleared, so that the host can safely unmap DMA and remove the
7373 			 * allocated buffers from the PKTID MAP. On some Application
7374 			 * Processors, the System MMU triggers a Kernel panic when it
7375 			 * detects an access to DMA-unmapped memory from a device that
7376 			 * goes through the System MMU. Such a panic is possible here
7377 			 * because the dongle could still access DMA-unmapped memory
7378 			 * after dhd_prot_reset() is called.
7379 			 * For this reason, the dhd_prot_reset() and dhd_clear() calls
7380 			 * must come after dhdpcie_bus_disable_device().
7381 			 */
7382 			dhd_prot_reset(dhdp);
7383 			/* XXX Reset dhd_pub_t instance to initial status
7384 			 * for built-in type driver
7385 			 */
7386 			dhd_clear(dhdp);
7387 
7388 			bcmerror = dhdpcie_bus_stop_host_dev(bus);
7389 			if (bcmerror) {
7390 				DHD_ERROR(("%s: dhdpcie_bus_stop_host_dev failed: %d\n",
7391 					__FUNCTION__, bcmerror));
7392 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
7393 				atomic_set(&bus->dhd->block_bus, FALSE);
7394 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
7395 				goto done;
7396 			}
7397 
7398 			DHD_GENERAL_LOCK(bus->dhd, flags);
7399 			DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
7400 			bus->dhd->busstate = DHD_BUS_DOWN;
7401 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
7402 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
7403 			atomic_set(&bus->dhd->block_bus, FALSE);
7404 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
7405 		} else {
7406 			if (bus->intr) {
7407 				dhdpcie_free_irq(bus);
7408 			}
7409 #ifdef BCMPCIE_OOB_HOST_WAKE
7410 			/* Clean up any pending host wake IRQ */
7411 			dhd_bus_oob_intr_set(bus->dhd, FALSE);
7412 			dhd_bus_oob_intr_unregister(bus->dhd);
7413 #endif /* BCMPCIE_OOB_HOST_WAKE */
7414 			dhd_dpc_kill(bus->dhd);
7415 			if (!bus->no_bus_init) {
7416 				dhd_bus_release_dongle(bus);
7417 				dhdpcie_bus_free_resource(bus);
7418 				bcmerror = dhdpcie_bus_disable_device(bus);
7419 				if (bcmerror) {
7420 					DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
7421 						__FUNCTION__, bcmerror));
7422 				}
7423 
7424 				/* Clean up protocol data after the Bus Master Enable bit
7425 				 * is cleared, so that the host can safely unmap DMA and
7426 				 * remove the allocated buffers from the PKTID MAP. On some
7427 				 * Application Processors, the System MMU triggers a Kernel
7428 				 * panic when it detects an access to DMA-unmapped memory
7429 				 * from a device that goes through the System MMU.
7430 				 * Such a panic is possible here because the dongle could
7431 				 * still access DMA-unmapped memory after dhd_prot_reset()
7432 				 * is called.
7433 				 * For this reason, the dhd_prot_reset() and dhd_clear()
7434 				 * calls must come after dhdpcie_bus_disable_device().
7435 				 */
7436 				dhd_prot_reset(dhdp);
7437 				/* XXX Reset dhd_pub_t instance to initial status
7438 				 * for built-in type driver
7439 				 */
7440 				dhd_clear(dhdp);
7441 			} else {
7442 				bus->no_bus_init = FALSE;
7443 			}
7444 
7445 			bcmerror = dhdpcie_bus_stop_host_dev(bus);
7446 			if (bcmerror) {
7447 				DHD_ERROR(("%s: dhdpcie_bus_stop_host_dev failed: %d\n",
7448 					__FUNCTION__, bcmerror));
7449 				goto done;
7450 			}
7451 		}
7452 
7453 		bus->dhd->dongle_reset = TRUE;
7454 		DHD_ERROR(("%s:  WLAN OFF Done\n", __FUNCTION__));
7455 
7456 	} else { /* Turn on WLAN */
7457 		if (bus->dhd->busstate == DHD_BUS_DOWN) {
7458 			/* Powering On */
7459 			DHD_ERROR(("%s: == Power ON ==\n", __FUNCTION__));
7460 			/* PCIe RC Turn on */
7461 			do {
7462 				bcmerror = dhdpcie_bus_start_host_dev(bus);
7463 				if (!bcmerror) {
7464 					DHD_ERROR(("%s: dhdpcie_bus_start_host_dev OK\n",
7465 						__FUNCTION__));
7466 					break;
7467 				} else {
7468 					OSL_SLEEP(10);
7469 				}
7470 			} while (retry--);
7471 
7472 			if (bcmerror) {
7473 				DHD_ERROR(("%s: host pcie clock enable failed: %d\n",
7474 					__FUNCTION__, bcmerror));
7475 				goto done;
7476 			}
7477 #if defined(DHD_CONTROL_PCIE_ASPM_WIFI_TURNON)
7478 			dhd_bus_aspm_enable_rc_ep(bus, FALSE);
7479 #endif /* DHD_CONTROL_PCIE_ASPM_WIFI_TURNON */
7480 			bus->is_linkdown = 0;
7481 			bus->cto_triggered = 0;
7482 #ifdef SUPPORT_LINKDOWN_RECOVERY
7483 			bus->read_shm_fail = FALSE;
7484 #endif /* SUPPORT_LINKDOWN_RECOVERY */
7485 			bcmerror = dhdpcie_bus_enable_device(bus);
7486 			if (bcmerror) {
7487 				DHD_ERROR(("%s: host configuration restore failed: %d\n",
7488 					__FUNCTION__, bcmerror));
7489 				goto done;
7490 			}
7491 
7492 			bcmerror = dhdpcie_bus_alloc_resource(bus);
7493 			if (bcmerror) {
7494 				DHD_ERROR(("%s: dhdpcie_bus_alloc_resource failed: %d\n",
7495 					__FUNCTION__, bcmerror));
7496 				goto done;
7497 			}
7498 #if defined(DHD_HP2P) && defined(OEM_ANDROID)
7499 			bus->dhd->hp2p_enable = TRUE;
7500 #endif
7501 
7502 #ifdef FORCE_DONGLE_RESET_IN_DEVRESET_ON
7503 			/*
7504 			 * This will be enabled from phone platforms to
7505 			 * reset dongle during Wifi ON
7506 			 */
7507 			dhdpcie_dongle_reset(bus);
7508 #endif /* FORCE_DONGLE_RESET_IN_DEVRESET_ON */
7509 
7510 			bcmerror = dhdpcie_bus_dongle_attach(bus);
7511 			if (bcmerror) {
7512 				DHD_ERROR(("%s: dhdpcie_bus_dongle_attach failed: %d\n",
7513 					__FUNCTION__, bcmerror));
7514 				goto done;
7515 			}
7516 
7517 			bcmerror = dhd_bus_request_irq(bus);
7518 			if (bcmerror) {
7519 				DHD_ERROR(("%s: dhd_bus_request_irq failed: %d\n",
7520 					__FUNCTION__, bcmerror));
7521 				goto done;
7522 			}
7523 
7524 			bus->dhd->dongle_reset = FALSE;
7525 
7526 #if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
7527 			dhd_irq_set_affinity(bus->dhd, cpumask_of(1));
7528 #endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
7529 
7530 			bcmerror = dhd_bus_start(dhdp);
7531 			if (bcmerror) {
7532 				DHD_ERROR(("%s: dhd_bus_start: %d\n",
7533 					__FUNCTION__, bcmerror));
7534 				goto done;
7535 			}
7536 
7537 			/* Re-enable the watchdog, which was disabled in dhdpcie_advertise_bus_cleanup */
7538 			if (bus->dhd->dhd_watchdog_ms_backup) {
7539 				DHD_ERROR(("%s: Enabling wdtick after dhd init\n",
7540 					__FUNCTION__));
7541 				dhd_os_wd_timer(bus->dhd, bus->dhd->dhd_watchdog_ms_backup);
7542 			}
7543 			DHD_ERROR(("%s: WLAN Power On Done\n", __FUNCTION__));
7544 		} else {
7545 			DHD_ERROR(("%s: bus is not in DOWN state, nothing to power on\n", __FUNCTION__));
7546 			goto done;
7547 		}
7548 	}
7549 
7550 done:
7551 	return bcmerror;
7552 }
7553 #else
7554 int
7555 dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
7556 {
7557 	dhd_bus_t *bus = dhdp->bus;
7558 	int bcmerror = 0;
7559 	unsigned long flags;
7560 
7561 	if (flag == TRUE) {
7562 		/* Removing Power */
7563 		if (!dhdp->dongle_reset) {
7564 			DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__));
7565 			dhdpcie_advertise_bus_cleanup(bus->dhd);
7566 			dhd_os_sdlock(dhdp);
7567 			dhd_os_wd_timer(dhdp, 0);
7568 			dhd_bus_stop(bus, FALSE);
7569 			dhd_prot_reset(dhdp);
7570 
7571 			dhdpcie_bus_release_dongle(bus, bus->dhd->osh,
7572 				bus->dhd->dongle_isolation, TRUE);
7573 			bus->dhd->dongle_reset = TRUE;
7574 
7575 			dhd_os_sdunlock(dhdp);
7576 
7577 			DHD_GENERAL_LOCK(bus->dhd, flags);
7578 			DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
7579 			bus->dhd->busstate = DHD_BUS_DOWN;
7580 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
7581 
7582 			DHD_ERROR(("%s:  WLAN OFF Done\n", __FUNCTION__));
7583 		} else {
7584 			DHD_ERROR(("%s: Dongle is already in RESET!\n", __FUNCTION__));
7585 			bcmerror = BCME_DONGLE_DOWN;
7586 		}
7587 	} else {
7588 		/* Powering On */
7589 		DHD_ERROR(("%s: == Power ON ==\n", __FUNCTION__));
7590 
7591 		if (bus->dhd->dongle_reset) {
7592 			dhd_os_sdlock(dhdp); /* Turn on WLAN */
7593 
7594 			if (dhdpcie_dongle_attach(bus)) {
7595 				DHD_ERROR(("%s: dhdpcie_dongle_attach failed\n", __FUNCTION__));
7596 				dhd_os_sdunlock(dhdp);
7597 				return BCME_DONGLE_DOWN;
7598 			}
7599 			DHD_GENERAL_LOCK(bus->dhd, flags);
7600 			DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
7601 			bus->dhd->busstate = DHD_BUS_DOWN;
7602 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
7603 
7604 			DHD_INFO(("%s: About to download firmware\n", __FUNCTION__));
7605 			if (dhd_bus_download_firmware(bus, bus->dhd->osh,
7606 				bus->fw_path, bus->nv_path) == 0) {
7607 
7608 				bcmerror = dhd_bus_init((dhd_pub_t *) bus->dhd, FALSE);
7609 				if (bcmerror == BCME_OK) {
7610 					bus->dhd->dongle_reset = FALSE;
7611 
7612 					dhd_os_wd_timer(dhdp, dhd_watchdog_ms);
7613 					DHD_ERROR(("%s: WLAN Power On Done\n", __FUNCTION__));
7614 				} else {
7615 					DHD_ERROR(("%s: dhd_bus_init FAILed\n", __FUNCTION__));
7616 					dhd_bus_stop(bus, FALSE);
7617 				}
7618 			} else {
7619 				DHD_ERROR(("%s: dhd_bus_download_firmware FAILed\n", __FUNCTION__));
7620 				bcmerror = BCME_DONGLE_DOWN;
7621 			}
7622 
7623 			dhd_os_sdunlock(dhdp);
7624 		} else {
7625 			bcmerror = BCME_DONGLE_DOWN;
7626 			DHD_ERROR(("%s called when dongle is not in reset\n", __FUNCTION__));
7627 		}
7628 	}
7629 	return bcmerror;
7630 }
7631 #endif /* LINUX || linux */
7632 
7633 #ifdef DHD_PCIE_REG_ACCESS
7634 static int
7635 pcie2_mdioop(dhd_bus_t *bus, uint physmedia, uint regaddr, bool write, uint *val,
7636 	bool slave_bypass)
7637 {
7638 	uint pcie_serdes_spinwait = 200, i = 0, mdio_ctrl;
7639 	uint32 reg32;
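
	/* MDIO transaction sketch (as implemented below): select the SERDES
	 * block, program PCIE2_MDIO_CONTROL with the clock divisor, register
	 * address and read/write flags, for a write put the data word with
	 * MDIODATA2_DONE set into PCIE2_MDIO_WR_DATA, then poll until the
	 * hardware clears MDIODATA2_DONE to signal completion.
	 */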
7640 
7641 	pcie2_mdiosetblock(bus, physmedia);
7642 
7643 	/* enable mdio access to SERDES */
7644 	mdio_ctrl = MDIOCTL2_DIVISOR_VAL;
7645 	mdio_ctrl |= (regaddr << MDIOCTL2_REGADDR_SHF);
7646 
7647 	if (slave_bypass)
7648 		mdio_ctrl |= MDIOCTL2_SLAVE_BYPASS;
7649 
7650 	if (!write)
7651 		mdio_ctrl |= MDIOCTL2_READ;
7652 
7653 	si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_CONTROL, ~0, mdio_ctrl);
7654 
7655 	if (write) {
7656 		reg32 =  PCIE2_MDIO_WR_DATA;
7657 		si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA, ~0,
7658 			*val | MDIODATA2_DONE);
7659 	} else
7660 		reg32 =  PCIE2_MDIO_RD_DATA;
7661 
7662 	/* retry till the transaction is complete */
7663 	while (i < pcie_serdes_spinwait) {
7664 		uint done_val =  si_corereg(bus->sih, bus->sih->buscoreidx, reg32, 0, 0);
7665 		if (!(done_val & MDIODATA2_DONE)) {
7666 			if (!write) {
7667 				*val = si_corereg(bus->sih, bus->sih->buscoreidx,
7668 					PCIE2_MDIO_RD_DATA, 0, 0);
7669 				*val = *val & MDIODATA2_MASK;
7670 			}
7671 			return 0;
7672 		}
7673 		OSL_DELAY(1000);
7674 		i++;
7675 	}
7676 	return -1;
7677 }
7678 #endif /* DHD_PCIE_REG_ACCESS */
7679 
7680 #ifdef BCMINTERNAL
7681 static uint64
7682 serialized_backplane_access_64(dhd_bus_t *bus, uint addr, uint size, uint64 *val, bool read)
7683 {
7684 	uint64 ret;
7685 	unsigned long flags;
7686 
7687 	DHD_BACKPLANE_ACCESS_LOCK(bus->backplane_access_lock, flags);
7688 	ret = si_backplane_access_64(bus->sih, addr, size, val, read);
7689 	DHD_BACKPLANE_ACCESS_UNLOCK(bus->backplane_access_lock, flags);
7690 	return ret;
7691 }
7692 #endif /* BCMINTERNAL */
7693 
7694 static int
7695 dhdpcie_get_dma_ring_indices(dhd_pub_t *dhd)
7696 {
7697 	int h2d_support, d2h_support;
7698 
7699 	d2h_support = dhd->dma_d2h_ring_upd_support ? 1 : 0;
7700 	h2d_support = dhd->dma_h2d_ring_upd_support ? 1 : 0;
7701 	return (d2h_support | (h2d_support << 1));
7702 
7703 }
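
/* Encoding shared by this get/set pair: bit 0 enables D2H DMA index updates,
 * bit 1 enables H2D, so 0 = none, 1 = D2H only, 2 = H2D only, 3 = both -
 * which is why dhdpcie_set_dma_ring_indices() below rejects values outside
 * 0..3.
 */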
7704 int
7705 dhdpcie_set_dma_ring_indices(dhd_pub_t *dhd, int32 int_val)
7706 {
7707 	int bcmerror = 0;
7708 	/* Can change it only during initialization/FW download */
7709 	if (dhd->busstate == DHD_BUS_DOWN) {
7710 		if ((int_val > 3) || (int_val < 0)) {
7711 			DHD_ERROR(("Bad argument. Possible values: 0, 1, 2 & 3\n"));
7712 			bcmerror = BCME_BADARG;
7713 		} else {
7714 			dhd->dma_d2h_ring_upd_support = (int_val & 1) ? TRUE : FALSE;
7715 			dhd->dma_h2d_ring_upd_support = (int_val & 2) ? TRUE : FALSE;
7716 			dhd->dma_ring_upd_overwrite = TRUE;
7717 		}
7718 	} else {
7719 		DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
7720 			__FUNCTION__));
7721 		bcmerror = BCME_NOTDOWN;
7722 	}
7723 
7724 	return bcmerror;
7725 
7726 }
7727 
7728 /* si_backplane_access() manages a shared resource - the BAR0 mapping -
7729  * hence its calls shall be serialized. This wrapper provides that
7730  * serialization and shall be used everywhere instead of calling
7731  * si_backplane_access() directly.
7732  *
7733  * The Linux DHD driver calls si_backplane_access() from three contexts:
7734  * tasklet (which may call dhdpcie_sssr_dump()), iovar handlers ("sbreg",
7735  * "membytes", etc.) and procfs (used by the GDB proxy). To avoid race
7736  * conditions, these calls shall be serialized. The presence of the
7737  * tasklet context implies that serialization shall be based on a
7738  * spinlock; hence the Linux implementation of
7739  * dhd_pcie_backplane_access_[un]lock() is spinlock-based.
7740  *
7741  * Other platforms may add their own implementations of
7742  * dhd_pcie_backplane_access_[un]lock() as needed (e.g. empty if
7743  * serialization is not needed). */
7744 static uint
7745 serialized_backplane_access(dhd_bus_t *bus, uint addr, uint size, uint *val, bool read)
7746 {
7747 	uint ret;
7748 	unsigned long flags;
7749 	DHD_BACKPLANE_ACCESS_LOCK(bus->backplane_access_lock, flags);
7750 	ret = si_backplane_access(bus->sih, addr, size, val, read);
7751 	DHD_BACKPLANE_ACCESS_UNLOCK(bus->backplane_access_lock, flags);
7752 	return ret;
7753 }
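
/* Hypothetical usage sketch (illustrative only): an iovar handler reading a
 * 32bit word at a backplane address would go through the wrapper rather than
 * call si_backplane_access() directly, e.g.:
 *
 *	uint val;
 *	if (serialized_backplane_access(bus, addr, sizeof(val),
 *		&val, TRUE) != BCME_OK) {
 *		return BCME_ERROR;
 *	}
 *
 * This is the pattern the IOV_GVAL(IOV_SBREG) handler below follows.
 */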
7754 
7755 #ifndef DHD_CAP_PLATFORM
7756 #define DHD_CAP_PLATFORM "x86 "
7757 #endif
7758 
7759 #ifndef DHD_CAP_CUSTOMER
7760 #define DHD_CAP_CUSTOMER "brcm "
7761 #endif
7762 
7763 void
7764 BCMRAMFN(dhd_cap_bcmstrbuf)(dhd_pub_t *dhd, struct bcmstrbuf *b)
7765 {
7766 	bcm_bprintf(b, DHD_CAP_PLATFORM);
7767 	bcm_bprintf(b, DHD_CAP_CUSTOMER);
7768 #ifdef PCIE_FULL_DONGLE
7769 	bcm_bprintf(b, "pcie ");
7770 #endif /* PCIE_FULL_DONGLE */
7771 	/* regaccess and memaccess will be present only for internal reference builds @brcm */
7772 #ifdef DHD_NO_MOG
7773 	bcm_bprintf(b, "internal ");
7774 #else
7775 	bcm_bprintf(b, "external ");
7776 #endif /* DHD_NO_MOG */
7777 #ifdef WLAN_ACCEL_BOOT
7778 	bcm_bprintf(b, "wlan-accel ");
7779 #endif /* WLAN_ACCEL_BOOT */
7780 #ifdef ENABLE_DHD_GRO
7781 	bcm_bprintf(b, "gro ");
7782 #endif /* ENABLE_DHD_GRO */
7783 #ifdef WBRC
7784 	bcm_bprintf(b, "wbrc ");
7785 #endif /* WBRC */
7786 #ifdef WL_CFG80211
7787 	bcm_bprintf(b, "cfg80211 ");
7788 #endif /* WL_CFG80211 */
7789 #ifdef DHD_FILE_DUMP_EVENT
7790 	bcm_bprintf(b, "haldump ");
7791 #endif /* DHD_FILE_DUMP_EVENT */
7792 #ifdef DHD_LB_RXP
7793 	bcm_bprintf(b, "lbrxp ");
7794 #endif /* DHD_LB_RXP */
7795 #ifdef DHD_LB_TXP
7796 #ifdef DHD_LB_TXP_DEFAULT_ENAB
7797 	bcm_bprintf(b, "lbtxp ");
7798 #endif /* DHD_LB_TXP_DEFAULT_ENAB */
7799 #endif /* DHD_LB_TXP */
7800 #ifdef DHD_HTPUT_TUNABLES
7801 	bcm_bprintf(b, "htput ");
7802 #endif /* DHD_HTPUT_TUNABLES */
7803 }
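
/* Example capability string (illustrative only; the exact contents depend on
 * the build flags above), for a PCIe full-dongle external build with
 * cfg80211:
 *
 *	"x86 brcm pcie external cfg80211 "
 */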
7804 
7805 /** Return dhd capability string */
7806 static char*
7807 dhd_cap(dhd_pub_t *dhd, char *buf, uint bufsize)
7808 {
7809 	struct bcmstrbuf b;
7810 
7811 	bcm_binit(&b, buf, bufsize);
7812 
7813 	dhd_cap_bcmstrbuf(dhd, &b);
7814 
7815 	/* this is either full or overflow. return error */
7816 	if (b.size <= 1)
7817 		return NULL;
7818 
7819 	return (buf);
7820 }
7821 
7822 /**
7823  * IOVAR handler of the DHD bus layer (in this case, the PCIe bus).
7824  *
7825  * @param actionid  e.g. IOV_SVAL(IOV_PCIEREG)
7826  * @param params    input buffer
7827  * @param plen      length in [bytes] of input buffer 'params'
7828  * @param arg       output buffer
7829  * @param len       length in [bytes] of output buffer 'arg'
7830  */
7831 static int
7832 dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
7833                 void *params, uint plen, void *arg, uint len, int val_size)
7834 {
7835 	int bcmerror = 0;
7836 #ifdef BCMINTERNAL
7837 	uint64 uint64_val = 0;
7838 #endif /* BCMINTERNAL */
7839 	int32 int_val = 0;
7840 	int32 int_val2 = 0;
7841 	int32 int_val3 = 0;
7842 	bool bool_val = 0;
7843 
7844 	DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n",
7845 	           __FUNCTION__, actionid, name, params, plen, arg, len, val_size));
7846 
7847 	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
7848 		goto exit;
7849 
7850 	if (plen >= sizeof(int_val))
7851 		bcopy(params, &int_val, sizeof(int_val));
7852 
7853 	if (plen >= sizeof(int_val) * 2)
7854 		bcopy((void*)((uintptr)params + sizeof(int_val)), &int_val2, sizeof(int_val2));
7855 
7856 	if (plen >= sizeof(int_val) * 3)
7857 		bcopy((void*)((uintptr)params + 2 * sizeof(int_val)), &int_val3, sizeof(int_val3));
7858 
7859 	bool_val = (int_val != 0) ? TRUE : FALSE;
7860 
7861 	/* Check if dongle is in reset. If so, only allow DEVRESET iovars */
7862 	if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) ||
7863 	                                actionid == IOV_GVAL(IOV_DEVRESET))) {
7864 		bcmerror = BCME_NOTREADY;
7865 		goto exit;
7866 	}
7867 
7868 	switch (actionid) {
7869 
7870 #ifdef BCMINTERNAL
7871 	case IOV_SVAL(IOV_MSI_SIM):
7872 		/* allocate memory for MSI (Message Signaled Interrupt) window */
7873 		int_val = !!int_val;
7874 		DHD_INFO(("int_val is %d\n", int_val));
7875 		if (bus->msi_sim != int_val)  {
7876 			if (int_val) {
7877 				/* bus->msi_addr */
7878 				bus->msi_sim_addr =
7879 					MALLOC(bus->dhd->osh, MSI_SIM_BUFSIZE);
7880 				if (bus->msi_sim_addr) {
7881 					*bus->msi_sim_addr = 0;
7882 					bus->msi_sim_phys = DMA_MAP(bus->dhd->osh,
7883 					bus->msi_sim_addr, MSI_SIM_BUFSIZE, DMA_RX, 0, 0);
7884 					/* program the MSI addr */
7885 					si_corereg(bus->sih, bus->sih->buscoreidx,
7886 					OFFSETOF(sbpcieregs_t,
7887 						configaddr), ~0, PCIE_CFG_MSIDATA_OFFSET);
7888 					si_corereg(bus->sih, bus->sih->buscoreidx,
7889 					OFFSETOF(sbpcieregs_t,
7890 						configdata), ~0, PCIE_CFG_MSI_GENDATA);
7891 					si_corereg(bus->sih, bus->sih->buscoreidx,
7892 					OFFSETOF(sbpcieregs_t,
7893 						configaddr), ~0, PCIE_CFG_MSIADDR_LOW_OFFSET);
7894 					ASSERT(PHYSADDRHI(bus->msi_sim_phys) == 0);
7895 					si_corereg(bus->sih, bus->sih->buscoreidx,
7896 					OFFSETOF(sbpcieregs_t,
7897 					configdata), ~0, (uint32)PHYSADDRLO(bus->msi_sim_phys));
7898 					si_corereg(bus->sih, bus->sih->buscoreidx,
7899 					OFFSETOF(sbpcieregs_t,
7900 						configaddr), ~0, PCIE_CFG_MSICAP_OFFSET);
7901 					si_corereg(bus->sih, bus->sih->buscoreidx,
7902 					OFFSETOF(sbpcieregs_t,
7903 						configdata), ~0, PCIE_CFG_MSICAP_ENABLE_MSI);
7904 					/* poll the MSI addr window  */
7905 					bus->pollrate = 10;
7906 				}
7907 				DHD_INFO(("msi_sim_addr is %p\n", bus->msi_sim_addr));
7908 			} else {
7909 				/* bus->msi_addr */
7910 				si_corereg(bus->sih, bus->sih->buscoreidx,
7911 				OFFSETOF(sbpcieregs_t,
7912 					configaddr), ~0,
7913 					PCIE_CFG_MSICAP_OFFSET);
7914 				si_corereg(bus->sih, bus->sih->buscoreidx,
7915 				OFFSETOF(sbpcieregs_t,
7916 					configdata), ~0,
7917 					PCIE_CFG_MSICAP_DISABLE_MSI);
7918 				si_corereg(bus->sih, bus->sih->buscoreidx,
7919 				OFFSETOF(sbpcieregs_t,
7920 					configaddr), ~0,
7921 					PCIE_CFG_MSIADDR_LOW_OFFSET);
7922 				si_corereg(bus->sih, bus->sih->buscoreidx,
7923 				OFFSETOF(sbpcieregs_t, configdata), ~0, 0);
7924 
7925 				DMA_UNMAP(bus->dhd->osh, bus->msi_sim_phys,
7926 					MSI_SIM_BUFSIZE, DMA_RX, 0, 0);
7927 				MFREE(bus->dhd->osh,
7928 					bus->msi_sim_addr, MSI_SIM_BUFSIZE);
7929 			}
7930 			bus->msi_sim = (bool)int_val;
7931 		}
7932 		break;
7933 	case IOV_GVAL(IOV_MSI_SIM):
7934 		bcopy(&bus->msi_sim, arg, val_size);
7935 		break;
7936 #endif /* BCMINTERNAL */
7937 
7938 	case IOV_SVAL(IOV_VARS):
7939 		bcmerror = dhdpcie_downloadvars(bus, arg, len);
7940 		break;
7941 #ifdef DHD_PCIE_REG_ACCESS
7942 	case IOV_SVAL(IOV_PCIEREG):
7943 		/* XXX: validate int_val ??? */
7944 		si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0,
7945 			int_val);
7946 		si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), ~0,
7947 			int_val2);
7948 		break;
7949 
7950 	case IOV_GVAL(IOV_PCIEREG):
7951 		/* XXX: validate int_val ??? */
7952 		si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0,
7953 			int_val);
7954 		int_val = si_corereg(bus->sih, bus->sih->buscoreidx,
7955 			OFFSETOF(sbpcieregs_t, configdata), 0, 0);
7956 		bcopy(&int_val, arg, sizeof(int_val));
7957 		break;
7958 
7959 	case IOV_SVAL(IOV_PCIECOREREG):
7960 		/* XXX: validate int_val ??? */
7961 		si_corereg(bus->sih, bus->sih->buscoreidx, int_val, ~0, int_val2);
7962 		break;
7963 	case IOV_GVAL(IOV_BAR0_SECWIN_REG):
7964 	{
7965 		sdreg_t sdreg;
7966 		uint32 addr, size;
7967 
7968 		bcopy(params, &sdreg, sizeof(sdreg));
7969 
7970 		addr = sdreg.offset;
7971 		size = sdreg.func;
7972 
7973 		if (serialized_backplane_access(bus, addr, size, (uint *)&int_val, TRUE) != BCME_OK)
7974 		{
7975 			DHD_ERROR(("Invalid size/addr combination \n"));
7976 			bcmerror = BCME_ERROR;
7977 			break;
7978 		}
7979 		bcopy(&int_val, arg, sizeof(int32));
7980 		break;
7981 	}
7982 
7983 	case IOV_SVAL(IOV_BAR0_SECWIN_REG):
7984 	{
7985 		sdreg_t sdreg;
7986 		uint32 addr, size;
7987 
7988 		bcopy(params, &sdreg, sizeof(sdreg));
7989 
7990 		addr = sdreg.offset;
7991 		size = sdreg.func;
7992 		if (serialized_backplane_access(bus, addr, size,
7993 			(uint *)(&sdreg.value), FALSE) != BCME_OK) {
7994 			DHD_ERROR(("Invalid size/addr combination \n"));
7995 			bcmerror = BCME_ERROR;
7996 		}
7997 		break;
7998 	}
7999 
8000 	case IOV_GVAL(IOV_SBREG):
8001 	{
8002 		sdreg_t sdreg;
8003 		uint32 addr, size;
8004 
8005 		bcopy(params, &sdreg, sizeof(sdreg));
8006 
8007 		addr = sdreg.offset | SI_ENUM_BASE(bus->sih);
8008 		size = sdreg.func;
8009 
8010 		if (serialized_backplane_access(bus, addr, size, (uint *)&int_val, TRUE) != BCME_OK)
8011 		{
8012 			DHD_ERROR(("Invalid size/addr combination \n"));
8013 			bcmerror = BCME_ERROR;
8014 			break;
8015 		}
8016 		bcopy(&int_val, arg, size);
8017 		break;
8018 	}
8019 
8020 	case IOV_SVAL(IOV_SBREG):
8021 	{
8022 		sdreg_t sdreg;
8023 		uint32 addr, size;
8024 
8025 		bcopy(params, &sdreg, sizeof(sdreg));
8026 
8027 		addr = sdreg.offset | SI_ENUM_BASE(bus->sih);
8028 		size = sdreg.func;
8029 		if (serialized_backplane_access(bus, addr, size,
8030 			(uint *)(&sdreg.value), FALSE) != BCME_OK) {
8031 			DHD_ERROR(("Invalid size/addr combination \n"));
8032 			bcmerror = BCME_ERROR;
8033 		}
8034 		break;
8035 	}
8036 
8037 	case IOV_GVAL(IOV_PCIESERDESREG):
8038 	{
8039 		uint val;
8040 		if (!PCIE_GEN2(bus->sih)) {
8041 			DHD_ERROR(("supported only in pcie gen2\n"));
8042 			bcmerror = BCME_ERROR;
8043 			break;
8044 		}
8045 
8046 		if (!pcie2_mdioop(bus, int_val, int_val2, FALSE, &val, FALSE)) {
8047 			bcopy(&val, arg, sizeof(int32));
8048 		} else {
8049 			DHD_ERROR(("pcie2_mdioop failed.\n"));
8050 			bcmerror = BCME_ERROR;
8051 		}
8052 		break;
8053 	}
8054 
8055 	case IOV_SVAL(IOV_PCIESERDESREG):
8056 		if (!PCIE_GEN2(bus->sih)) {
8057 			DHD_ERROR(("supported only in pcie gen2\n"));
8058 			bcmerror = BCME_ERROR;
8059 			break;
8060 		}
8061 		if (pcie2_mdioop(bus, int_val, int_val2, TRUE, (uint *)&int_val3, FALSE)) {
8062 			DHD_ERROR(("pcie2_mdioop failed.\n"));
8063 			bcmerror = BCME_ERROR;
8064 		}
8065 		break;
8066 	case IOV_GVAL(IOV_PCIECOREREG):
8067 		/* XXX: validate int_val ??? */
8068 		int_val = si_corereg(bus->sih, bus->sih->buscoreidx, int_val, 0, 0);
8069 		bcopy(&int_val, arg, sizeof(int_val));
8070 		break;
8071 
8072 	case IOV_SVAL(IOV_PCIECFGREG):
8073 		/* XXX: validate int_val ??? */
8074 		OSL_PCI_WRITE_CONFIG(bus->osh, int_val, 4, int_val2);
8075 		break;
8076 
8077 	case IOV_GVAL(IOV_PCIECFGREG):
8078 		/* XXX: validate int_val ??? */
8079 		int_val = OSL_PCI_READ_CONFIG(bus->osh, int_val, 4);
8080 		bcopy(&int_val, arg, sizeof(int_val));
8081 		break;
8082 #endif /* DHD_PCIE_REG_ACCESS */
8083 	case IOV_SVAL(IOV_PCIE_LPBK):
8084 		bcmerror = dhdpcie_bus_lpback_req(bus, int_val);
8085 		break;
8086 
8087 	case IOV_SVAL(IOV_PCIE_DMAXFER): {
8088 		dma_xfer_info_t *dmaxfer = (dma_xfer_info_t *)arg;
8089 		uint32 mem_addr;
8090 
8091 		if (!dmaxfer)
8092 			return BCME_BADARG;
8093 		if (dmaxfer->version != DHD_DMAXFER_VERSION)
8094 			return BCME_VERSION;
8095 		if (dmaxfer->length != sizeof(dma_xfer_info_t)) {
8096 			return BCME_BADLEN;
8097 		}
8098 
8099 		mem_addr = (uint32)dmaxfer->tput;
8100 		dmaxfer->tput = 0;
8101 		bcmerror = dhdpcie_bus_dmaxfer_req(bus, dmaxfer->num_bytes,
8102 				dmaxfer->src_delay, dmaxfer->dest_delay,
8103 				dmaxfer->type, dmaxfer->core_num,
8104 				dmaxfer->should_wait, mem_addr);
8105 
8106 		if (dmaxfer->should_wait && bcmerror >= 0) {
8107 			bcmerror = dhdmsgbuf_dmaxfer_status(bus->dhd, dmaxfer);
8108 		}
8109 		break;
8110 	}
8111 
8112 	case IOV_GVAL(IOV_PCIE_DMAXFER): {
8113 		dma_xfer_info_t *dmaxfer = (dma_xfer_info_t *)params;
8114 		if (!dmaxfer)
8115 			return BCME_BADARG;
8116 		if (dmaxfer->version != DHD_DMAXFER_VERSION)
8117 			return BCME_VERSION;
8118 		if (dmaxfer->length != sizeof(dma_xfer_info_t)) {
8119 			return BCME_BADLEN;
8120 		}
8121 		bcmerror = dhdmsgbuf_dmaxfer_status(bus->dhd, dmaxfer);
8122 		break;
8123 	}
8124 
8125 #ifdef BCMINTERNAL
8126 	case IOV_GVAL(IOV_PCIE_TX_LPBK):
8127 		int_val = dhdpcie_bus_get_tx_lpback(bus);
8128 		bcopy(&int_val, arg, val_size);
8129 		break;
8130 	case IOV_SVAL(IOV_PCIE_TX_LPBK):
8131 		bcmerror = dhdpcie_bus_set_tx_lpback(bus, bool_val);
8132 		break;
8133 #endif /* BCMINTERNAL */
8134 
8135 #ifdef PCIE_OOB
8136 	case IOV_GVAL(IOV_OOB_BT_REG_ON):
8137 		int_val = dhd_oob_get_bt_reg_on(bus);
8138 		bcopy(&int_val, arg, val_size);
8139 		break;
8140 	case IOV_SVAL(IOV_OOB_BT_REG_ON):
8141 		dhd_oob_set_bt_reg_on(bus, (uint8)int_val);
8142 		break;
8143 	case IOV_GVAL(IOV_OOB_ENABLE):
8144 		int_val = bus->oob_enabled;
8145 		bcopy(&int_val, arg, val_size);
8146 		break;
8147 	case IOV_SVAL(IOV_OOB_ENABLE):
8148 		bus->oob_enabled = (bool)int_val;
8149 		break;
8150 #endif /* PCIE_OOB */
8151 #ifdef PCIE_INB_DW
8152 	case IOV_GVAL(IOV_INB_DW_ENABLE):
8153 		int_val = bus->inb_enabled;
8154 		bcopy(&int_val, arg, val_size);
8155 		break;
8156 	case IOV_SVAL(IOV_INB_DW_ENABLE):
8157 		bus->inb_enabled = (bool)int_val;
8158 		break;
8159 #endif /* PCIE_INB_DW */
8160 #if defined(PCIE_OOB) || defined(PCIE_INB_DW)
8161 	case IOV_GVAL(IOV_DEEP_SLEEP):
8162 		int_val = bus->ds_enabled;
8163 		bcopy(&int_val, arg, val_size);
8164 		break;
8165 
8166 	case IOV_SVAL(IOV_DEEP_SLEEP):
8167 		if (int_val == 1) {
8168 			bus->deep_sleep = TRUE;
8169 			if (!bus->ds_enabled) {
8170 				bus->ds_enabled = TRUE;
8171 				/* Deassert */
8172 				if (dhd_bus_set_device_wake(bus, FALSE) == BCME_OK) {
8173 #ifdef PCIE_INB_DW
8174 					if (INBAND_DW_ENAB(bus)) {
8175 						int timeleft;
8176 						timeleft = dhd_os_ds_enter_wait(bus->dhd, NULL);
8177 						if (timeleft == 0) {
8178 							DHD_ERROR(("DS-ENTER timeout\n"));
8179 							bus->ds_enabled = FALSE;
8180 							break;
8181 						}
8182 					}
8183 #endif /* PCIE_INB_DW */
8184 				}
8185 				else {
8186 					DHD_ERROR(("%s: Enable Deep Sleep failed !\n",
8187 							__FUNCTION__));
8188 					bus->ds_enabled = FALSE;
8189 				}
8190 			} else {
8191 				DHD_ERROR(("%s: Deep Sleep already enabled !\n", __FUNCTION__));
8192 			}
8193 		}
8194 		else if (int_val == 0) {
8195 			bus->deep_sleep = FALSE;
8196 			if (bus->ds_enabled) {
8197 #if defined(DHD_EFI) && defined(DHD_INTR_POLL_PERIOD_DYNAMIC)
8198 				bus->dhd->cur_intr_poll_period = dhd_os_get_intr_poll_period();
8199 				/* for accurately measuring ds-exit latency
8200 				 * set interrupt poll period to a lesser value
8201 				 */
8202 				dhd_os_set_intr_poll_period(bus, INTR_POLL_PERIOD_CRITICAL);
8203 #endif	/* DHD_EFI && DHD_INTR_POLL_PERIOD_DYNAMIC */
8204 				bus->calc_ds_exit_latency = TRUE;
8205 				/* Assert */
8206 				if (dhd_bus_set_device_wake(bus, TRUE) == BCME_OK) {
8207 					bus->ds_enabled = FALSE;
8208 					if (INBAND_DW_ENAB(bus)) {
8209 						if (bus->ds_exit_latency != 0) {
8210 							DHD_ERROR(("DS-EXIT latency = %llu us\n",
8211 								bus->ds_exit_latency));
8212 						} else {
8213 							DHD_ERROR(("Failed to measure DS-EXIT"
8214 								" latency!(Possibly a non"
8215 								" waitable context)\n"));
8216 						}
8217 					}
8218 				} else {
8219 					DHD_ERROR(("%s: Disable Deep Sleep failed !\n",
8220 						__FUNCTION__));
8221 				}
8222 				bus->calc_ds_exit_latency = FALSE;
8223 #if defined(DHD_EFI) && defined(DHD_INTR_POLL_PERIOD_DYNAMIC)
8224 				/* restore interrupt poll period to the previous existing value */
8225 				dhd_os_set_intr_poll_period(bus, bus->dhd->cur_intr_poll_period);
8226 #endif	/* DHD_EFI && DHD_INTR_POLL_PERIOD_DYNAMIC */
8227 			} else {
8228 				DHD_ERROR(("%s: Deep Sleep already disabled !\n", __FUNCTION__));
8229 			}
8230 		}
8231 		else
8232 			DHD_ERROR(("%s: Invalid number, allowed only 0|1\n", __FUNCTION__));
8233 
8234 		break;
8235 #endif /* PCIE_OOB || PCIE_INB_DW */
8236 #ifdef DEVICE_TX_STUCK_DETECT
8237 	case IOV_GVAL(IOV_DEVICE_TX_STUCK_DETECT):
8238 		int_val = bus->dev_tx_stuck_monitor;
8239 		bcopy(&int_val, arg, val_size);
8240 		break;
8241 	case IOV_SVAL(IOV_DEVICE_TX_STUCK_DETECT):
8242 		bus->dev_tx_stuck_monitor = (bool)int_val;
8243 		break;
8244 #endif /* DEVICE_TX_STUCK_DETECT */
8245 	case IOV_GVAL(IOV_PCIE_SUSPEND):
8246 		int_val = (bus->dhd->busstate == DHD_BUS_SUSPEND) ? 1 : 0;
8247 		bcopy(&int_val, arg, val_size);
8248 		break;
8249 
8250 	case IOV_SVAL(IOV_PCIE_SUSPEND):
8251 		if (bool_val) { /* Suspend */
8252 			int ret;
8253 			unsigned long flags;
8254 
8255 			/*
8256 			 * If some other context is busy, wait until they are done,
8257 			 * before starting suspend
8258 			 */
8259 			ret = dhd_os_busbusy_wait_condition(bus->dhd,
8260 				&bus->dhd->dhd_bus_busy_state, DHD_BUS_BUSY_IN_DHD_IOVAR);
8261 			if (ret == 0) {
8262 				DHD_ERROR(("%s: Wait timed out, dhd_bus_busy_state = 0x%x\n",
8263 					__FUNCTION__, bus->dhd->dhd_bus_busy_state));
8264 				return BCME_BUSY;
8265 			}
8266 
8267 			DHD_GENERAL_LOCK(bus->dhd, flags);
8268 			DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd);
8269 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
8270 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
8271 			dhdpcie_bus_suspend(bus, TRUE, TRUE);
8272 #else
8273 			dhdpcie_bus_suspend(bus, TRUE);
8274 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
8275 
8276 			DHD_GENERAL_LOCK(bus->dhd, flags);
8277 			DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
8278 			dhd_os_busbusy_wake(bus->dhd);
8279 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
8280 		} else { /* Resume */
8281 			unsigned long flags;
8282 			DHD_GENERAL_LOCK(bus->dhd, flags);
8283 			DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd);
8284 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
8285 
8286 			dhdpcie_bus_suspend(bus, FALSE);
8287 
8288 			DHD_GENERAL_LOCK(bus->dhd, flags);
8289 			DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd);
8290 			dhd_os_busbusy_wake(bus->dhd);
8291 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
8292 		}
8293 		break;
8294 
8295 	case IOV_GVAL(IOV_MEMSIZE):
8296 		int_val = (int32)bus->ramsize;
8297 		bcopy(&int_val, arg, val_size);
8298 		break;
8299 #ifdef DHD_BUS_MEM_ACCESS
8300 	case IOV_SVAL(IOV_MEMBYTES):
8301 	case IOV_GVAL(IOV_MEMBYTES):
8302 	{
8303 		uint32 address;		/* absolute backplane address */
8304 		uint size, dsize;
8305 		uint8 *data;
8306 
8307 		bool set = (actionid == IOV_SVAL(IOV_MEMBYTES));
8308 
8309 		ASSERT(plen >= 2*sizeof(int));
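
		/* params layout (as consumed below, added note): params[0] is the
		 * backplane address, params[1] the size in bytes; for a set the
		 * data to write follows at params[2..], for a get the data is
		 * returned in 'arg'.
		 */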
8310 
8311 		address = (uint32)int_val;
8312 		bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val));
8313 		size = (uint)int_val;
8314 
8315 		/* Do some validation */
8316 		dsize = set ? plen - (2 * sizeof(int)) : len;
8317 		if (dsize < size) {
8318 			DHD_ERROR(("%s: error on %s membytes, addr 0x%08x size %d dsize %d\n",
8319 			           __FUNCTION__, (set ? "set" : "get"), address, size, dsize));
8320 			bcmerror = BCME_BADARG;
8321 			break;
8322 		}
8323 
8324 		DHD_INFO(("%s: Request to %s %d bytes at address 0x%08x, dsize %d\n",
8325 		          __FUNCTION__, (set ? "write" : "read"), size, address, dsize));
8326 
8327 		/* check if CR4 */
8328 		if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
8329 		    si_setcore(bus->sih, ARMCA7_CORE_ID, 0) ||
8330 		    si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
8331 			/* if the write targets the RAM base, save the reset instruction destined for address 0 */
8332 			if (set && address == bus->dongle_ram_base) {
8333 				bus->resetinstr = *(((uint32*)params) + 2);
8334 			}
8335 		}
8336 
8337 		/* Generate the actual data pointer */
8338 		data = set ? (uint8*)params + 2 * sizeof(int) : (uint8*)arg;
8339 
8340 		/* Call to do the transfer */
8341 		bcmerror = dhdpcie_bus_membytes(bus, set, address, data, size);
8342 
8343 		break;
8344 	}
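	/*
	 * For reference, the membytes iovar buffer layout implied by the parsing
	 * above (a sketch; the struct and field names are illustrative only):
	 *
	 *   struct {
	 *       int32 address;   // absolute backplane address
	 *       int32 size;      // number of bytes to transfer
	 *       uint8 data[];    // payload, present only in the set (write) case
	 *   };
	 *
	 * For a get, the data travels back in 'arg' instead, which is why dsize
	 * is validated against 'len' rather than 'plen' in that path.
	 */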
8345 #endif /* DHD_BUS_MEM_ACCESS */
8346 
8347 	/* Debug related. Dumps core registers or one of the dongle memories */
8348 	case IOV_GVAL(IOV_DUMP_DONGLE):
8349 	{
8350 		dump_dongle_in_t ddi = *(dump_dongle_in_t*)params;
8351 		dump_dongle_out_t *ddo = (dump_dongle_out_t*)arg;
8352 		uint32 *p = ddo->val;
8353 		const uint max_offset = 4096 - 1; /* one core contains max 4096/4 registers */
8354 
8355 		if (plen < sizeof(ddi) || len < sizeof(ddo)) {
8356 			bcmerror = BCME_BADARG;
8357 			break;
8358 		}
8359 
8360 		switch (ddi.type) {
8361 		case DUMP_DONGLE_COREREG:
8362 			ddo->n_bytes = 0;
8363 
8364 			if (si_setcoreidx(bus->sih, ddi.index) == NULL) {
8365 				break; // beyond last core: core enumeration ended
8366 			}
8367 
8368 			ddo->address = si_addrspace(bus->sih, CORE_SLAVE_PORT_0, CORE_BASE_ADDR_0);
8369 			ddo->address += ddi.offset; // BP address at which this dump starts
8370 
8371 			ddo->id = si_coreid(bus->sih);
8372 			ddo->rev = si_corerev(bus->sih);
8373 
8374 			while (ddi.offset < max_offset &&
8375 				sizeof(dump_dongle_out_t) + ddo->n_bytes < (uint)len) {
8376 				*p++ = si_corereg(bus->sih, ddi.index, ddi.offset, 0, 0);
8377 				ddi.offset += sizeof(uint32);
8378 				ddo->n_bytes += sizeof(uint32);
8379 			}
8380 			break;
8381 		default:
8382 			// TODO: implement d11 SHM/TPL dumping
8383 			bcmerror = BCME_BADARG;
8384 			break;
8385 		}
8386 		break;
8387 	}
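	/*
	 * dump_dongle output layout, as assembled above (a sketch): a
	 * dump_dongle_out_t header (address/id/rev/n_bytes) followed immediately
	 * by n_bytes of raw 32-bit register values. A caller is expected to
	 * invoke this iovar repeatedly, advancing ddi.index (core) and/or
	 * ddi.offset, until n_bytes comes back as 0, which marks the end of the
	 * core enumeration.
	 */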
8388 
8389 	/* Debug related. Returns a string with dongle capabilities */
8390 	case IOV_GVAL(IOV_DHD_CAPS):
8391 	{
8392 		if (dhd_cap(bus->dhd, (char*)arg, len) == NULL) {
8393 			bcmerror = BCME_BUFTOOSHORT;
8394 		}
8395 		break;
8396 	}
8397 
8398 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
8399 	case IOV_SVAL(IOV_GDB_SERVER):
8400 		/* debugger_*() functions may sleep, so cannot hold spinlock */
8401 		if (int_val > 0) {
8402 			debugger_init((void *) bus, &bus_ops, int_val, SI_ENUM_BASE(bus->sih));
8403 		} else {
8404 			debugger_close();
8405 		}
8406 		break;
8407 #endif /* DEBUGGER || DHD_DSCOPE */
8408 #if defined(GDB_PROXY)
8409 	case IOV_GVAL(IOV_GDB_PROXY_PROBE):
8410 	{
8411 		dhd_gdb_proxy_probe_data_t ret;
8412 		ret.data_len = (uint32)sizeof(ret);
8413 		ret.magic = DHD_IOCTL_MAGIC;
8414 		ret.flags = 0;
8415 		if (bus->gdb_proxy_access_enabled) {
8416 			ret.flags |= DHD_GDB_PROXY_PROBE_ACCESS_ENABLED;
8417 			if (bus->dhd->busstate < DHD_BUS_LOAD) {
8418 				ret.flags |= DHD_GDB_PROXY_PROBE_FIRMWARE_NOT_RUNNING;
8419 			} else {
8420 				ret.flags |= DHD_GDB_PROXY_PROBE_FIRMWARE_RUNNING;
8421 			}
8422 		}
8423 		if (bus->gdb_proxy_bootloader_mode) {
8424 			ret.flags |= DHD_GDB_PROXY_PROBE_BOOTLOADER_MODE;
8425 		}
8426 		ret.last_id = bus->gdb_proxy_last_id;
8427 		if (bus->hostfw_buf.va) {
8428 			ret.flags |= DHD_GDB_PROXY_PROBE_HOSTMEM_CODE;
8429 			ret.hostmem_code_win_base =
8430 				(uint32)PCIEDEV_ARM_ADDR(PHYSADDRLO(bus->hostfw_buf.pa),
8431 				PCIEDEV_TRANS_WIN_HOSTMEM);
8432 			ret.hostmem_code_win_length = bus->hostfw_buf.len;
8433 		}
8434 		if (plen && int_val) {
8435 			bus->gdb_proxy_last_id = (uint32)int_val;
8436 		}
8437 		if (len >= sizeof(ret)) {
8438 			bcopy(&ret, arg, sizeof(ret));
8439 			bus->dhd->gdb_proxy_active = TRUE;
8440 		} else {
8441 			bcmerror = BCME_BADARG;
8442 		}
8443 		break;
8444 	}
8445 	case IOV_GVAL(IOV_GDB_PROXY_STOP_COUNT):
8446 		int_val = (int32)bus->dhd->gdb_proxy_stop_count;
8447 		bcopy(&int_val, arg, sizeof(int_val));
8448 		break;
8449 	case IOV_SVAL(IOV_GDB_PROXY_STOP_COUNT):
8450 		bus->dhd->gdb_proxy_stop_count = (uint32)int_val;
8451 		break;
8452 #endif /* GDB_PROXY */
8453 
8454 #ifdef BCM_BUZZZ
8455 	/* Dump dongle side buzzz trace to console */
8456 	case IOV_GVAL(IOV_BUZZZ_DUMP):
8457 		bcmerror = dhd_buzzz_dump_dngl(bus);
8458 		break;
8459 #endif /* BCM_BUZZZ */
8460 
8461 	case IOV_SVAL(IOV_SET_DOWNLOAD_STATE):
8462 		bcmerror = dhdpcie_bus_download_state(bus, bool_val);
8463 		break;
8464 
8465 #if defined(FW_SIGNATURE)
8466 	case IOV_SVAL(IOV_SET_DOWNLOAD_INFO):
8467 	{
8468 		fw_download_info_t *info = (fw_download_info_t*)params;
8469 		DHD_INFO(("dwnldinfo: sig=%s fw=%x,%u bl=%s,0x%x\n",
8470 			info->fw_signature_fname,
8471 			info->fw_start_addr, info->fw_size,
8472 			info->bootloader_fname, info->bootloader_start_addr));
8473 		bcmerror = dhdpcie_bus_save_download_info(bus,
8474 			info->fw_start_addr, info->fw_size, info->fw_signature_fname,
8475 			info->bootloader_fname, info->bootloader_start_addr);
8476 		break;
8477 	}
8478 #endif /* FW_SIGNATURE */
8479 
8480 	case IOV_GVAL(IOV_RAMSIZE):
8481 		int_val = (int32)bus->ramsize;
8482 		bcopy(&int_val, arg, val_size);
8483 		break;
8484 
8485 	case IOV_SVAL(IOV_RAMSIZE):
8486 		bus->ramsize = int_val;
8487 		bus->orig_ramsize = int_val;
8488 		break;
8489 
8490 	case IOV_GVAL(IOV_RAMSTART):
8491 		int_val = (int32)bus->dongle_ram_base;
8492 		bcopy(&int_val, arg, val_size);
8493 		break;
8494 
8495 	case IOV_GVAL(IOV_CC_NVMSHADOW):
8496 	{
8497 		struct bcmstrbuf dump_b;
8498 
8499 		bcm_binit(&dump_b, arg, len);
8500 		bcmerror = dhdpcie_cc_nvmshadow(bus, &dump_b);
8501 		break;
8502 	}
8503 
8504 	case IOV_GVAL(IOV_SLEEP_ALLOWED):
8505 		bool_val = bus->sleep_allowed;
8506 		bcopy(&bool_val, arg, val_size);
8507 		break;
8508 
8509 	case IOV_SVAL(IOV_SLEEP_ALLOWED):
8510 		bus->sleep_allowed = bool_val;
8511 		break;
8512 
8513 	case IOV_GVAL(IOV_DONGLEISOLATION):
8514 		int_val = bus->dhd->dongle_isolation;
8515 		bcopy(&int_val, arg, val_size);
8516 		break;
8517 
8518 	case IOV_SVAL(IOV_DONGLEISOLATION):
8519 		bus->dhd->dongle_isolation = bool_val;
8520 		break;
8521 
8522 	case IOV_GVAL(IOV_LTRSLEEPON_UNLOOAD):
8523 		int_val = bus->ltrsleep_on_unload;
8524 		bcopy(&int_val, arg, val_size);
8525 		break;
8526 
8527 	case IOV_SVAL(IOV_LTRSLEEPON_UNLOOAD):
8528 		bus->ltrsleep_on_unload = bool_val;
8529 		break;
8530 
8531 	case IOV_GVAL(IOV_DUMP_RINGUPD_BLOCK):
8532 	{
8533 		struct bcmstrbuf dump_b;
8534 		bcm_binit(&dump_b, arg, len);
8535 		bcmerror = dhd_prot_ringupd_dump(bus->dhd, &dump_b);
8536 		break;
8537 	}
8538 	case IOV_GVAL(IOV_DMA_RINGINDICES):
8539 	{
8540 		int_val = dhdpcie_get_dma_ring_indices(bus->dhd);
8541 		bcopy(&int_val, arg, sizeof(int_val));
8542 		break;
8543 	}
8544 	case IOV_SVAL(IOV_DMA_RINGINDICES):
8545 		bcmerror = dhdpcie_set_dma_ring_indices(bus->dhd, int_val);
8546 		break;
8547 
8548 	case IOV_GVAL(IOV_METADATA_DBG):
8549 		int_val = dhd_prot_metadata_dbg_get(bus->dhd);
8550 		bcopy(&int_val, arg, val_size);
8551 		break;
8552 	case IOV_SVAL(IOV_METADATA_DBG):
8553 		dhd_prot_metadata_dbg_set(bus->dhd, (int_val != 0));
8554 		break;
8555 
8556 	case IOV_GVAL(IOV_RX_METADATALEN):
8557 		int_val = dhd_prot_metadatalen_get(bus->dhd, TRUE);
8558 		bcopy(&int_val, arg, val_size);
8559 		break;
8560 
8561 	case IOV_SVAL(IOV_RX_METADATALEN):
8562 #if !(defined(BCM_ROUTER_DHD))
8563 		if (int_val > 64) {
8564 			bcmerror = BCME_BUFTOOLONG;
8565 			break;
8566 		}
8567 		dhd_prot_metadatalen_set(bus->dhd, int_val, TRUE);
8568 #else
8569 		bcmerror = BCME_UNSUPPORTED;
8570 #endif /* BCM_ROUTER_DHD */
8571 		break;
8572 
8573 	case IOV_SVAL(IOV_TXP_THRESHOLD):
8574 		dhd_prot_txp_threshold(bus->dhd, TRUE, int_val);
8575 		break;
8576 
8577 	case IOV_GVAL(IOV_TXP_THRESHOLD):
8578 		int_val = dhd_prot_txp_threshold(bus->dhd, FALSE, int_val);
8579 		bcopy(&int_val, arg, val_size);
8580 		break;
8581 
8582 	case IOV_SVAL(IOV_DB1_FOR_MB):
8583 		if (int_val)
8584 			bus->db1_for_mb = TRUE;
8585 		else
8586 			bus->db1_for_mb = FALSE;
8587 		break;
8588 
8589 	case IOV_GVAL(IOV_DB1_FOR_MB):
8590 		if (bus->db1_for_mb)
8591 			int_val = 1;
8592 		else
8593 			int_val = 0;
8594 		bcopy(&int_val, arg, val_size);
8595 		break;
8596 
8597 	case IOV_GVAL(IOV_TX_METADATALEN):
8598 		int_val = dhd_prot_metadatalen_get(bus->dhd, FALSE);
8599 		bcopy(&int_val, arg, val_size);
8600 		break;
8601 
8602 	case IOV_SVAL(IOV_TX_METADATALEN):
8603 #if !(defined(BCM_ROUTER_DHD))
8604 		if (int_val > 64) {
8605 			bcmerror = BCME_BUFTOOLONG;
8606 			break;
8607 		}
8608 		dhd_prot_metadatalen_set(bus->dhd, int_val, FALSE);
8609 #else
8610 		bcmerror = BCME_UNSUPPORTED;
8611 #endif /* BCM_ROUTER_DHD */
8612 		break;
8613 
8614 	case IOV_SVAL(IOV_DEVRESET):
8615 	{
8616 		devreset_info_t *devreset = (devreset_info_t *)arg;
8617 
8618 		if (!devreset) {
8619 			return BCME_BADARG;
8620 		}
8621 
8622 		if (devreset->length == sizeof(devreset_info_t)) {
8623 			if (devreset->version != DHD_DEVRESET_VERSION) {
8624 				return BCME_VERSION;
8625 			}
8626 			int_val = devreset->mode;
8627 		}
8628 
8629 		switch (int_val) {
8630 			case DHD_BUS_DEVRESET_ON:
8631 				bcmerror = dhd_bus_devreset(bus->dhd, (uint8)int_val);
8632 				break;
8633 			case DHD_BUS_DEVRESET_OFF:
8634 				bcmerror = dhd_bus_devreset(bus->dhd, (uint8)int_val);
8635 				break;
8636 #if !defined(NDIS)
8637 			case DHD_BUS_DEVRESET_FLR:
8638 				bcmerror = dhd_bus_perform_flr(bus, bus->flr_force_fail);
8639 				break;
8640 			case DHD_BUS_DEVRESET_FLR_FORCE_FAIL:
8641 				bus->flr_force_fail = TRUE;
8642 				break;
8643 #ifdef BT_OVER_PCIE
8644 			case DHD_BUS_DEVRESET_QUIESCE:
8645 				if (bus->dhd->busstate == DHD_BUS_DATA) {
8646 					if (bus->dhd->db7_trap.fw_db7w_trap) {
8647 						unsigned long flags = 0;
8648 
8649 						DHD_GENERAL_LOCK(bus->dhd, flags);
8650 						bus->dhd->db7_trap.fw_db7w_trap_inprogress = TRUE;
8651 						DHD_GENERAL_UNLOCK(bus->dhd, flags);
8652 						dhdpcie_fw_trap(bus);
8653 						OSL_DELAY(100 * 1000); // wait 100 msec
8654 						DHD_GENERAL_LOCK(bus->dhd, flags);
8655 						bus->dhd->db7_trap.fw_db7w_trap_inprogress = FALSE;
8656 						DHD_GENERAL_UNLOCK(bus->dhd, flags);
8657 					} else {
8658 						DHD_TRACE(("%s: DB7 Not supported!!!\n",
8659 							__FUNCTION__));
8660 					}
8661 
8662 					devreset->status =
8663 						dhd_bus_perform_flr_with_quiesce(bus->dhd, bus,
8664 						FALSE);
8665 
8666 					if (devreset->status == BCME_DNGL_DEVRESET) {
8667 						devreset->status = BCME_OK;
8668 					}
8669 					bcmerror = BCME_DNGL_DEVRESET;
8670 				} else {
8671 					DHD_ERROR(("%s: Bus is NOT up\n", __FUNCTION__));
8672 					bcmerror = BCME_NOTUP;
8673 				}
8674 				break;
8675 #endif /* BT_OVER_PCIE */
8676 #endif /* !defined(NDIS) */
8677 			default:
8678 				DHD_ERROR(("%s: invalid argument for devreset\n", __FUNCTION__));
8679 				break;
8680 		}
8681 		break;
8682 	}
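	/*
	 * Summary of the devreset modes dispatched above (derived from the
	 * switch; illustrative shorthand, not new definitions):
	 *   ON / OFF          - full dongle reset via dhd_bus_devreset()
	 *   FLR               - PCIe Function Level Reset
	 *   FLR_FORCE_FAIL    - arm the FLR failure-injection flag
	 *   QUIESCE           - DB7 trap, then FLR with quiesce (BT_OVER_PCIE)
	 * Callers using the structured interface pass a devreset_info_t with a
	 * matching version/length pair; legacy callers pass a bare int mode.
	 */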
8683 	case IOV_SVAL(IOV_FORCE_FW_TRAP):
8684 		if (bus->dhd->busstate == DHD_BUS_DATA)
8685 			dhdpcie_fw_trap(bus);
8686 		else {
8687 			DHD_ERROR(("%s: Bus is NOT up\n", __FUNCTION__));
8688 			bcmerror = BCME_NOTUP;
8689 		}
8690 		break;
8691 	case IOV_GVAL(IOV_FLOW_PRIO_MAP):
8692 		int_val = bus->dhd->flow_prio_map_type;
8693 		bcopy(&int_val, arg, val_size);
8694 		break;
8695 
8696 	case IOV_SVAL(IOV_FLOW_PRIO_MAP):
8697 		int_val = (int32)dhd_update_flow_prio_map(bus->dhd, (uint8)int_val);
8698 		bcopy(&int_val, arg, val_size);
8699 		break;
8700 
8701 #ifdef DHD_PCIE_RUNTIMEPM
8702 	case IOV_GVAL(IOV_IDLETIME):
8703 		if (!(bus->dhd->op_mode & DHD_FLAG_MFG_MODE)) {
8704 			int_val = bus->idletime;
8705 		} else {
8706 			int_val = 0;
8707 		}
8708 		bcopy(&int_val, arg, val_size);
8709 		break;
8710 
8711 	case IOV_SVAL(IOV_IDLETIME):
8712 		if (int_val < 0) {
8713 			bcmerror = BCME_BADARG;
8714 		} else {
8715 			bus->idletime = int_val;
8716 			if (bus->idletime) {
8717 				DHD_ENABLE_RUNTIME_PM(bus->dhd);
8718 			} else {
8719 				DHD_DISABLE_RUNTIME_PM(bus->dhd);
8720 			}
8721 		}
8722 		break;
8723 #endif /* DHD_PCIE_RUNTIMEPM */
8724 
8725 	case IOV_GVAL(IOV_TXBOUND):
8726 		int_val = (int32)dhd_txbound;
8727 		bcopy(&int_val, arg, val_size);
8728 		break;
8729 
8730 	case IOV_SVAL(IOV_TXBOUND):
8731 		dhd_txbound = (uint)int_val;
8732 		break;
8733 
8734 	case IOV_SVAL(IOV_H2D_MAILBOXDATA):
8735 		dhdpcie_send_mb_data(bus, (uint)int_val);
8736 		break;
8737 
8738 	case IOV_SVAL(IOV_INFORINGS):
8739 		dhd_prot_init_info_rings(bus->dhd);
8740 		break;
8741 
8742 	case IOV_SVAL(IOV_H2D_PHASE):
8743 		if (bus->dhd->busstate != DHD_BUS_DOWN) {
8744 			DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
8745 				__FUNCTION__));
8746 			bcmerror = BCME_NOTDOWN;
8747 			break;
8748 		}
8749 		if (int_val)
8750 			bus->dhd->h2d_phase_supported = TRUE;
8751 		else
8752 			bus->dhd->h2d_phase_supported = FALSE;
8753 		break;
8754 
8755 	case IOV_GVAL(IOV_H2D_PHASE):
8756 		int_val = (int32) bus->dhd->h2d_phase_supported;
8757 		bcopy(&int_val, arg, val_size);
8758 		break;
8759 
8760 	case IOV_SVAL(IOV_H2D_ENABLE_TRAP_BADPHASE):
8761 		if (bus->dhd->busstate != DHD_BUS_DOWN) {
8762 			DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
8763 				__FUNCTION__));
8764 			bcmerror = BCME_NOTDOWN;
8765 			break;
8766 		}
8767 		if (int_val)
8768 			bus->dhd->force_dongletrap_on_bad_h2d_phase = TRUE;
8769 		else
8770 			bus->dhd->force_dongletrap_on_bad_h2d_phase = FALSE;
8771 		break;
8772 
8773 	case IOV_GVAL(IOV_H2D_ENABLE_TRAP_BADPHASE):
8774 		int_val = (int32) bus->dhd->force_dongletrap_on_bad_h2d_phase;
8775 		bcopy(&int_val, arg, val_size);
8776 		break;
8777 
8778 	case IOV_SVAL(IOV_H2D_TXPOST_MAX_ITEM):
8779 		if (bus->dhd->busstate != DHD_BUS_DOWN) {
8780 			DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
8781 				__FUNCTION__));
8782 			bcmerror = BCME_NOTDOWN;
8783 			break;
8784 		}
8785 		dhd_prot_set_h2d_max_txpost(bus->dhd, (uint16)int_val);
8786 		break;
8787 
8788 	case IOV_GVAL(IOV_H2D_TXPOST_MAX_ITEM):
8789 		int_val = dhd_prot_get_h2d_max_txpost(bus->dhd);
8790 		bcopy(&int_val, arg, val_size);
8791 		break;
8792 
8793 #if defined(DHD_HTPUT_TUNABLES)
8794 	case IOV_SVAL(IOV_H2D_HTPUT_TXPOST_MAX_ITEM):
8795 		if (bus->dhd->busstate != DHD_BUS_DOWN) {
8796 			DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
8797 				__FUNCTION__));
8798 			bcmerror = BCME_NOTDOWN;
8799 			break;
8800 		}
8801 		dhd_prot_set_h2d_htput_max_txpost(bus->dhd, (uint16)int_val);
8802 		break;
8803 
8804 	case IOV_GVAL(IOV_H2D_HTPUT_TXPOST_MAX_ITEM):
8805 		int_val = dhd_prot_get_h2d_htput_max_txpost(bus->dhd);
8806 		bcopy(&int_val, arg, val_size);
8807 		break;
8808 #endif /* DHD_HTPUT_TUNABLES */
8809 
8810 	case IOV_GVAL(IOV_RXBOUND):
8811 		int_val = (int32)dhd_rxbound;
8812 		bcopy(&int_val, arg, val_size);
8813 		break;
8814 
8815 	case IOV_SVAL(IOV_RXBOUND):
8816 		dhd_rxbound = (uint)int_val;
8817 		break;
8818 
8819 	case IOV_GVAL(IOV_TRAPDATA):
8820 	{
8821 		struct bcmstrbuf dump_b;
8822 		bcm_binit(&dump_b, arg, len);
8823 		bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, FALSE);
8824 		break;
8825 	}
8826 
8827 	case IOV_GVAL(IOV_TRAPDATA_RAW):
8828 	{
8829 		struct bcmstrbuf dump_b;
8830 		bcm_binit(&dump_b, arg, len);
8831 		bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, TRUE);
8832 		break;
8833 	}
8834 
8835 #ifdef DHD_PCIE_REG_ACCESS
8836 	case IOV_GVAL(IOV_PCIEASPM): {
8837 		uint8 clkreq = 0;
8838 		uint32 aspm = 0;
8839 
8840 		/* this command hides the register details, but matches the lcreg bit layout:
8841 		#define PCIE_CLKREQ_ENAB		0x100
8842 		#define PCIE_ASPM_L1_ENAB        	2
8843 		#define PCIE_ASPM_L0s_ENAB       	1
8844 		*/
8845 
8846 		clkreq = dhdpcie_clkreq(bus->dhd->osh, 0, 0);
8847 		aspm = dhdpcie_lcreg(bus->dhd->osh, 0, 0);
8848 
8849 		int_val = ((clkreq & 0x1) << 8) | (aspm & PCIE_ASPM_ENAB);
8850 		bcopy(&int_val, arg, val_size);
8851 		break;
8852 	}
8853 
8854 	case IOV_SVAL(IOV_PCIEASPM): {
8855 		uint32 tmp;
8856 
8857 		tmp = dhdpcie_lcreg(bus->dhd->osh, 0, 0);
8858 		dhdpcie_lcreg(bus->dhd->osh, PCIE_ASPM_ENAB,
8859 			(tmp & ~PCIE_ASPM_ENAB) | (int_val & PCIE_ASPM_ENAB));
8860 
8861 		dhdpcie_clkreq(bus->dhd->osh, 1, ((int_val & 0x100) >> 8));
8862 		break;
8863 	}
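	/*
	 * Worked example of the pcieaspm encoding handled above: bit 8 mirrors
	 * PCIE_CLKREQ_ENAB and the low bits mirror the ASPM L0s/L1 enables, so
	 * e.g. (illustrative values only):
	 *   int_val = 0x102  ->  CLKREQ enabled, ASPM L1 enabled, L0s disabled
	 *   int_val = 0x003  ->  CLKREQ disabled, both L0s and L1 enabled
	 */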
8864 #endif /* DHD_PCIE_REG_ACCESS */
8865 	case IOV_SVAL(IOV_HANGREPORT):
8866 		bus->dhd->hang_report = bool_val;
8867 		DHD_ERROR(("%s: Set hang_report as %d\n",
8868 			__FUNCTION__, bus->dhd->hang_report));
8869 		break;
8870 
8871 	case IOV_GVAL(IOV_HANGREPORT):
8872 		int_val = (int32)bus->dhd->hang_report;
8873 		bcopy(&int_val, arg, val_size);
8874 		break;
8875 
8876 	case IOV_SVAL(IOV_CTO_PREVENTION):
8877 		bcmerror = dhdpcie_cto_init(bus, bool_val);
8878 		break;
8879 
8880 	case IOV_GVAL(IOV_CTO_PREVENTION):
8881 		if (bus->sih->buscorerev < 19) {
8882 			bcmerror = BCME_UNSUPPORTED;
8883 			break;
8884 		}
8885 		int_val = (int32)bus->cto_enable;
8886 		bcopy(&int_val, arg, val_size);
8887 		break;
8888 
8889 	case IOV_SVAL(IOV_CTO_THRESHOLD):
8890 		{
8891 			if (bus->sih->buscorerev < 19) {
8892 				bcmerror = BCME_UNSUPPORTED;
8893 				break;
8894 			}
8895 			bus->cto_threshold = (uint32)int_val;
8896 		}
8897 		break;
8898 
8899 	case IOV_GVAL(IOV_CTO_THRESHOLD):
8900 		if (bus->sih->buscorerev < 19) {
8901 			bcmerror = BCME_UNSUPPORTED;
8902 			break;
8903 		}
8904 		if (bus->cto_threshold) {
8905 			int_val = (int32)bus->cto_threshold;
8906 		} else {
8907 			int_val = pcie_cto_to_thresh_default(bus->sih->buscorerev);
8908 		}
8909 
8910 		bcopy(&int_val, arg, val_size);
8911 		break;
8912 
8913 	case IOV_SVAL(IOV_PCIE_WD_RESET):
8914 		if (bool_val) {
8915 			/* Legacy chipcommon watchdog reset */
8916 			dhdpcie_cc_watchdog_reset(bus);
8917 		}
8918 		break;
8919 
8920 #ifdef DHD_EFI
8921 	case IOV_SVAL(IOV_CONTROL_SIGNAL):
8922 		{
8923 			bcmerror = dhd_control_signal(bus, arg, len, TRUE);
8924 			break;
8925 		}
8926 
8927 	case IOV_GVAL(IOV_CONTROL_SIGNAL):
8928 		{
8929 			bcmerror = dhd_control_signal(bus, params, plen, FALSE);
8930 			break;
8931 		}
8932 	case IOV_GVAL(IOV_WIFI_PROPERTIES):
8933 		bcmerror = dhd_wifi_properties(bus, params, plen);
8934 		break;
8935 	case IOV_GVAL(IOV_OTP_DUMP):
8936 		bcmerror = dhd_otp_dump(bus, params, plen);
8937 		break;
8938 #if defined(BT_OVER_PCIE) && defined(BTOP_TEST)
8939 	case IOV_SVAL(IOV_BTOP_TEST):
8940 		bcmerror = dhd_btop_test(bus, arg, len);
8941 		break;
8942 #endif /* BT_OVER_PCIE && BTOP_TEST */
8943 #endif /*  DHD_EFI */
8944 	case IOV_GVAL(IOV_IDMA_ENABLE):
8945 		int_val = bus->idma_enabled;
8946 		bcopy(&int_val, arg, val_size);
8947 		break;
8948 	case IOV_SVAL(IOV_IDMA_ENABLE):
8949 		bus->idma_enabled = (bool)int_val;
8950 		break;
8951 	case IOV_GVAL(IOV_IFRM_ENABLE):
8952 		int_val = bus->ifrm_enabled;
8953 		bcopy(&int_val, arg, val_size);
8954 		break;
8955 	case IOV_SVAL(IOV_IFRM_ENABLE):
8956 		bus->ifrm_enabled = (bool)int_val;
8957 		break;
8958 #ifdef BCMINTERNAL
8959 	case IOV_GVAL(IOV_DMA_CHAN):
8960 		int_val = bus->dma_chan;
8961 		bcopy(&int_val, arg, val_size);
8962 		break;
8963 	case IOV_SVAL(IOV_DMA_CHAN):
8964 		{
8965 			bus->dma_chan = (bool)int_val;
8966 			bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
8967 				dhd_bus_db0_addr_get(bus));
8968 			break;
8969 		}
8970 	case IOV_SVAL(IOV_HYBRIDFW):
8971 		{
8972 			char *fp;
8973 			fp = dhd_os_open_image1(bus->dhd, params);
8974 			if (fp == NULL) {
8975 				bcmerror = BCME_ERROR;
8976 				break;
8977 			}
8978 			bcmerror = dhdpcie_hybridfw_download(bus, fp);
8979 			dhd_os_close_image1(bus->dhd, fp);
8980 			break;
8981 		}
8982 #endif /* BCMINTERNAL */
8983 	case IOV_GVAL(IOV_CLEAR_RING):
8984 		bcopy(&int_val, arg, val_size);
8985 		dhd_flow_rings_flush(bus->dhd, 0);
8986 		break;
8987 	case IOV_GVAL(IOV_DAR_ENABLE):
8988 		int_val = bus->dar_enabled;
8989 		bcopy(&int_val, arg, val_size);
8990 		break;
8991 	case IOV_SVAL(IOV_DAR_ENABLE):
8992 		bus->dar_enabled = (bool)int_val;
8993 		break;
8994 	case IOV_GVAL(IOV_HSCBSIZE):
8995 		bcmerror = dhd_get_hscb_info(bus->dhd, NULL, (uint32 *)arg);
8996 		break;
8997 #ifdef DHD_BUS_MEM_ACCESS
8998 	case IOV_GVAL(IOV_HSCBBYTES):
8999 		bcmerror = dhd_get_hscb_buff(bus->dhd, int_val, int_val2, (void*)arg);
9000 		break;
9001 #endif /* DHD_BUS_MEM_ACCESS */
9002 #ifdef D2H_MINIDUMP
9003 	case IOV_GVAL(IOV_MINIDUMP_OVERRIDE):
9004 		int_val = bus->d2h_minidump_override;
9005 		bcopy(&int_val, arg, val_size);
9006 		break;
9007 	case IOV_SVAL(IOV_MINIDUMP_OVERRIDE):
9008 		/* Can change it only before FW download */
9009 		if (bus->dhd->busstate != DHD_BUS_DOWN) {
9010 			DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
9011 				__FUNCTION__));
9012 			bcmerror = BCME_NOTDOWN;
9013 			break;
9014 		}
9015 		bus->d2h_minidump_override = (bool)int_val;
9016 		break;
9017 #endif /* D2H_MINIDUMP */
9018 
9019 #ifdef BCMINTERNAL
9020 #ifdef DHD_FWTRACE
9021 	case IOV_SVAL(IOV_FWTRACE):
9022 		{
9023 			DHD_INFO(("%s: set firmware tracing enable/disable %d\n",
9024 			          __FUNCTION__, int_val));
9025 
9026 			bcmerror = handle_set_fwtrace(bus->dhd, (uint32) int_val);
9027 			break;
9028 		}
9030 
9031 	case IOV_GVAL(IOV_FWTRACE):
9032 	{
9033 		uint32 val = 0, temp_val = 0;
9034 		uint16 of_counter, trace_val = 0;
9035 		int ret;
9036 
9037 		ret = dhd_iovar(bus->dhd, 0, "dngl:fwtrace",
9038 		                NULL, 0, (char *) &val, sizeof(val), FALSE);
9039 		if (ret < 0) {
9040 			DHD_ERROR(("%s: dhd_iovar get for fwtrace failed, "
9041 			           "ret=%d\n", __FUNCTION__, ret));
9042 			bcmerror = BCME_ERROR;
9043 		} else {
9044 			of_counter = get_fw_trace_overflow_counter(bus->dhd);
9045 			DHD_INFO(("overflow counter = %d \n", of_counter));
9046 			trace_val = val & 0xFFFF;
9047 			temp_val = (((uint32) temp_val | (uint32) of_counter) << 16u) | trace_val;
9048 			bcopy(&temp_val, arg, sizeof(temp_val));
9049 		}
9050 		break;
9051 	}
9052 #endif	/* DHD_FWTRACE */
9053 #endif /* BCMINTERNAL */
9054 
9055 #ifdef DHD_HP2P
9056 	case IOV_SVAL(IOV_HP2P_ENABLE):
9057 		dhd_prot_hp2p_enable(bus->dhd, TRUE, int_val);
9058 		break;
9059 
9060 	case IOV_GVAL(IOV_HP2P_ENABLE):
9061 		int_val = dhd_prot_hp2p_enable(bus->dhd, FALSE, int_val);
9062 		bcopy(&int_val, arg, val_size);
9063 		break;
9064 
9065 	case IOV_SVAL(IOV_HP2P_PKT_THRESHOLD):
9066 		dhd_prot_pkt_threshold(bus->dhd, TRUE, int_val);
9067 		break;
9068 
9069 	case IOV_GVAL(IOV_HP2P_PKT_THRESHOLD):
9070 		int_val = dhd_prot_pkt_threshold(bus->dhd, FALSE, int_val);
9071 		bcopy(&int_val, arg, val_size);
9072 		break;
9073 
9074 	case IOV_SVAL(IOV_HP2P_TIME_THRESHOLD):
9075 		dhd_prot_time_threshold(bus->dhd, TRUE, int_val);
9076 		break;
9077 
9078 	case IOV_GVAL(IOV_HP2P_TIME_THRESHOLD):
9079 		int_val = dhd_prot_time_threshold(bus->dhd, FALSE, int_val);
9080 		bcopy(&int_val, arg, val_size);
9081 		break;
9082 
9083 	case IOV_SVAL(IOV_HP2P_PKT_EXPIRY):
9084 		dhd_prot_pkt_expiry(bus->dhd, TRUE, int_val);
9085 		break;
9086 
9087 	case IOV_GVAL(IOV_HP2P_PKT_EXPIRY):
9088 		int_val = dhd_prot_pkt_expiry(bus->dhd, FALSE, int_val);
9089 		bcopy(&int_val, arg, val_size);
9090 		break;
9091 	case IOV_SVAL(IOV_HP2P_TXCPL_MAXITEMS):
9092 		if (bus->dhd->busstate != DHD_BUS_DOWN) {
9093 			return BCME_NOTDOWN;
9094 		}
9095 		dhd_bus_set_hp2p_ring_max_size(bus, TRUE, int_val);
9096 		break;
9097 
9098 	case IOV_GVAL(IOV_HP2P_TXCPL_MAXITEMS):
9099 		int_val = dhd_bus_get_hp2p_ring_max_size(bus, TRUE);
9100 		bcopy(&int_val, arg, val_size);
9101 		break;
9102 	case IOV_SVAL(IOV_HP2P_RXCPL_MAXITEMS):
9103 		if (bus->dhd->busstate != DHD_BUS_DOWN) {
9104 			return BCME_NOTDOWN;
9105 		}
9106 		dhd_bus_set_hp2p_ring_max_size(bus, FALSE, int_val);
9107 		break;
9108 
9109 	case IOV_GVAL(IOV_HP2P_RXCPL_MAXITEMS):
9110 		int_val = dhd_bus_get_hp2p_ring_max_size(bus, FALSE);
9111 		bcopy(&int_val, arg, val_size);
9112 		break;
9113 	case IOV_SVAL(IOV_HP2P_MF_ENABLE):
9114 		bus->dhd->hp2p_mf_enable = int_val ? TRUE : FALSE;
9115 		break;
9116 
9117 	case IOV_GVAL(IOV_HP2P_MF_ENABLE):
9118 		int_val = bus->dhd->hp2p_mf_enable ? 1 : 0;
9119 		bcopy(&int_val, arg, val_size);
9120 		break;
9121 #endif /* DHD_HP2P */
9122 	case IOV_SVAL(IOV_EXTDTXS_IN_TXCPL):
9123 		if (bus->dhd->busstate != DHD_BUS_DOWN) {
9124 			return BCME_NOTDOWN;
9125 		}
9126 		if (int_val)
9127 			bus->dhd->extdtxs_in_txcpl = TRUE;
9128 		else
9129 			bus->dhd->extdtxs_in_txcpl = FALSE;
9130 		break;
9131 
9132 	case IOV_GVAL(IOV_EXTDTXS_IN_TXCPL):
9133 		int_val = bus->dhd->extdtxs_in_txcpl;
9134 		bcopy(&int_val, arg, val_size);
9135 		break;
9136 
9137 	case IOV_SVAL(IOV_HOSTRDY_AFTER_INIT):
9138 		if (bus->dhd->busstate != DHD_BUS_DOWN) {
9139 			return BCME_NOTDOWN;
9140 		}
9141 		if (int_val)
9142 			bus->dhd->hostrdy_after_init = TRUE;
9143 		else
9144 			bus->dhd->hostrdy_after_init = FALSE;
9145 		break;
9146 
9147 	case IOV_GVAL(IOV_HOSTRDY_AFTER_INIT):
9148 		int_val = bus->dhd->hostrdy_after_init;
9149 		bcopy(&int_val, arg, val_size);
9150 		break;
9151 
9152 #ifdef BCMINTERNAL
9153 	case IOV_GVAL(IOV_SBREG_64):
9154 	{
9155 		sdreg_64_t sdreg;
9156 		uint32 addr, size;
9157 
9158 		bcopy(params, &sdreg, sizeof(sdreg));
9159 
9160 		addr = sdreg.offset | SI_ENUM_BASE(bus->sih);
9161 		size = sdreg.func;
9162 
9163 		if (serialized_backplane_access_64(bus, addr, size,
9164 			&uint64_val, TRUE) != BCME_OK)
9165 		{
9166 			DHD_ERROR(("Invalid size/addr combination \n"));
9167 			bcmerror = BCME_ERROR;
9168 			break;
9169 		}
9170 		bcopy(&uint64_val, arg, size);
9171 		break;
9172 	}
9173 
9174 	case IOV_SVAL(IOV_SBREG_64):
9175 	{
9176 		sdreg_64_t sdreg;
9177 		uint32 addr, size;
9178 
9179 		bcopy(params, &sdreg, sizeof(sdreg));
9180 
9181 		addr = sdreg.offset | SI_ENUM_BASE(bus->sih);
9182 		size = sdreg.func;
9183 
9184 		if (serialized_backplane_access_64(bus, addr, size,
9185 			(uint64 *)(&sdreg.value), FALSE) != BCME_OK) {
9186 			DHD_ERROR(("Invalid size/addr combination \n"));
9187 			bcmerror = BCME_ERROR;
9188 		}
9189 		break;
9190 	}
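	/*
	 * sdreg_64_t packing as consumed above (a sketch): 'offset' is the
	 * backplane offset, OR'ed with SI_ENUM_BASE() to form the absolute
	 * address, and 'func' appears to be reused to carry the access width in
	 * bytes rather than a function number; 'value' holds the data for the
	 * set path.
	 */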
9191 #endif /* BCMINTERNAL */
9192 
9193 	default:
9194 		bcmerror = BCME_UNSUPPORTED;
9195 		break;
9196 	}
9197 
9198 exit:
9199 	return bcmerror;
9200 } /* dhdpcie_bus_doiovar */
9201 
9202 /** Transfers bytes from host to dongle using pio mode */
9203 static int
9204 dhdpcie_bus_lpback_req(struct  dhd_bus *bus, uint32 len)
9205 {
9206 	if (bus->dhd == NULL) {
9207 		DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
9208 		return 0;
9209 	}
9210 	if (bus->dhd->prot == NULL) {
9211 		DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
9212 		return 0;
9213 	}
9214 	if (bus->dhd->busstate != DHD_BUS_DATA) {
9215 		DHD_ERROR(("%s: not in a ready state for LPBK\n", __FUNCTION__));
9216 		return 0;
9217 	}
9218 	dhdmsgbuf_lpbk_req(bus->dhd, len);
9219 	return 0;
9220 }
9221 
9222 void
9223 dhd_bus_dump_dar_registers(struct dhd_bus *bus)
9224 {
9225 	uint32 dar_clk_ctrl_val, dar_pwr_ctrl_val, dar_intstat_val,
9226 		dar_errlog_val, dar_erraddr_val, dar_pcie_mbint_val;
9227 	uint32 dar_clk_ctrl_reg, dar_pwr_ctrl_reg, dar_intstat_reg,
9228 		dar_errlog_reg, dar_erraddr_reg, dar_pcie_mbint_reg;
9229 
9230 	if (bus->is_linkdown) {
9231 		DHD_ERROR(("%s: link is down\n", __FUNCTION__));
9232 		return;
9233 	}
9234 
9235 	if (bus->sih == NULL) {
9236 		DHD_ERROR(("%s: si_attach has not happened, cannot dump DAR registers\n",
9237 			__FUNCTION__));
9238 		return;
9239 	}
9240 
9241 	if (DAR_PWRREQ(bus)) {
9242 		dhd_bus_pcie_pwr_req(bus);
9243 	}
9244 
9245 	dar_clk_ctrl_reg = (uint32)DAR_CLK_CTRL(bus->sih->buscorerev);
9246 	dar_pwr_ctrl_reg = (uint32)DAR_PCIE_PWR_CTRL(bus->sih->buscorerev);
9247 	dar_intstat_reg = (uint32)DAR_INTSTAT(bus->sih->buscorerev);
9248 	dar_errlog_reg = (uint32)DAR_ERRLOG(bus->sih->buscorerev);
9249 	dar_erraddr_reg = (uint32)DAR_ERRADDR(bus->sih->buscorerev);
9250 	dar_pcie_mbint_reg = (uint32)DAR_PCIMailBoxInt(bus->sih->buscorerev);
9251 
9252 	if (bus->sih->buscorerev < 24) {
9253 		DHD_ERROR(("%s: DAR not supported for corerev(%d) < 24\n",
9254 			__FUNCTION__, bus->sih->buscorerev));
9255 		return;
9256 	}
9257 
9258 	dar_clk_ctrl_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_clk_ctrl_reg, 0, 0);
9259 	dar_pwr_ctrl_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_pwr_ctrl_reg, 0, 0);
9260 	dar_intstat_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_intstat_reg, 0, 0);
9261 	dar_errlog_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_errlog_reg, 0, 0);
9262 	dar_erraddr_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_erraddr_reg, 0, 0);
9263 	dar_pcie_mbint_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_pcie_mbint_reg, 0, 0);
9264 
9265 	DHD_ERROR(("%s: dar_clk_ctrl(0x%x:0x%x) dar_pwr_ctrl(0x%x:0x%x) dar_intstat(0x%x:0x%x)\n",
9266 		__FUNCTION__, dar_clk_ctrl_reg, dar_clk_ctrl_val,
9267 		dar_pwr_ctrl_reg, dar_pwr_ctrl_val, dar_intstat_reg, dar_intstat_val));
9268 
9269 	DHD_ERROR(("%s: dar_errlog(0x%x:0x%x) dar_erraddr(0x%x:0x%x) dar_pcie_mbint(0x%x:0x%x)\n",
9270 		__FUNCTION__, dar_errlog_reg, dar_errlog_val,
9271 		dar_erraddr_reg, dar_erraddr_val, dar_pcie_mbint_reg, dar_pcie_mbint_val));
9272 }
9273 
9274 /* Ring DoorBell1 to indicate Hostready i.e. D3 Exit */
9275 void
9276 dhd_bus_hostready(struct  dhd_bus *bus)
9277 {
9278 	if (!bus->dhd->d2h_hostrdy_supported) {
9279 		return;
9280 	}
9281 
9282 	if (bus->is_linkdown) {
9283 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
9284 		return;
9285 	}
9286 
9287 	DHD_ERROR(("%s: Read PCICMD Reg: 0x%08X\n", __FUNCTION__,
9288 		dhd_pcie_config_read(bus, PCI_CFG_CMD, sizeof(uint32))));
9289 
9290 	dhd_bus_dump_dar_registers(bus);
9291 
9292 #ifdef DHD_MMIO_TRACE
9293 	dhd_bus_mmio_trace(bus, dhd_bus_db1_addr_get(bus), 0x1, TRUE);
9294 #endif /* DHD_MMIO_TRACE */
9295 	si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db1_addr_get(bus), ~0, 0x12345678);
9296 	bus->hostready_count++;
9297 	DHD_ERROR(("%s: Ring Hostready:%d\n", __FUNCTION__, bus->hostready_count));
9298 }
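/*
 * Note on the doorbell write above: ringing DB1 is a plain register write via
 * si_corereg() (mask ~0 writes all bits); it is the write event itself that
 * interrupts the dongle, so the 0x12345678 payload is presumably an arbitrary
 * marker value rather than protocol data.
 */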
9299 
9300 /* Clear INTSTATUS */
9301 void
9302 dhdpcie_bus_clear_intstatus(struct dhd_bus *bus)
9303 {
9304 	uint32 intstatus = 0;
9305 	/* Skip after receiving D3 ACK */
9306 	if (DHD_CHK_BUS_LPS_D3_ACKED(bus)) {
9307 		return;
9308 	}
9309 	/* XXX: check for PCIE Gen2 also */
9310 	if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) ||
9311 		(bus->sih->buscorerev == 2)) {
9312 		intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
9313 		dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus);
9314 	} else {
9315 		/* this is a PCIE core register..not a config register... */
9316 		intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, 0, 0);
9317 		si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, bus->def_intmask,
9318 			intstatus);
9319 	}
9320 }
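/*
 * Both branches above appear to rely on write-1-to-clear semantics: the raw
 * status is read and then written back to the same register, clearing exactly
 * the bits that were observed set. On the core-register path the write is
 * additionally masked with bus->def_intmask, so only the interrupt sources
 * DHD owns are acknowledged.
 */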
9321 
9322 int
9323 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
9324 dhdpcie_bus_suspend(struct dhd_bus *bus, bool state, bool byint)
9325 #else
9326 dhdpcie_bus_suspend(struct dhd_bus *bus, bool state)
9327 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
9328 {
9329 	int timeleft;
9330 	int rc = 0;
9331 	unsigned long flags;
9332 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
9333 	int d3_read_retry = 0;
9334 	uint32 d2h_mb_data = 0;
9335 	uint32 zero = 0;
9336 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
9337 
9338 	printf("%s: state=%d\n", __FUNCTION__, state);
9339 	if (bus->dhd == NULL) {
9340 		DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
9341 		return BCME_ERROR;
9342 	}
9343 	if (bus->dhd->prot == NULL) {
9344 		DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
9345 		return BCME_ERROR;
9346 	}
9347 
9348 	if (dhd_query_bus_erros(bus->dhd)) {
9349 		return BCME_ERROR;
9350 	}
9351 
9352 	DHD_GENERAL_LOCK(bus->dhd, flags);
9353 	if (!(bus->dhd->busstate == DHD_BUS_DATA || bus->dhd->busstate == DHD_BUS_SUSPEND)) {
9354 		DHD_ERROR(("%s: not in a ready state\n", __FUNCTION__));
9355 		DHD_GENERAL_UNLOCK(bus->dhd, flags);
9356 		return BCME_ERROR;
9357 	}
9358 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
9359 	if (bus->dhd->dongle_reset) {
9360 		DHD_ERROR(("Dongle is in reset state.\n"));
9361 		return -EIO;
9362 	}
9363 
9364 	/* Check whether we are already in the requested state.
9365 	 * state=TRUE means Suspend
9366 	 * state=FALSE means Resume
9367 	 */
9368 	if (state == TRUE && bus->dhd->busstate == DHD_BUS_SUSPEND) {
9369 		DHD_ERROR(("Bus is already in SUSPEND state.\n"));
9370 		return BCME_OK;
9371 	} else if (state == FALSE && bus->dhd->busstate == DHD_BUS_DATA) {
9372 		DHD_ERROR(("Bus is already in RESUME state.\n"));
9373 		return BCME_OK;
9374 	}
9375 
9376 	if (state) {
9377 #ifdef OEM_ANDROID
9378 		int idle_retry = 0;
9379 		int active;
9380 #endif /* OEM_ANDROID */
9381 
9382 		if (bus->is_linkdown) {
9383 			DHD_ERROR(("%s: PCIe link was down, state=%d\n",
9384 				__FUNCTION__, state));
9385 			return BCME_ERROR;
9386 		}
9387 
9388 		/* Suspend */
9389 		DHD_ERROR(("%s: Entering suspend state\n", __FUNCTION__));
9390 
9391 		bus->dhd->dhd_watchdog_ms_backup = dhd_watchdog_ms;
9392 		if (bus->dhd->dhd_watchdog_ms_backup) {
9393 			DHD_ERROR(("%s: Disabling wdtick before going to suspend\n",
9394 				__FUNCTION__));
9395 			dhd_os_wd_timer(bus->dhd, 0);
9396 		}
9397 
9398 		DHD_GENERAL_LOCK(bus->dhd, flags);
9399 #if defined(LINUX) || defined(linux)
9400 		if (DHD_BUS_BUSY_CHECK_IN_TX(bus->dhd)) {
9401 			DHD_ERROR(("Tx request has not completed\n"));
9402 			bus->dhd->busstate = DHD_BUS_DATA;
9403 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
9404 #ifndef DHD_EFI
9405 			return -EBUSY;
9406 #else
9407 			return BCME_ERROR;
9408 #endif
9409 		}
9410 #endif /* LINUX || linux */
9411 
9412 		bus->last_suspend_start_time = OSL_LOCALTIME_NS();
9413 
9414 		/* stop all interface network queue. */
9415 		dhd_bus_stop_queue(bus);
9416 		DHD_GENERAL_UNLOCK(bus->dhd, flags);
9417 
9418 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
9419 		if (byint) {
9420 			DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
9421 			/* Clear wait_for_d3_ack before sending D3_INFORM */
9422 			bus->wait_for_d3_ack = 0;
9423 			dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
9424 
9425 			timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
9426 			DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
9427 		} else {
9428 			/* Clear wait_for_d3_ack before sending D3_INFORM */
9429 			bus->wait_for_d3_ack = 0;
9430 			dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM | H2D_HOST_ACK_NOINT);
9431 			while (!bus->wait_for_d3_ack && d3_read_retry < MAX_D3_ACK_TIMEOUT) {
9432 				dhdpcie_handle_mb_data(bus);
9433 				usleep_range(1000, 1500);
9434 				d3_read_retry++;
9435 			}
9436 		}
9437 #else
9438 		DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
9439 #ifdef DHD_TIMESYNC
9440 		/* disable time sync mechanism, if configured */
9441 		dhd_timesync_control(bus->dhd, TRUE);
9442 #endif /* DHD_TIMESYNC */
9443 
9444 #ifdef PCIE_INB_DW
9445 		/* As D3_INFORM will be sent after De-assert,
9446 		 * skip sending DS-ACK for DS-REQ.
9447 		 */
9448 		bus->skip_ds_ack = TRUE;
9449 #endif /* PCIE_INB_DW */
9450 
9451 #if defined(PCIE_OOB) || defined(PCIE_INB_DW)
9452 		dhd_bus_set_device_wake(bus, TRUE);
9453 #endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
9454 #ifdef PCIE_OOB
9455 		bus->oob_presuspend = TRUE;
9456 #endif
9457 #ifdef PCIE_INB_DW
9458 		/* De-assert at this point for In-band device_wake */
9459 		if (INBAND_DW_ENAB(bus)) {
9460 #ifdef DHD_EFI
9461 			/* during pcie suspend, irrespective of whether 'deep_sleep' is
9462 			* enabled or disabled, always de-assert DW. If 'deep_sleep' was
9463 			* disabled by user iovar, then upon resuming, DW is asserted again
9464 			* in the 'dhd_bus_handle_mb_data' path.
9465 			*/
9466 			dhd_bus_inb_set_device_wake(bus, FALSE);
9467 #else
9468 			dhd_bus_set_device_wake(bus, FALSE);
9469 #endif /* DHD_EFI */
9470 			DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
9471 			dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_HOST_SLEEP_WAIT);
9472 			DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
9473 		}
9474 #endif /* PCIE_INB_DW */
9475 		/* Clear wait_for_d3_ack before sending D3_INFORM */
9476 		bus->wait_for_d3_ack = 0;
9477 		/*
9478 		 * Send H2D_HOST_D3_INFORM to dongle and mark bus->bus_low_power_state
9479 		 * to DHD_BUS_D3_INFORM_SENT in dhd_prot_ring_write_complete_mbdata
9480 		 * inside atomic context, so that no more DBs will be
9481 		 * rung after sending D3_INFORM
9482 		 */
9483 #ifdef PCIE_INB_DW
9484 		if (INBAND_DW_ENAB(bus)) {
9485 			DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
9486 			dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
9487 			DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
9488 		} else
9489 #endif /* PCIE_INB_DW */
9490 		{
9491 			dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
9492 		}
9493 
9494 		/* Wait for D3 ACK for D3_ACK_RESP_TIMEOUT seconds */
9495 
9496 		timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
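		/*
		 * Sketch of the D3 handshake driven here (host-side view only):
		 *   1. host clears wait_for_d3_ack
		 *   2. host sends H2D_HOST_D3_INFORM through the H2D mailbox
		 *   3. dongle answers with a D3 ACK; the ISR/DPC path
		 *      (dhd_bus_handle_d3_ack) sets wait_for_d3_ack and the bus
		 *      LPS state
		 *   4. dhd_os_d3ack_wait() returns non-zero on ack, 0 on timeout,
		 *      which the timeout branch further below handles
		 */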
9497 
9498 #ifdef DHD_RECOVER_TIMEOUT
9499 		/* XXX: WAR for missing D3 ACK MB interrupt */
9500 		if (bus->wait_for_d3_ack == 0) {
9501 			/* If wait_for_d3_ack was not updated because D2H MB was not received */
9502 			uint32 intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
9503 				bus->pcie_mailbox_int, 0, 0);
9504 			int host_irq_disabled = dhdpcie_irq_disabled(bus);
9505 			if ((intstatus) && (intstatus != (uint32)-1) &&
9506 				(timeleft == 0) && (!dhd_query_bus_erros(bus->dhd))) {
9507 				DHD_ERROR(("%s: D3 ACK trying again intstatus=%x"
9508 					" host_irq_disabled=%d\n",
9509 					__FUNCTION__, intstatus, host_irq_disabled));
9510 				dhd_pcie_intr_count_dump(bus->dhd);
9511 				dhd_print_tasklet_status(bus->dhd);
9512 				if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6 &&
9513 					!bus->use_mailbox) {
9514 					dhd_prot_process_ctrlbuf(bus->dhd);
9515 				} else {
9516 					dhdpcie_handle_mb_data(bus);
9517 				}
9518 				timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
9519 				/* Clear Interrupts */
9520 				dhdpcie_bus_clear_intstatus(bus);
9521 			}
9522 		} /* bus->wait_for_d3_ack was 0 */
9523 #endif /* DHD_RECOVER_TIMEOUT */
9524 
9525 		DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
9526 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
9527 
9528 #ifdef OEM_ANDROID
9529 		/* To allow threads that got pre-empted to complete.
9530 		 */
9531 		while ((active = dhd_os_check_wakelock_all(bus->dhd)) &&
9532 			(idle_retry < MAX_WKLK_IDLE_CHECK)) {
9533 			OSL_SLEEP(1);
9534 			idle_retry++;
9535 		}
9536 #endif /* OEM_ANDROID */
9537 
9538 		if (bus->wait_for_d3_ack) {
9539 			DHD_ERROR(("%s: Got D3 Ack \n", __FUNCTION__));
9540 			/* Got D3 Ack. Suspend the bus */
9541 #ifdef OEM_ANDROID
9542 			if (active) {
9543 				DHD_ERROR(("%s(): Suspend failed because of wakelock, "
9544 					"restoring Dongle to D0\n", __FUNCTION__));
9545 
9546 				if (bus->dhd->dhd_watchdog_ms_backup) {
9547 					DHD_ERROR(("%s: Enabling wdtick due to wakelock active\n",
9548 						__FUNCTION__));
9549 					dhd_os_wd_timer(bus->dhd,
9550 						bus->dhd->dhd_watchdog_ms_backup);
9551 				}
9552 
9553 				/*
9554 				 * Dongle still thinks that it has to be in D3 state until
9555 				 * it gets a D0 Inform, but we are backing off from suspend.
9556 				 * Ensure that Dongle is brought back to D0.
9557 				 *
9558 				 * Bringing back Dongle from D3 Ack state to D0 state is a
9559 				 * 2 step process. Dongle would want to know that D0 Inform
9560 				 * would be sent as a MB interrupt to bring it out of D3 Ack
9561 				 * state to D0 state. So we have to send both messages.
9562 				 */
9563 
9564 				/* Clear wait_for_d3_ack to send D0_INFORM or host_ready */
9565 				bus->wait_for_d3_ack = 0;
9566 
9567 				DHD_SET_BUS_NOT_IN_LPS(bus);
9568 #ifdef PCIE_INB_DW
9569 				if (INBAND_DW_ENAB(bus)) {
9570 					DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
9571 					/* Since suspend has failed because a wakelock is held,
9572 					 * update the DS state to DW_DEVICE_HOST_WAKE_WAIT,
9573 					 * so that the host sends the DS-ACK for DS-REQ.
9574 					 */
9575 					DHD_ERROR(("Suspend failed because a wakelock is held, "
9576 					 "setting inband dw state to DW_DEVICE_HOST_WAKE_WAIT\n"));
9577 					dhdpcie_bus_set_pcie_inband_dw_state(bus,
9578 						DW_DEVICE_HOST_WAKE_WAIT);
9579 					dhd_bus_ds_trace(bus, 0, TRUE,
9580 						dhdpcie_bus_get_pcie_inband_dw_state(bus));
9581 					DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
9582 				}
9583 				bus->skip_ds_ack = FALSE;
9584 #endif /* PCIE_INB_DW */
9585 				/* For Linux, MacOS etc. (other than NDIS), re-enable the
9586 				 * dongle interrupts using intmask and the host interrupts,
9587 				 * which were disabled in the dhdpcie_bus_isr()->
9588 				 * dhd_bus_handle_d3_ack().
9589 				 */
9590 				/* Enable back interrupt using Intmask!! */
9591 				dhdpcie_bus_intr_enable(bus);
9592 #ifndef NDIS /* !NDIS */
9593 				/* Defer enabling host irq after RPM suspend failure */
9594 				if (!DHD_BUS_BUSY_CHECK_RPM_SUSPEND_IN_PROGRESS(bus->dhd)) {
9595 					/* Enable back interrupt from Host side!! */
9596 					if (dhdpcie_irq_disabled(bus)) {
9597 						dhdpcie_enable_irq(bus);
9598 						bus->resume_intr_enable_count++;
9599 					}
9600 				}
9601 #else
9602 				/* Enable back the intmask which was cleared in DPC
9603 				 * after getting D3_ACK.
9604 				 */
9605 				bus->resume_intr_enable_count++;
9606 
9607 #endif /* !NDIS */
9608 				if (bus->use_d0_inform) {
9609 					DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
9610 					dhdpcie_send_mb_data(bus,
9611 						(H2D_HOST_D0_INFORM_IN_USE | H2D_HOST_D0_INFORM));
9612 					DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
9613 				}
9614 				/* ring doorbell 1 (hostready) */
9615 				dhd_bus_hostready(bus);
9616 
9617 				DHD_GENERAL_LOCK(bus->dhd, flags);
9618 				bus->dhd->busstate = DHD_BUS_DATA;
9619 				/* resume all interface network queue. */
9620 				dhd_bus_start_queue(bus);
9621 				DHD_GENERAL_UNLOCK(bus->dhd, flags);
9622 				rc = BCME_ERROR;
9623 			} else {
9624 				/* Actual Suspend after no wakelock */
9625 #endif /* OEM_ANDROID */
9626 				/* At this time bus->bus_low_power_state will be
9627 				 * made to DHD_BUS_D3_ACK_RECIEVED after receiving D3_ACK
9628 				 * in dhd_bus_handle_d3_ack()
9629 				 */
9630 #ifdef PCIE_OOB
9631 				bus->oob_presuspend = FALSE;
9632 				if (OOB_DW_ENAB(bus)) {
9633 					dhd_bus_set_device_wake(bus, FALSE);
9634 				}
9635 #endif
9636 #ifdef PCIE_OOB
9637 				bus->oob_presuspend = TRUE;
9638 #endif
9639 #ifdef PCIE_INB_DW
9640 				if (INBAND_DW_ENAB(bus)) {
9641 					DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
9642 					if (dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
9643 						DW_DEVICE_HOST_SLEEP_WAIT) {
9644 						dhdpcie_bus_set_pcie_inband_dw_state(bus,
9645 							DW_DEVICE_HOST_SLEEP);
9647 						dhd_bus_ds_trace(bus, 0, TRUE,
9648 							dhdpcie_bus_get_pcie_inband_dw_state(bus));
9652 					}
9653 					DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
9654 				}
9655 #endif /* PCIE_INB_DW */
9656 				if (bus->use_d0_inform &&
9657 					(bus->api.fw_rev < PCIE_SHARED_VERSION_6)) {
9658 					DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
9659 					dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM_IN_USE));
9660 					DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
9661 				}
9662 #ifdef PCIE_OOB
9663 				dhd_bus_set_device_wake(bus, FALSE);
9664 #endif /* PCIE_OOB */
9665 
9666 #if defined(BCMPCIE_OOB_HOST_WAKE)
9667 				if (bus->dhd->dhd_induce_error == DHD_INDUCE_DROP_OOB_IRQ) {
9668 					DHD_ERROR(("%s: Inducing DROP OOB IRQ\n", __FUNCTION__));
9669 				} else {
9670 					dhdpcie_oob_intr_set(bus, TRUE);
9671 				}
9672 #endif /* BCMPCIE_OOB_HOST_WAKE */
9673 
9674 				DHD_GENERAL_LOCK(bus->dhd, flags);
9675 				/* The Host cannot process interrupts now so disable the same.
9676 				 * No need to disable the dongle INTR using intmask, as we are
9677 				 * already calling disabling INTRs from DPC context after
9678 				 * getting D3_ACK in dhd_bus_handle_d3_ack.
9679 				 * Code may not look symmetric between Suspend and
9680 				 * Resume paths but this is done to close down the timing window
9681 				 * between DPC and suspend context and bus->bus_low_power_state
9682 				 * will be set to DHD_BUS_D3_ACK_RECIEVED in DPC.
9683 				 */
9684 				bus->dhd->d3ackcnt_timeout = 0;
9685 				bus->dhd->busstate = DHD_BUS_SUSPEND;
9686 				DHD_GENERAL_UNLOCK(bus->dhd, flags);
9687 #if defined(LINUX) || defined(linux)
9688 				dhdpcie_dump_resource(bus);
9689 #endif /* LINUX || linux */
9690 				rc = dhdpcie_pci_suspend_resume(bus, state);
9691 				if (!rc) {
9692 					bus->last_suspend_end_time = OSL_LOCALTIME_NS();
9693 				}
9694 #ifdef OEM_ANDROID
9695 			}
9696 #endif /* OEM_ANDROID */
9697 		} else if (timeleft == 0) { /* D3 ACK Timeout */
9698 #ifdef DHD_FW_COREDUMP
9699 			uint32 cur_memdump_mode = bus->dhd->memdump_enabled;
9700 #endif /* DHD_FW_COREDUMP */
9701 
9702 			/* check if the D3 ACK timeout is due to a scheduling issue */
9703 			bus->dhd->is_sched_error = !dhd_query_bus_erros(bus->dhd) &&
9704 				dhd_bus_query_dpc_sched_errors(bus->dhd);
9705 			bus->dhd->d3ack_timeout_occured = TRUE;
9706 			/* The D3 Ack has timed out */
9707 			bus->dhd->d3ackcnt_timeout++;
9708 			DHD_ERROR(("%s: resumed on timeout for D3 ACK%s d3_inform_cnt %d\n",
9709 				__FUNCTION__, bus->dhd->is_sched_error ?
9710 				" due to scheduling problem" : "", bus->dhd->d3ackcnt_timeout));
9711 #if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
9712 			/* XXX DHD triggers a Kernel panic if the resume-on-timeout occurs
9713 			 * due to tasklet or workqueue scheduling problems in the Linux Kernel.
9714 			 * Customer informs that it is hard to find any clue from the
9715 			 * host memory dump since the important tasklet or workqueue
9716 			 * information has already disappeared due to the latency of printing
9717 			 * the timestamp logs while debugging the scan timeout issue.
9718 			 * For this reason, the customer requests us to trigger a Kernel
9719 			 * Panic rather than taking a SOCRAM dump.
9720 			 */
9721 			if (bus->dhd->is_sched_error && cur_memdump_mode == DUMP_MEMFILE_BUGON) {
9722 				/* change g_assert_type to trigger Kernel panic */
9723 				g_assert_type = 2;
9724 				/* use ASSERT() to trigger panic */
9725 				ASSERT(0);
9726 			}
9727 #endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */
9728 			DHD_SET_BUS_NOT_IN_LPS(bus);
9729 
9730 			DHD_GENERAL_LOCK(bus->dhd, flags);
9731 			bus->dhd->busstate = DHD_BUS_DATA;
9732 			/* resume all interface network queue. */
9733 			dhd_bus_start_queue(bus);
9734 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
9735 			/* XXX : avoid multiple socram dumps from dongle trap and
9736 			 * invalid PCIe bus access due to PCIe link down
9737 			 */
9738 			if (bus->dhd->check_trap_rot) {
9739 				DHD_ERROR(("Check dongle trap in the case of d3 ack timeout\n"));
9740 				dhdpcie_checkdied(bus, NULL, 0);
9741 			}
9742 			if (bus->dhd->dongle_trap_occured) {
9743 #ifdef OEM_ANDROID
9744 #ifdef SUPPORT_LINKDOWN_RECOVERY
9745 #ifdef CONFIG_ARCH_MSM
9746 				bus->no_cfg_restore = 1;
9747 #endif /* CONFIG_ARCH_MSM */
9748 #endif /* SUPPORT_LINKDOWN_RECOVERY */
9749 				dhd_os_check_hang(bus->dhd, 0, -EREMOTEIO);
9750 #endif /* OEM_ANDROID */
9751 			} else if (!bus->is_linkdown &&
9752 				!bus->cto_triggered) {
9753 				uint32 intstatus = 0;
9754 
9755 				/* Check if PCIe bus status is valid */
9756 				intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
9757 					bus->pcie_mailbox_int, 0, 0);
9758 				if (intstatus == (uint32)-1) {
9759 					/* Invalidate PCIe bus status */
9760 					bus->is_linkdown = 1;
9761 				}
9762 
9763 				dhd_bus_dump_console_buffer(bus);
9764 				dhd_prot_debug_info_print(bus->dhd);
9765 #ifdef DHD_FW_COREDUMP
9766 				if (cur_memdump_mode) {
9767 					/* write core dump to file */
9768 					bus->dhd->memdump_type = DUMP_TYPE_D3_ACK_TIMEOUT;
9769 					dhdpcie_mem_dump(bus);
9770 				}
9771 #endif /* DHD_FW_COREDUMP */
9772 
9773 #ifdef NDIS
9774 				/* ASSERT only if hang detection/recovery is disabled.
9775 				 * If enabled then let
9776 				 * windows HDR mechanism trigger FW download via surprise removal
9777 				 */
9778 				dhd_bus_check_died(bus);
9779 #endif
9780 #ifdef OEM_ANDROID
9781 				DHD_ERROR(("%s: Event HANG send up due to D3_ACK timeout\n",
9782 					__FUNCTION__));
9783 #ifdef SUPPORT_LINKDOWN_RECOVERY
9784 #ifdef CONFIG_ARCH_MSM
9785 				bus->no_cfg_restore = 1;
9786 #endif /* CONFIG_ARCH_MSM */
9787 #endif /* SUPPORT_LINKDOWN_RECOVERY */
9788 				dhd_os_check_hang(bus->dhd, 0, -ETIMEDOUT);
9789 #endif /* OEM_ANDROID */
9790 			}
9791 #if defined(DHD_ERPOM) || (defined(DHD_EFI) && defined(BT_OVER_PCIE))
9792 			dhd_schedule_reset(bus->dhd);
9793 #endif /* DHD_ERPOM || DHD_EFI */
9794 			rc = -ETIMEDOUT;
9795 		}
9796 #ifdef PCIE_OOB
9797 		bus->oob_presuspend = FALSE;
9798 #endif /* PCIE_OOB */
9799 	} else {
9800 		/* Resume */
9801 		DHD_ERROR(("%s: Entering resume state\n", __FUNCTION__));
9802 		bus->last_resume_start_time = OSL_LOCALTIME_NS();
9803 
9804 		/**
9805 		 * PCIE2_BAR0_CORE2_WIN gets reset after D3 cold.
9806 		 * si_backplane_access(function to read/write backplane)
9807 		 * updates the window (PCIE2_BAR0_CORE2_WIN) only if the
9808 		 * window being accessed is different from the window
9809 		 * pointed to by second_bar0win.
9810 		 * Since PCIE2_BAR0_CORE2_WIN is already reset because of D3 cold,
9811 		 * invalidating second_bar0win after resume updates
9812 		 * PCIE2_BAR0_CORE2_WIN with right window.
9813 		 */
9814 		si_invalidate_second_bar0win(bus->sih);
9815 #if defined(linux) && defined(OEM_ANDROID)
9816 #if defined(BCMPCIE_OOB_HOST_WAKE)
9817 		DHD_OS_OOB_IRQ_WAKE_UNLOCK(bus->dhd);
9818 #endif /* BCMPCIE_OOB_HOST_WAKE */
9819 #endif /* linux && OEM_ANDROID */
9820 #ifdef PCIE_INB_DW
9821 		if (INBAND_DW_ENAB(bus)) {
9822 			DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
9823 			if (dhdpcie_bus_get_pcie_inband_dw_state(bus) == DW_DEVICE_HOST_SLEEP) {
9824 				dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_HOST_WAKE_WAIT);
9826 				dhd_bus_ds_trace(bus, 0, TRUE,
9827 					dhdpcie_bus_get_pcie_inband_dw_state(bus));
9831 			}
9832 			DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
9833 		}
9834 		bus->skip_ds_ack = FALSE;
9835 #endif /* PCIE_INB_DW */
9836 		rc = dhdpcie_pci_suspend_resume(bus, state);
9837 #if defined(LINUX) || defined(linux)
9838 		dhdpcie_dump_resource(bus);
9839 #endif /* LINUX || linux */
9840 
9841 		/* set bus_low_power_state to DHD_BUS_NO_LOW_POWER_STATE */
9842 		DHD_SET_BUS_NOT_IN_LPS(bus);
9843 
9844 		if (!rc && bus->dhd->busstate == DHD_BUS_SUSPEND) {
9845 			if (bus->use_d0_inform) {
9846 				DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
9847 				dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM));
9848 				DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
9849 			}
9850 			/* ring doorbell 1 (hostready) */
9851 			dhd_bus_hostready(bus);
9852 		}
9853 		DHD_GENERAL_LOCK(bus->dhd, flags);
9854 		bus->dhd->busstate = DHD_BUS_DATA;
9855 #ifdef DHD_PCIE_RUNTIMEPM
9856 		if (DHD_BUS_BUSY_CHECK_RPM_SUSPEND_DONE(bus->dhd)) {
9857 			bus->bus_wake = 1;
9858 			OSL_SMP_WMB();
9859 			wake_up(&bus->rpm_queue);
9860 		}
9861 #endif /* DHD_PCIE_RUNTIMEPM */
9862 #ifdef PCIE_OOB
9863 		/*
9864 		 * Assert & Deassert the Device Wake. The following is the explanation for doing so.
9865 		 * 0) At this point,
9866 		 *    Host is in suspend state, Link is in L2/L3, Dongle is in D3 Cold
9867 		 *    Device Wake is enabled.
9868 		 * 1) When the Host comes out of Suspend, it first sends PERST# in the Link.
9869 		 *    Seeing this, the Dongle moves from D3 Cold to NO DS State
9870 		 * 2) Now The Host OS calls the "resume" function of DHD. From here the DHD first
9871 		 *    Asserts the Device Wake.
9872 		 *    From the defn, when the Device Wake is asserted, The dongle FW will ensure
9873 		 *    that the Dongle is out of deep sleep IF the device is already in deep sleep.
9874 		 *    But note that now the Dongle is NOT in Deep sleep and is actually in
9875 		 *    NO DS state. So just driving the Device Wake high does not trigger any state
9876 		 *    transitions. The Host should actually "Toggle" the Device Wake to ensure
9877 		 *    that Dongle synchronizes with the Host and starts the State Transition to D0.
9878 		 * 4) Note that the above explanation is applicable only when the Host comes out of
9879 		 *    suspend and the Dongle comes out of D3 Cold
9880 		 */
9881 		/* This logic is not required when hostready is enabled */
9882 
9883 		if (!bus->dhd->d2h_hostrdy_supported) {
9884 			dhd_bus_set_device_wake(bus, TRUE);
9885 			OSL_DELAY(1000);
9886 			dhd_bus_set_device_wake(bus, FALSE);
9887 		}
9888 
9889 #endif /* PCIE_OOB */
9890 		/* resume all interface network queue. */
9891 		dhd_bus_start_queue(bus);
9892 
9893 		/* For Linux, MacOS etc. (other than NDIS), re-enable the dongle interrupts
9894 		 * using intmask and the host interrupts,
9895 		 * which were disabled in the dhdpcie_bus_isr()->dhd_bus_handle_d3_ack().
9896 		 */
9897 		dhdpcie_bus_intr_enable(bus); /* Enable back interrupt using Intmask!! */
9898 #ifndef NDIS /* !NDIS */
9899 		/* Defer enabling host interrupt until RPM resume done */
9900 		if (!DHD_BUS_BUSY_CHECK_RPM_RESUME_IN_PROGRESS(bus->dhd)) {
9901 			if (dhdpcie_irq_disabled(bus)) {
9902 				dhdpcie_enable_irq(bus);
9903 				bus->resume_intr_enable_count++;
9904 			}
9905 		}
9906 #else
9907 		/* TODO: for NDIS also we need to use enable_irq in future */
9908 		bus->resume_intr_enable_count++;
9909 #endif /* !NDIS */
9910 
9911 		DHD_GENERAL_UNLOCK(bus->dhd, flags);
9912 
9913 #ifdef DHD_TIMESYNC
9914 		/* enable time sync mechanism, if configured */
9915 		DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
9916 		dhd_timesync_control(bus->dhd, FALSE);
9917 		DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
9918 #endif /* DHD_TIMESYNC */
9919 
9920 		if (bus->dhd->dhd_watchdog_ms_backup) {
9921 			DHD_ERROR(("%s: Enabling wdtick after resume\n",
9922 				__FUNCTION__));
9923 			dhd_os_wd_timer(bus->dhd, bus->dhd->dhd_watchdog_ms_backup);
9924 		}
9925 
9926 		bus->last_resume_end_time = OSL_LOCALTIME_NS();
9927 
9928 		/* Update TCM rd index for EDL ring */
9929 		DHD_EDL_RING_TCM_RD_UPDATE(bus->dhd);
9930 
9931 	}
9932 	return rc;
9933 }
9934 
9935 #define BUS_SUSPEND	TRUE
9936 #define BUS_RESUME	FALSE
9937 int dhd_bus_suspend(dhd_pub_t *dhd)
9938 {
9939 	int ret;
9940 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
9941 	/* TODO: Check whether the arguments are correct */
9942 	ret = dhdpcie_bus_suspend(dhd->bus, TRUE, BUS_SUSPEND);
9943 #else
9944 	ret = dhdpcie_bus_suspend(dhd->bus, BUS_SUSPEND);
9945 #endif
9946 	return ret;
9947 }
9948 
9949 int dhd_bus_resume(dhd_pub_t *dhd, int stage)
9950 {
9951 	int ret;
9952 	BCM_REFERENCE(stage);
9953 
9954 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
9955 	/* TODO: Check whether the arguments are correct */
9956 	ret = dhdpcie_bus_suspend(dhd->bus, FALSE, BUS_RESUME);
9957 #else
9958 	ret = dhdpcie_bus_suspend(dhd->bus, BUS_RESUME);
9959 #endif
9960 	return ret;
9961 }
9962 
9963 uint32
9964 dhdpcie_force_alp(struct dhd_bus *bus, bool enable)
9965 {
9966 	ASSERT(bus && bus->sih);
9967 	if (enable) {
9968 		si_corereg(bus->sih, bus->sih->buscoreidx,
9969 			OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP, CCS_FORCEALP);
9970 	} else {
9971 		si_corereg(bus->sih, bus->sih->buscoreidx,
9972 			OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP, 0);
9973 	}
9974 	return 0;
9975 }
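/*
 * For readers new to the si_corereg() convention used throughout this file:
 * the last two arguments are (mask, val). A non-zero mask performs a
 * read-modify-write of the masked bits; a zero mask makes it a pure read.
 *
 *   si_corereg(sih, idx, off, CCS_FORCEALP, CCS_FORCEALP);  // set the bit
 *   si_corereg(sih, idx, off, CCS_FORCEALP, 0);             // clear the bit
 *   val = si_corereg(sih, idx, off, 0, 0);                  // read only
 */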
9976 
9977 /* set pcie l1 entry time: dhd pciereg 0x1004[22:16] */
9978 uint32
9979 dhdpcie_set_l1_entry_time(struct dhd_bus *bus, int l1_entry_time)
9980 {
9981 	uint reg_val;
9982 
9983 	ASSERT(bus && bus->sih);
9984 
9985 	si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0,
9986 		0x1004);
9987 	reg_val = si_corereg(bus->sih, bus->sih->buscoreidx,
9988 		OFFSETOF(sbpcieregs_t, configdata), 0, 0);
9989 	reg_val = (reg_val & ~(0x7f << 16)) | ((l1_entry_time & 0x7f) << 16);
9990 	si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), ~0,
9991 		reg_val);
9992 
9993 	return 0;
9994 }
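/*
 * Worked example for the bitfield update above, with an assumed existing
 * configdata value of 0x00AA5500 and l1_entry_time = 0x10:
 *   0x00AA5500 & ~(0x7f << 16)   ->  0x00805500   (bits 22:16 cleared)
 *   | ((0x10 & 0x7f) << 16)      ->  0x00905500   (new entry time inserted)
 * The access goes through the indirect configaddr/configdata register pair,
 * which exposes PCIe config offset 0x1004 via the core's backplane registers.
 */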
9995 
9996 static uint32
9997 dhd_apply_d11_war_length(struct  dhd_bus *bus, uint32 len, uint32 d11_lpbk)
9998 {
9999 	uint16 chipid = si_chipid(bus->sih);
10000 	/*
10001 	 * XXX: WAR for CRWLDOT11M-3011
10002 	 * program the DMA descriptor Buffer length as the expected frame length
10003 	 *  + 8 bytes extra for corerev 82 when buffer length % 128 is equal to 4
10004 	 */
	if ((chipid == BCM4375_CHIP_ID ||
		chipid == BCM4362_CHIP_ID ||
		chipid == BCM4377_CHIP_ID ||
		chipid == BCM43751_CHIP_ID ||
		chipid == BCM43752_CHIP_ID) &&
		(d11_lpbk != M2M_DMA_LPBK && d11_lpbk != M2M_NON_DMA_LPBK)) {
			len += 8;
	}
	DHD_ERROR(("%s: len %d\n", __FUNCTION__, len));
	return len;
}

/** Transfers bytes from host to dongle and back to host using DMA */
static int
dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus,
		uint32 len, uint32 srcdelay, uint32 destdelay,
		uint32 d11_lpbk, uint32 core_num, uint32 wait,
		uint32 mem_addr)
{
	int ret = 0;

	if (bus->dhd == NULL) {
		DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
		return BCME_ERROR;
	}
	if (bus->dhd->prot == NULL) {
		DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
		return BCME_ERROR;
	}
	if (bus->dhd->busstate != DHD_BUS_DATA) {
		DHD_ERROR(("%s: bus is not in a ready state for LPBK\n", __FUNCTION__));
		return BCME_ERROR;
	}

	if (len < 5 || len > 4194296) {
		DHD_ERROR(("%s: len is too small or too large\n", __FUNCTION__));
		return BCME_ERROR;
	}

#if defined(DHD_EFI) && defined(DHD_INTR_POLL_PERIOD_DYNAMIC)
	bus->dhd->cur_intr_poll_period = dhd_os_get_intr_poll_period();
	/* before running the loopback test, set the interrupt poll period to a smaller value */
	dhd_os_set_intr_poll_period(bus, INTR_POLL_PERIOD_CRITICAL);
#endif	/* DHD_EFI && DHD_INTR_POLL_PERIOD_DYNAMIC */

	len = dhd_apply_d11_war_length(bus, len, d11_lpbk);

	bus->dmaxfer_complete = FALSE;
	ret = dhdmsgbuf_dmaxfer_req(bus->dhd, len, srcdelay, destdelay,
		d11_lpbk, core_num, mem_addr);
	if (ret != BCME_OK || !wait) {
		DHD_INFO(("%s: dmaxfer req returns status %u; wait = %u\n", __FUNCTION__,
				ret, wait));
	} else {
		ret = dhd_os_dmaxfer_wait(bus->dhd, &bus->dmaxfer_complete);
		if (ret < 0)
			ret = BCME_NOTREADY;
#if defined(DHD_EFI) && defined(DHD_INTR_POLL_PERIOD_DYNAMIC)
		/* restore the interrupt poll period to its previous value */
		dhd_os_set_intr_poll_period(bus, bus->dhd->cur_intr_poll_period);
#endif /* DHD_EFI && DHD_INTR_POLL_PERIOD_DYNAMIC */
	}

	return ret;
}

#ifdef BCMINTERNAL
static int
dhdpcie_bus_set_tx_lpback(struct dhd_bus *bus, bool enable)
{
	if (bus->dhd == NULL) {
		DHD_ERROR(("bus not inited\n"));
		return BCME_ERROR;
	}
	if (bus->dhd->prot == NULL) {
		DHD_ERROR(("prot is not inited\n"));
		return BCME_ERROR;
	}
	if (bus->dhd->busstate != DHD_BUS_DATA) {
		DHD_ERROR(("bus is not in a ready state for LPBK\n"));
		return BCME_ERROR;
	}
	bus->dhd->loopback = enable;
	return BCME_OK;
}

static int
dhdpcie_bus_get_tx_lpback(struct dhd_bus *bus)
{
	if (bus->dhd == NULL) {
		DHD_ERROR(("bus not inited\n"));
		return BCME_ERROR;
	}
	return bus->dhd->loopback ? 1 : 0;
}
#endif /* BCMINTERNAL */

bool
dhd_bus_is_multibp_capable(struct dhd_bus *bus)
{
	return MULTIBP_CAP(bus->sih);
}

#define PCIE_REV_FOR_4378A0	66	/* dhd_bus_perform_flr_with_quiesce() causes problems */
#define PCIE_REV_FOR_4378B0	68

static int
dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter)
{
	int bcmerror = 0;
	volatile uint32 *cr4_regs;
	bool do_flr;
	bool do_wr_flops = TRUE;

	if (!bus->sih) {
		DHD_ERROR(("%s: NULL sih!!\n", __FUNCTION__));
		return BCME_ERROR;
	}

	do_flr = ((bus->sih->buscorerev != PCIE_REV_FOR_4378A0) &&
			(bus->sih->buscorerev != PCIE_REV_FOR_4378B0));

	/*
	 * Jira SWWLAN-214966: 4378B0 BToverPCIe: fails to download firmware
	 *   with "insmod dhd.ko firmware_path=rtecdc.bin nvram_path=nvram.txt" format
	 *   CTO is seen during autoload case.
	 * Need to assert PD1 power req during ARM out of reset.
	 * And doing FLR after this would conflict as FLR resets PCIe enum space.
	 */
	if (MULTIBP_ENAB(bus->sih) && !do_flr) {
		dhd_bus_pcie_pwr_req(bus);
	}

	/* To enter download state, disable ARM and reset SOCRAM.
	 * To exit download state, simply reset ARM (default is RAM boot).
	 */
	if (enter) {
#ifndef BCMQT	/* for performance reasons, skip the FLR for QT */
#ifdef BT_OVER_PCIE
		if (dhd_bus_is_multibp_capable(bus) && do_flr &&
		    dhd_fw_download_status(bus->dhd) != FW_DOWNLOAD_IN_PROGRESS) {
			/* for multi-backplane architecture, issue an FLR to reset the WLAN cores */
			const int pwr_req_ref = bus->pwr_req_ref;
			if (pwr_req_ref > 0) {
				(void)dhd_bus_perform_flr_with_quiesce(bus->dhd, bus, FALSE);

				/*
				 * If power has been requested prior to calling FLR, but
				 * the FLR code cleared the power request, we need to
				 * request again to get back to the status of where we were
				 * prior, otherwise there'll be a mismatch in reqs/clears
				 */
				if (bus->pwr_req_ref < pwr_req_ref) {
					dhd_bus_pcie_pwr_req(bus);
				}
			} else {
				(void)dhd_bus_perform_flr_with_quiesce(bus->dhd, bus, FALSE);
			}
		}
#endif /* BT_OVER_PCIE */
#endif /* !BCMQT */

		/* Make sure BAR1 maps to backplane address 0 */
		dhdpcie_setbar1win(bus, 0x00000000);
		bus->alp_only = TRUE;
#ifdef GDB_PROXY
		bus->gdb_proxy_access_enabled = TRUE;
		bus->gdb_proxy_bootloader_mode = FALSE;
#endif /* GDB_PROXY */

		/* some chips (e.g. 43602) have two ARM cores; the CR4 receives the firmware. */
		cr4_regs = si_setcore(bus->sih, ARMCR4_CORE_ID, 0);

		if (cr4_regs == NULL && !(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
		    !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) &&
		    !(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
			DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
			bcmerror = BCME_ERROR;
			goto fail;
		}

		if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
			/* Halt ARM & remove reset */
			si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
			if (!(si_setcore(bus->sih, SYSMEM_CORE_ID, 0))) {
				DHD_ERROR(("%s: Failed to find SYSMEM core!\n", __FUNCTION__));
				bcmerror = BCME_ERROR;
				goto fail;
			}
			si_core_reset(bus->sih, 0, 0);
			/* Reset the last 4 bytes of RAM (to be used for the shared area) */
			dhdpcie_init_shared_addr(bus);
		} else if (cr4_regs == NULL) { /* no CR4 present on chip */
			si_core_disable(bus->sih, 0);

			if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
				DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
				bcmerror = BCME_ERROR;
				goto fail;
			}

			si_core_reset(bus->sih, 0, 0);

			/* Clear the top bit of memory */
			if (bus->ramsize) {
				uint32 zeros = 0;
				if (dhdpcie_bus_membytes(bus, TRUE, bus->ramsize - 4,
				                     (uint8*)&zeros, 4) < 0) {
					bcmerror = BCME_ERROR;
					goto fail;
				}
			}
		} else {
			/* For CR4,
			 * Halt ARM
			 * Remove ARM reset
			 * Read RAM base address [0x18_0000]
			 * [next] Download firmware
			 * [done at else] Populate the reset vector
			 * [done at else] Remove ARM halt
			 */
			/* Halt ARM & remove reset */
			si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
			if (BCM43602_CHIP(bus->sih->chip)) {
				/* XXX CRWLARMCR4-53 43602a0 HW bug when banks are powered down */
				W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 5);
				W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
				W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 7);
				W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
			}
			/* Reset the last 4 bytes of RAM (to be used for the shared area) */
			dhdpcie_init_shared_addr(bus);
		}
	} else {
		if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
			/* write vars */
			if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
				DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
				goto fail;
			}
			/* write random numbers to sysmem for the purpose of
			 * randomizing heap address space.
			 */
			if ((bcmerror = dhdpcie_wrt_rnd(bus)) != BCME_OK) {
				DHD_ERROR(("%s: Failed to get random seed to write to TCM !\n",
					__FUNCTION__));
				goto fail;
			}
#ifdef BCMINTERNAL
			if (bus->hostfw_buf.va) {
				/* Share the location of the host memory
				 * where the pageable FW binary is located.
				 */
				host_page_location_info_t host_location;
				host_location.tlv_signature =
					htol32(BCM_HOST_PAGE_LOCATION_SIGNATURE);
				host_location.tlv_size = htol32(sizeof(host_location)) -
					sizeof(host_location.tlv_size) -
					sizeof(host_location.tlv_signature);
				host_location.binary_size = htol32(bus->hostfw_buf.len);
				host_location.addr_hi = PHYSADDRHI(bus->hostfw_buf.pa);
				host_location.addr_lo = PHYSADDRLO(bus->hostfw_buf.pa);
				bus->next_tlv -= sizeof(host_location);
				dhdpcie_bus_membytes(bus, TRUE, bus->next_tlv,
					(uint8*)&host_location, sizeof(host_location));
				DHD_INFO(("%s: Host page location info:"
					" %08x-%08x Len:%x!\n",
					__FUNCTION__, host_location.addr_hi,
					host_location.addr_lo, host_location.binary_size));
			}
#ifdef DHD_FWTRACE
			{
				/*
				 * Send the host trace buffer at firmware download time
				 * to enable collecting a full init-time firmware trace
				 */
				host_fwtrace_buf_location_info_t host_info;

				if (fwtrace_init(bus->dhd) == BCME_OK) {
					fwtrace_get_haddr(bus->dhd, &host_info.host_buf_info);

					host_info.tlv_size = sizeof(host_info.host_buf_info);
					host_info.tlv_signature =
						htol32(BCM_HOST_FWTRACE_BUF_LOCATION_SIGNATURE);

					bus->ramtop_addr -= sizeof(host_info);

					dhdpcie_bus_membytes(bus, TRUE, bus->ramtop_addr,
						(uint8*)&host_info, sizeof(host_info));

					bus->next_tlv = sizeof(host_info);
				}
			}
#endif /* DHD_FWTRACE */
#endif /* BCMINTERNAL */

#if defined(FW_SIGNATURE)
			if ((bcmerror = dhdpcie_bus_download_fw_signature(bus, &do_wr_flops))
				!= BCME_OK) {
				goto fail;
			}
#endif /* FW_SIGNATURE */

			if (do_wr_flops) {
				uint32 resetinstr_data;

				/* switch back to arm core again */
				if (!(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
					DHD_ERROR(("%s: Failed to find ARM CA7 core!\n",
						__FUNCTION__));
					bcmerror = BCME_ERROR;
					goto fail;
				}

				/*
				 * read address 0 with the reset instruction,
				 * to validate that it is not secured
				 */
				bcmerror = dhdpcie_bus_membytes(bus, FALSE, 0,
					(uint8 *)&resetinstr_data, sizeof(resetinstr_data));

				if (resetinstr_data == 0xFFFFFFFF) {
					DHD_ERROR(("%s: **** FLOPS Vector is secured, "
						"Signature file is missing! ***\n", __FUNCTION__));
					bcmerror = BCME_NO_SIG_FILE;
					goto fail;
				}

				/* write address 0 with the reset instruction */
				bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
					(uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
				/* now remove reset and halt and continue to run CA7 */
			}
		} else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
			if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
				DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
				bcmerror = BCME_ERROR;
				goto fail;
			}

			if (!si_iscoreup(bus->sih)) {
				DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__));
				bcmerror = BCME_ERROR;
				goto fail;
			}

			/* Enable remap before ARM reset but after vars.
			 * No backplane access in remap mode
			 */
			if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
			    !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
				DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
				bcmerror = BCME_ERROR;
				goto fail;
			}

			/* XXX Change standby configuration here if necessary */

			if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
			    !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
				DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
				bcmerror = BCME_ERROR;
				goto fail;
			}
		} else {
			if (BCM43602_CHIP(bus->sih->chip)) {
				/* Firmware crashes on SOCSRAM access when core is in reset */
				if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
					DHD_ERROR(("%s: Failed to find SOCRAM core!\n",
						__FUNCTION__));
					bcmerror = BCME_ERROR;
					goto fail;
				}
				si_core_reset(bus->sih, 0, 0);
				si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
			}

			/* write vars */
			if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
				DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
				goto fail;
			}

			/* write a random number rTLV to TCM for the purpose of
			 * randomizing heap address space.
			 */
			if ((bcmerror = dhdpcie_wrt_rnd(bus)) != BCME_OK) {
				DHD_ERROR(("%s: Failed to get random seed to write to TCM !\n",
					__FUNCTION__));
				goto fail;
			}

#if defined(FW_SIGNATURE)
			if ((bcmerror = dhdpcie_bus_download_fw_signature(bus, &do_wr_flops))
				!= BCME_OK) {
				goto fail;
			}
#endif /* FW_SIGNATURE */
			if (do_wr_flops) {
				uint32 resetinstr_data;

				/* switch back to arm core again */
				if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
					DHD_ERROR(("%s: Failed to find ARM CR4 core!\n",
						__FUNCTION__));
					bcmerror = BCME_ERROR;
					goto fail;
				}

				/*
				 * read address 0 with the reset instruction,
				 * to validate that it is not secured
				 */
				bcmerror = dhdpcie_bus_membytes(bus, FALSE, 0,
					(uint8 *)&resetinstr_data, sizeof(resetinstr_data));

				if (resetinstr_data == 0xFFFFFFFF) {
					DHD_ERROR(("%s: **** FLOPS Vector is secured, "
						"Signature file is missing! ***\n", __FUNCTION__));
					bcmerror = BCME_NO_SIG_FILE;
					goto fail;
				}

				/* write address 0 with the reset instruction */
				bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
					(uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));

				if (bcmerror == BCME_OK) {
					uint32 tmp;

					bcmerror = dhdpcie_bus_membytes(bus, FALSE, 0,
					                                (uint8 *)&tmp, sizeof(tmp));

					if (bcmerror == BCME_OK && tmp != bus->resetinstr) {
						DHD_ERROR(("%s: Failed to write 0x%08x to addr 0\n",
						          __FUNCTION__, bus->resetinstr));
						DHD_ERROR(("%s: contents of addr 0 is 0x%08x\n",
						          __FUNCTION__, tmp));
						bcmerror = BCME_ERROR;
						goto fail;
					}
				}
				/* now remove reset and halt and continue to run CR4 */
			}
		}

		bus->arm_oor_time = OSL_LOCALTIME_NS();
		si_core_reset(bus->sih, 0, 0);

		/* Allow HT Clock now that the ARM is running. */
		bus->alp_only = FALSE;

		bus->dhd->busstate = DHD_BUS_LOAD;
#ifdef DHD_EFI
		/*
		 * dhdpcie_init_phase2() sets the fw_download_status as FW_DOWNLOAD_IN_PROGRESS
		 * during the first default attempt to load FW either from OTP or WIRELESS folder.
		 *
		 * After the first successful download of the FW (either from OTP or WIRELESS folder
		 * or by dhd download command) set the fw_download_status as FW_DOWNLOAD_DONE.
		 *
		 * We need to maintain these states to perform FLR in dhdpcie_bus_download_state()
		 * only after the first successful download.
		 */
		bus->dhd->fw_download_status = FW_DOWNLOAD_DONE;
#endif /* DHD_EFI */
	}

fail:
	/* Always return to PCIE core */
	si_setcore(bus->sih, PCIE2_CORE_ID, 0);

	if (MULTIBP_ENAB(bus->sih) && !do_flr) {
		dhd_bus_pcie_pwr_req_clear(bus);
	}

	return bcmerror;
} /* dhdpcie_bus_download_state */

#if defined(FW_SIGNATURE)

static int
dhdpcie_bus_download_fw_signature(dhd_bus_t *bus, bool *do_write)
{
	int bcmerror = BCME_OK;

	DHD_INFO(("FWSIG: bl=%s,%x fw=%x,%u sig=%s,%x,%u"
		" stat=%x,%u ram=%x,%x\n",
		bus->bootloader_filename, bus->bootloader_addr,
		bus->fw_download_addr, bus->fw_download_len,
		bus->fwsig_filename, bus->fwsig_download_addr,
		bus->fwsig_download_len,
		bus->fwstat_download_addr, bus->fwstat_download_len,
		bus->dongle_ram_base, bus->ramtop_addr));

	if (bus->fwsig_filename[0] == 0) {
		DHD_INFO(("%s: missing signature file\n", __FUNCTION__));
		goto exit;
	}

	/* Write RAM Bootloader to TCM if requested */
	if ((bcmerror = dhdpcie_bus_download_ram_bootloader(bus))
		!= BCME_OK) {
		DHD_ERROR(("%s: could not write RAM BL to TCM, err %d\n",
			__FUNCTION__, bcmerror));
		goto exit;
	}

	/* Write FW signature rTLV to TCM */
	if ((bcmerror = dhdpcie_bus_write_fwsig(bus, bus->fwsig_filename,
		NULL))) {
		DHD_ERROR(("%s: could not write FWsig to TCM, err %d\n",
			__FUNCTION__, bcmerror));
		goto exit;
	}

	/* Write FW signature verification status rTLV to TCM */
	if ((bcmerror = dhdpcie_bus_write_fws_status(bus)) != BCME_OK) {
		DHD_ERROR(("%s: could not write FWinfo to TCM, err %d\n",
			__FUNCTION__, bcmerror));
		goto exit;
	}

	/* Write FW memory map rTLV to TCM */
	if ((bcmerror = dhdpcie_bus_write_fws_mem_info(bus)) != BCME_OK) {
		DHD_ERROR(("%s: could not write FWinfo to TCM, err %d\n",
			__FUNCTION__, bcmerror));
		goto exit;
	}

	/* Write an end-of-TLVs marker to TCM */
	if ((bcmerror = dhdpcie_download_rtlv_end(bus)) != BCME_OK) {
		DHD_ERROR(("%s: could not write rTLV-end marker to TCM, err %d\n",
			__FUNCTION__, bcmerror));
		goto exit;
	}

	/* In case of BL RAM, do write flops */
	if (bus->bootloader_filename[0] != 0) {
		*do_write = TRUE;
	} else {
		*do_write = FALSE;
	}

exit:
	return bcmerror;
}

/* Download a reversed-TLV to the top of dongle RAM without overlapping any existing rTLVs */
static int
dhdpcie_download_rtlv(dhd_bus_t *bus, dngl_rtlv_type_t type, dngl_rtlv_len_t len, uint8 *value)
{
	int bcmerror = BCME_OK;
#ifdef DHD_DEBUG
	uint8 *readback_buf = NULL;
	uint32 readback_val = 0;
#endif /* DHD_DEBUG */
	uint32 dest_addr = 0;		/* dongle RAM dest address */
	uint32 dest_size = 0;		/* dongle RAM dest size */
	uint32 dest_raw_size = 0;	/* dest size with added checksum */

	/* Calculate the destination dongle RAM address and size */
	dest_size = ROUNDUP(len, 4);
	dest_addr = bus->ramtop_addr - sizeof(dngl_rtlv_type_t) - sizeof(dngl_rtlv_len_t)
		- dest_size;
	bus->ramtop_addr = dest_addr;

	/* Create the rTLV size field.  This consists of 2 16-bit fields:
	 * the lower 16 bits hold the size, and the upper 16 bits hold a checksum
	 * consisting of the size with all bits inverted.
	 *     +-------------+-------------+
	 *     |   checksum  |   size      |
	 *     +-------------+-------------+
	 *      High 16 bits    Low 16 bits
	 */
	dest_raw_size = (~dest_size << 16) | (dest_size & 0x0000FFFF);
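	/* e.g. (illustrative): dest_size = 0x14 (20 bytes) gives ~0x14 = 0xFFFFFFEB,
	 * so dest_raw_size = 0xFFEB0014: checksum 0xFFEB in the high half,
	 * size 0x0014 in the low half.
	 */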

	/* Write the value block */
	if (dest_size > 0) {
		bcmerror = dhdpcie_bus_membytes(bus, TRUE, dest_addr, value, dest_size);
		if (bcmerror) {
			DHD_ERROR(("%s: error %d on writing %d membytes to 0x%08x\n",
				__FUNCTION__, bcmerror, dest_size, dest_addr));
			goto exit;
		}
	}

	/* Write the length word */
	bcmerror = dhdpcie_bus_membytes(bus, TRUE, dest_addr + dest_size,
		(uint8*)&dest_raw_size, sizeof(dngl_rtlv_len_t));

	/* Write the type word */
	bcmerror = dhdpcie_bus_membytes(bus, TRUE,
		dest_addr + dest_size + sizeof(dngl_rtlv_len_t),
		(uint8*)&type, sizeof(dngl_rtlv_type_t));
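	/* Resulting rTLV layout in dongle RAM (the region grows downward from
	 * the previous ramtop_addr):
	 *   dest_addr                                  : value (dest_size bytes)
	 *   dest_addr + dest_size                      : length word (checksum | size)
	 *   dest_addr + dest_size + sizeof(len word)   : type word
	 */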

#ifdef DHD_DEBUG
	/* Read back and compare the downloaded data */
	if (dest_size > 0) {
		readback_buf = (uint8*)MALLOC(bus->dhd->osh, dest_size);
		if (!readback_buf) {
			bcmerror = BCME_NOMEM;
			goto exit;
		}
		memset(readback_buf, 0xaa, dest_size);
		bcmerror = dhdpcie_bus_membytes(bus, FALSE, dest_addr, readback_buf, dest_size);
		if (bcmerror) {
			DHD_ERROR(("%s: readback error %d, %d bytes from 0x%08x\n",
				__FUNCTION__, bcmerror, dest_size, dest_addr));
			goto exit;
		}
		if (memcmp(value, readback_buf, dest_size) != 0) {
			DHD_ERROR(("%s: Downloaded data mismatch.\n", __FUNCTION__));
			bcmerror = BCME_ERROR;
			goto exit;
		} else {
			DHD_ERROR(("Download and compare of TLV 0x%x succeeded"
				" (size %u, addr %x).\n", type, dest_size, dest_addr));
		}
	}

	/* Read back and compare the downloaded len field */
	bcmerror = dhdpcie_bus_membytes(bus, FALSE, dest_addr + dest_size,
		(uint8*)&readback_val, sizeof(dngl_rtlv_len_t));
	if (!bcmerror) {
		if (readback_val != dest_raw_size) {
			bcmerror = BCME_BADLEN;
		}
	}
	if (bcmerror) {
		DHD_ERROR(("%s: Downloaded len error %d\n", __FUNCTION__, bcmerror));
		goto exit;
	}

	/* Read back and compare the downloaded type field */
	bcmerror = dhdpcie_bus_membytes(bus, FALSE,
		dest_addr + dest_size + sizeof(dngl_rtlv_len_t),
		(uint8*)&readback_val, sizeof(dngl_rtlv_type_t));
	if (!bcmerror) {
		if (readback_val != type) {
			bcmerror = BCME_BADOPTION;
		}
	}
	if (bcmerror) {
		DHD_ERROR(("%s: Downloaded type error %d\n", __FUNCTION__, bcmerror));
		goto exit;
	}
#endif /* DHD_DEBUG */

	bus->ramtop_addr = dest_addr;

exit:
#ifdef DHD_DEBUG
	if (readback_buf) {
		MFREE(bus->dhd->osh, readback_buf, dest_size);
	}
#endif /* DHD_DEBUG */

	return bcmerror;
} /* dhdpcie_download_rtlv */

/* Download a reversed-TLV END marker to the top of dongle RAM */
static int
dhdpcie_download_rtlv_end(dhd_bus_t *bus)
{
	return dhdpcie_download_rtlv(bus, DNGL_RTLV_TYPE_END_MARKER, 0, NULL);
}

/* Write the FW signature verification status to dongle memory */
static int
dhdpcie_bus_write_fws_status(dhd_bus_t *bus)
{
	bcm_fwsign_verif_status_t vstatus;
	int ret;

	bzero(&vstatus, sizeof(vstatus));

	ret = dhdpcie_download_rtlv(bus, DNGL_RTLV_TYPE_FWSIGN_STATUS, sizeof(vstatus),
		(uint8*)&vstatus);
	bus->fwstat_download_addr = bus->ramtop_addr;
	bus->fwstat_download_len = sizeof(vstatus);

	return ret;
} /* dhdpcie_bus_write_fws_status */

/* Write the FW signature verification memory map to dongle memory */
static int
dhdpcie_bus_write_fws_mem_info(dhd_bus_t *bus)
{
	bcm_fwsign_mem_info_t memmap;
	int ret;

	bzero(&memmap, sizeof(memmap));
	memmap.firmware.start = bus->fw_download_addr;
	memmap.firmware.end = memmap.firmware.start + bus->fw_download_len;
	memmap.heap.start = ROUNDUP(memmap.firmware.end + BL_HEAP_START_GAP_SIZE, 4);
	memmap.heap.end = memmap.heap.start + BL_HEAP_SIZE;
	memmap.signature.start = bus->fwsig_download_addr;
	memmap.signature.end = memmap.signature.start + bus->fwsig_download_len;
	memmap.vstatus.start = bus->fwstat_download_addr;
	memmap.vstatus.end = memmap.vstatus.start + bus->fwstat_download_len;
	DHD_INFO(("%s: mem_info: fw=%x-%x heap=%x-%x sig=%x-%x vst=%x-%x res=%x\n",
		__FUNCTION__,
		memmap.firmware.start, memmap.firmware.end,
		memmap.heap.start, memmap.heap.end,
		memmap.signature.start, memmap.signature.end,
		memmap.vstatus.start, memmap.vstatus.end,
		memmap.reset_vec.start));

	ret = dhdpcie_download_rtlv(bus, DNGL_RTLV_TYPE_FWSIGN_MEM_MAP, sizeof(memmap),
		(uint8*)&memmap);
	bus->fw_memmap_download_addr = bus->ramtop_addr;
	bus->fw_memmap_download_len = sizeof(memmap);

	return ret;
} /* dhdpcie_bus_write_fws_mem_info */

/* Download a bootloader image to dongle RAM */
static int
dhdpcie_bus_download_ram_bootloader(dhd_bus_t *bus)
{
	int ret = BCME_OK;
	uint32 dongle_ram_base_save;

	DHD_INFO(("download_bloader: %s,0x%x. ramtop=0x%x\n",
		bus->bootloader_filename, bus->bootloader_addr, bus->ramtop_addr));
	if (bus->bootloader_filename[0] == '\0') {
		return ret;
	}

	/* Save ram base */
	dongle_ram_base_save = bus->dongle_ram_base;

	/* Set ram base to bootloader download start address */
	bus->dongle_ram_base = bus->bootloader_addr;

	/* Download the bootloader image to TCM */
	ret = dhdpcie_download_code_file(bus, bus->bootloader_filename);

	/* Restore ram base */
	bus->dongle_ram_base = dongle_ram_base_save;

	return ret;
} /* dhdpcie_bus_download_ram_bootloader */

/* Save the FW download address and size */
static int
dhdpcie_bus_save_download_info(dhd_bus_t *bus, uint32 download_addr,
	uint32 download_size, const char *signature_fname,
	const char *bloader_fname, uint32 bloader_download_addr)
{
	bus->fw_download_len = download_size;
	bus->fw_download_addr = download_addr;
	strlcpy(bus->fwsig_filename, signature_fname, sizeof(bus->fwsig_filename));
	strlcpy(bus->bootloader_filename, bloader_fname, sizeof(bus->bootloader_filename));
	bus->bootloader_addr = bloader_download_addr;
#ifdef GDB_PROXY
	/* GDB proxy bootloader mode - set if a signature file is specified
	 * (i.e. a bootloader is used) but no bootloader file is specified
	 * (i.e. the ROM bootloader is used).
	 * Bootloader mode is significant only for pre-attachment debugging
	 * of chips in which the debug cell can't be initialized before ARM
	 * CPU start.
	 */
	bus->gdb_proxy_bootloader_mode =
		(bus->fwsig_filename[0] != 0) && (bus->bootloader_filename[0] == 0);
#endif /* GDB_PROXY */
	return BCME_OK;
} /* dhdpcie_bus_save_download_info */

/* Read a small binary file and write it to the specified socram dest address */
static int
dhdpcie_download_sig_file(dhd_bus_t *bus, char *path, uint32 type)
{
	int bcmerror = BCME_OK;
	void *filep = NULL;
	uint8 *srcbuf = NULL;
	int srcsize = 0;
	int len;
	uint32 dest_size = 0;	/* dongle RAM dest size */

	if (path == NULL || path[0] == '\0') {
		DHD_ERROR(("%s: no file\n", __FUNCTION__));
		bcmerror = BCME_NOTFOUND;
		goto exit;
	}

	/* Open file, get size */
	filep = dhd_os_open_image1(bus->dhd, path);
	if (filep == NULL) {
		DHD_ERROR(("%s: error opening file %s\n", __FUNCTION__, path));
		bcmerror = BCME_NOTFOUND;
		goto exit;
	}
	srcsize = dhd_os_get_image_size(filep);
	if (srcsize <= 0 || srcsize > MEMBLOCK) {
		DHD_ERROR(("%s: invalid fwsig size %u\n", __FUNCTION__, srcsize));
		bcmerror = BCME_BUFTOOSHORT;
		goto exit;
	}
	dest_size = ROUNDUP(srcsize, 4);

	/* Allocate src buffer, read in the entire file */
	srcbuf = (uint8 *)MALLOCZ(bus->dhd->osh, dest_size);
	if (!srcbuf) {
		bcmerror = BCME_NOMEM;
		goto exit;
	}
	len = dhd_os_get_image_block(srcbuf, srcsize, filep);
	if (len != srcsize) {
		DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len));
		bcmerror = BCME_BADLEN;
		goto exit;
	}

	/* Write the src buffer as a rTLV to the dongle */
	bcmerror = dhdpcie_download_rtlv(bus, type, dest_size, srcbuf);

	bus->fwsig_download_addr = bus->ramtop_addr;
	bus->fwsig_download_len = dest_size;

exit:
	if (filep) {
		dhd_os_close_image1(bus->dhd, filep);
	}
	if (srcbuf) {
		MFREE(bus->dhd->osh, srcbuf, dest_size);
	}

	return bcmerror;
} /* dhdpcie_download_sig_file */

static int
dhdpcie_bus_write_fwsig(dhd_bus_t *bus, char *fwsig_path, char *nvsig_path)
{
	int bcmerror = BCME_OK;

	/* Download the FW signature file to the chip */
	bcmerror = dhdpcie_download_sig_file(bus, fwsig_path, DNGL_RTLV_TYPE_FW_SIGNATURE);
	if (bcmerror) {
		goto exit;
	}

exit:
	if (bcmerror) {
		DHD_ERROR(("%s: error %d\n", __FUNCTION__, bcmerror));
	}
	return bcmerror;
} /* dhdpcie_bus_write_fwsig */

/* Dump secure firmware status. */
static int
dhd_bus_dump_fws(dhd_bus_t *bus, struct bcmstrbuf *strbuf)
{
	bcm_fwsign_verif_status_t status;
	bcm_fwsign_mem_info_t     meminfo;
	int                       err = BCME_OK;

	bzero(&status, sizeof(status));
	if (bus->fwstat_download_addr != 0) {
		err = dhdpcie_bus_membytes(bus, FALSE, bus->fwstat_download_addr,
			(uint8 *)&status, sizeof(status));
		if (err != BCME_OK) {
			DHD_ERROR(("%s: error %d on reading %zu membytes at 0x%08x\n",
				__FUNCTION__, err, sizeof(status), bus->fwstat_download_addr));
			return (err);
		}
	}

	bzero(&meminfo, sizeof(meminfo));
	if (bus->fw_memmap_download_addr != 0) {
		err = dhdpcie_bus_membytes(bus, FALSE, bus->fw_memmap_download_addr,
			(uint8 *)&meminfo, sizeof(meminfo));
		if (err != BCME_OK) {
			DHD_ERROR(("%s: error %d on reading %zu membytes at 0x%08x\n",
				__FUNCTION__, err, sizeof(meminfo), bus->fw_memmap_download_addr));
			return (err);
		}
	}

	bcm_bprintf(strbuf, "Firmware signing\nSignature: (%08x) len (%d)\n",
		bus->fwsig_download_addr, bus->fwsig_download_len);

	bcm_bprintf(strbuf,
		"Verification status: (%08x)\n"
		"\tstatus: %d\n"
		"\tstate: %u\n"
		"\talloc_bytes: %u\n"
		"\tmax_alloc_bytes: %u\n"
		"\ttotal_alloc_bytes: %u\n"
		"\ttotal_freed_bytes: %u\n"
		"\tnum_allocs: %u\n"
		"\tmax_allocs: %u\n"
		"\tmax_alloc_size: %u\n"
		"\talloc_failures: %u\n",
		bus->fwstat_download_addr,
		status.status,
		status.state,
		status.alloc_bytes,
		status.max_alloc_bytes,
		status.total_alloc_bytes,
		status.total_freed_bytes,
		status.num_allocs,
		status.max_allocs,
		status.max_alloc_size,
		status.alloc_failures);

	bcm_bprintf(strbuf,
		"Memory info: (%08x)\n"
		"\tfw   %08x-%08x\n\theap %08x-%08x\n\tsig  %08x-%08x\n\tvst  %08x-%08x\n",
		bus->fw_memmap_download_addr,
		meminfo.firmware.start,  meminfo.firmware.end,
		meminfo.heap.start,      meminfo.heap.end,
		meminfo.signature.start, meminfo.signature.end,
		meminfo.vstatus.start,   meminfo.vstatus.end);

	return (err);
}
#endif /* FW_SIGNATURE */

/* Write nvram data to the top of dongle RAM, ending with a size in # of 32-bit words */
static int
dhdpcie_bus_write_vars(dhd_bus_t *bus)
{
	int bcmerror = 0;
	uint32 varsize, phys_size;
	uint32 varaddr;
	uint8 *vbuffer;
	uint32 varsizew;
#ifdef DHD_DEBUG
	uint8 *nvram_ularray;
#endif /* DHD_DEBUG */

	/* Even if there are no vars to be written, we still need to set the ramsize. */
	varsize = bus->varsz ? ROUNDUP(bus->varsz, 4) : 0;
	varaddr = (bus->ramsize - 4) - varsize;

	varaddr += bus->dongle_ram_base;
	bus->ramtop_addr = varaddr;

	if (bus->vars) {

		/* XXX In case the controller has trouble with odd bytes... */
		vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize);
		if (!vbuffer)
			return BCME_NOMEM;

		bzero(vbuffer, varsize);
		bcopy(bus->vars, vbuffer, bus->varsz);
		/* Write the vars list */
		bcmerror = dhdpcie_bus_membytes(bus, TRUE, varaddr, vbuffer, varsize);

		/* Implement read back and verify later */
#ifdef DHD_DEBUG
		/* Verify NVRAM bytes */
		DHD_INFO(("%s: Compare NVRAM dl & ul; varsize=%d\n", __FUNCTION__, varsize));
		nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize);
		if (!nvram_ularray) {
			MFREE(bus->dhd->osh, vbuffer, varsize);
			return BCME_NOMEM;
		}

		/* Upload image to verify downloaded contents. */
		memset(nvram_ularray, 0xaa, varsize);

		/* Read the vars list to temp buffer for comparison */
		bcmerror = dhdpcie_bus_membytes(bus, FALSE, varaddr, nvram_ularray, varsize);
		if (bcmerror) {
			DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n",
				__FUNCTION__, bcmerror, varsize, varaddr));
		}

		/* Compare the org NVRAM with the one read from RAM */
		if (memcmp(vbuffer, nvram_ularray, varsize)) {
			DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__));
			prhex("nvram file", vbuffer, varsize);
			prhex("downloaded nvram", nvram_ularray, varsize);
			MFREE(bus->dhd->osh, nvram_ularray, varsize);
			MFREE(bus->dhd->osh, vbuffer, varsize);
			return BCME_ERROR;
		} else
			DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n",
			__FUNCTION__));

		MFREE(bus->dhd->osh, nvram_ularray, varsize);
#endif /* DHD_DEBUG */

		MFREE(bus->dhd->osh, vbuffer, varsize);
	}

	phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize;

	phys_size += bus->dongle_ram_base;

	/* adjust to the user specified RAM */
	DHD_INFO(("%s: Physical memory size: %d, usable memory size: %d\n", __FUNCTION__,
		phys_size, bus->ramsize));
	DHD_INFO(("%s: Vars are at %d, orig varsize is %d\n", __FUNCTION__,
		varaddr, varsize));
	varsize = ((phys_size - 4) - varaddr);

	/*
	 * Determine the length token:
	 * varsize, converted to words, in the lower 16 bits; checksum in the upper 16 bits.
	 */
	if (bcmerror) {
		varsizew = 0;
		bus->nvram_csm = varsizew;
	} else {
		varsizew = varsize / 4;
		varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
		bus->nvram_csm = varsizew;
		varsizew = htol32(varsizew);
	}
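	/* e.g. (illustrative): a 2048-byte vars area gives varsizew = 512
	 * (0x200) words, so the length token is 0xFDFF0200 (~0x200 in the
	 * upper half, 0x200 in the lower half) -- the same size-plus-inverted-
	 * checksum scheme used for the rTLV length words above.
	 */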

	DHD_INFO(("%s: New varsize is %d, length token=0x%08x\n", __FUNCTION__, varsize, varsizew));

	/* Write the length token to the last word */
	bcmerror = dhdpcie_bus_membytes(bus, TRUE, (phys_size - 4),
		(uint8*)&varsizew, 4);

	return bcmerror;
} /* dhdpcie_bus_write_vars */

int
dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len)
{
	int bcmerror = BCME_OK;
#ifdef KEEP_JP_REGREV
	/* XXX Needed by customer's request */
	char *tmpbuf;
	uint tmpidx;
#endif /* KEEP_JP_REGREV */
#ifdef GDB_PROXY
	const char nodeadman_record[] = "deadman_to=0";
#endif /* GDB_PROXY */

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (!len) {
		bcmerror = BCME_BUFTOOSHORT;
		goto err;
	}

	/* Free the old ones and replace with passed variables */
	if (bus->vars)
		MFREE(bus->dhd->osh, bus->vars, bus->varsz);
#ifdef GDB_PROXY
	if (bus->dhd->gdb_proxy_nodeadman) {
		len += sizeof(nodeadman_record);
	}
#endif /* GDB_PROXY */

	bus->vars = MALLOC(bus->dhd->osh, len);
	bus->varsz = bus->vars ? len : 0;
	if (bus->vars == NULL) {
		bcmerror = BCME_NOMEM;
		goto err;
	}

	/* Copy the passed variables, which should include the terminating double-null */
	bcopy(arg, bus->vars, bus->varsz);
#ifdef GDB_PROXY
	if (bus->dhd->gdb_proxy_nodeadman &&
		!replace_nvram_variable(bus->vars, bus->varsz, nodeadman_record, NULL))
	{
		bcmerror = BCME_NOMEM;
		goto err;
	}
#endif /* GDB_PROXY */

	/* Re-calculate htclkratio only for QT; for FPGA it is fixed at 30 */
#ifdef BCMQT_HW
	dhdpcie_htclkratio_recal(bus, bus->vars, bus->varsz);
#endif

#ifdef DHD_USE_SINGLE_NVRAM_FILE
	/* XXX Change the default country code only for MFG firmware */
	if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
		char *sp = NULL;
		char *ep = NULL;
		int i;
		char tag[2][8] = {"ccode=", "regrev="};

		/* Find ccode and regrev info */
		for (i = 0; i < 2; i++) {
			sp = strnstr(bus->vars, tag[i], bus->varsz);
			if (!sp) {
				DHD_ERROR(("%s: Could not find ccode info from the nvram %s\n",
					__FUNCTION__, bus->nv_path));
				bcmerror = BCME_ERROR;
				goto err;
			}
			sp = strchr(sp, '=');
			ep = strchr(sp, '\0');
			/* We assume that the string length of both the ccode and
			 * regrev values does not exceed WLC_CNTRY_BUF_SZ
			 */
			if (ep && ((ep - sp) <= WLC_CNTRY_BUF_SZ)) {
				sp++;
				while (*sp != '\0') {
					DHD_INFO(("%s: parse '%s', current sp = '%c'\n",
						__FUNCTION__, tag[i], *sp));
					*sp++ = '0';
				}
			} else {
				DHD_ERROR(("%s: Invalid parameter format when parsing for %s\n",
					__FUNCTION__, tag[i]));
				bcmerror = BCME_ERROR;
				goto err;
			}
		}
	}
#endif /* DHD_USE_SINGLE_NVRAM_FILE */

#ifdef KEEP_JP_REGREV
	/* XXX Needed by customer's request */
#ifdef DHD_USE_SINGLE_NVRAM_FILE
	if (dhd_bus_get_fw_mode(bus->dhd) != DHD_FLAG_MFG_MODE)
#endif /* DHD_USE_SINGLE_NVRAM_FILE */
	{
		char *pos = NULL;
		tmpbuf = MALLOCZ(bus->dhd->osh, bus->varsz + 1);
		if (tmpbuf == NULL) {
			goto err;
		}
		memcpy(tmpbuf, bus->vars, bus->varsz);
		for (tmpidx = 0; tmpidx < bus->varsz; tmpidx++) {
			if (tmpbuf[tmpidx] == 0) {
				tmpbuf[tmpidx] = '\n';
			}
		}
		bus->dhd->vars_ccode[0] = 0;
		bus->dhd->vars_regrev = 0;
		if ((pos = strstr(tmpbuf, "ccode"))) {
			sscanf(pos, "ccode=%3s\n", bus->dhd->vars_ccode);
		}
		if ((pos = strstr(tmpbuf, "regrev"))) {
			sscanf(pos, "regrev=%u\n", &(bus->dhd->vars_regrev));
		}
		MFREE(bus->dhd->osh, tmpbuf, bus->varsz + 1);
	}
#endif /* KEEP_JP_REGREV */

err:
	return bcmerror;
}

/* Loop through the capability list and see if the PCIe capability exists */
uint8
dhdpcie_find_pci_capability(osl_t *osh, uint8 req_cap_id)
{
	uint8 cap_id;
	uint8 cap_ptr = 0;
	uint8 byte_val;

	/* check for Header type 0 */
	byte_val = read_pci_cfg_byte(PCI_CFG_HDR);
	if ((byte_val & 0x7f) != PCI_HEADER_NORMAL) {
		DHD_ERROR(("%s : PCI config header not normal.\n", __FUNCTION__));
		goto end;
	}

	/* check if the capability pointer field exists */
	byte_val = read_pci_cfg_byte(PCI_CFG_STAT);
	if (!(byte_val & PCI_CAPPTR_PRESENT)) {
		DHD_ERROR(("%s : PCI CAP pointer not present.\n", __FUNCTION__));
		goto end;
	}

	cap_ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR);
	/* check if the capability pointer is 0x00 */
	if (cap_ptr == 0x00) {
		DHD_ERROR(("%s : PCI CAP pointer is 0x00.\n", __FUNCTION__));
		goto end;
	}

	/* loop through the capability list and see if the requested capability exists */

	cap_id = read_pci_cfg_byte(cap_ptr);

	while (cap_id != req_cap_id) {
		cap_ptr = read_pci_cfg_byte((cap_ptr + 1));
		if (cap_ptr == 0x00) break;
		cap_id = read_pci_cfg_byte(cap_ptr);
	}
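	/* Per the standard PCI config-space layout, each capability starts
	 * with a (cap_id, next_ptr) byte pair at cap_ptr and cap_ptr + 1, so
	 * the walk above follows the singly linked list until the requested
	 * ID is found or a 0x00 next pointer terminates it.
	 */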

end:
	return cap_ptr;
}

void
dhdpcie_pme_active(osl_t *osh, bool enable)
{
	uint8 cap_ptr;
	uint32 pme_csr;

	cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);

	if (!cap_ptr) {
		DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__));
		return;
	}

	pme_csr = OSL_PCI_READ_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32));
	DHD_ERROR(("%s : pme_sts_ctrl 0x%x\n", __FUNCTION__, pme_csr));

	pme_csr |= PME_CSR_PME_STAT;
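	/* Note (per the PCI PM spec, an assumption not stated in the original
	 * source): PME_Status is write-1-to-clear, so OR-ing it in here clears
	 * any pending PME status when the CSR is written back below, in both
	 * the enable and disable paths.
	 */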
	if (enable) {
		pme_csr |= PME_CSR_PME_EN;
	} else {
		pme_csr &= ~PME_CSR_PME_EN;
	}

	OSL_PCI_WRITE_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32), pme_csr);
}

bool
dhdpcie_pme_cap(osl_t *osh)
{
	uint8 cap_ptr;
	uint32 pme_cap;

	cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);

	if (!cap_ptr) {
		DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__));
		return FALSE;
	}

	pme_cap = OSL_PCI_READ_CONFIG(osh, cap_ptr, sizeof(uint32));

	DHD_ERROR(("%s : pme_cap 0x%x\n", __FUNCTION__, pme_cap));

	return ((pme_cap & PME_CAP_PM_STATES) != 0);
}

static void
dhdpcie_pme_stat_clear(dhd_bus_t *bus)
{
	uint32 pmcsr = dhd_pcie_config_read(bus, PCIE_CFG_PMCSR, sizeof(uint32));

	OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_PMCSR, sizeof(uint32), pmcsr | PCIE_PMCSR_PMESTAT);
}

uint32
dhdpcie_lcreg(osl_t *osh, uint32 mask, uint32 val)
{
	uint8	pcie_cap;
	uint8	lcreg_offset;	/* PCIE capability LCreg offset in the config space */
	uint32	reg_val;

	pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID);

	if (!pcie_cap) {
		DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__));
		return 0;
	}

	lcreg_offset = pcie_cap + PCIE_CAP_LINKCTRL_OFFSET;

	/* set operation */
	if (mask) {
		/* read */
		reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));

		/* modify */
		reg_val &= ~mask;
		reg_val |= (mask & val);

		/* write */
		OSL_PCI_WRITE_CONFIG(osh, lcreg_offset, sizeof(uint32), reg_val);
	}
	return OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
}
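
/* Illustrative usage of the read-modify-write helper above (a sketch; the
 * bit positions are per the PCIe spec Link Control register, not taken from
 * this file): dhdpcie_lcreg(osh, 0, 0) reads the register without modifying
 * it, while dhdpcie_lcreg(osh, 0x3, 0x0) clears the ASPM enable bits
 * (Link Control bits [1:0]).
 */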
11283 
11284 #if defined(NDIS)
11285 /* set min res mask to highest value, preventing sleep */
11286 void
11287 dhdpcie_set_pmu_min_res_mask(struct dhd_bus *bus, uint min_res_mask)
11288 {
11289 	si_pmu_set_min_res_mask(bus->sih, bus->osh, min_res_mask);
11290 }
11291 #endif /* defined(NDIS) */
11292 
11293 uint8
11294 dhdpcie_clkreq(osl_t *osh, uint32 mask, uint32 val)
11295 {
11296 	uint8	pcie_cap;
11297 	uint32	reg_val;
11298 	uint8	lcreg_offset;	/* PCIE capability LCreg offset in the config space */
11299 
11300 	pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID);
11301 
11302 	if (!pcie_cap) {
11303 		DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__));
11304 		return 0;
11305 	}
11306 
11307 	lcreg_offset = pcie_cap + PCIE_CAP_LINKCTRL_OFFSET;
11308 
11309 	reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
11310 	/* set operation */
11311 	if (mask) {
11312 		if (val)
11313 			reg_val |= PCIE_CLKREQ_ENAB;
11314 		else
11315 			reg_val &= ~PCIE_CLKREQ_ENAB;
11316 		OSL_PCI_WRITE_CONFIG(osh, lcreg_offset, sizeof(uint32), reg_val);
11317 		reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
11318 	}
11319 	if (reg_val & PCIE_CLKREQ_ENAB)
11320 		return 1;
11321 	else
11322 		return 0;
11323 }
11324 
11325 void dhd_dump_intr_counters(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
11326 {
11327 	dhd_bus_t *bus;
11328 	uint64 current_time = OSL_LOCALTIME_NS();
11329 
11330 	if (!dhd) {
11331 		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
11332 		return;
11333 	}
11334 
11335 	bus = dhd->bus;
11336 	if (!bus) {
11337 		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
11338 		return;
11339 	}
11340 
11341 	bcm_bprintf(strbuf, "\n ------- DUMPING INTR enable/disable counters-------\n");
11342 	bcm_bprintf(strbuf, "resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n"
11343 		"isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n"
11344 		"dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n",
11345 		bus->resume_intr_enable_count, bus->dpc_intr_enable_count,
11346 		bus->isr_intr_disable_count, bus->suspend_intr_disable_count,
11347 		bus->dpc_return_busdown_count, bus->non_ours_irq_count);
11348 #ifdef BCMPCIE_OOB_HOST_WAKE
11349 	bcm_bprintf(strbuf, "oob_intr_count=%lu oob_intr_enable_count=%lu"
11350 		" oob_intr_disable_count=%lu\noob_irq_num=%d"
11351 		" last_oob_irq_times="SEC_USEC_FMT":"SEC_USEC_FMT
11352 		" last_oob_irq_enable_time="SEC_USEC_FMT"\nlast_oob_irq_disable_time="SEC_USEC_FMT
11353 		" oob_irq_enabled=%d oob_gpio_level=%d\n",
11354 		bus->oob_intr_count, bus->oob_intr_enable_count,
11355 		bus->oob_intr_disable_count, dhdpcie_get_oob_irq_num(bus),
11356 		GET_SEC_USEC(bus->last_oob_irq_isr_time),
11357 		GET_SEC_USEC(bus->last_oob_irq_thr_time),
11358 		GET_SEC_USEC(bus->last_oob_irq_enable_time),
11359 		GET_SEC_USEC(bus->last_oob_irq_disable_time), dhdpcie_get_oob_irq_status(bus),
11360 		dhdpcie_get_oob_irq_level());
11361 #endif /* BCMPCIE_OOB_HOST_WAKE */
11362 	bcm_bprintf(strbuf, "\ncurrent_time="SEC_USEC_FMT" isr_entry_time="SEC_USEC_FMT
11363 		" isr_exit_time="SEC_USEC_FMT"\n"
11364 		"isr_sched_dpc_time="SEC_USEC_FMT" rpm_sched_dpc_time="SEC_USEC_FMT"\n"
11365 		" last_non_ours_irq_time="SEC_USEC_FMT" dpc_entry_time="SEC_USEC_FMT"\n"
11366 		"last_process_ctrlbuf_time="SEC_USEC_FMT " last_process_flowring_time="SEC_USEC_FMT
11367 		" last_process_txcpl_time="SEC_USEC_FMT"\nlast_process_rxcpl_time="SEC_USEC_FMT
11368 		" last_process_infocpl_time="SEC_USEC_FMT" last_process_edl_time="SEC_USEC_FMT
11369 		"\ndpc_exit_time="SEC_USEC_FMT" resched_dpc_time="SEC_USEC_FMT"\n"
11370 		"last_d3_inform_time="SEC_USEC_FMT"\n",
11371 		GET_SEC_USEC(current_time), GET_SEC_USEC(bus->isr_entry_time),
11372 		GET_SEC_USEC(bus->isr_exit_time), GET_SEC_USEC(bus->isr_sched_dpc_time),
11373 		GET_SEC_USEC(bus->rpm_sched_dpc_time),
11374 		GET_SEC_USEC(bus->last_non_ours_irq_time), GET_SEC_USEC(bus->dpc_entry_time),
11375 		GET_SEC_USEC(bus->last_process_ctrlbuf_time),
11376 		GET_SEC_USEC(bus->last_process_flowring_time),
11377 		GET_SEC_USEC(bus->last_process_txcpl_time),
11378 		GET_SEC_USEC(bus->last_process_rxcpl_time),
11379 		GET_SEC_USEC(bus->last_process_infocpl_time),
11380 		GET_SEC_USEC(bus->last_process_edl_time),
11381 		GET_SEC_USEC(bus->dpc_exit_time), GET_SEC_USEC(bus->resched_dpc_time),
11382 		GET_SEC_USEC(bus->last_d3_inform_time));
11383 
11384 	bcm_bprintf(strbuf, "\nlast_suspend_start_time="SEC_USEC_FMT" last_suspend_end_time="
11385 		SEC_USEC_FMT" last_resume_start_time="SEC_USEC_FMT" last_resume_end_time="
11386 		SEC_USEC_FMT"\n", GET_SEC_USEC(bus->last_suspend_start_time),
11387 		GET_SEC_USEC(bus->last_suspend_end_time),
11388 		GET_SEC_USEC(bus->last_resume_start_time),
11389 		GET_SEC_USEC(bus->last_resume_end_time));
11390 
11391 #if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE)
11392 	bcm_bprintf(strbuf, "logtrace_thread_entry_time="SEC_USEC_FMT
11393 		" logtrace_thread_sem_down_time="SEC_USEC_FMT
11394 		"\nlogtrace_thread_flush_time="SEC_USEC_FMT
11395 		" logtrace_thread_unexpected_break_time="SEC_USEC_FMT
11396 		"\nlogtrace_thread_complete_time="SEC_USEC_FMT"\n",
11397 		GET_SEC_USEC(dhd->logtrace_thr_ts.entry_time),
11398 		GET_SEC_USEC(dhd->logtrace_thr_ts.sem_down_time),
11399 		GET_SEC_USEC(dhd->logtrace_thr_ts.flush_time),
11400 		GET_SEC_USEC(dhd->logtrace_thr_ts.unexpected_break_time),
11401 		GET_SEC_USEC(dhd->logtrace_thr_ts.complete_time));
11402 #endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */
11403 }
11404 
11405 void dhd_dump_intr_registers(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
11406 {
11407 	uint32 intstatus = 0;
11408 	uint32 intmask = 0;
11409 	uint32 d2h_db0 = 0;
11410 	uint32 d2h_mb_data = 0;
11411 
11412 	intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
11413 		dhd->bus->pcie_mailbox_int, 0, 0);
11414 #ifdef DHD_MMIO_TRACE
11415 	dhd_bus_mmio_trace(dhd->bus, dhd->bus->pcie_mailbox_int, intstatus, FALSE);
11416 #endif /* defined(DHD_MMIO_TRACE) */
11417 	intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
11418 		dhd->bus->pcie_mailbox_mask, 0, 0);
11419 #ifdef DHD_MMIO_TRACE
11420 	dhd_bus_mmio_trace(dhd->bus, dhd->bus->pcie_mailbox_mask, intmask, FALSE);
11421 #endif /* defined(DHD_MMIO_TRACE) */
11422 	d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCID2H_MailBox, 0, 0);
11423 	dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
11424 
11425 	bcm_bprintf(strbuf, "intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
11426 		intstatus, intmask, d2h_db0);
11427 	bcm_bprintf(strbuf, "d2h_mb_data=0x%x def_intmask=0x%x\n",
11428 		d2h_mb_data, dhd->bus->def_intmask);
11429 }
11430 #if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
11431 void
11432 dhd_bus_dump_awdl_stats(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
11433 {
11434 	int i = 0;
11435 	dhd_awdl_stats_t *awdl_stats;
11436 
11437 	bcm_bprintf(strbuf, "---------- AWDL STATISTICS ---------\n");
11438 	bcm_bprintf(strbuf, "%s %10s %12s %16s %12s %16s %8s %8s %8s\n",
11439 		"Slot", "AvgSlotTUs", "AvgSlotTUsFW", "NumSlots",
11440 		"AvgTxCmpL_Us", "NumTxStatus", "Acked", "tossed", "noack");
11441 	for (i = 0; i < AWDL_NUM_SLOTS; i++) {
11442 		awdl_stats = &dhdp->awdl_stats[i];
11443 		bcm_bprintf(strbuf, "%4d %10llu %12llu %16llu %12llu %16llu ",
11444 			i,
11445 			awdl_stats->num_slots ?
11446 				DIV_U64_BY_U64(awdl_stats->cum_slot_time,
11447 				awdl_stats->num_slots) : 0,
11448 			awdl_stats->num_slots ?
11449 				DIV_U64_BY_U64(awdl_stats->fw_cum_slot_time,
11450 				awdl_stats->num_slots) : 0,
11451 			awdl_stats->num_slots,
11452 			awdl_stats->num_tx_status ?
11453 				DIV_U64_BY_U64(awdl_stats->cum_tx_status_latency,
11454 				awdl_stats->num_tx_status) : 0,
11455 			awdl_stats->num_tx_status);
11456 #ifdef BCMDBG
11457 		if (!dhdp->d2h_sync_mode) {
11458 			bcm_bprintf(strbuf, "%8d %8d %8d\n",
11459 				awdl_stats->tx_status[WLFC_CTL_PKTFLAG_DISCARD],
11460 				awdl_stats->tx_status[WLFC_CTL_PKTFLAG_TOSSED_BYWLC],
11461 				awdl_stats->tx_status[WLFC_CTL_PKTFLAG_DISCARD_NOACK]);
11462 		} else {
11463 			bcm_bprintf(strbuf,
11464 					"%8s %8s %8s\n", "NA", "NA", "NA");
11465 		}
11466 #else
11467 		bcm_bprintf(strbuf,
11468 				"%8s %8s %8s\n", "NA", "NA", "NA");
11469 #endif
11470 	}
11471 }
11472 #endif /* DHD_AWDL && AWDL_SLOT_STATS */
11473 /** Add bus dump output to a buffer */
11474 void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
11475 {
11476 	uint16 flowid;
11477 	int ix = 0;
11478 	flow_ring_node_t *flow_ring_node;
11479 	flow_info_t *flow_info;
11480 #ifdef BCMDBG
11481 	flow_info_t *local_flow_info;
11482 #endif /* BCMDBG */
11483 #ifdef TX_STATUS_LATENCY_STATS
11484 	uint8 ifindex;
11485 	if_flow_lkup_t *if_flow_lkup;
11486 	dhd_if_tx_status_latency_t if_tx_status_latency[DHD_MAX_IFS];
11487 #endif /* TX_STATUS_LATENCY_STATS */
11488 
11489 #if defined(FW_SIGNATURE)
11490 	/* Dump secure firmware status. */
11491 	if (dhdp->busstate <= DHD_BUS_LOAD) {
11492 		dhd_bus_dump_fws(dhdp->bus, strbuf);
11493 	}
11494 #endif
11495 
11496 	if (dhdp->busstate != DHD_BUS_DATA)
11497 		return;
11498 
11499 #ifdef TX_STATUS_LATENCY_STATS
11500 	memset(if_tx_status_latency, 0, sizeof(if_tx_status_latency));
11501 #endif /* TX_STATUS_LATENCY_STATS */
11502 #ifdef DHD_WAKE_STATUS
11503 	bcm_bprintf(strbuf, "wake %u rxwake %u readctrlwake %u\n",
11504 		bcmpcie_get_total_wake(dhdp->bus), dhdp->bus->wake_counts.rxwake,
11505 		dhdp->bus->wake_counts.rcwake);
11506 #ifdef DHD_WAKE_RX_STATUS
11507 	bcm_bprintf(strbuf, " unicast %u muticast %u broadcast %u arp %u\n",
11508 		dhdp->bus->wake_counts.rx_ucast, dhdp->bus->wake_counts.rx_mcast,
11509 		dhdp->bus->wake_counts.rx_bcast, dhdp->bus->wake_counts.rx_arp);
11510 	bcm_bprintf(strbuf, " multi4 %u multi6 %u icmp6 %u multiother %u\n",
11511 		dhdp->bus->wake_counts.rx_multi_ipv4, dhdp->bus->wake_counts.rx_multi_ipv6,
11512 		dhdp->bus->wake_counts.rx_icmpv6, dhdp->bus->wake_counts.rx_multi_other);
11513 	bcm_bprintf(strbuf, " icmp6_ra %u, icmp6_na %u, icmp6_ns %u\n",
11514 		dhdp->bus->wake_counts.rx_icmpv6_ra, dhdp->bus->wake_counts.rx_icmpv6_na,
11515 		dhdp->bus->wake_counts.rx_icmpv6_ns);
11516 #endif /* DHD_WAKE_RX_STATUS */
11517 #ifdef DHD_WAKE_EVENT_STATUS
11518 	for (flowid = 0; flowid < WLC_E_LAST; flowid++)
11519 		if (dhdp->bus->wake_counts.rc_event[flowid] != 0)
11520 			bcm_bprintf(strbuf, " %s = %u\n", bcmevent_get_name(flowid),
11521 				dhdp->bus->wake_counts.rc_event[flowid]);
11522 	bcm_bprintf(strbuf, "\n");
11523 #endif /* DHD_WAKE_EVENT_STATUS */
11524 #endif /* DHD_WAKE_STATUS */
11525 
11526 	dhd_prot_print_info(dhdp, strbuf);
11527 	dhd_dump_intr_registers(dhdp, strbuf);
11528 	dhd_dump_intr_counters(dhdp, strbuf);
11529 	bcm_bprintf(strbuf, "h2d_mb_data_ptr_addr 0x%x, d2h_mb_data_ptr_addr 0x%x\n",
11530 		dhdp->bus->h2d_mb_data_ptr_addr, dhdp->bus->d2h_mb_data_ptr_addr);
11531 	bcm_bprintf(strbuf, "dhd cumm_ctr %d\n", DHD_CUMM_CTR_READ(&dhdp->cumm_ctr));
11532 #ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
11533 	bcm_bprintf(strbuf, "multi_client_flow_rings:%d max_multi_client_flow_rings:%d\n",
11534 		dhdp->multi_client_flow_rings, dhdp->max_multi_client_flow_rings);
11535 #endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */
11536 #if defined(DHD_HTPUT_TUNABLES)
11537 	bcm_bprintf(strbuf, "htput_flow_ring_start:%d total_htput:%d client_htput=%d\n",
11538 		dhdp->htput_flow_ring_start, HTPUT_TOTAL_FLOW_RINGS, dhdp->htput_client_flow_rings);
11539 #endif /* DHD_HTPUT_TUNABLES */
11540 	bcm_bprintf(strbuf,
11541 		"%4s %4s %2s %4s %17s %4s %4s %6s %10s %17s %17s %17s %17s %14s %14s %10s ",
11542 		"Num:", "Flow", "If", "Prio", ":Dest_MacAddress:", "Qlen", "CLen", "L2CLen",
11543 		" Overflows", "TRD: HLRD: HDRD", "TWR: HLWR: HDWR", "BASE(VA)", "BASE(PA)",
11544 		"WORK_ITEM_SIZE", "MAX_WORK_ITEMS", "TOTAL_SIZE");
11545 
11546 #ifdef TX_STATUS_LATENCY_STATS
11547 	/* Average Tx status/Completion Latency in micro secs */
11548 	bcm_bprintf(strbuf, "%16s %16s ", "       NumTxPkts", "    AvgTxCmpL_Us");
11549 #endif /* TX_STATUS_LATENCY_STATS */
11550 
11551 	bcm_bprintf(strbuf, "\n");
11552 
11553 	for (flowid = 0; flowid < dhdp->num_h2d_rings; flowid++) {
11554 		unsigned long flags;
11555 		flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
11556 		DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
11557 		if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) {
11558 			DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
11559 			continue;
11560 		}
11561 
11562 		flow_info = &flow_ring_node->flow_info;
11563 		bcm_bprintf(strbuf,
11564 			"%4d %4d %2d %4d "MACDBG" %4d %4d %6d %10u ", ix++,
11565 			flow_ring_node->flowid, flow_info->ifindex, flow_info->tid,
11566 			MAC2STRDBG(flow_info->da),
11567 			DHD_FLOW_QUEUE_LEN(&flow_ring_node->queue),
11568 			DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_CLEN_PTR(&flow_ring_node->queue)),
11569 			DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_L2CLEN_PTR(&flow_ring_node->queue)),
11570 			DHD_FLOW_QUEUE_FAILURES(&flow_ring_node->queue));
11571 		dhd_prot_print_flow_ring(dhdp, flow_ring_node->prot_info, TRUE, strbuf,
11572 			"%5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d");
11573 
11574 #ifdef TX_STATUS_LATENCY_STATS
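		/* Per-flowring average TX completion latency = cum_tx_status_latency /
		 * num_tx_status, guarded against divide-by-zero when no TX status has
		 * been received yet.
		 */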
11575 		bcm_bprintf(strbuf, "%16llu %16llu ",
11576 			flow_info->num_tx_pkts,
11577 			flow_info->num_tx_status ?
11578 			DIV_U64_BY_U64(flow_info->cum_tx_status_latency,
11579 			flow_info->num_tx_status) : 0);
11580 		ifindex = flow_info->ifindex;
11581 		ASSERT(ifindex < DHD_MAX_IFS);
11582 		if (ifindex < DHD_MAX_IFS) {
11583 			if_tx_status_latency[ifindex].num_tx_status += flow_info->num_tx_status;
11584 			if_tx_status_latency[ifindex].cum_tx_status_latency +=
11585 				flow_info->cum_tx_status_latency;
11586 		} else {
11587 			DHD_ERROR(("%s: Bad IF index: %d associated with flowid: %d\n",
11588 				__FUNCTION__, ifindex, flowid));
11589 		}
11590 #endif /* TX_STATUS_LATENCY_STATS */
11591 		bcm_bprintf(strbuf, "\n");
11592 		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
11593 	}
11594 
11595 #ifdef BCMDBG
11596 	if (!dhdp->d2h_sync_mode) {
11597 		ix = 0;
11598 		bcm_bprintf(strbuf, "\n%4s %4s %2s %10s %7s %6s %5s %5s %10s %7s %7s %7s \n",
11599 			"Num:", "Flow", "If", "     ACKED", "D11SPRS", "WLSPRS", "TSDWL",
11600 			"NOACK", "SPRS_ACKED", "EXPIRED", "DROPPED", "FWFREED");
11601 		for (flowid = 0; flowid < dhdp->num_h2d_rings; flowid++) {
11602 			flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
11603 			if (!flow_ring_node->active)
11604 				continue;
11605 
11606 			flow_info = &flow_ring_node->flow_info;
11607 			bcm_bprintf(strbuf, "%4d %4d %2d ",
11608 				ix++, flow_ring_node->flowid, flow_info->ifindex);
11609 			local_flow_info = &flow_ring_node->flow_info;
11610 			bcm_bprintf(strbuf, "%10d %7d %6d %5d %5d %10d %7d %7d %7d\n",
11611 				local_flow_info->tx_status[WLFC_CTL_PKTFLAG_DISCARD],
11612 				local_flow_info->tx_status[WLFC_CTL_PKTFLAG_D11SUPPRESS],
11613 				local_flow_info->tx_status[WLFC_CTL_PKTFLAG_WLSUPPRESS],
11614 				local_flow_info->tx_status[WLFC_CTL_PKTFLAG_TOSSED_BYWLC],
11615 				local_flow_info->tx_status[WLFC_CTL_PKTFLAG_DISCARD_NOACK],
11616 				local_flow_info->tx_status[WLFC_CTL_PKTFLAG_SUPPRESS_ACKED],
11617 				local_flow_info->tx_status[WLFC_CTL_PKTFLAG_EXPIRED],
11618 				local_flow_info->tx_status[WLFC_CTL_PKTFLAG_DROPPED],
11619 				local_flow_info->tx_status[WLFC_CTL_PKTFLAG_MKTFREE]);
11620 		}
11621 	}
11622 #endif /* BCMDBG */
11623 
11624 #ifdef TX_STATUS_LATENCY_STATS
11625 	bcm_bprintf(strbuf, "\n%s  %16s  %16s\n", "If", "AvgTxCmpL_Us", "NumTxStatus");
11626 	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
11627 	for (ix = 0; ix < DHD_MAX_IFS; ix++) {
11628 		if (!if_flow_lkup[ix].status) {
11629 			continue;
11630 		}
11631 		bcm_bprintf(strbuf, "%2d  %16llu  %16llu\n",
11632 			ix,
11633 			if_tx_status_latency[ix].num_tx_status ?
11634 			DIV_U64_BY_U64(if_tx_status_latency[ix].cum_tx_status_latency,
11635 			if_tx_status_latency[ix].num_tx_status): 0,
11636 			if_tx_status_latency[ix].num_tx_status);
11637 	}
11638 #endif /* TX_STATUS_LATENCY_STATS */
11639 
11640 #ifdef DHD_HP2P
11641 	if (dhdp->hp2p_capable) {
11642 		bcm_bprintf(strbuf, "\n%s  %16s  %16s", "Flowid", "Tx_t0", "Tx_t1");
11643 
11644 		for (flowid = 0; flowid < MAX_HP2P_FLOWS; flowid++) {
11645 			hp2p_info_t *hp2p_info;
11646 			int bin;
11647 
11648 			hp2p_info = &dhdp->hp2p_info[flowid];
11649 			if (hp2p_info->num_timer_start == 0)
11650 				continue;
11651 
11652 			bcm_bprintf(strbuf, "\n%d", hp2p_info->flowid);
11653 			bcm_bprintf(strbuf, "\n%s", "Bin");
11654 
11655 			for (bin = 0; bin < MAX_TX_HIST_BIN; bin++) {
11656 				bcm_bprintf(strbuf, "\n%2d %20llu  %16llu", bin,
11657 					hp2p_info->tx_t0[bin], hp2p_info->tx_t1[bin]);
11658 			}
11659 
11660 			bcm_bprintf(strbuf, "\n%s  %16s", "Flowid", "Rx_t0");
11661 			bcm_bprintf(strbuf, "\n%d", hp2p_info->flowid);
11662 			bcm_bprintf(strbuf, "\n%s", "Bin");
11663 
11664 			for (bin = 0; bin < MAX_RX_HIST_BIN; bin++) {
11665 				bcm_bprintf(strbuf, "\n%d %20llu", bin,
11666 					hp2p_info->rx_t0[bin]);
11667 			}
11668 
11669 			bcm_bprintf(strbuf, "\n%s  %16s  %16s",
11670 				"Packet limit", "Timer limit", "Timer start");
11671 			bcm_bprintf(strbuf, "\n%llu %24llu %16llu", hp2p_info->num_pkt_limit,
11672 				hp2p_info->num_timer_limit, hp2p_info->num_timer_start);
11673 		}
11674 
11675 		bcm_bprintf(strbuf, "\n");
11676 	}
11677 #endif /* DHD_HP2P */
11678 
11679 #if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
11680 	dhd_bus_dump_awdl_stats(dhdp, strbuf);
11681 	dhd_clear_awdl_stats(dhdp);
11682 #endif /* DHD_AWDL && AWDL_SLOT_STATS */
11683 	bcm_bprintf(strbuf, "D3 inform cnt %d\n", dhdp->bus->d3_inform_cnt);
11684 	bcm_bprintf(strbuf, "D0 inform cnt %d\n", dhdp->bus->d0_inform_cnt);
11685 	bcm_bprintf(strbuf, "D0 inform in use cnt %d\n", dhdp->bus->d0_inform_in_use_cnt);
11686 	if (dhdp->d2h_hostrdy_supported) {
11687 		bcm_bprintf(strbuf, "hostready count:%d\n", dhdp->bus->hostready_count);
11688 	}
11689 #ifdef PCIE_INB_DW
11690 	/* Inband device wake counters */
11691 	if (INBAND_DW_ENAB(dhdp->bus)) {
11692 		bcm_bprintf(strbuf, "Inband device_wake assert count: %d\n",
11693 			dhdp->bus->inband_dw_assert_cnt);
11694 		bcm_bprintf(strbuf, "Inband device_wake deassert count: %d\n",
11695 			dhdp->bus->inband_dw_deassert_cnt);
11696 		bcm_bprintf(strbuf, "Inband DS-EXIT <host initiated> count: %d\n",
11697 			dhdp->bus->inband_ds_exit_host_cnt);
11698 		bcm_bprintf(strbuf, "Inband DS-EXIT <device initiated> count: %d\n",
11699 			dhdp->bus->inband_ds_exit_device_cnt);
11700 		bcm_bprintf(strbuf, "Inband DS-EXIT Timeout count: %d\n",
11701 			dhdp->bus->inband_ds_exit_to_cnt);
11702 		bcm_bprintf(strbuf, "Inband HOST_SLEEP-EXIT Timeout count: %d\n",
11703 			dhdp->bus->inband_host_sleep_exit_to_cnt);
11704 	}
11705 #endif /* PCIE_INB_DW */
11706 	bcm_bprintf(strbuf, "d2h_intr_method -> %s\n",
11707 		dhdp->bus->d2h_intr_method ? "PCIE_MSI" : "PCIE_INTX");
11708 
11709 	bcm_bprintf(strbuf, "\n\nDB7 stats - db7_send_cnt: %d, db7_trap_cnt: %d, "
11710 		"max duration: %lld (%lld - %lld), db7_timing_error_cnt: %d\n",
11711 		dhdp->db7_trap.debug_db7_send_cnt,
11712 		dhdp->db7_trap.debug_db7_trap_cnt,
11713 		dhdp->db7_trap.debug_max_db7_dur,
11714 		dhdp->db7_trap.debug_max_db7_trap_time,
11715 		dhdp->db7_trap.debug_max_db7_send_time,
11716 		dhdp->db7_trap.debug_db7_timing_error_cnt);
11717 }
11718 
11719 #ifdef DNGL_AXI_ERROR_LOGGING
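/* Check whether the dongle has logged a valid AXI error record: the TCM
 * pointer read from axierror_logbuf_addr must lie within dongle RAM
 * [dongle_ram_base, dongle_ram_base + ramsize) and the record's signature
 * field must match HND_EXT_TRAP_AXIERROR_SIGNATURE.
 */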
11720 bool
11721 dhd_axi_sig_match(dhd_pub_t *dhdp)
11722 {
11723 	uint32 axi_tcm_addr = dhdpcie_bus_rtcm32(dhdp->bus, dhdp->axierror_logbuf_addr);
11724 
11725 	if (dhdp->dhd_induce_error == DHD_INDUCE_DROP_AXI_SIG) {
11726 		DHD_ERROR(("%s: Induce AXI signature drop\n", __FUNCTION__));
11727 		return FALSE;
11728 	}
11729 
11730 	DHD_ERROR(("%s: axi_tcm_addr: 0x%x, tcm range: 0x%x ~ 0x%x\n",
11731 		__FUNCTION__, axi_tcm_addr, dhdp->bus->dongle_ram_base,
11732 		dhdp->bus->dongle_ram_base + dhdp->bus->ramsize));
11733 	if (axi_tcm_addr >= dhdp->bus->dongle_ram_base &&
11734 	    axi_tcm_addr < dhdp->bus->dongle_ram_base + dhdp->bus->ramsize) {
11735 		uint32 axi_signature = dhdpcie_bus_rtcm32(dhdp->bus, (axi_tcm_addr +
11736 			OFFSETOF(hnd_ext_trap_axi_error_v1_t, signature)));
11737 		if (axi_signature == HND_EXT_TRAP_AXIERROR_SIGNATURE) {
11738 			return TRUE;
11739 		} else {
11740 			DHD_ERROR(("%s: No AXI signature: 0x%x\n",
11741 				__FUNCTION__, axi_signature));
11742 			return FALSE;
11743 		}
11744 	} else {
11745 		DHD_ERROR(("%s: No AXI shared tcm address debug info.\n", __FUNCTION__));
11746 		return FALSE;
11747 	}
11748 }
11749 
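/* Read the dongle's AXI error record out of TCM after an SMMU fault, dump
 * it to the kernel log, preserve a copy for ETD, and finally schedule the
 * deferred AXI error dump work.
 */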
11750 void
11751 dhd_axi_error(dhd_pub_t *dhdp)
11752 {
11753 	dhd_axi_error_dump_t *axi_err_dump;
11754 	uint8 *axi_err_buf = NULL;
11755 	uint8 *p_axi_err = NULL;
11756 	uint32 axi_logbuf_addr;
11757 	uint32 axi_tcm_addr;
11758 	int err, size;
11759 
11760 	/* XXX: On the Dongle side, if an invalid Host Address is generated for a transaction
11761  * it results in an SMMU Fault, and the Host won't respond to the invalid transaction.
11762 	 * On the Dongle side, after 50msec this results in AXI Slave Error.
11763 	 * Hence introduce a delay higher than 50msec to ensure AXI Slave error happens and
11764 	 * the Dongle collects the required information.
11765 	 */
11766 	OSL_DELAY(75000);
11767 
11768 	axi_logbuf_addr = dhdp->axierror_logbuf_addr;
11769 	if (!axi_logbuf_addr) {
11770 		DHD_ERROR(("%s: No AXI TCM address debug info.\n", __FUNCTION__));
11771 		goto sched_axi;
11772 	}
11773 
11774 	axi_err_dump = dhdp->axi_err_dump;
11775 	if (!axi_err_dump) {
11776 		goto sched_axi;
11777 	}
11778 
11779 	if (!dhd_axi_sig_match(dhdp)) {
11780 		goto sched_axi;
11781 	}
11782 
11783 	/* Reading AXI error data for SMMU fault */
11784 	DHD_ERROR(("%s: Read AXI data from TCM address\n", __FUNCTION__));
11785 	axi_tcm_addr = dhdpcie_bus_rtcm32(dhdp->bus, axi_logbuf_addr);
11786 	size = sizeof(hnd_ext_trap_axi_error_v1_t);
11787 	axi_err_buf = MALLOCZ(dhdp->osh, size);
11788 	if (axi_err_buf == NULL) {
11789 		DHD_ERROR(("%s: out of memory!\n", __FUNCTION__));
11790 		goto sched_axi;
11791 	}
11792 
11793 	p_axi_err = axi_err_buf;
11794 	err = dhdpcie_bus_membytes(dhdp->bus, FALSE, axi_tcm_addr, p_axi_err, size);
11795 	if (err) {
11796 		DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
11797 			__FUNCTION__, err, size, axi_tcm_addr));
11798 		goto sched_axi;
11799 	}
11800 
11801 	/* Dump data to Dmesg */
11802 	dhd_log_dump_axi_error(axi_err_buf);
11803 	err = memcpy_s(&axi_err_dump->etd_axi_error_v1, size, axi_err_buf, size);
11804 	if (err) {
11805 		DHD_ERROR(("%s: failed to copy etd axi error info, err=%d\n",
11806 			__FUNCTION__, err));
11807 	}
11808 
11809 sched_axi:
11810 	if (axi_err_buf) {
11811 		MFREE(dhdp->osh, axi_err_buf, size);
11812 	}
11813 	dhd_schedule_axi_error_dump(dhdp, NULL);
11814 }
11815 
11816 static void
11817 dhd_log_dump_axi_error(uint8 *axi_err)
11818 {
11819 	dma_dentry_v1_t dma_dentry;
11820 	dma_fifo_v1_t dma_fifo;
11821 	int i = 0, j = 0;
11822 
11823 	if (*(uint8 *)axi_err == HND_EXT_TRAP_AXIERROR_VERSION_1) {
11824 		hnd_ext_trap_axi_error_v1_t *axi_err_v1 = (hnd_ext_trap_axi_error_v1_t *)axi_err;
11825 		DHD_ERROR(("%s: signature : 0x%x\n", __FUNCTION__, axi_err_v1->signature));
11826 		DHD_ERROR(("%s: version : 0x%x\n", __FUNCTION__, axi_err_v1->version));
11827 		DHD_ERROR(("%s: length : 0x%x\n", __FUNCTION__, axi_err_v1->length));
11828 		DHD_ERROR(("%s: dma_fifo_valid_count : 0x%x\n",
11829 			__FUNCTION__, axi_err_v1->dma_fifo_valid_count));
11830 		DHD_ERROR(("%s: axi_errorlog_status : 0x%x\n",
11831 			__FUNCTION__, axi_err_v1->axi_errorlog_status));
11832 		DHD_ERROR(("%s: axi_errorlog_core : 0x%x\n",
11833 			__FUNCTION__, axi_err_v1->axi_errorlog_core));
11834 		DHD_ERROR(("%s: axi_errorlog_hi : 0x%x\n",
11835 			__FUNCTION__, axi_err_v1->axi_errorlog_hi));
11836 		DHD_ERROR(("%s: axi_errorlog_lo : 0x%x\n",
11837 			__FUNCTION__, axi_err_v1->axi_errorlog_lo));
11838 		DHD_ERROR(("%s: axi_errorlog_id : 0x%x\n",
11839 			__FUNCTION__, axi_err_v1->axi_errorlog_id));
11840 
11841 		for (i = 0; i < MAX_DMAFIFO_ENTRIES_V1; i++) {
11842 			dma_fifo = axi_err_v1->dma_fifo[i];
11843 			DHD_ERROR(("%s: valid:%d : 0x%x\n",
11844 				__FUNCTION__, i, dma_fifo.valid));
11845 			DHD_ERROR(("%s: direction:%d : 0x%x\n",
11846 				__FUNCTION__, i, dma_fifo.direction));
11847 			DHD_ERROR(("%s: index:%d : 0x%x\n",
11848 				__FUNCTION__, i, dma_fifo.index));
11849 			DHD_ERROR(("%s: dpa:%d : 0x%x\n",
11850 				__FUNCTION__, i, dma_fifo.dpa));
11851 			DHD_ERROR(("%s: desc_lo:%d : 0x%x\n",
11852 				__FUNCTION__, i, dma_fifo.desc_lo));
11853 			DHD_ERROR(("%s: desc_hi:%d : 0x%x\n",
11854 				__FUNCTION__, i, dma_fifo.desc_hi));
11855 			DHD_ERROR(("%s: din:%d : 0x%x\n",
11856 				__FUNCTION__, i, dma_fifo.din));
11857 			DHD_ERROR(("%s: dout:%d : 0x%x\n",
11858 				__FUNCTION__, i, dma_fifo.dout));
11859 			for (j = 0; j < MAX_DMAFIFO_DESC_ENTRIES_V1; j++) {
11860 				dma_dentry = axi_err_v1->dma_fifo[i].dentry[j];
11861 				DHD_ERROR(("%s: ctrl1:%d : 0x%x\n",
11862 					__FUNCTION__, i, dma_dentry.ctrl1));
11863 				DHD_ERROR(("%s: ctrl2:%d : 0x%x\n",
11864 					__FUNCTION__, i, dma_dentry.ctrl2));
11865 				DHD_ERROR(("%s: addrlo:%d : 0x%x\n",
11866 					__FUNCTION__, i, dma_dentry.addrlo));
11867 				DHD_ERROR(("%s: addrhi:%d : 0x%x\n",
11868 					__FUNCTION__, i, dma_dentry.addrhi));
11869 			}
11870 		}
11871 	}
11872 	else {
11873 		DHD_ERROR(("%s: Invalid AXI version: 0x%x\n", __FUNCTION__, (*(uint8 *)axi_err)));
11874 	}
11875 }
11876 #endif /* DNGL_AXI_ERROR_LOGGING */
11877 
11878 /**
11879  * Brings transmit packets on all flow rings closer to the dongle, by moving (a subset) from their
11880  * flow queue to their flow ring.
11881  */
11882 static void
11883 dhd_update_txflowrings(dhd_pub_t *dhd)
11884 {
11885 	unsigned long flags;
11886 	dll_t *item, *next;
11887 	flow_ring_node_t *flow_ring_node;
11888 	struct dhd_bus *bus = dhd->bus;
11889 	int count = 0;
11890 
11891 	if (dhd_query_bus_erros(dhd)) {
11892 		return;
11893 	}
11894 
11895 	/* Hold flowring_list_lock to ensure no race condition while accessing the List */
11896 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
11897 	for (item = dll_head_p(&bus->flowring_active_list);
11898 		(!dhd_is_device_removed(dhd) && !dll_end(&bus->flowring_active_list, item));
11899 		item = next, count++) {
11900 		if (dhd->hang_was_sent) {
11901 			break;
11902 		}
11903 
11904 		if (count > bus->max_tx_flowrings) {
11905 			DHD_ERROR(("%s : overflow max flowrings\n", __FUNCTION__));
11906 #ifdef OEM_ANDROID
11907 			dhd->hang_reason = HANG_REASON_UNKNOWN;
11908 			dhd_os_send_hang_message(dhd);
11909 #endif /* OEM_ANDROID */
11910 			break;
11911 		}
11912 
11913 		next = dll_next_p(item);
11914 		flow_ring_node = dhd_constlist_to_flowring(item);
11915 
11916 		/* Ensure that flow_ring_node in the list is Not Null */
11917 		ASSERT(flow_ring_node != NULL);
11918 
11919 		/* Ensure that the flowring node has valid contents */
11920 		ASSERT(flow_ring_node->prot_info != NULL);
11921 
11922 		dhd_prot_update_txflowring(dhd, flow_ring_node->flowid, flow_ring_node->prot_info);
11923 	}
11924 	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
11925 }
11926 
11927 /** Mailbox ringbell Function */
11928 static void
11929 dhd_bus_gen_devmb_intr(struct dhd_bus *bus)
11930 {
11931 	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
11932 		(bus->sih->buscorerev == 4)) {
11933 		DHD_ERROR(("%s: mailbox communication not supported\n", __FUNCTION__));
11934 		return;
11935 	}
11936 	if (bus->db1_for_mb)  {
11937 		/* this is a pcie core register, not the config register */
11938 		/* XXX: make sure we are on PCIE */
11939 		DHD_PCIE_INFO(("%s: writing a mailbox interrupt to the device, through doorbell 1\n", __FUNCTION__));
11940 		if (DAR_PWRREQ(bus)) {
11941 			dhd_bus_pcie_pwr_req(bus);
11942 		}
11943 #ifdef DHD_MMIO_TRACE
11944 		dhd_bus_mmio_trace(bus, dhd_bus_db1_addr_get(bus), 0x2, TRUE);
11945 #endif /* defined(DHD_MMIO_TRACE) */
11946 		si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db1_addr_get(bus),
11947 			~0, 0x12345678);
11948 	} else {
11949 		DHD_PCIE_INFO(("%s: writing a mailbox interrupt to the device,"
11950 				" through config space\n", __FUNCTION__));
11951 		dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
11952 		/* XXX CRWLPCIEGEN2-182 requires double write */
11953 		dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
11954 	}
11955 }
11956 
11957 /* Upon receiving a mailbox interrupt with the
11958  * H2D_FW_TRAP bit set in the mailbox location,
11959  * the device traps.
11960  */
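/* If the firmware advertises DB7 trap support (fw_db7w_trap), the host
 * instead writes the DB7 magic number to the DB1_3 doorbell address,
 * trapping the dongle directly; otherwise it falls back to the
 * H2D_FW_TRAP mailbox bit plus a "bus:disconnect" iovar for firmware
 * that cannot interpret the trap bit.
 */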
11961 static void
11962 dhdpcie_fw_trap(dhd_bus_t *bus)
11963 {
11964 	DHD_ERROR(("%s: send trap!!!\n", __FUNCTION__));
11965 	if (bus->dhd->db7_trap.fw_db7w_trap) {
11966 		uint32 addr = dhd_bus_db1_addr_3_get(bus);
11967 		bus->dhd->db7_trap.debug_db7_send_time = OSL_LOCALTIME_NS();
11968 		bus->dhd->db7_trap.debug_db7_send_cnt++;
11969 		si_corereg(bus->sih, bus->sih->buscoreidx, addr, ~0,
11970 			bus->dhd->db7_trap.db7_magic_number);
11971 		return;
11972 	}
11973 
11974 	/* Send the mailbox data and generate mailbox intr. */
11975 	dhdpcie_send_mb_data(bus, H2D_FW_TRAP);
11976 	/* For FWs that cannot interpret H2D_FW_TRAP */
11977 	(void)dhd_wl_ioctl_set_intiovar(bus->dhd, "bus:disconnect", 99, WLC_SET_VAR, TRUE, 0);
11978 }
11979 
11980 #ifdef PCIE_INB_DW
11981 
11982 void
11983 dhd_bus_inb_ack_pending_ds_req(dhd_bus_t *bus)
11984 {
11985 	/* The DHD_BUS_INB_DW_LOCK must be held before
11986 	* calling this function!
11987 	*/
11988 	if ((dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
11989 		DW_DEVICE_DS_DEV_SLEEP_PEND) &&
11990 		(bus->host_active_cnt == 0)) {
11991 		dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_DS_DEV_SLEEP);
11992 		dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
11993 	}
11994 }
11995 
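/* Assert (val == TRUE) or deassert (val == FALSE) the inband device_wake.
 * A sketch of the DS state transitions implemented here and in
 * dhd_bus_handle_mb_data():
 *
 *   DS_DEV_SLEEP --(host sends DEVICE_WAKE_ASSERT)--> DS_DISABLED_WAIT
 *   DS_DISABLED_WAIT --(dongle DS-EXIT note)--> DS_DEV_WAKE
 *   DS_DEV_WAKE --(host sends DEVICE_WAKE_DEASSERT)--> DS_ACTIVE
 *   DS_DEV_SLEEP_PEND --(host idle, DS-ACK sent)--> DS_DEV_SLEEP
 *
 * Callers that cannot sleep get a fixed 1 ms delay instead of blocking
 * on the DS-EXIT wait event.
 */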
11996 int
11997 dhd_bus_inb_set_device_wake(struct dhd_bus *bus, bool val)
11998 {
11999 	int timeleft;
12000 	unsigned long flags;
12001 	int ret;
12002 
12003 	if (!INBAND_DW_ENAB(bus)) {
12004 		return BCME_ERROR;
12005 	}
12006 	if (val) {
12007 		DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
12008 
12009 		/*
12010 		 * Reset the doorbell timeout value so that the watchdog
12011 		 * doesn't try to deassert Device Wake while we are
12012 		 * still in the process of asserting it.
12013 		 */
12014 		dhd_bus_doorbell_timeout_reset(bus);
12015 
12016 		if (dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
12017 			DW_DEVICE_DS_DEV_SLEEP) {
12018 			/* Clear wait_for_ds_exit */
12019 			bus->wait_for_ds_exit = 0;
12020 			if (bus->calc_ds_exit_latency) {
12021 				bus->ds_exit_latency = 0;
12022 				bus->ds_exit_ts2 = 0;
12023 				bus->ds_exit_ts1 = OSL_SYSUPTIME_US();
12024 			}
12025 			ret = dhdpcie_send_mb_data(bus, H2DMB_DS_DEVICE_WAKE_ASSERT);
12026 			if (ret != BCME_OK) {
12027 				DHD_ERROR(("Failed: assert Inband device_wake\n"));
12028 				DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
12029 				ret = BCME_ERROR;
12030 				goto exit;
12031 			}
12032 			dhdpcie_bus_set_pcie_inband_dw_state(bus,
12033 				DW_DEVICE_DS_DISABLED_WAIT);
12034 			bus->inband_dw_assert_cnt++;
12035 		} else if (dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
12036 			DW_DEVICE_DS_DISABLED_WAIT) {
12037 			DHD_ERROR(("Inband device wake is already asserted, "
12038 				"waiting for DS-Exit\n"));
12039 		}
12040 		else {
12041 			DHD_PCIE_INFO(("Not in DS SLEEP state \n"));
12042 			DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
12043 			ret = BCME_OK;
12044 			goto exit;
12045 		}
12046 
12047 		/*
12048 		 * Since we are going to wait/sleep, release the lock.
12049 		 * The Device Wake sanity is still valid, because
12050 		 * a) If there is another context that comes in and tries
12051 		 *    to assert DS again and it gets the lock, since
12052 		 *    ds_state would now be != DW_DEVICE_DS_DEV_SLEEP, the
12053 		 *    context would return saying Not in DS Sleep.
12054 		 * b) If there is another context that comes in and tries
12055 		 *    to de-assert DS and gets the lock,
12056 		 *    since the ds_state is != DW_DEVICE_DS_DEV_WAKE,
12057 		 *    that context would return too. This cannot happen
12058 		 *    since the watchdog is the only context that can
12059 		 *    de-assert Device Wake, and as the first step of
12060 		 *    asserting Device Wake we have pushed out the
12061 		 *    doorbell timeout.
12062 		 *
12063 		 */
12064 
12065 		if (!CAN_SLEEP()) {
12066 			dhdpcie_bus_set_pcie_inband_dw_state(bus,
12067 				DW_DEVICE_DS_DEV_WAKE);
12068 			DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
12069 			/* Called from context that cannot sleep */
12070 			OSL_DELAY(1000);
12071 		} else {
12072 			DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
12073 			/* Wait for DS EXIT for DS_EXIT_TIMEOUT seconds */
12074 			timeleft = dhd_os_ds_exit_wait(bus->dhd, &bus->wait_for_ds_exit);
12075 			if (!bus->wait_for_ds_exit || timeleft == 0) {
12076 				DHD_ERROR(("dhd_bus_inb_set_device_wake:DS-EXIT timeout, "
12077 					"wait_for_ds_exit : %d\n", bus->wait_for_ds_exit));
12078 				bus->inband_ds_exit_to_cnt++;
12079 				bus->ds_exit_timeout = 0;
12080 #ifdef DHD_FW_COREDUMP
12081 				if (bus->dhd->memdump_enabled) {
12082 					/* collect core dump */
12083 					DHD_GENERAL_LOCK(bus->dhd, flags);
12084 					DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_IN_PROGRESS(bus->dhd);
12085 					DHD_GENERAL_UNLOCK(bus->dhd, flags);
12086 					bus->dhd->memdump_type =
12087 						DUMP_TYPE_INBAND_DEVICE_WAKE_FAILURE;
12088 					dhd_bus_mem_dump(bus->dhd);
12089 				}
12090 #else
12091 				ASSERT(0);
12092 #endif /* DHD_FW_COREDUMP */
12093 				ret = BCME_ERROR;
12094 				goto exit;
12095 			}
12096 		}
12097 		ret = BCME_OK;
12098 	} else {
12099 		DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
12100 		if ((dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
12101 			DW_DEVICE_DS_DEV_WAKE)) {
12102 			ret = dhdpcie_send_mb_data(bus, H2DMB_DS_DEVICE_WAKE_DEASSERT);
12103 			if (ret != BCME_OK) {
12104 				DHD_ERROR(("Failed: deassert Inband device_wake\n"));
12105 				DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
12106 				goto exit;
12107 			}
12108 			dhdpcie_bus_set_pcie_inband_dw_state(bus,
12109 				DW_DEVICE_DS_ACTIVE);
12110 			bus->inband_dw_deassert_cnt++;
12111 		} else if ((dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
12112 			DW_DEVICE_DS_DEV_SLEEP_PEND) &&
12113 			(bus->host_active_cnt == 0)) {
12114 			dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_DS_DEV_SLEEP);
12115 			dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
12116 		}
12117 
12118 		ret = BCME_OK;
12119 		DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
12120 	}
12121 
12122 exit:
12123 	return ret;
12124 }
12125 #endif /* PCIE_INB_DW */
12126 #if defined(PCIE_OOB) || defined(PCIE_INB_DW)
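/* Re-arm the doorbell inactivity timer; the timer is armed for
 * (dhd_doorbell_timeout * 1000) / <watchdog period in ms> ticks. When the
 * timeout is disabled and the bus is not suspended, deassert device_wake
 * right away.
 */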
12127 void
12128 dhd_bus_doorbell_timeout_reset(struct dhd_bus *bus)
12129 {
12130 	if (dhd_doorbell_timeout) {
12131 #ifdef DHD_PCIE_RUNTIMEPM
12132 		if (dhd_runtimepm_ms) {
12133 			dhd_timeout_start(&bus->doorbell_timer,
12134 				(dhd_doorbell_timeout * 1000) / dhd_runtimepm_ms);
12135 		}
12136 #else
12137 #ifdef BCMQT
12138 		uint wd_scale = 1;
12139 #else
12140 		uint wd_scale = dhd_watchdog_ms;
12141 #endif
12142 		if (dhd_watchdog_ms) {
12143 			dhd_timeout_start(&bus->doorbell_timer,
12144 				(dhd_doorbell_timeout * 1000) / wd_scale);
12145 		}
12146 #endif /* DHD_PCIE_RUNTIMEPM */
12147 	}
12148 	else if (bus->dhd->busstate != DHD_BUS_SUSPEND) {
12149 		dhd_bus_set_device_wake(bus, FALSE);
12150 	}
12151 }
12152 
12153 int
12154 dhd_bus_set_device_wake(struct dhd_bus *bus, bool val)
12155 {
12156 	if (bus->ds_enabled && bus->dhd->ring_attached) {
12157 #ifdef PCIE_INB_DW
12158 		if (INBAND_DW_ENAB(bus)) {
12159 			return dhd_bus_inb_set_device_wake(bus, val);
12160 		}
12161 #endif /* PCIE_INB_DW */
12162 #ifdef PCIE_OOB
12163 		if (OOB_DW_ENAB(bus)) {
12164 			return dhd_os_oob_set_device_wake(bus, val);
12165 		}
12166 #endif /* PCIE_OOB */
12167 	}
12168 	return BCME_OK;
12169 }
12170 
12171 void
12172 dhd_bus_dw_deassert(dhd_pub_t *dhd)
12173 {
12174 	dhd_bus_t *bus = dhd->bus;
12175 	unsigned long flags;
12176 
12177 	if (dhd_query_bus_erros(bus->dhd)) {
12178 		return;
12179 	}
12180 
12181 	/* If haven't communicated with device for a while, deassert the Device_Wake GPIO */
12182 	if (dhd_doorbell_timeout != 0 && bus->dhd->busstate == DHD_BUS_DATA &&
12183 		dhd_timeout_expired(&bus->doorbell_timer) &&
12184 		!dhd_query_bus_erros(bus->dhd)) {
12185 		DHD_GENERAL_LOCK(dhd, flags);
12186 		if (DHD_BUS_BUSY_CHECK_IDLE(dhd) &&
12187 			!DHD_CHECK_CFG_IN_PROGRESS(dhd)) {
12188 			DHD_BUS_BUSY_SET_IN_DS_DEASSERT(dhd);
12189 			DHD_GENERAL_UNLOCK(dhd, flags);
12190 			dhd_bus_set_device_wake(bus, FALSE);
12191 			DHD_GENERAL_LOCK(dhd, flags);
12192 			DHD_BUS_BUSY_CLEAR_IN_DS_DEASSERT(dhd);
12193 			dhd_os_busbusy_wake(bus->dhd);
12194 			DHD_GENERAL_UNLOCK(dhd, flags);
12195 		} else {
12196 			DHD_GENERAL_UNLOCK(dhd, flags);
12197 		}
12198 	}
12199 
12200 #ifdef PCIE_INB_DW
12201 	if (INBAND_DW_ENAB(bus)) {
12202 		if (bus->ds_exit_timeout) {
12203 			bus->ds_exit_timeout--;
12204 			if (bus->ds_exit_timeout == 1) {
12205 				DHD_ERROR(("DS-EXIT TIMEOUT\n"));
12206 				bus->ds_exit_timeout = 0;
12207 				bus->inband_ds_exit_to_cnt++;
12208 			}
12209 		}
12210 		if (bus->host_sleep_exit_timeout) {
12211 			bus->host_sleep_exit_timeout--;
12212 			if (bus->host_sleep_exit_timeout == 1) {
12213 				DHD_ERROR(("HOST_SLEEP-EXIT TIMEOUT\n"));
12214 				bus->host_sleep_exit_timeout = 0;
12215 				bus->inband_host_sleep_exit_to_cnt++;
12216 			}
12217 		}
12218 	}
12219 #endif /* PCIE_INB_DW */
12220 }
12221 #endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
12222 
12223 /** mailbox doorbell ring function */
12224 void
12225 dhd_bus_ringbell(struct dhd_bus *bus, uint32 value)
12226 {
12227 	/* Skip once bus enters low power state (D3_INFORM/D3_ACK) */
12228 	if (__DHD_CHK_BUS_IN_LPS(bus)) {
12229 		DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
12230 			__FUNCTION__, bus->bus_low_power_state));
12231 		return;
12232 	}
12233 
12234 	/* Skip in the case of link down */
12235 	if (bus->is_linkdown) {
12236 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
12237 		return;
12238 	}
12239 
12240 	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
12241 		(bus->sih->buscorerev == 4)) {
12242 		si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int,
12243 			PCIE_INTB, PCIE_INTB);
12244 	} else {
12245 		/* this is a pcie core register, not the config register */
12246 		/* XXX: make sure we are on PCIE */
12247 		DHD_PCIE_INFO(("%s: writing a door bell to the device\n", __FUNCTION__));
12248 		if (IDMA_ACTIVE(bus->dhd)) {
12249 			if (DAR_PWRREQ(bus)) {
12250 				dhd_bus_pcie_pwr_req(bus);
12251 			}
12252 			si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db0_addr_2_get(bus),
12253 				~0, value);
12254 		} else {
12255 			if (DAR_PWRREQ(bus)) {
12256 				dhd_bus_pcie_pwr_req(bus);
12257 			}
12258 			si_corereg(bus->sih, bus->sih->buscoreidx,
12259 				dhd_bus_db0_addr_get(bus), ~0, 0x12345678);
12260 		}
12261 	}
12262 }
12263 
12264 /** mailbox doorbell ring function for IDMA/IFRM using dma channel2 */
12265 void
12266 dhd_bus_ringbell_2(struct dhd_bus *bus, uint32 value, bool devwake)
12267 {
12268 	/* this is a pcie core register, not the config register */
12269 	/* XXX: make sure we are on PCIE */
12270 	/* Skip once bus enters low power state (D3_INFORM/D3_ACK) */
12271 	if (__DHD_CHK_BUS_IN_LPS(bus)) {
12272 		DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
12273 			__FUNCTION__, bus->bus_low_power_state));
12274 		return;
12275 	}
12276 
12277 	/* Skip in the case of link down */
12278 	if (bus->is_linkdown) {
12279 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
12280 		return;
12281 	}
12282 
12283 	DHD_PCIE_INFO(("writing a door bell 2 to the device\n"));
12284 	if (DAR_PWRREQ(bus)) {
12285 		dhd_bus_pcie_pwr_req(bus);
12286 	}
12287 	si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db0_addr_2_get(bus),
12288 		~0, value);
12289 }
12290 
12291 void
12292 dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value)
12293 {
12294 	/* Skip once bus enters low power state (D3_INFORM/D3_ACK) */
12295 	if (__DHD_CHK_BUS_IN_LPS(bus)) {
12296 		DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
12297 			__FUNCTION__, bus->bus_low_power_state));
12298 		return;
12299 	}
12300 
12301 	/* Skip in the case of link down */
12302 	if (bus->is_linkdown) {
12303 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
12304 		return;
12305 	}
12306 
12307 #if defined(PCIE_OOB) || defined(PCIE_INB_DW)
12308 	if (OOB_DW_ENAB(bus)) {
12309 		dhd_bus_set_device_wake(bus, TRUE);
12310 	}
12311 	dhd_bus_doorbell_timeout_reset(bus);
12312 #endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
12313 #ifdef DHD_MMIO_TRACE
12314 	dhd_bus_mmio_trace(bus, dhd_bus_db0_addr_get(bus), value,
12315 		((value >> 24u) == 0xFF) ? TRUE : FALSE);
12316 #endif /* defined(DHD_MMIO_TRACE) */
12317 	if (DAR_PWRREQ(bus)) {
12318 		dhd_bus_pcie_pwr_req(bus);
12319 	}
12320 
12321 #ifdef DHD_DB0TS
12322 	if (bus->dhd->db0ts_capable) {
12323 		uint64 ts;
12324 
12325 		ts = local_clock();
12326 		do_div(ts, 1000);
12327 
12328 		value = htol32(ts & 0xFFFFFFFF);
12329 		DHD_INFO(("%s: usec timer = 0x%x\n", __FUNCTION__, value));
12330 	}
12331 #endif /* DHD_DB0TS */
12332 	W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, value);
12333 }
12334 
12335 void
12336 dhdpcie_bus_ringbell_2_fast(struct dhd_bus *bus, uint32 value, bool devwake)
12337 {
12338 	/* Skip once bus enters low power state (D3_INFORM/D3_ACK) */
12339 	if (__DHD_CHK_BUS_IN_LPS(bus)) {
12340 		DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
12341 			__FUNCTION__, bus->bus_low_power_state));
12342 		return;
12343 	}
12344 
12345 	/* Skip in the case of link down */
12346 	if (bus->is_linkdown) {
12347 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
12348 		return;
12349 	}
12350 
12351 #if defined(PCIE_OOB) || defined(PCIE_INB_DW)
12352 	if (devwake) {
12353 		if (OOB_DW_ENAB(bus)) {
12354 			dhd_bus_set_device_wake(bus, TRUE);
12355 		}
12356 	}
12357 	dhd_bus_doorbell_timeout_reset(bus);
12358 #endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
12359 
12360 #ifdef DHD_MMIO_TRACE
12361 	dhd_bus_mmio_trace(bus, dhd_bus_db0_addr_2_get(bus), value, TRUE);
12362 #endif /* defined(DHD_MMIO_TRACE) */
12363 	if (DAR_PWRREQ(bus)) {
12364 		dhd_bus_pcie_pwr_req(bus);
12365 	}
12366 	W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_2_addr, value);
12367 }
12368 
12369 static void
12370 dhd_bus_ringbell_oldpcie(struct dhd_bus *bus, uint32 value)
12371 {
12372 	uint32 w;
12373 	/* Skip once bus enters low power state (D3_INFORM/D3_ACK) */
12374 	if (__DHD_CHK_BUS_IN_LPS(bus)) {
12375 		DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
12376 			__FUNCTION__, bus->bus_low_power_state));
12377 		return;
12378 	}
12379 
12380 	/* Skip in the case of link down */
12381 	if (bus->is_linkdown) {
12382 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
12383 		return;
12384 	}
12385 
12386 	w = (R_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr) & ~PCIE_INTB) | PCIE_INTB;
12387 	W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, w);
12388 }
12389 
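/* Select the H2D doorbell ring routine. Old PCIe cores (buscorerev 2/4/6)
 * toggle PCIE_INTB in the mailbox-int register; newer cores write DB0.
 * When the register can be mapped, its address is cached so the "fast"
 * W_REG variants can be returned; otherwise the si_corereg-based
 * dhd_bus_ringbell is used as a fallback.
 */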
12390 dhd_mb_ring_t
12391 dhd_bus_get_mbintr_fn(struct dhd_bus *bus)
12392 {
12393 	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
12394 		(bus->sih->buscorerev == 4)) {
12395 		bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
12396 			bus->pcie_mailbox_int);
12397 		if (bus->pcie_mb_intr_addr) {
12398 			bus->pcie_mb_intr_osh = si_osh(bus->sih);
12399 			return dhd_bus_ringbell_oldpcie;
12400 		}
12401 	} else {
12402 		bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
12403 			dhd_bus_db0_addr_get(bus));
12404 		if (bus->pcie_mb_intr_addr) {
12405 			bus->pcie_mb_intr_osh = si_osh(bus->sih);
12406 			return dhdpcie_bus_ringbell_fast;
12407 		}
12408 	}
12409 	return dhd_bus_ringbell;
12410 }
12411 
12412 dhd_mb_ring_2_t
12413 dhd_bus_get_mbintr_2_fn(struct dhd_bus *bus)
12414 {
12415 	bus->pcie_mb_intr_2_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
12416 		dhd_bus_db0_addr_2_get(bus));
12417 	if (bus->pcie_mb_intr_2_addr) {
12418 		bus->pcie_mb_intr_osh = si_osh(bus->sih);
12419 		return dhdpcie_bus_ringbell_2_fast;
12420 	}
12421 	return dhd_bus_ringbell_2;
12422 }
12423 
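/* Bus DPC: drains D2H work outside ISR context. Returns TRUE when more
 * work remains so that the caller reschedules the DPC, FALSE otherwise
 * (in which case host/dongle interrupts are re-enabled).
 */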
12424 bool
12425 BCMFASTPATH(dhd_bus_dpc)(struct dhd_bus *bus)
12426 {
12427 	bool resched = FALSE;	  /* Flag indicating resched wanted */
12428 	unsigned long flags;
12429 
12430 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
12431 
12432 	bus->dpc_entry_time = OSL_LOCALTIME_NS();
12433 
12434 	if (dhd_query_bus_erros(bus->dhd)) {
12435 		return 0;
12436 	}
12437 
12438 	DHD_GENERAL_LOCK(bus->dhd, flags);
12439 	/* Check for only DHD_BUS_DOWN and not for DHD_BUS_DOWN_IN_PROGRESS
12440 	 * to avoid IOCTL Resumed On timeout when ioctl is waiting for response
12441 	 * and rmmod is fired in parallel, which will make DHD_BUS_DOWN_IN_PROGRESS
12442 	 * and if we return from here, then IOCTL response will never be handled
12443 	 */
12444 	if (bus->dhd->busstate == DHD_BUS_DOWN) {
12445 		DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__));
12446 		bus->intstatus = 0;
12447 		DHD_GENERAL_UNLOCK(bus->dhd, flags);
12448 		bus->dpc_return_busdown_count++;
12449 		return 0;
12450 	}
12451 #ifdef DHD_PCIE_RUNTIMEPM
12452 	bus->idlecount = 0;
12453 #endif /* DHD_PCIE_RUNTIMEPM */
12454 	DHD_BUS_BUSY_SET_IN_DPC(bus->dhd);
12455 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
12456 
12457 #ifdef DHD_READ_INTSTATUS_IN_DPC
12458 	if (bus->ipend) {
12459 		bus->ipend = FALSE;
12460 		bus->intstatus = dhdpcie_bus_intstatus(bus);
12461 		/* Check if the interrupt is ours or not */
12462 		if (bus->intstatus == 0) {
12463 			goto INTR_ON;
12464 		}
12465 		bus->intrcount++;
12466 	}
12467 #endif /* DHD_READ_INTSTATUS_IN_DPC */
12468 
12469 	/* Do not process dpc after receiving D3_ACK */
12470 	if (DHD_CHK_BUS_LPS_D3_ACKED(bus)) {
12471 		DHD_ERROR(("%s: D3 ACK received, skip dpc\n", __FUNCTION__));
12472 		goto exit;
12473 	}
12474 
12475 	resched = dhdpcie_bus_process_mailbox_intr(bus, bus->intstatus);
12476 	if (!resched) {
12477 		bus->intstatus = 0;
12478 #ifdef DHD_READ_INTSTATUS_IN_DPC
12479 INTR_ON:
12480 #endif /* DHD_READ_INTSTATUS_IN_DPC */
12481 #ifdef CHIP_INTR_CONTROL
12482 		dhdpcie_bus_intr_enable(bus); /* Enable back interrupt using Intmask!! */
12483 		bus->dpc_intr_enable_count++;
12484 #else
12485 		/* For Linux, MacOS, etc. (other than NDIS), re-enable the host interrupts
12486 		 * which were disabled in dhdpcie_bus_isr()
12487 		 */
12488 		if ((dhdpcie_irq_disabled(bus)) && (!dhd_query_bus_erros(bus->dhd))) {
12489 			dhdpcie_enable_irq(bus); /* Enable back interrupt!! */
12490 			bus->dpc_intr_enable_count++;
12491 		}
12492 #endif /* CHIP_INTR_CONTROL */
12493 		bus->dpc_exit_time = OSL_LOCALTIME_NS();
12494 	} else {
12495 		bus->resched_dpc_time = OSL_LOCALTIME_NS();
12496 	}
12497 
12498 	bus->dpc_sched = resched;
12499 #ifdef DHD_FLOW_RING_STATUS_TRACE
12500 	if (bus->dhd->dma_h2d_ring_upd_support && bus->dhd->dma_d2h_ring_upd_support &&
12501 			(bus->dhd->ring_attached == TRUE)) {
12502 		dhd_bus_flow_ring_status_dpc_trace(bus->dhd);
12503 	}
12504 #endif /* DHD_FLOW_RING_STATUS_TRACE */
12505 
12506 exit:
12507 	DHD_GENERAL_LOCK(bus->dhd, flags);
12508 	DHD_BUS_BUSY_CLEAR_IN_DPC(bus->dhd);
12509 	dhd_os_busbusy_wake(bus->dhd);
12510 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
12511 
12512 	return resched;
12513 
12514 }
12515 
12516 int
12517 dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data)
12518 {
12519 	uint32 cur_h2d_mb_data = 0;
12520 
12521 	if (bus->is_linkdown) {
12522 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
12523 		return BCME_ERROR;
12524 	}
12525 
12526 	DHD_PCIE_INFO(("%s: H2D_MB_DATA: 0x%08X\n", __FUNCTION__, h2d_mb_data));
12527 
12528 #ifdef PCIE_INB_DW
12529 	if (h2d_mb_data == H2D_HOST_DS_ACK) {
12530 		dhdpcie_set_dongle_deepsleep(bus, TRUE);
12531 	}
12532 	dhd_bus_ds_trace(bus, h2d_mb_data, FALSE, dhdpcie_bus_get_pcie_inband_dw_state(bus));
12533 #else
12534 	dhd_bus_ds_trace(bus, h2d_mb_data, FALSE);
12535 #endif /* PCIE_INB_DW */
12536 	if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !bus->use_mailbox) {
12537 		DHD_PCIE_INFO(("API rev is >= 6, sending mb data as H2D Ctrl message"
12538 				" to dongle, 0x%04x\n", h2d_mb_data));
12539 		/* Prevent asserting device_wake during doorbell ring for mb data to avoid loop. */
12540 #ifdef PCIE_OOB
12541 		bus->oob_enabled = FALSE;
12542 #endif /* PCIE_OOB */
12543 		/* XXX: check the error return value here... */
12544 		if (dhd_prot_h2d_mbdata_send_ctrlmsg(bus->dhd, h2d_mb_data)) {
12545 			DHD_ERROR(("failure sending the H2D Mailbox message "
12546 					"to firmware\n"));
12547 			goto fail;
12548 		}
12549 #ifdef PCIE_OOB
12550 		bus->oob_enabled = TRUE;
12551 #endif /* PCIE_OOB */
12552 		goto done;
12553 	}
12554 
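	/* Legacy path (API rev < 6, or mailbox explicitly in use): write the
	 * mb data word into the shared area and ring the device mailbox
	 * interrupt. If the previous word has not been consumed yet, poll up
	 * to 100 x 10 us = 1 ms for the dongle to ack it before overwriting.
	 */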
12555 	dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0);
12556 
12557 	if (cur_h2d_mb_data != 0) {
12558 		uint32 i = 0;
12559 		DHD_PCIE_INFO(("%s: GRRRRRRR: MB transaction is already pending 0x%04x\n",
12560 			 __FUNCTION__, cur_h2d_mb_data));
12561 		/* XXX: start a zero length timer to keep checking this to be zero */
12562 		while ((i++ < 100) && cur_h2d_mb_data) {
12563 			OSL_DELAY(10);
12564 			dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0);
12565 		}
12566 		if (i >= 100) {
12567 			DHD_ERROR(("%s : waited 1ms for the dngl "
12568 				"to ack the previous mb transaction\n", __FUNCTION__));
12569 			DHD_ERROR(("%s : MB transaction is still pending 0x%04x\n",
12570 				__FUNCTION__, cur_h2d_mb_data));
12571 		}
12572 	}
12573 
12574 	dhd_bus_cmn_writeshared(bus, &h2d_mb_data, sizeof(uint32), H2D_MB_DATA, 0);
12575 	dhd_bus_gen_devmb_intr(bus);
12576 
12577 done:
12578 	if (h2d_mb_data == H2D_HOST_D3_INFORM) {
12579 		DHD_INFO_HW4(("%s: send H2D_HOST_D3_INFORM to dongle\n", __FUNCTION__));
12580 		bus->last_d3_inform_time = OSL_LOCALTIME_NS();
12581 		bus->d3_inform_cnt++;
12582 	}
12583 	if (h2d_mb_data == H2D_HOST_D0_INFORM_IN_USE) {
12584 		DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM_IN_USE to dongle\n", __FUNCTION__));
12585 		bus->d0_inform_in_use_cnt++;
12586 	}
12587 	if (h2d_mb_data == H2D_HOST_D0_INFORM) {
12588 		DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM to dongle\n", __FUNCTION__));
12589 		bus->d0_inform_cnt++;
12590 	}
12591 	return BCME_OK;
12592 fail:
12593 	return BCME_ERROR;
12594 }
12595 
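/* Handle a D3 ACK from the dongle: disable dongle and host interrupts,
 * mark the bus low-power state as D3-acked, and wake the suspend path
 * waiting in dhd_os_d3ack_wake() (unless a D3 ACK timeout is being
 * deliberately induced for test purposes).
 */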
12596 static void
12597 dhd_bus_handle_d3_ack(dhd_bus_t *bus)
12598 {
12599 	bus->suspend_intr_disable_count++;
12600 	/* Disable dongle Interrupts Immediately after D3 */
12601 
12602 	/* For Linux, MacOS, etc. (other than NDIS), along with disabling
12603 	 * the dongle interrupt by clearing the IntMask, directly disable
12604 	 * the interrupt from the host side as well. Also clear the intstatus
12605 	 * if it is set, to avoid unnecessary interrupts after D3 ACK.
12606 	 */
12607 	dhdpcie_bus_intr_disable(bus); /* Disable interrupt using IntMask!! */
12608 	dhdpcie_bus_clear_intstatus(bus);
12609 #ifndef NDIS /* !NDIS */
12610 	dhdpcie_disable_irq_nosync(bus); /* Disable host interrupt!! */
12611 #endif /* !NDIS */
12612 
12613 	DHD_SET_BUS_LPS_D3_ACKED(bus);
12614 	DHD_ERROR(("%s: D3_ACK Received\n", __FUNCTION__));
12615 
12616 	if (bus->dhd->dhd_induce_error == DHD_INDUCE_D3_ACK_TIMEOUT) {
12617 		/* Force bus_low_power_state back to D3_INFORM_SENT to simulate a D3 ACK timeout */
12618 		DHD_ERROR(("%s: Due to d3ack induce error forcefully set "
12619 		"bus_low_power_state to DHD_BUS_D3_INFORM_SENT\n", __FUNCTION__));
12620 		DHD_SET_BUS_LPS_D3_INFORMED(bus);
12621 	}
12622 	/* Check for the D3 ACK induce flag, which is set by firing a dhd iovar to induce a D3 ACK
12623 	 * timeout. If the flag is set, the D3 wake is skipped, which results in a D3 ACK timeout.
12624 	 */
12625 	if (bus->dhd->dhd_induce_error != DHD_INDUCE_D3_ACK_TIMEOUT) {
12626 		bus->wait_for_d3_ack = 1;
12627 		dhd_os_d3ack_wake(bus->dhd);
12628 	} else {
12629 		DHD_ERROR(("%s: Inducing D3 ACK timeout\n", __FUNCTION__));
12630 	}
12631 }
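/* Dispatch D2H mailbox data: D2H_DEV_FWHALT (firmware trap, including the
 * DB7-induced variant), D2H_DEV_DS_ENTER_REQ / D2H_DEV_DS_EXIT_NOTE (deep
 * sleep handshake), D2HMB_DS_HOST_SLEEP_EXIT_ACK (D0 ack) and
 * D2H_DEV_D3_ACK (suspend handshake).
 */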
12632 void
12633 dhd_bus_handle_mb_data(dhd_bus_t *bus, uint32 d2h_mb_data)
12634 {
12635 #ifdef PCIE_INB_DW
12636 	unsigned long flags = 0;
12637 #endif /* PCIE_INB_DW */
12638 	if (MULTIBP_ENAB(bus->sih)) {
12639 		dhd_bus_pcie_pwr_req(bus);
12640 	}
12641 
12642 	DHD_PCIE_INFO(("D2H_MB_DATA: 0x%04x\n", d2h_mb_data));
12643 #ifdef PCIE_INB_DW
12644 	DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
12645 	dhd_bus_ds_trace(bus, d2h_mb_data, TRUE, dhdpcie_bus_get_pcie_inband_dw_state(bus));
12646 	DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
12647 #else
12648 	dhd_bus_ds_trace(bus, d2h_mb_data, TRUE);
12649 #endif /* PCIE_INB_DW */
12650 
12651 	if (d2h_mb_data & D2H_DEV_FWHALT) {
12652 		if (bus->dhd->db7_trap.fw_db7w_trap_inprogress == FALSE) {
12653 			DHD_ERROR(("FW trap has happened, dongle_trap_data 0x%8x\n",
12654 				bus->dhd->dongle_trap_data));
12655 		}
12656 
12657 		if (bus->dhd->dongle_trap_data & D2H_DEV_TRAP_HOSTDB) {
12658 			uint64 db7_dur;
12659 
12660 			bus->dhd->db7_trap.debug_db7_trap_time = OSL_LOCALTIME_NS();
12661 			bus->dhd->db7_trap.debug_db7_trap_cnt++;
12662 			db7_dur = bus->dhd->db7_trap.debug_db7_trap_time -
12663 				bus->dhd->db7_trap.debug_db7_send_time;
12664 			if (db7_dur > bus->dhd->db7_trap.debug_max_db7_dur) {
12665 				bus->dhd->db7_trap.debug_max_db7_send_time =
12666 					bus->dhd->db7_trap.debug_db7_send_time;
12667 				bus->dhd->db7_trap.debug_max_db7_trap_time =
12668 					bus->dhd->db7_trap.debug_db7_trap_time;
12669 			}
12670 			bus->dhd->db7_trap.debug_max_db7_dur =
12671 				MAX(bus->dhd->db7_trap.debug_max_db7_dur, db7_dur);
12672 			if (bus->dhd->db7_trap.fw_db7w_trap_inprogress == FALSE) {
12673 				bus->dhd->db7_trap.debug_db7_timing_error_cnt++;
12674 			}
12675 		} else {
12676 			dhdpcie_checkdied(bus, NULL, 0);
12677 #ifdef BCM_ROUTER_DHD
12678 			dhdpcie_handle_dongle_trap(bus);
12679 #endif
12680 #ifdef OEM_ANDROID
12681 #ifdef SUPPORT_LINKDOWN_RECOVERY
12682 #ifdef CONFIG_ARCH_MSM
12683 			bus->no_cfg_restore = 1;
12684 #endif /* CONFIG_ARCH_MSM */
12685 #endif /* SUPPORT_LINKDOWN_RECOVERY */
12686 			dhd_os_check_hang(bus->dhd, 0, -EREMOTEIO);
12687 #endif /* OEM_ANDROID */
12688 		}
12689 		if (bus->dhd->db7_trap.fw_db7w_trap_inprogress) {
12690 			bus->dhd->db7_trap.fw_db7w_trap_inprogress = FALSE;
12691 			bus->dhd->dongle_trap_occured = TRUE;
12692 		}
12693 		goto exit;
12694 	}
12695 	if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ)  {
12696 		bool ds_acked = FALSE;
12697 		BCM_REFERENCE(ds_acked);
12698 		if (__DHD_CHK_BUS_LPS_D3_ACKED(bus)) {
12699 			DHD_ERROR(("DS-ENTRY AFTER D3-ACK!!!!! QUITTING\n"));
12700 			DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
12701 			bus->dhd->busstate = DHD_BUS_DOWN;
12702 			goto exit;
12703 		}
12704 		/* what should we do */
12705 		DHD_PCIE_INFO(("D2H_MB_DATA: DEEP SLEEP REQ\n"));
12706 #ifdef PCIE_INB_DW
12707 		if (INBAND_DW_ENAB(bus)) {
12708 			/* As per inband state machine, host should not send DS-ACK
12709 			 * during suspend or suspend in progress, instead D3 inform will be sent.
12710 			 */
12711 			if (!bus->skip_ds_ack) {
12712 				DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
12713 				if (dhdpcie_bus_get_pcie_inband_dw_state(bus)
12714 					== DW_DEVICE_DS_ACTIVE) {
12715 					dhdpcie_bus_set_pcie_inband_dw_state(bus,
12716 						DW_DEVICE_DS_DEV_SLEEP_PEND);
12717 					if (bus->host_active_cnt == 0) {
12718 						dhdpcie_bus_set_pcie_inband_dw_state(bus,
12719 							DW_DEVICE_DS_DEV_SLEEP);
12720 						dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
12721 						ds_acked = TRUE;
12722 						DHD_PCIE_INFO(("D2H_MB_DATA: sent DEEP SLEEP "
12723 							"ACK to DNGL\n"));
12724 					} else {
12725 						DHD_PCIE_INFO(("%s: Host is active, "
12726 							"skip sending DS-ACK. "
12727 							"host_active_cnt is %d\n",
12728 							__FUNCTION__, bus->host_active_cnt));
12729 					}
12730 				}
12731 				/* Currently DW_DEVICE_HOST_SLEEP_WAIT is set only
12732 				 * under dhd_bus_suspend() function.
12733 				 */
12734 				else if (dhdpcie_bus_get_pcie_inband_dw_state(bus)
12735 				== DW_DEVICE_HOST_SLEEP_WAIT) {
12736 					DHD_ERROR(("%s: DS-ACK not sent due to suspend "
12737 						"in progress\n", __FUNCTION__));
12738 				} else {
12739 					DHD_ERROR_RLMT(("%s: Failed to send DS-ACK, DS state is %d",
12740 						__FUNCTION__,
12741 						dhdpcie_bus_get_pcie_inband_dw_state(bus)));
12742 				}
12743 				DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
12744 				dhd_os_ds_enter_wake(bus->dhd);
12745 			} else {
12746 				DHD_PCIE_INFO(("%s: Skip DS-ACK due to "
12747 					"suspend in progress\n", __FUNCTION__));
12748 			}
12749 #ifdef DHD_EFI
12750 			if (ds_acked && !bus->ds_enabled) {
12751 				/* if 'deep_sleep' is disabled, then need to again assert DW
12752 				* from here once we have acked the DS_ENTER_REQ, so that
12753 				* dongle stays awake and honours the user iovar request.
12754 				* Note, that this code will be hit only for the pcie_suspend/resume
12755 				* case with 'deep_sleep' disabled, and will not get executed in
12756 				* the normal path either when 'deep_sleep' is enabled (default)
12757 				* or when 'deep_sleep' is disabled, because if 'deep_sleep' is
12758 				* disabled, then by definition, dongle will not send DS_ENTER_REQ
12759 				* except in the case of D0 -> D3 -> D0 transitions, which is what
12760 				* is being handled here.
12761 				*/
12762 				dhd_bus_inb_set_device_wake(bus, TRUE);
12763 			}
12764 #endif /* DHD_EFI */
12765 		} else
12766 #endif /* PCIE_INB_DW */
12767 		{
12768 			dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
12769 			DHD_PCIE_INFO(("D2H_MB_DATA: sent DEEP SLEEP ACK\n"));
12770 		}
12771 	}
12772 	if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE)  {
12773 #ifdef PCIE_INB_DW
12774 		if (INBAND_DW_ENAB(bus)) {
12775 			if (bus->calc_ds_exit_latency) {
12776 				bus->ds_exit_ts2 = OSL_SYSUPTIME_US();
12777 				if (bus->ds_exit_ts2 > bus->ds_exit_ts1 &&
12778 						bus->ds_exit_ts1 != 0)
12779 					bus->ds_exit_latency = bus->ds_exit_ts2 - bus->ds_exit_ts1;
12780 				else
12781 					bus->ds_exit_latency = 0;
12782 			}
12783 		}
12784 #endif /* PCIE_INB_DW */
12785 		/* what should we do */
12786 		DHD_PCIE_INFO(("D2H_MB_DATA: DEEP SLEEP EXIT\n"));
12787 #ifdef PCIE_INB_DW
12788 		if (INBAND_DW_ENAB(bus)) {
12789 			DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
12790 			if (dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
12791 					DW_DEVICE_DS_DISABLED_WAIT) {
12792 				/* wake up only if someone is waiting in
12793 				* DW_DEVICE_DS_DISABLED_WAIT state;
12794 				* in this case the waiter will change the state
12795 				* to DW_DEVICE_DS_DEV_WAKE
12796 				*/
12797 				bus->inband_ds_exit_host_cnt++;
12798 				/* To synchronize with the previous memory operations call wmb() */
12799 				OSL_SMP_WMB();
12800 				bus->wait_for_ds_exit = 1;
12801 				/* Call another wmb() to make sure before waking up the
12802 				 * other event value gets updated.
12803 				 */
12804 				OSL_SMP_WMB();
12805 				dhdpcie_bus_set_pcie_inband_dw_state(bus,
12806 					DW_DEVICE_DS_DEV_WAKE);
12807 				DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
12808 				dhd_os_ds_exit_wake(bus->dhd);
12809 			} else if (dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
12810 					DW_DEVICE_DS_DEV_SLEEP) {
12811 				DHD_PCIE_INFO(("recvd unsolicited DS-EXIT"
12812 						" from dongle in DEV_SLEEP\n"));
12813 				/*
12814 				* unsolicited state change to DW_DEVICE_DS_DEV_WAKE if
12815 				* D2H_DEV_DS_EXIT_NOTE is received in DW_DEVICE_DS_DEV_SLEEP state.
12816 				* This is needed when the dongle is woken by external events like
12817 				* WOW, ping, etc.
12818 				*/
12819 				bus->inband_ds_exit_device_cnt++;
12820 				dhdpcie_bus_set_pcie_inband_dw_state(bus,
12821 					DW_DEVICE_DS_DEV_WAKE);
12822 				DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
12823 			} else {
12824 				DHD_PCIE_INFO(("D2H_MB_DATA: not in"
12825 						" DS_DISABLED_WAIT/DS_DEV_SLEEP\n"));
12826 				bus->inband_ds_exit_host_cnt++;
12827 				DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
12828 			}
12829 			/*
12830 			 * bus->deep_sleep is TRUE by default. deep_sleep is set to FALSE when the
12831 			 * dhd iovar deep_sleep is fired with value 0 (a user request not to enter
12832 			 * deep sleep). So do not attempt to go to deep sleep when the user has
12833 			 * explicitly asked not to. bus->deep_sleep is set back to
12834 			 * TRUE when the dhd iovar deep_sleep is fired with value 1.
12835 			 */
12836 			if (bus->deep_sleep) {
12837 				dhd_bus_set_device_wake(bus, FALSE);
12838 				dhdpcie_set_dongle_deepsleep(bus, FALSE);
12839 			}
12840 		}
12841 #endif /* PCIE_INB_DW */
12842 	}
12843 	if (d2h_mb_data & D2HMB_DS_HOST_SLEEP_EXIT_ACK)  {
12844 		/* what should we do */
12845 		DHD_PCIE_INFO(("D2H_MB_DATA: D0 ACK\n"));
12846 #ifdef PCIE_INB_DW
12847 		if (INBAND_DW_ENAB(bus)) {
12848 			DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
12849 			if (dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
12850 				DW_DEVICE_HOST_WAKE_WAIT) {
12851 				dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_DS_ACTIVE);
12852 			}
12853 			DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
12854 		}
12855 #endif /* PCIE_INB_DW */
12856 	}
12857 	if (d2h_mb_data & D2H_DEV_D3_ACK)  {
12858 		/* what should we do */
12859 		DHD_INFO_HW4(("D2H_MB_DATA: D3 ACK\n"));
12860 		if (!bus->wait_for_d3_ack) {
12861 #if defined(DHD_HANG_SEND_UP_TEST)
12862 			if (bus->dhd->req_hang_type == HANG_REASON_D3_ACK_TIMEOUT) {
12863 				DHD_ERROR(("TEST HANG: Skip to process D3 ACK\n"));
12864 			} else {
12865 				dhd_bus_handle_d3_ack(bus);
12866 			}
12867 #else /* DHD_HANG_SEND_UP_TEST */
12868 			dhd_bus_handle_d3_ack(bus);
12869 #endif /* DHD_HANG_SEND_UP_TEST */
12870 		}
12871 	}
12872 
12873 exit:
12874 	if (MULTIBP_ENAB(bus->sih)) {
12875 		dhd_bus_pcie_pwr_req_clear(bus);
12876 	}
12877 }
12878 
12879 static void
12880 dhdpcie_handle_mb_data(dhd_bus_t *bus)
12881 {
12882 	uint32 d2h_mb_data = 0;
12883 	uint32 zero = 0;
12884 
12885 	if (MULTIBP_ENAB(bus->sih)) {
12886 		dhd_bus_pcie_pwr_req(bus);
12887 	}
12888 
12889 	if (bus->is_linkdown) {
12890 		DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
12891 		return;
12892 	}
12893 
12894 	dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0);
12895 	if (D2H_DEV_MB_INVALIDATED(d2h_mb_data)) {
12896 		DHD_ERROR(("%s: Invalid D2H_MB_DATA: 0x%08x\n",
12897 			__FUNCTION__, d2h_mb_data));
12898 		goto exit;
12899 	}
12900 
12901 	dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0);
12902 
12903 	DHD_INFO_HW4(("%s: D2H_MB_DATA: 0x%04x\n", __FUNCTION__, d2h_mb_data));
12904 	if (d2h_mb_data & D2H_DEV_FWHALT)  {
12905 		DHD_ERROR(("FW trap has happened\n"));
12906 		dhdpcie_checkdied(bus, NULL, 0);
12907 		/* not ready yet dhd_os_ind_firmware_stall(bus->dhd); */
12908 #ifdef BCM_ROUTER_DHD
12909 		dhdpcie_handle_dongle_trap(bus);
12910 #endif
12911 		goto exit;
12912 	}
12913 	if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ)  {
12914 		/* what should we do */
12915 		DHD_PCIE_INFO(("%s: D2H_MB_DATA: DEEP SLEEP REQ\n", __FUNCTION__));
12916 		dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
12917 		DHD_PCIE_INFO(("%s: D2H_MB_DATA: sent DEEP SLEEP ACK\n", __FUNCTION__));
12918 	}
12919 	if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE)  {
12920 		/* what should we do */
12921 		DHD_PCIE_INFO(("%s: D2H_MB_DATA: DEEP SLEEP EXIT\n", __FUNCTION__));
12922 	}
12923 	if (d2h_mb_data & D2H_DEV_D3_ACK)  {
12924 		/* what should we do */
12925 		DHD_INFO_HW4(("%s: D2H_MB_DATA: D3 ACK\n", __FUNCTION__));
12926 		if (!bus->wait_for_d3_ack) {
12927 #if defined(DHD_HANG_SEND_UP_TEST)
12928 			if (bus->dhd->req_hang_type == HANG_REASON_D3_ACK_TIMEOUT) {
12929 				DHD_ERROR(("TEST HANG: Skip to process D3 ACK\n"));
12930 			} else {
12931 				dhd_bus_handle_d3_ack(bus);
12932 			}
12933 #else /* DHD_HANG_SEND_UP_TEST */
12934 			dhd_bus_handle_d3_ack(bus);
12935 #endif /* DHD_HANG_SEND_UP_TEST */
12936 		}
12937 	}
12938 
12939 exit:
12940 	if (MULTIBP_ENAB(bus->sih)) {
12941 		dhd_bus_pcie_pwr_req_clear(bus);
12942 	}
12943 }
12944 
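/* Newer read-and-dispatch variant: fetch D2H_MB_DATA, clear it in shared
 * memory, then delegate all bit handling to dhd_bus_handle_mb_data().
 */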
12945 static void
12946 dhdpcie_read_handle_mb_data(dhd_bus_t *bus)
12947 {
12948 	uint32 d2h_mb_data = 0;
12949 	uint32 zero = 0;
12950 
12951 	if (MULTIBP_ENAB(bus->sih)) {
12952 		dhd_bus_pcie_pwr_req(bus);
12953 	}
12954 
12955 	dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0);
12956 	if (!d2h_mb_data) {
12957 		goto exit;
12958 	}
12959 
12960 	dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0);
12961 
12962 	dhd_bus_handle_mb_data(bus, d2h_mb_data);
12963 
12964 exit:
12965 	if (MULTIBP_ENAB(bus->sih)) {
12966 		dhd_bus_pcie_pwr_req_clear(bus);
12967 	}
12968 }
12969 
12970 #define DHD_SCHED_RETRY_DPC_DELAY_MS 100u
12971 
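/* With INTx and DMA'd D2H ring indices, the interrupt can race ahead of
 * the index DMA landing in host memory, so a DPC pass may find no work
 * even though completions are pending. To cover that window, schedule a
 * delayed DPC (DHD_SCHED_RETRY_DPC_DELAY_MS) unless the DPC was invoked
 * from non-ISR context or a D3 ACK has already been received.
 */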
12972 static void
12973 dhd_bus_handle_intx_ahead_dma_indices(dhd_bus_t *bus)
12974 {
12975 	if (bus->d2h_intr_method == PCIE_MSI) {
12976 		DHD_PCIE_INFO(("%s: not required for msi\n", __FUNCTION__));
12977 		return;
12978 	}
12979 
12980 	if (bus->dhd->dma_d2h_ring_upd_support == FALSE) {
12981 		DHD_PCIE_INFO(("%s: not required for non-dma-indices\n", __FUNCTION__));
12982 		return;
12983 	}
12984 
12985 	if (dhd_query_bus_erros(bus->dhd)) {
12986 		return;
12987 	}
12988 
12989 #ifndef NDIS
12990 	/*
12991 	 * Skip the delayed dpc if the tasklet was scheduled from non-ISR context.
12992 	 * From ISR context, we disable the IRQ and re-enable it at the end of the dpc;
12993 	 * hence if the IRQ is not disabled, the tasklet was scheduled from non-ISR context.
12994 	 */
12995 	if (dhdpcie_irq_disabled(bus) == FALSE) {
12996 		DHD_PCIE_INFO(("%s: skip delayed dpc as tasklet is scheduled from non isr\n",
12997 			__FUNCTION__));
12998 		return;
12999 	}
13000 #endif /* NDIS */
13001 
13002 	if (DHD_CHK_BUS_LPS_D3_ACKED(bus)) {
13003 		DHD_PCIE_INFO(("%s: skip delayed dpc as d3 ack is received\n", __FUNCTION__));
13004 		return;
13005 	}
13006 
13007 	dhd_schedule_delayed_dpc_on_dpc_cpu(bus->dhd, DHD_SCHED_RETRY_DPC_DELAY_MS);
13008 	return;
13009 }
13010 
13011 static bool
13012 dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus)
13013 {
13014 	bool resched = FALSE;
13015 
13016 	if (MULTIBP_ENAB(bus->sih)) {
13017 		dhd_bus_pcie_pwr_req(bus);
13018 	}
13019 	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
13020 		(bus->sih->buscorerev == 4)) {
13021 		/* Msg stream interrupt */
13022 		if (intstatus & I_BIT1) {
13023 			resched = dhdpci_bus_read_frames(bus);
13024 		} else if (intstatus & I_BIT0) {
13025 			/* do nothing for now */
13026 		}
13027 	} else {
13028 		if (intstatus & (PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1))
13029 			bus->api.handle_mb_data(bus);
13030 
13031 		/* The fact that we are here implies that dhdpcie_bus_intstatus()
13032 		* returned a non-zero status after applying the current mask.
13033 		* No further check is required; in fact bus->intstatus can be eliminated.
13034 		* Both bus->intstatus and bus->intdis are shared between the isr and dpc.
13035 		*/
13036 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
13037 		if (pm_runtime_get(dhd_bus_to_dev(bus)) >= 0) {
13038 			resched = dhdpci_bus_read_frames(bus);
13039 			pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
13040 			pm_runtime_put_autosuspend(dhd_bus_to_dev(bus));
13041 		}
13042 #else
13043 		resched = dhdpci_bus_read_frames(bus);
13044 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
13045 	}
13046 
13047 	dhd_bus_handle_intx_ahead_dma_indices(bus);
13048 
13049 	if (MULTIBP_ENAB(bus->sih)) {
13050 		dhd_bus_pcie_pwr_req_clear(bus);
13051 	}
13052 	return resched;
13053 }
13054 
13055 #if defined(DHD_H2D_LOG_TIME_SYNC)
13056 static void
13057 dhdpci_bus_rte_log_time_sync_poll(dhd_bus_t *bus)
13058 {
13059 	unsigned long time_elapsed;
13060 
13061 	/* Poll for timeout value periodically */
13062 	if ((bus->dhd->busstate == DHD_BUS_DATA) &&
13063 		(bus->dhd->dhd_rte_time_sync_ms != 0) &&
13064 		DHD_CHK_BUS_NOT_IN_LPS(bus)) {
13065 		/*
13066 		 * XXX OSL_SYSUPTIME_US() overflow should not happen.
13067 		 * As it is an unsigned 64-bit value (max 18446744073709551615),
13068 		 * it would take about 213,503,982 days (over 584,000 years) to overflow
13069 		 */
13070 		time_elapsed = OSL_SYSUPTIME_US() - bus->dhd_rte_time_sync_count;
13071 		/* The comparison time is in milliseconds */
13072 		if ((time_elapsed / 1000) >= bus->dhd->dhd_rte_time_sync_ms) {
13073 			/*
13074 			 * It's fine if it has crossed the timeout value; no need to adjust the
13075 			 * elapsed time
13076 			 */
13077 			bus->dhd_rte_time_sync_count += time_elapsed;
13078 
13079 			/* Schedule deferred work. The work function will send the IOVAR. */
13080 			dhd_h2d_log_time_sync_deferred_wq_schedule(bus->dhd);
13081 		}
13082 	}
13083 }
13084 #endif /* DHD_H2D_LOG_TIME_SYNC */
13085 
13086 static bool
13087 dhdpci_bus_read_frames(dhd_bus_t *bus)
13088 {
13089 	bool more = FALSE;
13090 
	/* First check if there is a FW trap */
13092 	if ((bus->api.fw_rev >= PCIE_SHARED_VERSION_6) &&
13093 		(bus->dhd->dongle_trap_data = dhd_prot_process_trapbuf(bus->dhd))) {
13094 #ifdef DNGL_AXI_ERROR_LOGGING
13095 		if (bus->dhd->axi_error) {
13096 			DHD_ERROR(("AXI Error happened\n"));
13097 			return FALSE;
13098 		}
13099 #endif /* DNGL_AXI_ERROR_LOGGING */
13100 		dhd_bus_handle_mb_data(bus, D2H_DEV_FWHALT);
13101 		return FALSE;
13102 	}
13103 
13104 	if (dhd_query_bus_erros(bus->dhd)) {
		DHD_ERROR(("%s: detected bus errors, hence do not process msg rings\n",
			__FUNCTION__));
13107 		return FALSE;
13108 	}
13109 #ifdef DHD_DMA_INDICES_SEQNUM
13110 	dhd_prot_save_dmaidx(bus->dhd);
13111 #endif /* DHD_DMA_INDICES_SEQNUM */
13112 	/* There may be frames in both ctrl buf and data buf; check ctrl buf first */
13113 	dhd_prot_process_ctrlbuf(bus->dhd);
13114 	bus->last_process_ctrlbuf_time = OSL_LOCALTIME_NS();
13115 
13116 	/* Do not process rest of ring buf once bus enters low power state (D3_INFORM/D3_ACK) */
13117 	if (DHD_CHK_BUS_IN_LPS(bus)) {
13118 		DHD_ERROR(("%s: Bus is in power save state (%d). "
13119 			"Skip processing rest of ring buffers.\n",
13120 			__FUNCTION__, bus->bus_low_power_state));
13121 		return FALSE;
13122 	}
13123 
13124 	/* update the flow ring cpls */
13125 	dhd_update_txflowrings(bus->dhd);
13126 	bus->last_process_flowring_time = OSL_LOCALTIME_NS();
13127 
	/* With heavy TX traffic, we could get a lot of TxStatus,
	 * so add a bound
	 */
13131 #ifdef DHD_HP2P
13132 	more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound, DHD_HP2P_RING);
13133 #endif /* DHD_HP2P */
13134 	more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound, DHD_REGULAR_RING);
13135 	bus->last_process_txcpl_time = OSL_LOCALTIME_NS();
13136 
	/* With heavy RX traffic, this routine could potentially spend a long
	 * time processing RX frames, hence the RX bound applied here
	 */
13140 #ifdef DHD_HP2P
13141 	more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound, DHD_HP2P_RING);
13142 #endif /* DHD_HP2P */
13143 	more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound, DHD_REGULAR_RING);
13144 	bus->last_process_rxcpl_time = OSL_LOCALTIME_NS();
13145 
13146 	/* Process info ring completion messages */
13147 #ifdef EWP_EDL
13148 	if (!bus->dhd->dongle_edl_support)
13149 #endif
13150 	{
13151 		more |= dhd_prot_process_msgbuf_infocpl(bus->dhd, DHD_INFORING_BOUND);
13152 		bus->last_process_infocpl_time = OSL_LOCALTIME_NS();
13153 	}
13154 #ifdef EWP_EDL
13155 	else {
13156 		more |= dhd_prot_process_msgbuf_edl(bus->dhd);
13157 		bus->last_process_edl_time = OSL_LOCALTIME_NS();
13158 	}
13159 #endif /* EWP_EDL */
13160 
13161 #ifdef BCMINTERNAL
13162 #ifdef DHD_FWTRACE
13163 	/* Handle the firmware trace data in the logtrace kernel thread */
13164 	dhd_event_logtrace_enqueue_fwtrace(bus->dhd);
13165 #endif /* DHD_FWTRACE */
13166 #endif /* BCMINTERNAL */
13167 
13168 #ifdef BTLOG
	/* Process BT log ring completion messages */
13170 	more |= dhd_prot_process_msgbuf_btlogcpl(bus->dhd, DHD_BTLOGRING_BOUND);
13171 #endif	/* BTLOG */
13172 
13173 #ifdef IDLE_TX_FLOW_MGMT
13174 	if (bus->enable_idle_flowring_mgmt) {
13175 		/* Look for idle flow rings */
13176 		dhd_bus_check_idle_scan(bus);
13177 	}
13178 #endif /* IDLE_TX_FLOW_MGMT */
13179 
13180 	/* don't talk to the dongle if fw is about to be reloaded */
13181 	if (bus->dhd->hang_was_sent) {
13182 		more = FALSE;
13183 	}
13184 
13185 #ifdef SUPPORT_LINKDOWN_RECOVERY
	/* XXX : It seems that a link down can occur without notification;
	 *       in case reading shared memory failed, a recovery hang is needed
	 */
13189 	if (bus->read_shm_fail) {
13190 		/* Read interrupt state once again to confirm linkdown */
13191 		int intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
13192 			bus->pcie_mailbox_int, 0, 0);
13193 		if (intstatus != (uint32)-1) {
13194 			DHD_ERROR(("%s: read SHM failed but intstatus is valid\n", __FUNCTION__));
13195 #ifdef DHD_FW_COREDUMP
13196 			if (bus->dhd->memdump_enabled) {
13197 				DHD_OS_WAKE_LOCK(bus->dhd);
13198 				bus->dhd->memdump_type = DUMP_TYPE_READ_SHM_FAIL;
13199 				dhd_bus_mem_dump(bus->dhd);
13200 				DHD_OS_WAKE_UNLOCK(bus->dhd);
13201 			}
13202 #endif /* DHD_FW_COREDUMP */
13203 		} else {
13204 			DHD_ERROR(("%s: Link is Down.\n", __FUNCTION__));
13205 #ifdef CONFIG_ARCH_MSM
13206 			bus->no_cfg_restore = 1;
13207 #endif /* CONFIG_ARCH_MSM */
13208 			bus->is_linkdown = 1;
13209 		}
13210 
		/* XXX The dhd_prot_debug_info_print() function *has* to be
		 * invoked only after bus->is_linkdown is updated, so that
		 * the host does not read any PCIe registers when the
		 * PCIe link is down.
		 */
13216 		dhd_prot_debug_info_print(bus->dhd);
13217 		bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN_EP_DETECT;
13218 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
13219 		copy_hang_info_linkdown(bus->dhd);
13220 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
13221 		dhd_os_send_hang_message(bus->dhd);
13222 		more = FALSE;
13223 	}
13224 #endif /* SUPPORT_LINKDOWN_RECOVERY */
13225 #if defined(DHD_H2D_LOG_TIME_SYNC)
13226 	dhdpci_bus_rte_log_time_sync_poll(bus);
13227 #endif /* DHD_H2D_LOG_TIME_SYNC */
13228 	return more;
13229 }
13230 
13231 bool
13232 dhdpcie_tcm_valid(dhd_bus_t *bus)
13233 {
13234 	uint32 addr = 0;
13235 	int rv;
13236 	uint32 shaddr = 0;
13237 	pciedev_shared_t sh;
13238 
13239 	shaddr = bus->dongle_ram_base + bus->ramsize - 4;
13240 
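	/*
	 * How the shared structure is located (a sketch; addresses are
	 * illustrative): the dongle firmware publishes the address of its
	 * pciedev_shared_t by writing it into the last 32-bit word of dongle
	 * RAM, so the host can find the structure without a fixed link-time
	 * address.
	 *
	 *   dongle_ram_base                    dongle_ram_base + ramsize
	 *     |                                                 |
	 *     [ fw image / heap ... pciedev_shared_t ... ][ addr ]
	 *                           ^                         |
	 *                           +-------- last word ------+
	 */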
13241 	/* Read last word in memory to determine address of pciedev_shared structure */
13242 	addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
13243 
13244 	if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
13245 		(addr > shaddr)) {
		DHD_ERROR(("%s: address (0x%08x) of pciedev_shared is invalid\n",
			__FUNCTION__, addr));
13248 		return FALSE;
13249 	}
13250 
	/* Read the pciedev_shared structure */
13252 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&sh,
13253 		sizeof(pciedev_shared_t))) < 0) {
13254 		DHD_ERROR(("Failed to read PCIe shared struct with %d\n", rv));
13255 		return FALSE;
13256 	}
13257 
13258 	/* Compare any field in pciedev_shared_t */
13259 	if (sh.console_addr != bus->pcie_sh->console_addr) {
13260 		DHD_ERROR(("Contents of pciedev_shared_t structure are not matching.\n"));
13261 		return FALSE;
13262 	}
13263 
13264 	return TRUE;
13265 }
13266 
13267 static void
13268 dhdpcie_update_bus_api_revisions(uint32 firmware_api_version, uint32 host_api_version)
13269 {
13270 	snprintf(bus_api_revision, BUS_API_REV_STR_LEN, "\nBus API revisions:(FW rev%d)(DHD rev%d)",
13271 			firmware_api_version, host_api_version);
13272 	return;
13273 }
13274 
13275 static bool
13276 dhdpcie_check_firmware_compatible(uint32 firmware_api_version, uint32 host_api_version)
13277 {
13278 	bool retcode = FALSE;
13279 
13280 	DHD_INFO(("firmware api revision %d, host api revision %d\n",
13281 		firmware_api_version, host_api_version));
13282 
13283 	switch (firmware_api_version) {
13284 	case PCIE_SHARED_VERSION_7:
13285 	case PCIE_SHARED_VERSION_6:
13286 	case PCIE_SHARED_VERSION_5:
13287 		retcode = TRUE;
13288 		break;
13289 	default:
13290 		if (firmware_api_version <= host_api_version)
13291 			retcode = TRUE;
13292 	}
13293 	return retcode;
13294 }
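
/*
 * Illustrative outcomes of the policy above: a dongle reporting rev 5, 6 or 7
 * is always accepted; a dongle reporting a rev above PCIE_SHARED_VERSION_7 is
 * accepted only when it does not exceed the host's PCIE_SHARED_VERSION; and an
 * older rev (e.g. 4) is accepted because firmware_api_version <=
 * host_api_version holds.
 */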
13295 
13296 static int
13297 dhdpcie_readshared(dhd_bus_t *bus)
13298 {
13299 	uint32 addr = 0;
13300 	int rv, dma_indx_wr_buf, dma_indx_rd_buf;
13301 	uint32 shaddr = 0;
13302 	pciedev_shared_t *sh = bus->pcie_sh;
13303 	dhd_timeout_t tmo;
13304 	bool idma_en = FALSE;
13305 #if defined(PCIE_INB_DW)
13306 	bool d2h_inband_dw = FALSE;
13307 #endif /* defined(PCIE_INB_DW) */
13308 #if defined(PCIE_OOB)
13309 	bool d2h_no_oob_dw = FALSE;
#endif /* defined(PCIE_OOB) */
13311 	uint32 timeout = MAX_READ_TIMEOUT;
13312 	uint32 elapsed;
13313 #ifndef CUSTOMER_HW4_DEBUG
13314 	uint32 intstatus;
#endif /* !CUSTOMER_HW4_DEBUG */
13316 
13317 	if (MULTIBP_ENAB(bus->sih)) {
13318 		dhd_bus_pcie_pwr_req(bus);
13319 	}
13320 
13321 	shaddr = bus->dongle_ram_base + bus->ramsize - 4;
13322 
13323 #ifdef BCMSLTGT
13324 #ifdef BCMQT_HW
13325 	if (qt_dngl_timeout) {
13326 		timeout = qt_dngl_timeout * 1000;
13327 	}
13328 #endif /* BCMQT_HW */
13329 	DHD_ERROR(("%s: host timeout in QT/FPGA mode %ld ms\n",
13330 		__FUNCTION__, (timeout * htclkratio) / USEC_PER_MSEC));
13331 #endif /* BCMSLTGT */
13332 
	/* start a timer with the configured read timeout */
13334 	dhd_timeout_start(&tmo, timeout);
13335 
13336 	while (((addr == 0) || (addr == bus->nvram_csm)) && !dhd_timeout_expired(&tmo)) {
13337 		/* Read last word in memory to determine address of pciedev_shared structure */
13338 		addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
13339 #ifdef BCMINTERNAL
13340 #ifdef DHD_FWTRACE
13341 		/*
13342 		 * FW might fill all trace buffers even before full DHD/FW initialization.
13343 		 * poll for trace buffers to avoid circular buffer overflow.
13344 		 */
13345 		process_fw_trace_data(bus->dhd);
13346 #endif /* DHD_FWTRACE */
13347 #endif /* BCMINTERNAL */
13348 	}
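	/*
	 * A note on the sentinels above (a hedged reading of this loop): until
	 * the firmware boots, the last word still holds its download-time
	 * contents -- either 0 or the nvram checksum value cached in
	 * bus->nvram_csm -- so both are treated as "shared address not yet
	 * published".
	 */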
13349 
13350 	if (addr == (uint32)-1) {
13351 		DHD_ERROR(("%s: ##### pciedev shared address is 0xffffffff ####\n", __FUNCTION__));
13352 #ifdef SUPPORT_LINKDOWN_RECOVERY
13353 #ifdef CONFIG_ARCH_MSM
13354 		bus->no_cfg_restore = 1;
13355 #endif /* CONFIG_ARCH_MSM */
13356 #endif /* SUPPORT_LINKDOWN_RECOVERY */
13357 
13358 #ifdef CUSTOMER_HW4_DEBUG
13359 		bus->is_linkdown = 1;
13360 		DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
13361 #else
13362 		dhd_bus_dump_imp_cfg_registers(bus);
13363 		dhd_bus_dump_dar_registers(bus);
13364 		/* Check the PCIe link status by reading intstatus register */
13365 		intstatus = si_corereg(bus->sih,
13366 				bus->sih->buscoreidx, bus->pcie_mailbox_int, 0, 0);
13367 		if (intstatus == (uint32)-1) {
13368 			DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
13369 			bus->is_linkdown = TRUE;
13370 		} else {
13371 #if defined(DHD_FW_COREDUMP)
13372 			/* save core dump or write to a file */
13373 			if (bus->dhd->memdump_enabled) {
13374 				/* since dhdpcie_readshared() is invoked only during init or trap */
13375 				bus->dhd->memdump_type = bus->dhd->dongle_trap_data ?
13376 					DUMP_TYPE_DONGLE_TRAP : DUMP_TYPE_DONGLE_INIT_FAILURE;
13377 				dhdpcie_mem_dump(bus);
13378 			}
13379 #endif /* DHD_FW_COREDUMP */
13380 		}
13381 #endif /* CUSTOMER_HW4_DEBUG */
13382 		return BCME_ERROR;
13383 	}
13384 
13385 	if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
13386 		(addr > shaddr)) {
13387 		elapsed = tmo.elapsed;
13388 #ifdef BCMSLTGT
13389 		elapsed *= htclkratio;
13390 #endif /* BCMSLTGT */
		DHD_ERROR(("%s: address (0x%08x) of pciedev_shared is invalid\n",
			__FUNCTION__, addr));
		DHD_ERROR(("%s: Waited %u usec, dongle is not ready\n", __FUNCTION__, elapsed));
13394 #ifdef DEBUG_DNGL_INIT_FAIL
		if (addr != (uint32)-1) {	/* skip further PCIe reads if this addr read back as 0xffffffff */
13396 #ifdef CUSTOMER_HW4_DEBUG
13397 			bus->dhd->memdump_enabled = DUMP_MEMONLY;
13398 #endif /* CUSTOMER_HW4_DEBUG */
13399 			if (bus->dhd->memdump_enabled) {
13400 				bus->dhd->memdump_type = DUMP_TYPE_DONGLE_INIT_FAILURE;
13401 				dhdpcie_mem_dump(bus);
13402 			}
13403 		}
13404 #endif /* DEBUG_DNGL_INIT_FAIL */
13405 #if defined(NDIS)
13406 		/* This is a very common code path to catch f/w init failures.
13407 		   Capture a socram dump.
13408 		*/
13409 		ASSERT(0);
13410 #endif /* defined(NDIS) */
13411 		return BCME_ERROR;
13412 	} else {
13413 		bus->rd_shared_pass_time = OSL_LOCALTIME_NS();
13414 		elapsed = tmo.elapsed;
13415 #ifdef BCMSLTGT
13416 		elapsed *= htclkratio;
13417 #endif /* BCMSLTGT */
13418 		bus->shared_addr = (ulong)addr;
13419 		DHD_ERROR(("### Total time ARM OOR to Readshared pass took %llu usec ###\n",
13420 			DIV_U64_BY_U32((bus->rd_shared_pass_time - bus->arm_oor_time),
13421 			NSEC_PER_USEC)));
13422 		DHD_ERROR(("%s: PCIe shared addr (0x%08x) read took %u usec "
13423 			"before dongle is ready\n", __FUNCTION__, addr, elapsed));
13424 	}
13425 
13426 #ifdef DHD_EFI
13427 	bus->dhd->pcie_readshared_done = 1;
13428 #endif
	/* Read the pciedev_shared structure */
13430 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)sh,
13431 		sizeof(pciedev_shared_t))) < 0) {
13432 		DHD_ERROR(("%s: Failed to read PCIe shared struct with %d\n", __FUNCTION__, rv));
13433 		return rv;
13434 	}
13435 
13436 	/* Endianness */
13437 	sh->flags = ltoh32(sh->flags);
13438 	sh->trap_addr = ltoh32(sh->trap_addr);
13439 	sh->assert_exp_addr = ltoh32(sh->assert_exp_addr);
13440 	sh->assert_file_addr = ltoh32(sh->assert_file_addr);
13441 	sh->assert_line = ltoh32(sh->assert_line);
13442 	sh->console_addr = ltoh32(sh->console_addr);
13443 	sh->msgtrace_addr = ltoh32(sh->msgtrace_addr);
13444 	sh->dma_rxoffset = ltoh32(sh->dma_rxoffset);
13445 	sh->rings_info_ptr = ltoh32(sh->rings_info_ptr);
13446 	sh->flags2 = ltoh32(sh->flags2);
13447 
13448 	/* load bus console address */
13449 	bus->console_addr = sh->console_addr;
13450 
13451 	/* Read the dma rx offset */
13452 	bus->dma_rxoffset = bus->pcie_sh->dma_rxoffset;
13453 	dhd_prot_rx_dataoffset(bus->dhd, bus->dma_rxoffset);
13454 
13455 	DHD_INFO(("%s: DMA RX offset from shared Area %d\n", __FUNCTION__, bus->dma_rxoffset));
13456 
13457 	bus->api.fw_rev = sh->flags & PCIE_SHARED_VERSION_MASK;
13458 	if (!(dhdpcie_check_firmware_compatible(bus->api.fw_rev, PCIE_SHARED_VERSION)))
13459 	{
13460 		DHD_ERROR(("%s: pcie_shared version %d in dhd "
13461 		           "is older than pciedev_shared version %d in dongle\n",
13462 		           __FUNCTION__, PCIE_SHARED_VERSION,
13463 		           bus->api.fw_rev));
13464 		return BCME_ERROR;
13465 	}
13466 	dhdpcie_update_bus_api_revisions(bus->api.fw_rev, PCIE_SHARED_VERSION);
13467 
13468 	bus->rw_index_sz = (sh->flags & PCIE_SHARED_2BYTE_INDICES) ?
13469 		sizeof(uint16) : sizeof(uint32);
	DHD_INFO(("%s: Dongle advertises %d-byte indices\n",
13471 		__FUNCTION__, bus->rw_index_sz));
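	/*
	 * Sizing illustration (ring counts hypothetical): with
	 * PCIE_SHARED_2BYTE_INDICES set and, say, 40 submission rings, the
	 * DMA'able H2D write-index block allocated later is
	 * rw_index_sz * max_submission_rings = 2 * 40 = 80 bytes; without the
	 * flag it would be 4 * 40 = 160 bytes.
	 */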
13472 
13473 #ifdef IDLE_TX_FLOW_MGMT
13474 	if (sh->flags & PCIE_SHARED_IDLE_FLOW_RING) {
		DHD_ERROR(("%s: FW supports idle flow ring management!\n",
13476 			__FUNCTION__));
13477 		bus->enable_idle_flowring_mgmt = TRUE;
13478 	}
13479 #endif /* IDLE_TX_FLOW_MGMT */
13480 
13481 #ifdef PCIE_OOB
13482 	bus->dhd->d2h_no_oob_dw = (sh->flags & PCIE_SHARED_NO_OOB_DW) ? TRUE : FALSE;
13483 	d2h_no_oob_dw = bus->dhd->d2h_no_oob_dw;
13484 #endif /* PCIE_OOB */
13485 
13486 #ifdef PCIE_INB_DW
13487 	bus->dhd->d2h_inband_dw = (sh->flags & PCIE_SHARED_INBAND_DS) ? TRUE : FALSE;
13488 	d2h_inband_dw = bus->dhd->d2h_inband_dw;
13489 #endif /* PCIE_INB_DW */
13490 
13491 #if defined(PCIE_INB_DW)
13492 	DHD_ERROR(("FW supports Inband dw ? %s\n",
13493 		d2h_inband_dw ? "Y":"N"));
13494 #endif /* defined(PCIE_INB_DW) */
13495 
13496 #if defined(PCIE_OOB)
13497 	DHD_ERROR(("FW supports oob dw ? %s\n",
13498 		d2h_no_oob_dw ? "N":"Y"));
13499 #endif /* defined(PCIE_OOB) */
13500 
13501 	if (IDMA_CAPABLE(bus)) {
13502 		if (bus->sih->buscorerev == 23) {
13503 #ifdef PCIE_INB_DW
13504 			if (bus->dhd->d2h_inband_dw)
13505 			{
13506 				idma_en = TRUE;
13507 			}
13508 #endif /* PCIE_INB_DW */
13509 		} else {
13510 			idma_en = TRUE;
13511 		}
13512 	}
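	/* Per the check above, buscorerev 23 enables iDMA only when the dongle
	 * also supports inband device wake; other iDMA-capable revisions enable
	 * it unconditionally.
	 */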
13513 
13514 	if (idma_en) {
13515 		bus->dhd->idma_enable = (sh->flags & PCIE_SHARED_IDMA) ? TRUE : FALSE;
13516 		bus->dhd->ifrm_enable = (sh->flags & PCIE_SHARED_IFRM) ? TRUE : FALSE;
13517 	}
13518 
13519 	bus->dhd->d2h_sync_mode = sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK;
13520 
13521 	bus->dhd->dar_enable = (sh->flags & PCIE_SHARED_DAR) ? TRUE : FALSE;
13522 
13523 	/* Does the FW support DMA'ing r/w indices */
13524 	if (sh->flags & PCIE_SHARED_DMA_INDEX) {
13525 		if (!bus->dhd->dma_ring_upd_overwrite) {
13526 #if defined(BCM_ROUTER_DHD)
13527 		/* Router platform does not use IOV_DMA_RINGINDICES */
13528 			if (sh->flags & PCIE_SHARED_2BYTE_INDICES)
13529 #endif /* BCM_ROUTER_DHD */
13530 			{
13531 				if (!IFRM_ENAB(bus->dhd)) {
13532 					bus->dhd->dma_h2d_ring_upd_support = TRUE;
13533 				}
13534 				bus->dhd->dma_d2h_ring_upd_support = TRUE;
13535 			}
13536 		}
13537 
13538 		if (bus->dhd->dma_d2h_ring_upd_support && bus->dhd->d2h_sync_mode) {
13539 			DHD_ERROR(("%s: ERROR COMBO: sync (0x%x) enabled for DMA indices\n",
13540 				__FUNCTION__, bus->dhd->d2h_sync_mode));
13541 		}
13542 
		DHD_INFO(("%s: Host supports DMAing indices: H2D:%d - D2H:%d. FW supports it\n",
13544 			__FUNCTION__,
13545 			(bus->dhd->dma_h2d_ring_upd_support ? 1 : 0),
13546 			(bus->dhd->dma_d2h_ring_upd_support ? 1 : 0)));
13547 	} else if (!(sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK)) {
13548 		DHD_ERROR(("%s FW has to support either dma indices or d2h sync\n",
13549 			__FUNCTION__));
13550 		return BCME_UNSUPPORTED;
13551 	} else {
13552 		bus->dhd->dma_h2d_ring_upd_support = FALSE;
13553 		bus->dhd->dma_d2h_ring_upd_support = FALSE;
13554 	}
13555 
13556 	/* Does the firmware support fast delete ring? */
13557 	if (sh->flags2 & PCIE_SHARED2_FAST_DELETE_RING) {
13558 		DHD_INFO(("%s: Firmware supports fast delete ring\n",
13559 			__FUNCTION__));
13560 		bus->dhd->fast_delete_ring_support = TRUE;
13561 	} else {
13562 		DHD_INFO(("%s: Firmware does not support fast delete ring\n",
13563 			__FUNCTION__));
13564 		bus->dhd->fast_delete_ring_support = FALSE;
13565 	}
13566 
13567 	/* get ring_info, ring_state and mb data ptrs and store the addresses in bus structure */
13568 	{
13569 		ring_info_t  ring_info;
13570 
13571 		/* boundary check */
13572 		if ((sh->rings_info_ptr < bus->dongle_ram_base) || (sh->rings_info_ptr > shaddr)) {
13573 			DHD_ERROR(("%s: rings_info_ptr is invalid 0x%x, skip reading ring info\n",
13574 				__FUNCTION__, sh->rings_info_ptr));
13575 			return BCME_ERROR;
13576 		}
13577 
13578 		if ((rv = dhdpcie_bus_membytes(bus, FALSE, sh->rings_info_ptr,
13579 			(uint8 *)&ring_info, sizeof(ring_info_t))) < 0)
13580 			return rv;
13581 
13582 		bus->h2d_mb_data_ptr_addr = ltoh32(sh->h2d_mb_data_ptr);
13583 		bus->d2h_mb_data_ptr_addr = ltoh32(sh->d2h_mb_data_ptr);
13584 
13585 		if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
13586 			bus->max_tx_flowrings = ltoh16(ring_info.max_tx_flowrings);
13587 			bus->max_submission_rings = ltoh16(ring_info.max_submission_queues);
13588 			bus->max_completion_rings = ltoh16(ring_info.max_completion_rings);
13589 			bus->max_cmn_rings = bus->max_submission_rings - bus->max_tx_flowrings;
13590 			bus->api.handle_mb_data = dhdpcie_read_handle_mb_data;
13591 			bus->use_mailbox = sh->flags & PCIE_SHARED_USE_MAILBOX;
13592 		}
13593 		else {
13594 			bus->max_tx_flowrings = ltoh16(ring_info.max_tx_flowrings);
13595 			bus->max_submission_rings = bus->max_tx_flowrings;
13596 			bus->max_completion_rings = BCMPCIE_D2H_COMMON_MSGRINGS;
13597 			bus->max_cmn_rings = BCMPCIE_H2D_COMMON_MSGRINGS;
13598 			bus->api.handle_mb_data = dhdpcie_handle_mb_data;
13599 			bus->use_mailbox = TRUE;
13600 		}
13601 		if (bus->max_completion_rings == 0) {
13602 			DHD_ERROR(("dongle completion rings are invalid %d\n",
13603 				bus->max_completion_rings));
13604 			return BCME_ERROR;
13605 		}
13606 		if (bus->max_submission_rings == 0) {
13607 			DHD_ERROR(("dongle submission rings are invalid %d\n",
13608 				bus->max_submission_rings));
13609 			return BCME_ERROR;
13610 		}
13611 		if (bus->max_tx_flowrings == 0) {
13612 			DHD_ERROR(("dongle txflow rings are invalid %d\n", bus->max_tx_flowrings));
13613 			return BCME_ERROR;
13614 		}
13615 
		/* If both FW and host support DMA'ing indices, allocate memory and notify FW.
		 * The max_sub_queues value is read from the FW-initialized ring_info.
		 */
13619 		if (bus->dhd->dma_h2d_ring_upd_support || IDMA_ENAB(bus->dhd)) {
13620 			dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
13621 				H2D_DMA_INDX_WR_BUF, bus->max_submission_rings);
13622 			dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
13623 				D2H_DMA_INDX_RD_BUF, bus->max_completion_rings);
13624 
13625 			if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) {
				DHD_ERROR(("%s: Failed to allocate memory for dma'ing h2d indices. "
						"Host will use w/r indices in TCM\n",
						__FUNCTION__));
13629 				bus->dhd->dma_h2d_ring_upd_support = FALSE;
13630 				bus->dhd->idma_enable = FALSE;
13631 			}
13632 		}
13633 
13634 		if (bus->dhd->dma_d2h_ring_upd_support) {
13635 			dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
13636 				D2H_DMA_INDX_WR_BUF, bus->max_completion_rings);
13637 			dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
13638 				H2D_DMA_INDX_RD_BUF, bus->max_submission_rings);
13639 
13640 			if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) {
				DHD_ERROR(("%s: Failed to allocate memory for dma'ing d2h indices. "
						"Host will use w/r indices in TCM\n",
						__FUNCTION__));
13644 				bus->dhd->dma_d2h_ring_upd_support = FALSE;
13645 			}
13646 		}
13647 #ifdef DHD_DMA_INDICES_SEQNUM
13648 		if (bus->dhd->dma_d2h_ring_upd_support) {
13649 			uint32 bufsz = bus->rw_index_sz * bus->max_completion_rings;
13650 			if (dhd_prot_dma_indx_copybuf_init(bus->dhd, bufsz, D2H_DMA_INDX_WR_BUF)
13651 				!= BCME_OK) {
13652 				return BCME_NOMEM;
13653 			}
13654 			bufsz = bus->rw_index_sz * bus->max_submission_rings;
13655 			if (dhd_prot_dma_indx_copybuf_init(bus->dhd, bufsz, H2D_DMA_INDX_RD_BUF)
13656 				!= BCME_OK) {
13657 				return BCME_NOMEM;
13658 			}
13659 		}
13660 #endif /* DHD_DMA_INDICES_SEQNUM */
13661 		if (IFRM_ENAB(bus->dhd)) {
13662 			dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
13663 				H2D_IFRM_INDX_WR_BUF, bus->max_tx_flowrings);
13664 
13665 			if (dma_indx_wr_buf != BCME_OK) {
13666 				DHD_ERROR(("%s: Failed to alloc memory for Implicit DMA\n",
13667 						__FUNCTION__));
13668 				bus->dhd->ifrm_enable = FALSE;
13669 			}
13670 		}
13671 
13672 		/* read ringmem and ringstate ptrs from shared area and store in host variables */
13673 		dhd_fillup_ring_sharedptr_info(bus, &ring_info);
13674 		if (dhd_msg_level & DHD_INFO_VAL) {
13675 			bcm_print_bytes("ring_info_raw", (uchar *)&ring_info, sizeof(ring_info_t));
13676 		}
13677 		DHD_INFO(("%s: ring_info\n", __FUNCTION__));
13678 
13679 		DHD_ERROR(("%s: max H2D queues %d\n",
13680 			__FUNCTION__, ltoh16(ring_info.max_tx_flowrings)));
13681 
		DHD_INFO(("mailbox addresses\n"));
13683 		DHD_INFO(("%s: h2d_mb_data_ptr_addr 0x%04x\n",
13684 			__FUNCTION__, bus->h2d_mb_data_ptr_addr));
13685 		DHD_INFO(("%s: d2h_mb_data_ptr_addr 0x%04x\n",
13686 			__FUNCTION__, bus->d2h_mb_data_ptr_addr));
13687 	}
13688 
13689 	DHD_INFO(("%s: d2h_sync_mode 0x%08x\n",
13690 		__FUNCTION__, bus->dhd->d2h_sync_mode));
13691 
13692 	bus->dhd->d2h_hostrdy_supported =
13693 		((sh->flags & PCIE_SHARED_HOSTRDY_SUPPORT) == PCIE_SHARED_HOSTRDY_SUPPORT);
13694 
13695 	bus->dhd->ext_trap_data_supported =
13696 		((sh->flags2 & PCIE_SHARED2_EXTENDED_TRAP_DATA) == PCIE_SHARED2_EXTENDED_TRAP_DATA);
13697 
13698 	if ((sh->flags2 & PCIE_SHARED2_TXSTATUS_METADATA) == 0)
13699 		bus->dhd->pcie_txs_metadata_enable = 0;
13700 
13701 	if (sh->flags2 & PCIE_SHARED2_TRAP_ON_HOST_DB7)  {
13702 		memset(&bus->dhd->db7_trap, 0, sizeof(bus->dhd->db7_trap));
13703 		bus->dhd->db7_trap.fw_db7w_trap = 1;
13704 		/* add an option to let the user select ?? */
13705 		bus->dhd->db7_trap.db7_magic_number = PCIE_DB7_MAGIC_NUMBER_DPC_TRAP;
13706 	}
13707 
13708 #ifdef BTLOG
13709 	bus->dhd->bt_logging = (sh->flags2 & PCIE_SHARED2_BT_LOGGING) ? TRUE : FALSE;
	/* XXX: WAR is needed for a dongle with BTLOG to be backwards compatible with existing DHD.
	 * The issue is that existing DHD does not compute the INFO cmpl ringid correctly once a
	 * BTLOG dongle increases max_submission_rings, resulting in overwriting a ring in the
	 * dongle. When the dongle enables submit_count_WAR, it implies that the submit ring count
	 * has been incremented in the dongle but will not be reflected in max_submission_rings.
	 */
13716 	bus->dhd->submit_count_WAR = (sh->flags2 & PCIE_SHARED2_SUBMIT_COUNT_WAR) ? TRUE : FALSE;
13717 	DHD_ERROR(("FW supports BT logging ? %s \n", bus->dhd->bt_logging ? "Y" : "N"));
13718 #endif	/* BTLOG */
13719 
13720 #ifdef SNAPSHOT_UPLOAD
13721 	bus->dhd->snapshot_upload = (sh->flags2 & PCIE_SHARED2_SNAPSHOT_UPLOAD) ? TRUE : FALSE;
13722 	DHD_ERROR(("FW supports snapshot upload ? %s \n", bus->dhd->snapshot_upload ? "Y" : "N"));
13723 #endif	/* SNAPSHOT_UPLOAD */
13724 
13725 #ifdef D2H_MINIDUMP
13726 	bus->d2h_minidump = (sh->flags2 & PCIE_SHARED2_FW_SMALL_MEMDUMP) ? TRUE : FALSE;
13727 	DHD_ERROR(("FW supports minidump ? %s \n", bus->d2h_minidump ? "Y" : "N"));
13728 	if (bus->d2h_minidump_override) {
13729 		bus->d2h_minidump = FALSE;
13730 	}
13731 	DHD_ERROR(("d2h_minidump: %d d2h_minidump_override: %d\n",
13732 		bus->d2h_minidump, bus->d2h_minidump_override));
13733 #endif /* D2H_MINIDUMP */
13734 
13735 	bus->dhd->hscb_enable =
13736 		(sh->flags2 & PCIE_SHARED2_HSCB) == PCIE_SHARED2_HSCB;
13737 
13738 #ifdef EWP_EDL
13739 	if (host_edl_support) {
13740 		bus->dhd->dongle_edl_support = (sh->flags2 & PCIE_SHARED2_EDL_RING) ? TRUE : FALSE;
13741 		DHD_ERROR(("Dongle EDL support: %u\n", bus->dhd->dongle_edl_support));
13742 	}
13743 #endif /* EWP_EDL */
13744 
13745 	bus->dhd->debug_buf_dest_support =
13746 		(sh->flags2 & PCIE_SHARED2_DEBUG_BUF_DEST) ? TRUE : FALSE;
13747 	DHD_ERROR(("FW supports debug buf dest ? %s \n",
13748 		bus->dhd->debug_buf_dest_support ? "Y" : "N"));
13749 
13750 #ifdef DHD_HP2P
13751 	if (bus->dhd->hp2p_enable) {
13752 		bus->dhd->hp2p_ts_capable =
13753 			(sh->flags2 & PCIE_SHARED2_PKT_TIMESTAMP) == PCIE_SHARED2_PKT_TIMESTAMP;
13754 		bus->dhd->hp2p_capable =
13755 			(sh->flags2 & PCIE_SHARED2_HP2P) == PCIE_SHARED2_HP2P;
13756 		bus->dhd->hp2p_capable |= bus->dhd->hp2p_ts_capable;
13757 
13758 		DHD_ERROR(("FW supports HP2P ? %s\n",
13759 			bus->dhd->hp2p_capable ? "Y" : "N"));
13760 
13761 		if (bus->dhd->hp2p_capable) {
13762 			bus->dhd->pkt_thresh = HP2P_PKT_THRESH;
13763 			bus->dhd->pkt_expiry = HP2P_PKT_EXPIRY;
13764 			bus->dhd->time_thresh = HP2P_TIME_THRESH;
13765 			for (addr = 0; addr < MAX_HP2P_FLOWS; addr++) {
13766 				hp2p_info_t *hp2p_info = &bus->dhd->hp2p_info[addr];
13767 				hp2p_info->hrtimer_init = FALSE;
13768 				hrtimer_init(&hp2p_info->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
13769 				hp2p_info->timer.function = &dhd_hp2p_write;
13770 			}
13771 		}
13772 	}
13773 #endif /* DHD_HP2P */
13774 
13775 #ifdef DHD_DB0TS
13776 	bus->dhd->db0ts_capable =
13777 		(sh->flags & PCIE_SHARED_TIMESTAMP_DB0) == PCIE_SHARED_TIMESTAMP_DB0;
13778 #endif /* DHD_DB0TS */
13779 
13780 	if (MULTIBP_ENAB(bus->sih)) {
13781 		dhd_bus_pcie_pwr_req_clear(bus);
13782 
13783 		/*
13784 		 * WAR to fix ARM cold boot;
13785 		 * De-assert WL domain in DAR
13786 		 */
13787 		if (bus->sih->buscorerev >= 68) {
13788 			dhd_bus_pcie_pwr_req_wl_domain(bus,
13789 				DAR_PCIE_PWR_CTRL((bus->sih)->buscorerev), FALSE);
13790 		}
13791 	}
13792 	return BCME_OK;
13793 } /* dhdpcie_readshared */
13794 
13795 /** Read ring mem and ring state ptr info from shared memory area in device memory */
13796 static void
13797 dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info)
13798 {
13799 	uint16 i = 0;
13800 	uint16 j = 0;
13801 	uint32 tcm_memloc;
13802 	uint32 d2h_w_idx_ptr, d2h_r_idx_ptr, h2d_w_idx_ptr, h2d_r_idx_ptr;
13803 	uint16 max_tx_flowrings = bus->max_tx_flowrings;
13804 
13805 	/* Ring mem ptr info */
	/* Allocated in the order
13807 		H2D_MSGRING_CONTROL_SUBMIT              0
13808 		H2D_MSGRING_RXPOST_SUBMIT               1
13809 		D2H_MSGRING_CONTROL_COMPLETE            2
13810 		D2H_MSGRING_TX_COMPLETE                 3
13811 		D2H_MSGRING_RX_COMPLETE                 4
13812 	*/
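	/*
	 * Sketch of the index areas walked below: each ring owns one
	 * rw_index_sz-wide slot, laid out back to back in dongle memory.
	 *
	 *   h2d_w_idx_ptr -> [ctrl submit][rxpost][flowring 0][flowring 1]...
	 *   d2h_w_idx_ptr -> [ctrl cmpl][tx cmpl][rx cmpl][dbg info/EDL]...
	 *
	 * The read-index areas mirror the write-index areas.
	 */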
13813 
13814 	{
13815 		/* ringmemptr holds start of the mem block address space */
13816 		tcm_memloc = ltoh32(ring_info->ringmem_ptr);
13817 
		/* Find out the ringmem ptr for each common ring */
13819 		for (i = 0; i <= BCMPCIE_COMMON_MSGRING_MAX_ID; i++) {
13820 			bus->ring_sh[i].ring_mem_addr = tcm_memloc;
13821 			/* Update mem block */
13822 			tcm_memloc = tcm_memloc + sizeof(ring_mem_t);
13823 			DHD_INFO(("%s: ring id %d ring mem addr 0x%04x \n", __FUNCTION__,
13824 				i, bus->ring_sh[i].ring_mem_addr));
13825 		}
13826 	}
13827 
13828 	/* Ring state mem ptr info */
13829 	{
13830 		d2h_w_idx_ptr = ltoh32(ring_info->d2h_w_idx_ptr);
13831 		d2h_r_idx_ptr = ltoh32(ring_info->d2h_r_idx_ptr);
13832 		h2d_w_idx_ptr = ltoh32(ring_info->h2d_w_idx_ptr);
13833 		h2d_r_idx_ptr = ltoh32(ring_info->h2d_r_idx_ptr);
13834 
13835 		/* Store h2d common ring write/read pointers */
13836 		for (i = 0; i < BCMPCIE_H2D_COMMON_MSGRINGS; i++) {
13837 			bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
13838 			bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
13839 
13840 			/* update mem block */
13841 			h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz;
13842 			h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz;
13843 
13844 			DHD_INFO(("%s: h2d w/r : idx %d write %x read %x \n", __FUNCTION__, i,
13845 				bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
13846 		}
13847 
13848 		/* Store d2h common ring write/read pointers */
13849 		for (j = 0; j < BCMPCIE_D2H_COMMON_MSGRINGS; j++, i++) {
13850 			bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
13851 			bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;
13852 
13853 			/* update mem block */
13854 			d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz;
13855 			d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz;
13856 
13857 			DHD_INFO(("%s: d2h w/r : idx %d write %x read %x \n", __FUNCTION__, i,
13858 				bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
13859 		}
13860 
13861 		/* Store txflow ring write/read pointers */
13862 		if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) {
13863 			max_tx_flowrings -= BCMPCIE_H2D_COMMON_MSGRINGS;
13864 		} else {
13865 			/* Account for Debug info h2d ring located after the last tx flow ring */
13866 			max_tx_flowrings = max_tx_flowrings + 1;
13867 		}
13868 		for (j = 0; j < max_tx_flowrings; i++, j++)
13869 		{
13870 			bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
13871 			bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
13872 
13873 			/* update mem block */
13874 			h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz;
13875 			h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz;
13876 
13877 			DHD_INFO(("%s: FLOW Rings h2d w/r : idx %d write %x read %x \n",
13878 				__FUNCTION__, i,
13879 				bus->ring_sh[i].ring_state_w,
13880 				bus->ring_sh[i].ring_state_r));
13881 		}
13882 #ifdef DHD_HP2P
13883 		/* store wr/rd pointers for debug info completion or EDL ring and hp2p rings */
13884 		for (j = 0; j <= MAX_HP2P_CMPL_RINGS; i++, j++) {
13885 			bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
13886 			bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;
13887 			d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz;
13888 			d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz;
13889 			DHD_INFO(("d2h w/r : idx %d write %x read %x \n", i,
13890 				bus->ring_sh[i].ring_state_w,
13891 				bus->ring_sh[i].ring_state_r));
13892 		}
13893 #else
13894 		/* store wr/rd pointers for debug info completion or EDL ring */
13895 		bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
13896 		bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;
13897 		d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz;
13898 		d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz;
13899 		DHD_INFO(("d2h w/r : idx %d write %x read %x \n", i,
13900 			bus->ring_sh[i].ring_state_w,
13901 			bus->ring_sh[i].ring_state_r));
13902 #endif /* DHD_HP2P */
13903 	}
13904 } /* dhd_fillup_ring_sharedptr_info */
13905 
13906 /**
13907  * Initialize bus module: prepare for communication with the dongle. Called after downloading
13908  * firmware into the dongle.
13909  */
13910 int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
13911 {
13912 	dhd_bus_t *bus = dhdp->bus;
13913 	int  ret = 0;
13914 
13915 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
13916 
13917 	ASSERT(bus->dhd);
13918 	if (!bus->dhd)
13919 		return 0;
13920 
13921 	dhd_bus_pcie_pwr_req_clear_reload_war(bus);
13922 
13923 	if (MULTIBP_ENAB(bus->sih)) {
13924 		dhd_bus_pcie_pwr_req(bus);
13925 	}
13926 
13927 	/* Configure AER registers to log the TLP header */
13928 	dhd_bus_aer_config(bus);
13929 
13930 	/* Make sure we're talking to the core. */
13931 	bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
13932 	ASSERT(bus->reg != NULL);
13933 
	/* before opening up the bus for data transfer, check if the shared area is intact */
13935 	ret = dhdpcie_readshared(bus);
13936 	if (ret < 0) {
13937 		DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__));
13938 		goto exit;
13939 	}
13940 
13941 	/* Make sure we're talking to the core. */
13942 	bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
13943 	ASSERT(bus->reg != NULL);
13944 
13945 	/* Set bus state according to enable result */
13946 	dhdp->busstate = DHD_BUS_DATA;
13947 	DHD_SET_BUS_NOT_IN_LPS(bus);
13948 	dhdp->dhd_bus_busy_state = 0;
13949 
13950 	/* D11 status via PCIe completion header */
13951 	if ((ret = dhdpcie_init_d11status(bus)) < 0) {
13952 		goto exit;
13953 	}
13954 
13955 #if defined(OEM_ANDROID) || defined(LINUX)
13956 	if (!dhd_download_fw_on_driverload)
13957 		dhd_dpc_enable(bus->dhd);
13958 #endif /* OEM_ANDROID || LINUX */
13959 	/* Enable the interrupt after device is up */
13960 	dhdpcie_bus_intr_enable(bus);
13961 
13962 	DHD_ERROR(("%s: Enabling bus->intr_enabled\n", __FUNCTION__));
13963 	bus->intr_enabled = TRUE;
13964 
13965 	/* XXX These need to change w/API updates */
13966 	/* bcmsdh_intr_unmask(bus->sdh); */
13967 #ifdef DHD_PCIE_RUNTIMEPM
13968 	bus->idlecount = 0;
13969 	bus->idletime = (int32)MAX_IDLE_COUNT;
13970 	init_waitqueue_head(&bus->rpm_queue);
13971 	mutex_init(&bus->pm_lock);
13972 #else
13973 	bus->idletime = 0;
13974 #endif /* DHD_PCIE_RUNTIMEPM */
13975 #ifdef PCIE_INB_DW
13976 	bus->skip_ds_ack = FALSE;
13977 	/* Initialize the lock to serialize Device Wake Inband activities */
13978 	if (!bus->inb_lock) {
13979 		bus->inb_lock = osl_spin_lock_init(bus->dhd->osh);
13980 	}
13981 #endif
13982 
13983 	/* XXX Temp errnum workaround: return ok, caller checks bus state */
13984 
	/* Make use_d0_inform TRUE for revisions below 6 for backward compatibility */
13986 	if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) {
13987 		bus->use_d0_inform = TRUE;
13988 	} else {
13989 		bus->use_d0_inform = FALSE;
13990 	}
13991 
13992 	bus->hostready_count = 0;
13993 
13994 exit:
13995 	if (MULTIBP_ENAB(bus->sih)) {
13996 		dhd_bus_pcie_pwr_req_clear(bus);
13997 	}
13998 	return ret;
13999 }
14000 
14001 static void
14002 dhdpcie_init_shared_addr(dhd_bus_t *bus)
14003 {
14004 	uint32 addr = 0;
14005 	uint32 val = 0;
14006 	addr = bus->dongle_ram_base + bus->ramsize - 4;
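	/* Clearing the last word of dongle RAM removes any stale shared-area
	 * pointer, so that a subsequent dhdpcie_readshared() cannot mistake a
	 * leftover value from a previous firmware run for the new address
	 * (a hedged reading of this helper's purpose).
	 */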
14007 #ifdef DHD_PCIE_RUNTIMEPM
14008 	dhdpcie_runtime_bus_wake(bus->dhd, TRUE, __builtin_return_address(0));
14009 #endif /* DHD_PCIE_RUNTIMEPM */
14010 	dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val));
14011 }
14012 
int
14014 dhdpcie_chipmatch(uint16 vendor, uint16 device)
14015 {
14016 	if (vendor != PCI_VENDOR_ID_BROADCOM) {
14017 #ifndef DHD_EFI
14018 		DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__,
14019 			vendor, device));
14020 #endif /* DHD_EFI */
14021 		return (-ENODEV);
14022 	}
14023 
14024 	switch (device) {
14025 		case BCM4345_CHIP_ID:
14026 		case BCM43454_CHIP_ID:
14027 		case BCM43455_CHIP_ID:
14028 		case BCM43457_CHIP_ID:
14029 		case BCM43458_CHIP_ID:
14030 		case BCM4350_D11AC_ID:
14031 		case BCM4350_D11AC2G_ID:
14032 		case BCM4350_D11AC5G_ID:
14033 		case BCM4350_CHIP_ID:
14034 		case BCM4354_D11AC_ID:
14035 		case BCM4354_D11AC2G_ID:
14036 		case BCM4354_D11AC5G_ID:
14037 		case BCM4354_CHIP_ID:
14038 		case BCM4356_D11AC_ID:
14039 		case BCM4356_D11AC2G_ID:
14040 		case BCM4356_D11AC5G_ID:
14041 		case BCM4356_CHIP_ID:
14042 		case BCM4371_D11AC_ID:
14043 		case BCM4371_D11AC2G_ID:
14044 		case BCM4371_D11AC5G_ID:
14045 		case BCM4371_CHIP_ID:
14046 		case BCM4345_D11AC_ID:
14047 		case BCM4345_D11AC2G_ID:
14048 		case BCM4345_D11AC5G_ID:
14049 		case BCM43452_D11AC_ID:
14050 		case BCM43452_D11AC2G_ID:
14051 		case BCM43452_D11AC5G_ID:
14052 		case BCM4335_D11AC_ID:
14053 		case BCM4335_D11AC2G_ID:
14054 		case BCM4335_D11AC5G_ID:
14055 		case BCM4335_CHIP_ID:
14056 		case BCM43602_D11AC_ID:
14057 		case BCM43602_D11AC2G_ID:
14058 		case BCM43602_D11AC5G_ID:
14059 		case BCM43602_CHIP_ID:
14060 		case BCM43569_D11AC_ID:
14061 		case BCM43569_D11AC2G_ID:
14062 		case BCM43569_D11AC5G_ID:
14063 		case BCM43569_CHIP_ID:
14064 		/* XXX: For 4358, BCM4358_CHIP_ID is not checked intentionally as
14065 		 * this is not a real chip id, but propagated from the OTP.
14066 		 */
14067 		case BCM4358_D11AC_ID:
14068 		case BCM4358_D11AC2G_ID:
14069 		case BCM4358_D11AC5G_ID:
14070 		case BCM4349_D11AC_ID:
14071 		case BCM4349_D11AC2G_ID:
14072 		case BCM4349_D11AC5G_ID:
14073 		case BCM4355_D11AC_ID:
14074 		case BCM4355_D11AC2G_ID:
14075 		case BCM4355_D11AC5G_ID:
14076 		case BCM4355_CHIP_ID:
		/* XXX: BCM4359_CHIP_ID is not checked intentionally as this is
		 * not a real chip id, but propagated from the OTP.
		 */
14080 		case BCM4359_D11AC_ID:
14081 		case BCM4359_D11AC2G_ID:
14082 		case BCM4359_D11AC5G_ID:
14083 		case BCM43596_D11AC_ID:
14084 		case BCM43596_D11AC2G_ID:
14085 		case BCM43596_D11AC5G_ID:
14086 		case BCM43597_D11AC_ID:
14087 		case BCM43597_D11AC2G_ID:
14088 		case BCM43597_D11AC5G_ID:
14089 		case BCM4364_D11AC_ID:
14090 		case BCM4364_D11AC2G_ID:
14091 		case BCM4364_D11AC5G_ID:
14092 		case BCM4364_CHIP_ID:
14093 		case BCM4361_D11AC_ID:
14094 		case BCM4361_D11AC2G_ID:
14095 		case BCM4361_D11AC5G_ID:
14096 		case BCM4361_CHIP_ID:
14097 		case BCM4347_D11AC_ID:
14098 		case BCM4347_D11AC2G_ID:
14099 		case BCM4347_D11AC5G_ID:
14100 		case BCM4347_CHIP_ID:
14101 		case BCM4369_D11AX_ID:
14102 		case BCM4369_D11AX2G_ID:
14103 		case BCM4369_D11AX5G_ID:
14104 		case BCM4369_CHIP_ID:
14105 		case BCM4376_D11AX_ID:
14106 		case BCM4376_D11AX2G_ID:
14107 		case BCM4376_D11AX5G_ID:
14108 		case BCM4376_CHIP_ID:
14109 		case BCM4377_M_D11AX_ID:
14110 		case BCM4377_D11AX_ID:
14111 		case BCM4377_D11AX2G_ID:
14112 		case BCM4377_D11AX5G_ID:
14113 		case BCM4377_CHIP_ID:
14114 		case BCM4378_D11AX_ID:
14115 		case BCM4378_D11AX2G_ID:
14116 		case BCM4378_D11AX5G_ID:
14117 		case BCM4378_CHIP_ID:
14118 		case BCM4387_D11AX_ID:
14119 		case BCM4387_CHIP_ID:
14120 		case BCM4362_D11AX_ID:
14121 		case BCM4362_D11AX2G_ID:
14122 		case BCM4362_D11AX5G_ID:
14123 		case BCM4362_CHIP_ID:
14124 		case BCM4375_D11AX_ID:
14125 		case BCM4375_D11AX2G_ID:
14126 		case BCM4375_D11AX5G_ID:
14127 		case BCM4375_CHIP_ID:
14128 		case BCM43751_D11AX_ID:
14129 		case BCM43751_D11AX2G_ID:
14130 		case BCM43751_D11AX5G_ID:
14131 		case BCM43751_CHIP_ID:
14132 		case BCM43752_D11AX_ID:
14133 		case BCM43752_D11AX2G_ID:
14134 		case BCM43752_D11AX5G_ID:
14135 		case BCM43752_CHIP_ID:
14136 		case BCM4388_CHIP_ID:
14137 		case BCM4388_D11AX_ID:
14138 		case BCM4389_CHIP_ID:
14139 		case BCM4389_D11AX_ID:
14140 		case BCM4385_D11AX_ID:
14141 		case BCM4385_CHIP_ID:
14142 
14143 #ifdef UNRELEASEDCHIP
14144 		case BCM4397_CHIP_ID:
14145 		case BCM4397_D11AX_ID:
14146 #endif
14147 			return 0;
14148 		default:
14149 #ifndef DHD_EFI
14150 			DHD_ERROR(("%s: Unsupported vendor %x device %x\n",
14151 				__FUNCTION__, vendor, device));
14152 #endif
14153 			return (-ENODEV);
14154 	}
14155 } /* dhdpcie_chipmatch */
14156 
14157 /**
14158  * Name:  dhdpcie_cc_nvmshadow
14159  *
14160  * Description:
 * A shadow of OTP/SPROM exists in the ChipCommon region
 * between 0x800 and 0xBFF (backplane addr. 0x1800_0800 to 0x1800_0BFF).
 * The strapping option (SPROM vs. OTP), the presence of OTP/SPROM and its
 * size can also be read from the ChipCommon registers.
14165  */
14166 /* XXX So far tested with 4345 and 4350 (Hence the checks in the function.) */
14167 static int
14168 dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b)
14169 {
14170 	uint16 dump_offset = 0;
14171 	uint32 dump_size = 0, otp_size = 0, sprom_size = 0;
14172 
14173 	/* Table for 65nm OTP Size (in bits) */
14174 	int  otp_size_65nm[8] = {0, 2048, 4096, 8192, 4096, 6144, 512, 1024};
14175 
14176 	volatile uint16 *nvm_shadow;
14177 
14178 	uint cur_coreid;
14179 	uint chipc_corerev;
14180 	chipcregs_t *chipcregs;
14181 
14182 	/* Save the current core */
14183 	cur_coreid = si_coreid(bus->sih);
14184 	/* Switch to ChipC */
14185 	chipcregs = (chipcregs_t *)si_setcore(bus->sih, CC_CORE_ID, 0);
14186 	ASSERT(chipcregs != NULL);
14187 
14188 	chipc_corerev = si_corerev(bus->sih);
14189 
14190 	/* Check ChipcommonCore Rev */
14191 	if (chipc_corerev < 44) {
14192 		DHD_ERROR(("%s: ChipcommonCore Rev %d < 44\n", __FUNCTION__, chipc_corerev));
14193 		return BCME_UNSUPPORTED;
14194 	}
14195 
14196 	/* Check ChipID */
14197 	if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) && !BCM4345_CHIP((uint16)bus->sih->chip) &&
14198 	        ((uint16)bus->sih->chip != BCM4355_CHIP_ID) &&
14199 	        ((uint16)bus->sih->chip != BCM4364_CHIP_ID)) {
		DHD_ERROR(("%s: cc_nvmdump cmd. supported for Olympic chips "
					"4350/4345/4355/4364 only\n", __FUNCTION__));
14202 		return BCME_UNSUPPORTED;
14203 	}
14204 
14205 	/* Check if SRC_PRESENT in SpromCtrl(0x190 in ChipCommon Regs) is set */
14206 	if (chipcregs->sromcontrol & SRC_PRESENT) {
14207 		/* SPROM Size: 1Kbits (0x0), 4Kbits (0x1), 16Kbits(0x2) */
14208 		sprom_size = (1 << (2 * ((chipcregs->sromcontrol & SRC_SIZE_MASK)
14209 					>> SRC_SIZE_SHIFT))) * 1024;
14210 		bcm_bprintf(b, "\nSPROM Present (Size %d bits)\n", sprom_size);
14211 	}
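	/* Worked example of the encoding above: an SRC_SIZE field of 0x1 gives
	 * (1 << (2 * 1)) * 1024 = 4096, i.e. the 4Kbit SPROM case.
	 */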
14212 
14213 	/* XXX Check if OTP exists. 2 possible approaches:
14214 	 * 1) Check if OtpPresent in SpromCtrl (0x190 in ChipCommon Regs) is set OR
14215 	 * 2) Check if OtpSize > 0
14216 	 */
14217 	if (chipcregs->sromcontrol & SRC_OTPPRESENT) {
14218 		bcm_bprintf(b, "\nOTP Present");
14219 
14220 		if (((chipcregs->otplayout & OTPL_WRAP_TYPE_MASK) >> OTPL_WRAP_TYPE_SHIFT)
14221 			== OTPL_WRAP_TYPE_40NM) {
14222 			/* 40nm OTP: Size = (OtpSize + 1) * 1024 bits */
14223 			/* Chipcommon rev51 is a variation on rev45 and does not support
14224 			 * the latest OTP configuration.
14225 			 */
14226 			if (chipc_corerev != 51 && chipc_corerev >= 49) {
14227 				otp_size = (((chipcregs->otplayout & OTPL_ROW_SIZE_MASK)
14228 					>> OTPL_ROW_SIZE_SHIFT) + 1) * 1024;
14229 				bcm_bprintf(b, "(Size %d bits)\n", otp_size);
14230 			} else {
14231 				otp_size =  (((chipcregs->capabilities & CC_CAP_OTPSIZE)
14232 				        >> CC_CAP_OTPSIZE_SHIFT) + 1) * 1024;
14233 				bcm_bprintf(b, "(Size %d bits)\n", otp_size);
14234 			}
14235 		} else {
14236 			/* This part is untested since newer chips have 40nm OTP */
14237 			/* Chipcommon rev51 is a variation on rev45 and does not support
14238 			 * the latest OTP configuration.
14239 			 */
14240 			if (chipc_corerev != 51 && chipc_corerev >= 49) {
14241 				otp_size = otp_size_65nm[(chipcregs->otplayout & OTPL_ROW_SIZE_MASK)
14242 						>> OTPL_ROW_SIZE_SHIFT];
14243 				bcm_bprintf(b, "(Size %d bits)\n", otp_size);
14244 			} else {
14245 				otp_size = otp_size_65nm[(chipcregs->capabilities & CC_CAP_OTPSIZE)
14246 					        >> CC_CAP_OTPSIZE_SHIFT];
14247 				bcm_bprintf(b, "(Size %d bits)\n", otp_size);
14248 				DHD_INFO(("%s: 65nm/130nm OTP Size not tested. \n",
14249 					__FUNCTION__));
14250 			}
14251 		}
14252 	}
14253 
14254 	/* Chipcommon rev51 is a variation on rev45 and does not support
14255 	 * the latest OTP configuration.
14256 	 */
14257 	if (chipc_corerev != 51 && chipc_corerev >= 49) {
14258 		if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
14259 			((chipcregs->otplayout & OTPL_ROW_SIZE_MASK) == 0)) {
14260 			DHD_ERROR(("%s: SPROM and OTP could not be found "
14261 				"sromcontrol = %x, otplayout = %x \n",
14262 				__FUNCTION__, chipcregs->sromcontrol, chipcregs->otplayout));
14263 			return BCME_NOTFOUND;
14264 		}
14265 	} else {
14266 		if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
14267 			((chipcregs->capabilities & CC_CAP_OTPSIZE) == 0)) {
14268 			DHD_ERROR(("%s: SPROM and OTP could not be found "
14269 				"sromcontrol = %x, capablities = %x \n",
14270 				__FUNCTION__, chipcregs->sromcontrol, chipcregs->capabilities));
14271 			return BCME_NOTFOUND;
14272 		}
14273 	}
14274 
14275 	/* Check the strapping option in SpromCtrl: Set = OTP otherwise SPROM */
14276 	if ((!(chipcregs->sromcontrol & SRC_PRESENT) || (chipcregs->sromcontrol & SRC_OTPSEL)) &&
14277 		(chipcregs->sromcontrol & SRC_OTPPRESENT)) {
14278 
14279 		bcm_bprintf(b, "OTP Strap selected.\n"
14280 		               "\nOTP Shadow in ChipCommon:\n");
14281 
		dump_size = otp_size / 16; /* in 16-bit words */
14283 
14284 	} else if (((chipcregs->sromcontrol & SRC_OTPSEL) == 0) &&
14285 		(chipcregs->sromcontrol & SRC_PRESENT)) {
14286 
14287 		bcm_bprintf(b, "SPROM Strap selected\n"
14288 				"\nSPROM Shadow in ChipCommon:\n");
14289 
14290 		/* If SPROM > 8K only 8Kbits is mapped to ChipCommon (0x800 - 0xBFF) */
14291 		/* dump_size in 16bit words */
14292 		dump_size = sprom_size > 8 ? (8 * 1024) / 16 : sprom_size / 16;
14293 	} else {
14294 		DHD_ERROR(("%s: NVM Shadow does not exist in ChipCommon\n",
14295 			__FUNCTION__));
14296 		return BCME_NOTFOUND;
14297 	}
14298 
14299 	if (bus->regs == NULL) {
14300 		DHD_ERROR(("ChipCommon Regs. not initialized\n"));
14301 		return BCME_NOTREADY;
14302 	} else {
14303 		bcm_bprintf(b, "\n OffSet:");
14304 
14305 		/* Chipcommon rev51 is a variation on rev45 and does not support
14306 		 * the latest OTP configuration.
14307 		 */
14308 		if (chipc_corerev != 51 && chipc_corerev >= 49) {
			/* ChipCommon can read only 8 Kbits; for ccrev >= 49 the OTP
			 * size is around 12 Kbits, so use the GCI core
			 */
14312 			nvm_shadow = (volatile uint16 *)si_setcore(bus->sih, GCI_CORE_ID, 0);
14313 		} else {
14314 			/* Point to the SPROM/OTP shadow in ChipCommon */
14315 			nvm_shadow = chipcregs->sromotp;
14316 		}
14317 
14318 		if (nvm_shadow == NULL) {
			DHD_ERROR(("%s: NVM Shadow is not initialized\n", __FUNCTION__));
14320 			return BCME_NOTFOUND;
14321 		}
14322 
14323 		/*
14324 		* Read 16 bits / iteration.
14325 		* dump_size & dump_offset in 16-bit words
14326 		*/
14327 		while (dump_offset < dump_size) {
14328 			if (dump_offset % 2 == 0)
14329 				/* Print the offset in the shadow space in Bytes */
14330 				bcm_bprintf(b, "\n 0x%04x", dump_offset * 2);
14331 
14332 			bcm_bprintf(b, "\t0x%04x", *(nvm_shadow + dump_offset));
14333 			dump_offset += 0x1;
14334 		}
14335 	}
14336 
14337 	/* Switch back to the original core */
14338 	si_setcore(bus->sih, cur_coreid, 0);
14339 
14340 	return BCME_OK;
14341 } /* dhdpcie_cc_nvmshadow */
14342 
14343 /** Flow rings are dynamically created and destroyed */
14344 void dhd_bus_clean_flow_ring(dhd_bus_t *bus, void *node)
14345 {
14346 	void *pkt;
14347 	flow_queue_t *queue;
14348 	flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)node;
14349 	unsigned long flags;
14350 
14351 	queue = &flow_ring_node->queue;
14352 
14353 #ifdef DHDTCPACK_SUPPRESS
	/* Clean tcp_ack_info_tbl in order to prevent access to a flushed pkt
	 * when a new packet comes in from the network stack.
	 */
14357 	dhd_tcpack_info_tbl_clean(bus->dhd);
14358 #endif /* DHDTCPACK_SUPPRESS */
14359 
14360 #ifdef DHD_HP2P
14361 	if (flow_ring_node->hp2p_ring) {
14362 		if (!bus->dhd->hp2p_ring_more) {
14363 			bus->dhd->hp2p_ring_more = TRUE;
14364 		}
14365 		flow_ring_node->hp2p_ring = FALSE;
14366 	}
14367 #endif /* DHD_HP2P */
14368 
14369 	/* clean up BUS level info */
14370 	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
14371 
14372 	/* Flush all pending packets in the queue, if any */
14373 	while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
14374 		PKTFREE(bus->dhd->osh, pkt, TRUE);
14375 	}
14376 	ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
14377 
14378 	/* Reinitialise flowring's queue */
14379 	dhd_flow_queue_reinit(bus->dhd, queue, bus->dhd->conf->flow_ring_queue_threshold);
14380 	flow_ring_node->status = FLOW_RING_STATUS_CLOSED;
14381 	flow_ring_node->active = FALSE;
14382 
14383 	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
14384 
14385 	/* Hold flowring_list_lock to ensure no race condition while accessing the List */
14386 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
14387 	dll_delete(&flow_ring_node->list);
14388 	dll_init(&flow_ring_node->list);
14389 	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
14390 
14391 	/* Release the flowring object back into the pool */
14392 	dhd_prot_flowrings_pool_release(bus->dhd,
14393 		flow_ring_node->flowid, flow_ring_node->prot_info);
14394 
14395 	/* Free the flowid back to the flowid allocator */
14396 	dhd_flowid_free(bus->dhd, flow_ring_node->flow_info.ifindex,
14397 	                flow_ring_node->flowid);
14398 }
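
/*
 * Flow ring status transitions, as implemented by the request/response
 * handlers below (a summary, not an exhaustive state machine):
 *
 *   create request -> OPEN           (on a BCME_OK create response)
 *   flush request  -> FLUSH_PENDING  -> OPEN   (on the flush response)
 *   delete request -> DELETE_PENDING -> CLOSED (via dhd_bus_clean_flow_ring()
 *                                               on the delete response)
 *   resume request -> RESUME_PENDING -> OPEN   (IDLE_TX_FLOW_MGMT builds only)
 */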
14399 
14400 /**
14401  * Allocate a Flow ring buffer,
14402  * Init Ring buffer, send Msg to device about flow ring creation
14403 */
14404 int
14405 dhd_bus_flow_ring_create_request(dhd_bus_t *bus, void *arg)
14406 {
14407 	flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg;
14408 
14409 	DHD_PCIE_INFO(("%s :Flow create\n", __FUNCTION__));
14410 
14411 	/* Send Msg to device about flow ring creation */
14412 	if (dhd_prot_flow_ring_create(bus->dhd, flow_ring_node) != BCME_OK)
14413 		return BCME_NOMEM;
14414 
14415 	return BCME_OK;
14416 }
14417 
14418 /** Handle response from dongle on a 'flow ring create' request */
14419 void
14420 dhd_bus_flow_ring_create_response(dhd_bus_t *bus, uint16 flowid, int32 status)
14421 {
14422 	flow_ring_node_t *flow_ring_node;
14423 	unsigned long flags;
14424 
14425 	DHD_PCIE_INFO(("%s :Flow Response %d \n", __FUNCTION__, flowid));
14426 
14427 	/* Boundary check of the flowid */
14428 	if (flowid > bus->dhd->max_tx_flowid) {
14429 		DHD_ERROR(("%s: flowid is invalid %d, max id %d\n", __FUNCTION__,
14430 			flowid, bus->dhd->max_tx_flowid));
14431 		return;
14432 	}
14433 
14434 	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
14435 	if (!flow_ring_node) {
14436 		DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
14437 		return;
14438 	}
14439 
14440 	ASSERT(flow_ring_node->flowid == flowid);
14441 	if (flow_ring_node->flowid != flowid) {
14442 		DHD_ERROR(("%s: flowid %d is different from the flowid "
14443 			"of the flow_ring_node %d\n", __FUNCTION__, flowid,
14444 			flow_ring_node->flowid));
14445 		return;
14446 	}
14447 
14448 	if (status != BCME_OK) {
14449 		DHD_ERROR(("%s Flow create Response failure error status = %d \n",
14450 		     __FUNCTION__, status));
14451 		/* Call Flow clean up */
14452 		dhd_bus_clean_flow_ring(bus, flow_ring_node);
14453 		return;
14454 	}
14455 
14456 	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
14457 	flow_ring_node->status = FLOW_RING_STATUS_OPEN;
14458 	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
14459 
	/* Now add the flow ring node to the active list.
	 * Note that the code which adds the newly created node to the active
	 * list used to live in dhd_flowid_lookup, where the contents of the
	 * node were only filled in by dhd_prot_flow_ring_create after the
	 * node had been added to the active list. If a D2H interrupt arrived
	 * after the node was added to the active list but before it was
	 * populated, the bottom half would call dhd_update_txflowrings, which
	 * walks the active flow ring list, picks up the nodes and operates on
	 * them. Since dhd_prot_flow_ring_create had not finished yet, the
	 * contents of flow_ring_node could still be NULL, leading to crashes.
	 * Hence the flow_ring_node should be added to the active list only
	 * after it is truly created, which is after receiving the create
	 * response message from the dongle.
	 */
14476 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
14477 	dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
14478 	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
14479 
14480 	dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */
14481 
14482 	return;
14483 }
14484 
14485 int
14486 dhd_bus_flow_ring_delete_request(dhd_bus_t *bus, void *arg)
14487 {
14488 	void * pkt;
14489 	flow_queue_t *queue;
14490 	flow_ring_node_t *flow_ring_node;
14491 	unsigned long flags;
14492 
14493 	DHD_PCIE_INFO(("%s :Flow Delete\n", __FUNCTION__));
14494 
14495 	flow_ring_node = (flow_ring_node_t *)arg;
14496 
14497 #ifdef DHDTCPACK_SUPPRESS
	/* Clean tcp_ack_info_tbl in order to prevent access to a flushed pkt
	 * when a new packet comes in from the network stack.
	 */
14501 	dhd_tcpack_info_tbl_clean(bus->dhd);
14502 #endif /* DHDTCPACK_SUPPRESS */
14503 	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
14504 	if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) {
14505 		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
14506 		DHD_ERROR(("%s :Delete Pending flowid %u\n", __FUNCTION__, flow_ring_node->flowid));
14507 		return BCME_ERROR;
14508 	}
14509 	flow_ring_node->status = FLOW_RING_STATUS_DELETE_PENDING;
14510 
14511 	queue = &flow_ring_node->queue; /* queue associated with flow ring */
14512 
14513 	/* Flush all pending packets in the queue, if any */
14514 	while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
14515 		PKTFREE(bus->dhd->osh, pkt, TRUE);
14516 	}
14517 	ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
14518 
14519 	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
14520 
14521 	/* Send Msg to device about flow ring deletion */
14522 	dhd_prot_flow_ring_delete(bus->dhd, flow_ring_node);
14523 
14524 	return BCME_OK;
14525 }
14526 
14527 void
14528 dhd_bus_flow_ring_delete_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
14529 {
14530 	flow_ring_node_t *flow_ring_node;
14531 
14532 	DHD_PCIE_INFO(("%s :Flow Delete Response %d \n", __FUNCTION__, flowid));
14533 
14534 	/* Boundary check of the flowid */
14535 	if (flowid > bus->dhd->max_tx_flowid) {
14536 		DHD_ERROR(("%s: flowid is invalid %d, max id %d\n", __FUNCTION__,
14537 			flowid, bus->dhd->max_tx_flowid));
14538 		return;
14539 	}
14540 
14541 	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
14542 	if (!flow_ring_node) {
14543 		DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
14544 		return;
14545 	}
14546 
14547 	ASSERT(flow_ring_node->flowid == flowid);
14548 	if (flow_ring_node->flowid != flowid) {
14549 		DHD_ERROR(("%s: flowid %d is different from the flowid "
14550 			"of the flow_ring_node %d\n", __FUNCTION__, flowid,
14551 			flow_ring_node->flowid));
14552 		return;
14553 	}
14554 
14555 	if (status != BCME_OK) {
14556 		DHD_ERROR(("%s Flow Delete Response failure error status = %d \n",
14557 		    __FUNCTION__, status));
14558 		return;
14559 	}
14560 	/* Call Flow clean up */
14561 	dhd_bus_clean_flow_ring(bus, flow_ring_node);
14562 
14563 	return;
14564 
14565 }
14566 
14567 int dhd_bus_flow_ring_flush_request(dhd_bus_t *bus, void *arg)
14568 {
14569 	void *pkt;
14570 	flow_queue_t *queue;
14571 	flow_ring_node_t *flow_ring_node;
14572 	unsigned long flags;
14573 
14574 	DHD_PCIE_INFO(("%s :Flow Flush\n", __FUNCTION__));
14575 
14576 	flow_ring_node = (flow_ring_node_t *)arg;
14577 
14578 	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
14579 	queue = &flow_ring_node->queue; /* queue associated with flow ring */
14580 	/* Flow ring status will be set back to FLOW_RING_STATUS_OPEN
14581 	 * once flow ring flush response is received for this flowring node.
14582 	 */
14583 	flow_ring_node->status = FLOW_RING_STATUS_FLUSH_PENDING;
14584 
14585 #ifdef DHDTCPACK_SUPPRESS
	/* Clean tcp_ack_info_tbl in order to prevent access to a flushed pkt
	 * when a new packet comes in from the network stack.
	 */
14589 	dhd_tcpack_info_tbl_clean(bus->dhd);
14590 #endif /* DHDTCPACK_SUPPRESS */
14591 
14592 	/* Flush all pending packets in the queue, if any */
14593 	while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
14594 		PKTFREE(bus->dhd->osh, pkt, TRUE);
14595 	}
14596 	ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
14597 
14598 	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
14599 
14600 	/* Send Msg to device about flow ring flush */
14601 	dhd_prot_flow_ring_flush(bus->dhd, flow_ring_node);
14602 
14603 	return BCME_OK;
14604 }
14605 
14606 void
14607 dhd_bus_flow_ring_flush_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
14608 {
14609 	flow_ring_node_t *flow_ring_node;
14610 
14611 	if (status != BCME_OK) {
		DHD_ERROR(("%s Flow Flush Response failure, error status = %d\n",
		    __FUNCTION__, status));
14614 		return;
14615 	}
14616 
14617 	/* Boundary check of the flowid */
14618 	if (flowid > bus->dhd->max_tx_flowid) {
14619 		DHD_ERROR(("%s: flowid is invalid %d, max id %d\n", __FUNCTION__,
14620 			flowid, bus->dhd->max_tx_flowid));
14621 		return;
14622 	}
14623 
14624 	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
14625 	if (!flow_ring_node) {
14626 		DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
14627 		return;
14628 	}
14629 
14630 	ASSERT(flow_ring_node->flowid == flowid);
14631 	if (flow_ring_node->flowid != flowid) {
14632 		DHD_ERROR(("%s: flowid %d is different from the flowid "
14633 			"of the flow_ring_node %d\n", __FUNCTION__, flowid,
14634 			flow_ring_node->flowid));
14635 		return;
14636 	}
14637 
14638 	flow_ring_node->status = FLOW_RING_STATUS_OPEN;
14639 	return;
14640 }
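
/*
 * Flow ring delete/flush handshake, summarized (a descriptive sketch of the
 * two request/response pairs above, not an authoritative state diagram):
 *
 *   OPEN --(dhd_bus_flow_ring_delete_request)--> DELETE_PENDING
 *        --(delete response, status == BCME_OK)--> freed via
 *          dhd_bus_clean_flow_ring()
 *
 *   OPEN --(dhd_bus_flow_ring_flush_request)--> FLUSH_PENDING
 *        --(flush response, status == BCME_OK)--> OPEN
 *
 * In both paths the host drains the backing queue with
 * dhd_flow_queue_dequeue()/PKTFREE() under DHD_FLOWRING_LOCK before posting
 * the request message to the dongle.
 */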
14641 
14642 uint32
14643 dhd_bus_max_h2d_queues(struct dhd_bus *bus)
14644 {
14645 	return bus->max_submission_rings;
14646 }
14647 
14648 /* To be symmetric with SDIO */
14649 void
14650 dhd_bus_pktq_flush(dhd_pub_t *dhdp)
14651 {
14652 	return;
14653 }
14654 
14655 void
14656 dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val)
14657 {
14658 	dhdp->bus->is_linkdown = val;
14659 }
14660 
14661 int
14662 dhd_bus_get_linkdown(dhd_pub_t *dhdp)
14663 {
14664 	return dhdp->bus->is_linkdown;
14665 }
14666 
14667 int
14668 dhd_bus_get_cto(dhd_pub_t *dhdp)
14669 {
14670 	return dhdp->bus->cto_triggered;
14671 }
14672 
14673 #ifdef IDLE_TX_FLOW_MGMT
14674 /* resume request */
14675 int
14676 dhd_bus_flow_ring_resume_request(dhd_bus_t *bus, void *arg)
14677 {
14678 	flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg;
14679 
14680 	DHD_ERROR(("%s :Flow Resume Request flow id %u\n", __FUNCTION__, flow_ring_node->flowid));
14681 
14682 	flow_ring_node->status = FLOW_RING_STATUS_RESUME_PENDING;
14683 
14684 	/* Send Msg to device about flow ring resume */
14685 	dhd_prot_flow_ring_resume(bus->dhd, flow_ring_node);
14686 
14687 	return BCME_OK;
14688 }
14689 
14690 /* add the node back to active flowring */
14691 void
14692 dhd_bus_flow_ring_resume_response(dhd_bus_t *bus, uint16 flowid, int32 status)
14693 {
14694 
14695 	flow_ring_node_t *flow_ring_node;
14696 
14697 	DHD_TRACE(("%s :flowid %d \n", __FUNCTION__, flowid));
14698 
14699 	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
14700 	ASSERT(flow_ring_node->flowid == flowid);
14701 
14702 	if (status != BCME_OK) {
14703 		DHD_ERROR(("%s Error Status = %d \n",
14704 			__FUNCTION__, status));
14705 		return;
14706 	}
14707 
14708 	DHD_TRACE(("%s :Number of pkts queued in FlowId:%d is -> %u!!\n",
14709 		__FUNCTION__, flow_ring_node->flowid,  flow_ring_node->queue.len));
14710 
14711 	flow_ring_node->status = FLOW_RING_STATUS_OPEN;
14712 
14713 	dhd_bus_schedule_queue(bus, flowid, FALSE);
14714 	return;
14715 }
14716 
14717 /* scan the flow rings in active list for idle time out */
14718 void
14719 dhd_bus_check_idle_scan(dhd_bus_t *bus)
14720 {
14721 	uint64 time_stamp; /* in millisec */
14722 	uint64 diff;
14723 
14724 	time_stamp = OSL_SYSUPTIME();
14725 	diff = time_stamp - bus->active_list_last_process_ts;
14726 
14727 	if (diff > IDLE_FLOW_LIST_TIMEOUT) {
14728 		dhd_bus_idle_scan(bus);
14729 		bus->active_list_last_process_ts = OSL_SYSUPTIME();
14730 	}
14731 
14732 	return;
14733 }
14734 
14735 /* scan the nodes in active list till it finds a non idle node */
14736 void
14737 dhd_bus_idle_scan(dhd_bus_t *bus)
14738 {
14739 	dll_t *item, *prev;
14740 	flow_ring_node_t *flow_ring_node;
14741 	uint64 time_stamp, diff;
14742 	unsigned long flags;
14743 	uint16 ringid[MAX_SUSPEND_REQ];
14744 	uint16 count = 0;
14745 
14746 	time_stamp = OSL_SYSUPTIME();
14747 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
14748 
14749 	for (item = dll_tail_p(&bus->flowring_active_list);
14750 	         !dll_end(&bus->flowring_active_list, item); item = prev) {
14751 		prev = dll_prev_p(item);
14752 
14753 		flow_ring_node = dhd_constlist_to_flowring(item);
14754 
14755 		if (flow_ring_node->flowid == (bus->max_submission_rings - 1))
14756 			continue;
14757 
14758 		if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) {
14759 			/* Takes care of deleting zombie rings */
14760 			/* delete from the active list */
14761 			DHD_INFO(("deleting flow id %u from active list\n",
14762 				flow_ring_node->flowid));
14763 			__dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
14764 			continue;
14765 		}
14766 
14767 		diff = time_stamp - flow_ring_node->last_active_ts;
14768 
14769 		if ((diff > IDLE_FLOW_RING_TIMEOUT) && !(flow_ring_node->queue.len))  {
14770 			DHD_ERROR(("\nSuspending flowid %d\n", flow_ring_node->flowid));
14771 			/* delete from the active list */
14772 			__dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
14773 			flow_ring_node->status = FLOW_RING_STATUS_SUSPENDED;
14774 			ringid[count] = flow_ring_node->flowid;
14775 			count++;
14776 			if (count == MAX_SUSPEND_REQ) {
14777 				/* create a batch message now!! */
14778 				dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count);
14779 				count = 0;
14780 			}
14781 
14782 		} else {
14783 
14784 			/* No more scanning, break from here! */
14785 			break;
14786 		}
14787 	}
14788 
14789 	if (count) {
14790 		dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count);
14791 	}
14792 
14793 	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
14794 
14795 	return;
14796 }
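
/*
 * Batching example for the scan above (a sketch assuming, hypothetically,
 * MAX_SUSPEND_REQ == 4): five idle rings found in one pass produce two
 * suspend messages, one carrying four flowids from inside the loop and one
 * carrying the remaining flowid from the trailing "if (count)" flush. The
 * walk starts at the list tail because rings are prepended on activity, so
 * the least recently used rings are met first and the scan can stop at the
 * first non-idle node.
 */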
14797 
14798 void dhd_flow_ring_move_to_active_list_head(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
14799 {
14800 	unsigned long flags;
14801 	dll_t* list;
14802 
14803 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
14804 	/* check if the node is already at head, otherwise delete it and prepend */
14805 	list = dll_head_p(&bus->flowring_active_list);
14806 	if (&flow_ring_node->list != list) {
14807 		dll_delete(&flow_ring_node->list);
14808 		dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
14809 	}
14810 
14811 	/* update flow ring timestamp */
14812 	flow_ring_node->last_active_ts = OSL_SYSUPTIME();
14813 
14814 	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
14815 
14816 	return;
14817 }
14818 
14819 void dhd_flow_ring_add_to_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
14820 {
14821 	unsigned long flags;
14822 
14823 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
14824 
14825 	dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
14826 	/* update flow ring timestamp */
14827 	flow_ring_node->last_active_ts = OSL_SYSUPTIME();
14828 
14829 	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
14830 
14831 	return;
14832 }

void __dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
14834 {
14835 	dll_delete(&flow_ring_node->list);
14836 }
14837 
14838 void dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
14839 {
14840 	unsigned long flags;
14841 
14842 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
14843 
14844 	__dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
14845 
14846 	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
14847 
14848 	return;
14849 }
14850 #endif /* IDLE_TX_FLOW_MGMT */
14851 
14852 #if defined(LINUX) || defined(linux)
14853 int
14854 dhdpcie_bus_start_host_dev(struct dhd_bus *bus)
14855 {
14856 	return dhdpcie_start_host_dev(bus);
14857 }
14858 
14859 int
14860 dhdpcie_bus_stop_host_dev(struct dhd_bus *bus)
14861 {
14862 	return dhdpcie_stop_host_dev(bus);
14863 }
14864 
14865 int
14866 dhdpcie_bus_disable_device(struct dhd_bus *bus)
14867 {
14868 	return dhdpcie_disable_device(bus);
14869 }
14870 
14871 int
14872 dhdpcie_bus_enable_device(struct dhd_bus *bus)
14873 {
14874 	return dhdpcie_enable_device(bus);
14875 }
14876 
14877 int
14878 dhdpcie_bus_alloc_resource(struct dhd_bus *bus)
14879 {
14880 	return dhdpcie_alloc_resource(bus);
14881 }
14882 
14883 void
14884 dhdpcie_bus_free_resource(struct dhd_bus *bus)
14885 {
14886 	dhdpcie_free_resource(bus);
14887 }
14888 
14889 int
14890 dhd_bus_request_irq(struct dhd_bus *bus)
14891 {
14892 	return dhdpcie_bus_request_irq(bus);
14893 }
14894 
14895 bool
14896 dhdpcie_bus_dongle_attach(struct dhd_bus *bus)
14897 {
14898 	return dhdpcie_dongle_attach(bus);
14899 }
14900 
14901 int
14902 dhd_bus_release_dongle(struct dhd_bus *bus)
14903 {
14904 	bool dongle_isolation;
14905 	osl_t *osh;
14906 
14907 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
14908 
14909 	if (bus) {
14910 		osh = bus->osh;
14911 		ASSERT(osh);
14912 
14913 		if (bus->dhd) {
14914 #if defined(DEBUGGER) || defined (DHD_DSCOPE)
14915 			debugger_close();
14916 #endif /* DEBUGGER || DHD_DSCOPE */
14917 
14918 			dongle_isolation = bus->dhd->dongle_isolation;
14919 			dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
14920 		}
14921 	}
14922 
14923 	return 0;
14924 }
14925 #endif /* LINUX || linux */
14926 
14927 int
14928 dhdpcie_cto_cfg_init(struct dhd_bus *bus, bool enable)
14929 {
14930 	if (enable) {
14931 		dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4,
14932 			PCI_CTO_INT_MASK | PCI_SBIM_MASK_SERR);
14933 	} else {
14934 		dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, 0);
14935 	}
14936 	return 0;
14937 }
14938 
14939 int
14940 dhdpcie_cto_init(struct dhd_bus *bus, bool enable)
14941 {
14942 	volatile void *regsva = (volatile void *)bus->regs;
14943 	uint32 val;
14944 	uint16 chipid = dhd_get_chipid(bus);
14945 	uint32 ctoctrl;
14946 
14947 	bus->cto_enable = enable;
14948 
14949 	dhdpcie_cto_cfg_init(bus, enable);
14950 
14951 	if (enable) {
14952 		if (bus->cto_threshold == 0) {
14953 			if ((chipid == BCM4387_CHIP_ID) ||
14954 			    (chipid == BCM4388_CHIP_ID) ||
14955 			    (chipid == BCM4389_CHIP_ID)) {
14956 				bus->cto_threshold = PCIE_CTO_TO_THRESH_DEFAULT_REV69;
14957 			} else {
14958 				bus->cto_threshold = PCIE_CTO_TO_THRESH_DEFAULT;
14959 			}
14960 		}
14961 		val = ((bus->cto_threshold << PCIE_CTO_TO_THRESHOLD_SHIFT) &
14962 			PCIE_CTO_TO_THRESHHOLD_MASK) |
14963 			((PCIE_CTO_CLKCHKCNT_VAL << PCIE_CTO_CLKCHKCNT_SHIFT) &
14964 			PCIE_CTO_CLKCHKCNT_MASK) |
14965 			PCIE_CTO_ENAB_MASK;
14966 
14967 		pcie_corereg(bus->osh, regsva, OFFSETOF(sbpcieregs_t, ctoctrl), ~0, val);
14968 	} else {
14969 		pcie_corereg(bus->osh, regsva, OFFSETOF(sbpcieregs_t, ctoctrl), ~0, 0);
14970 	}
14971 
14972 	ctoctrl = pcie_corereg(bus->osh, regsva, OFFSETOF(sbpcieregs_t, ctoctrl), 0, 0);
14973 
14974 	DHD_ERROR(("%s: ctoctrl(0x%x) enable/disable %d for chipid(0x%x)\n",
14975 		__FUNCTION__, ctoctrl, bus->cto_enable, chipid));
14976 
14977 	return 0;
14978 }
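
/*
 * Note on the pcie_corereg() accesses above (a descriptive sketch, assuming
 * the usual (mask, value) corereg convention): a mask of ~0 writes the full
 * 32-bit value into ctoctrl, while a mask of 0 performs a pure read, which
 * is how the final read-back for the log line is done. Disabling CTO is
 * therefore just a full write of 0, clearing PCIE_CTO_ENAB_MASK along with
 * the threshold and clock-check fields.
 */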
14979 
14980 static int
14981 dhdpcie_cto_error_recovery(struct dhd_bus *bus)
14982 {
14983 	uint32 pci_intmask, err_status;
14984 	uint8 i = 0;
14985 	uint32 val;
14986 
14987 	pci_intmask = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_MASK, 4);
14988 	dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, pci_intmask & ~PCI_CTO_INT_MASK);
14989 
14990 	DHD_OS_WAKE_LOCK(bus->dhd);
14991 
14992 	DHD_ERROR(("--- CTO Triggered --- %d\n", bus->pwr_req_ref));
14993 
14994 	/*
14995 	 * DAR still accessible
14996 	 */
14997 	dhd_bus_dump_dar_registers(bus);
14998 
14999 	/* reset backplane */
15000 	val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
15001 	dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val | SPROM_CFG_TO_SB_RST);
15002 
15003 	/* clear timeout error */
15004 	while (1) {
15005 		err_status =  si_corereg(bus->sih, bus->sih->buscoreidx,
15006 			DAR_ERRLOG(bus->sih->buscorerev),
15007 			0, 0);
15008 		if (err_status & PCIE_CTO_ERR_MASK) {
15009 			si_corereg(bus->sih, bus->sih->buscoreidx,
15010 					DAR_ERRLOG(bus->sih->buscorerev),
15011 					~0, PCIE_CTO_ERR_MASK);
15012 		} else {
15013 			break;
15014 		}
15015 		OSL_DELAY(CTO_TO_CLEAR_WAIT_MS * 1000);
15016 		i++;
15017 		if (i > CTO_TO_CLEAR_WAIT_MAX_CNT) {
15018 			DHD_ERROR(("cto recovery fail\n"));
15019 
15020 			DHD_OS_WAKE_UNLOCK(bus->dhd);
15021 			return BCME_ERROR;
15022 		}
15023 	}
15024 
15025 	/* clear interrupt status */
15026 	dhdpcie_bus_cfg_write_dword(bus, PCI_INT_STATUS, 4, PCI_CTO_INT_MASK);
15027 
15028 	/* Halt ARM & remove reset */
	/* TBD: we could add an ARM halt here if needed */
15030 
15031 	/* reset SPROM_CFG_TO_SB_RST */
15032 	val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
15033 
15034 	DHD_ERROR(("cto recovery reset 0x%x:SPROM_CFG_TO_SB_RST(0x%x) 0x%x\n",
15035 		PCI_SPROM_CONTROL, SPROM_CFG_TO_SB_RST, val));
15036 	dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val & ~SPROM_CFG_TO_SB_RST);
15037 
15038 	val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
15039 	DHD_ERROR(("cto recovery success 0x%x:SPROM_CFG_TO_SB_RST(0x%x) 0x%x\n",
15040 		PCI_SPROM_CONTROL, SPROM_CFG_TO_SB_RST, val));
15041 
15042 	DHD_OS_WAKE_UNLOCK(bus->dhd);
15043 
15044 	return BCME_OK;
15045 }
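
/*
 * CTO recovery sequence above, in brief (a descriptive summary of the code,
 * not a documented hardware procedure): mask the CTO interrupt, dump the
 * still-accessible DAR registers, assert the backplane reset via
 * SPROM_CFG_TO_SB_RST, poll DAR_ERRLOG until PCIE_CTO_ERR_MASK clears
 * (bounded by CTO_TO_CLEAR_WAIT_MAX_CNT iterations of CTO_TO_CLEAR_WAIT_MS),
 * ack the interrupt status, and finally deassert the backplane reset.
 */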
15046 
15047 void
15048 dhdpcie_ssreset_dis_enum_rst(struct dhd_bus *bus)
15049 {
15050 	uint32 val;
15051 
15052 	val = dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4);
15053 	dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4,
15054 		val | (0x1 << PCIE_SSRESET_DIS_ENUM_RST_BIT));
15055 }
15056 
15057 #if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING)
15058 /*
15059  * XXX: WAR: Update dongle that driver supports sending of d11
15060  * tx_status through unused status field of PCIe completion header
15061  * if dongle also supports the same WAR.
15062  */
15063 static int
15064 dhdpcie_init_d11status(struct dhd_bus *bus)
15065 {
15066 	uint32 addr;
15067 	uint32 flags2;
15068 	int ret = 0;
15069 
15070 	if (bus->pcie_sh->flags2 & PCIE_SHARED2_D2H_D11_TX_STATUS) {
15071 		flags2 = bus->pcie_sh->flags2;
15072 		addr = bus->shared_addr + OFFSETOF(pciedev_shared_t, flags2);
15073 		flags2 |= PCIE_SHARED2_H2D_D11_TX_STATUS;
15074 		ret = dhdpcie_bus_membytes(bus, TRUE, addr,
15075 			(uint8 *)&flags2, sizeof(flags2));
15076 		if (ret < 0) {
15077 			DHD_ERROR(("%s: update flag bit (H2D_D11_TX_STATUS) failed\n",
15078 				__FUNCTION__));
15079 			return ret;
15080 		}
15081 		bus->pcie_sh->flags2 = flags2;
15082 		bus->dhd->d11_tx_status = TRUE;
15083 	}
15084 	return ret;
15085 }
15086 
15087 #else
15088 static int
15089 dhdpcie_init_d11status(struct dhd_bus *bus)
15090 {
15091 	return 0;
15092 }
15093 #endif /* DBG_PKT_MON || DHD_PKT_LOGGING */
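
/*
 * The WAR negotiation above in brief: the dongle advertises support by
 * setting PCIE_SHARED2_D2H_D11_TX_STATUS in the shared flags2 word, and the
 * host opts in by OR-ing PCIE_SHARED2_H2D_D11_TX_STATUS into the same word
 * with dhdpcie_bus_membytes(). Only when both bits are set does tx_status
 * ride in the spare completion-header field (the dongle-side half of this
 * is an inference from the flag names; it is not visible in this file).
 */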
15094 
15095 int
15096 dhdpcie_get_max_eventbufpost(struct dhd_bus *bus)
15097 {
	int evt_buf_pool = EVENT_BUF_POOL_LOW;
	uint32 pool_max;

	/* The pool level is a 2-bit field at PCIE_SHARED_EVENT_BUF_POOL_MAX_POS
	 * (the 0x3 mask below reflects the width implied by the original checks).
	 * Extract the field before comparing: a plain bitwise AND against 0x1
	 * would also match the value 3 and make EVENT_BUF_POOL_HIGHEST
	 * unreachable.
	 */
	pool_max = (bus->pcie_sh->flags2 >> PCIE_SHARED_EVENT_BUF_POOL_MAX_POS) & 0x3;
	if (pool_max == 0x1) {
		evt_buf_pool = EVENT_BUF_POOL_MEDIUM;
	} else if (pool_max == 0x2) {
		evt_buf_pool = EVENT_BUF_POOL_HIGH;
	} else if (pool_max == 0x3) {
		evt_buf_pool = EVENT_BUF_POOL_HIGHEST;
	}
15106 	return evt_buf_pool;
15107 }
15108 
15109 int
15110 dhd_bus_oob_intr_register(dhd_pub_t *dhdp)
15111 {
15112 	int err = 0;
15113 #ifdef BCMPCIE_OOB_HOST_WAKE
15114 	err = dhdpcie_oob_intr_register(dhdp->bus);
15115 #endif /* BCMPCIE_OOB_HOST_WAKE */
15116 	return err;
15117 }
15118 
15119 void
15120 dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp)
15121 {
15122 #ifdef BCMPCIE_OOB_HOST_WAKE
15123 	dhdpcie_oob_intr_unregister(dhdp->bus);
15124 #endif /* BCMPCIE_OOB_HOST_WAKE */
15125 }
15126 
15127 void
15128 dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable)
15129 {
15130 #ifdef BCMPCIE_OOB_HOST_WAKE
15131 	dhdpcie_oob_intr_set(dhdp->bus, enable);
15132 #endif /* BCMPCIE_OOB_HOST_WAKE */
15133 }
15134 
15135 int
15136 dhd_bus_get_oob_irq_num(dhd_pub_t *dhdp)
15137 {
15138 	int irq_num = 0;
15139 #ifdef BCMPCIE_OOB_HOST_WAKE
15140 	irq_num = dhdpcie_get_oob_irq_num(dhdp->bus);
15141 #endif /* BCMPCIE_OOB_HOST_WAKE */
15142 	return irq_num;
15143 }
15144 
15145 #ifdef BCMDBG
15146 void
15147 dhd_bus_flow_ring_cnt_update(dhd_bus_t *bus, uint16 flowid, uint32 txstatus)
15148 {
15149 	flow_ring_node_t *flow_ring_node;
15150 #if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
15151 	dhd_awdl_stats_t *awdl_stats;
15152 	if_flow_lkup_t *if_flow_lkup;
15153 	unsigned long flags;
15154 	uint8 ifindex;
15155 	uint8 role;
15156 #endif /* DHD_AWDL && AWDL_SLOT_STATS */
15157 	/* If we have d2h sync enabled due to marker overloading, we cannot update this. */
15158 	if (bus->dhd->d2h_sync_mode)
15159 		return;
15160 	if (txstatus >= DHD_MAX_TX_STATUS_MSGS) {
15161 		/*
15162 		 * XXX: changed DHD_ERROR to DHD_INFO
15163 		 * There are flood of messages with P2P FW
15164 		 * It is being root-caused.
15165 		 */
		DHD_INFO(("%s Unknown txstatus = %d\n",
15167 		    __FUNCTION__, txstatus));
15168 		return;
15169 	}
15170 	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
15171 	ASSERT(flow_ring_node->flowid == flowid);
15172 	flow_ring_node->flow_info.tx_status[txstatus]++;
15173 #if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
15174 	if_flow_lkup = (if_flow_lkup_t *)bus->dhd->if_flow_lkup;
15175 	ifindex = flow_ring_node->flow_info.ifindex;
15176 	role = if_flow_lkup[ifindex].role;
15177 	if (role == WLC_E_IF_ROLE_AWDL) {
15178 		DHD_AWDL_STATS_LOCK(bus->dhd->awdl_stats_lock, flags);
15179 		awdl_stats = &bus->dhd->awdl_stats[bus->dhd->awdl_tx_status_slot];
15180 		awdl_stats->tx_status[txstatus]++;
15181 		DHD_AWDL_STATS_UNLOCK(bus->dhd->awdl_stats_lock, flags);
15182 	}
15183 #endif /* DHD_AWDL && AWDL_SLOT_STATS */
15184 	return;
15185 }
15186 #endif /* BCMDBG */
15187 
15188 bool
15189 dhdpcie_bus_get_pcie_hostready_supported(dhd_bus_t *bus)
15190 {
15191 	return bus->dhd->d2h_hostrdy_supported;
15192 }
15193 
15194 void
15195 dhd_pcie_dump_core_regs(dhd_pub_t * pub, uint32 index, uint32 first_addr, uint32 last_addr)
15196 {
15197 	dhd_bus_t *bus = pub->bus;
15198 	uint32	coreoffset = index << 12;
15199 	uint32	core_addr = SI_ENUM_BASE(bus->sih) + coreoffset;
15200 	uint32 value;
15201 
	while (first_addr <= last_addr) {
		core_addr = SI_ENUM_BASE(bus->sih) + coreoffset + first_addr;
		if (serialized_backplane_access(bus, core_addr, 4, &value, TRUE) != BCME_OK) {
			DHD_ERROR(("Invalid size/addr combination\n"));
		} else {
			/* only print the value when the read actually succeeded */
			DHD_ERROR(("[0x%08x]: 0x%08x\n", core_addr, value));
		}
		first_addr = first_addr + 4;
	}
15210 }
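
/*
 * Address arithmetic example for the dump above (illustrative; assumes the
 * 4 KB per-core register spacing implied by "index << 12"): with index == 2
 * and first_addr == 0x120, the first word read is at
 * SI_ENUM_BASE(sih) + 0x2000 + 0x120, and the loop then walks in 4-byte
 * steps up to and including last_addr.
 */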
15211 
15212 bool
15213 dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t *bus)
15214 {
15215 	if (!bus->dhd)
15216 		return FALSE;
15217 	else if (bus->idma_enabled) {
15218 		return bus->dhd->idma_enable;
15219 	} else {
15220 		return FALSE;
15221 	}
15222 }
15223 
15224 bool
15225 dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t *bus)
15226 {
15227 	if (!bus->dhd)
15228 		return FALSE;
15229 	else if (bus->ifrm_enabled) {
15230 		return bus->dhd->ifrm_enable;
15231 	} else {
15232 		return FALSE;
15233 	}
15234 }
15235 
15236 bool
15237 dhdpcie_bus_get_pcie_dar_supported(dhd_bus_t *bus)
15238 {
15239 	if (!bus->dhd) {
15240 		return FALSE;
15241 	} else if (bus->dar_enabled) {
15242 		return bus->dhd->dar_enable;
15243 	} else {
15244 		return FALSE;
15245 	}
15246 }
15247 
15248 #ifdef DHD_HP2P
15249 bool
15250 dhdpcie_bus_get_hp2p_supported(dhd_bus_t *bus)
15251 {
15252 	if (!bus->dhd) {
15253 		return FALSE;
15254 	} else if (bus->dhd->hp2p_enable) {
15255 		return bus->dhd->hp2p_capable;
15256 	} else {
15257 		return FALSE;
15258 	}
15259 }
15260 #endif /* DHD_HP2P */
15261 
15262 #ifdef PCIE_OOB
15263 bool
15264 dhdpcie_bus_get_pcie_oob_dw_supported(dhd_bus_t *bus)
15265 {
15266 	if (!bus->dhd)
15267 		return FALSE;
15268 	if (bus->oob_enabled) {
15269 		return !bus->dhd->d2h_no_oob_dw;
15270 	} else {
15271 		return FALSE;
15272 	}
15273 }
15274 #endif /* PCIE_OOB */
15275 
15276 void
15277 dhdpcie_bus_enab_pcie_dw(dhd_bus_t *bus, uint8 dw_option)
15278 {
15279 	DHD_ERROR(("ENABLING DW:%d\n", dw_option));
15280 	bus->dw_option = dw_option;
15281 }
15282 
15283 #ifdef PCIE_INB_DW
15284 bool
15285 dhdpcie_bus_get_pcie_inband_dw_supported(dhd_bus_t *bus)
15286 {
15287 	if (!bus->dhd)
15288 		return FALSE;
15289 	if (bus->inb_enabled) {
15290 		return bus->dhd->d2h_inband_dw;
15291 	} else {
15292 		return FALSE;
15293 	}
15294 }
15295 
15296 void
15297 dhdpcie_bus_set_pcie_inband_dw_state(dhd_bus_t *bus, enum dhd_bus_ds_state state)
15298 {
15299 	if (!INBAND_DW_ENAB(bus))
15300 		return;
15301 
15302 	DHD_PCIE_INFO(("%s:%d\n", __FUNCTION__, state));
15303 	bus->dhd->ds_state = state;
15304 	if (state == DW_DEVICE_DS_DISABLED_WAIT || state == DW_DEVICE_DS_D3_INFORM_WAIT) {
15305 		bus->ds_exit_timeout = 100;
15306 	}
15307 	if (state == DW_DEVICE_HOST_WAKE_WAIT) {
15308 		bus->host_sleep_exit_timeout = 100;
15309 	}
15310 	if (state == DW_DEVICE_DS_DEV_WAKE) {
15311 		bus->ds_exit_timeout = 0;
15312 	}
15313 	if (state == DW_DEVICE_DS_ACTIVE) {
15314 		bus->host_sleep_exit_timeout = 0;
15315 	}
15316 }
15317 
15318 enum dhd_bus_ds_state
15319 dhdpcie_bus_get_pcie_inband_dw_state(dhd_bus_t *bus)
15320 {
15321 	if (!INBAND_DW_ENAB(bus))
15322 		return DW_DEVICE_DS_INVALID;
15323 	return bus->dhd->ds_state;
15324 }
15325 #endif /* PCIE_INB_DW */
15326 
15327 #ifdef DHD_MMIO_TRACE
15328 static void
15329 dhd_bus_mmio_trace(dhd_bus_t *bus, uint32 addr, uint32 value, bool set)
15330 {
15331 	uint32 cnt = bus->mmio_trace_count % MAX_MMIO_TRACE_SIZE;
15332 	uint64 ts_cur = OSL_LOCALTIME_NS();
15333 	uint32 tmp_cnt;
15334 
15335 	tmp_cnt = (bus->mmio_trace_count) ? ((bus->mmio_trace_count - 1)
15336 		% MAX_MMIO_TRACE_SIZE) : cnt;
15337 
15338 	if (((DIV_U64_BY_U64(ts_cur, NSEC_PER_USEC) -
15339 		DIV_U64_BY_U64(bus->mmio_trace[tmp_cnt].timestamp, NSEC_PER_USEC))
15340 		> MIN_MMIO_TRACE_TIME) || (bus->mmio_trace[tmp_cnt].value !=
15341 		(value & DHD_RING_IDX))) {
15342 		bus->mmio_trace_count++;
15343 	} else {
15344 		cnt = tmp_cnt;
15345 	}
15346 	bus->mmio_trace[cnt].timestamp = ts_cur;
15347 	bus->mmio_trace[cnt].addr = addr;
15348 	bus->mmio_trace[cnt].set = set;
15349 	bus->mmio_trace[cnt].value = value;
15350 }
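
/*
 * Coalescing rule above, by example (a sketch; MIN_MMIO_TRACE_TIME is a
 * microsecond threshold and DHD_RING_IDX masks the ring-id bits of the
 * value): two doorbell writes to the same ring landing within
 * MIN_MMIO_TRACE_TIME of each other reuse the previous slot (cnt is rolled
 * back to tmp_cnt) instead of consuming a new one, so a burst of identical
 * writes occupies a single entry holding the most recent timestamp/value.
 */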
15351 
15352 void
15353 dhd_dump_bus_mmio_trace(dhd_bus_t *bus, struct bcmstrbuf *strbuf)
15354 {
15355 	int dumpsz;
15356 	int i;
15357 
15358 	dumpsz = bus->mmio_trace_count < MAX_MMIO_TRACE_SIZE ?
15359 		bus->mmio_trace_count : MAX_MMIO_TRACE_SIZE;
15360 	if (dumpsz == 0) {
15361 		bcm_bprintf(strbuf, "\nEmpty MMIO TRACE\n");
15362 		return;
15363 	}
15364 	bcm_bprintf(strbuf, "---- MMIO TRACE ------\n");
	bcm_bprintf(strbuf, "Decoding value field, Ex: 0xFF2C00E4, 0xFF->WR/0xDD->RD "
		"0x2C->Ringid 0x00E4->RD/WR Value\n");
15367 	bcm_bprintf(strbuf, "Timestamp ns\t\tAddr\t\tW/R\tValue\n");
15368 	for (i = 0; i < dumpsz; i ++) {
15369 		bcm_bprintf(strbuf, SEC_USEC_FMT"\t0x%08x\t%s\t0x%08x\n",
15370 			GET_SEC_USEC(bus->mmio_trace[i].timestamp),
15371 			bus->mmio_trace[i].addr,
15372 			bus->mmio_trace[i].set ? "W" : "R",
15373 			bus->mmio_trace[i].value);
15374 	}
15375 }
#endif /* DHD_MMIO_TRACE */
15377 
15378 static void
15379 #ifdef PCIE_INB_DW
15380 dhd_bus_ds_trace(dhd_bus_t *bus, uint32 dsval, bool d2h, enum dhd_bus_ds_state inbstate)
15381 #else
15382 dhd_bus_ds_trace(dhd_bus_t *bus, uint32 dsval, bool d2h)
15383 #endif /* PCIE_INB_DW */
15384 {
15385 	uint32 cnt = bus->ds_trace_count % MAX_DS_TRACE_SIZE;
15386 
15387 	bus->ds_trace[cnt].timestamp = OSL_LOCALTIME_NS();
15388 	bus->ds_trace[cnt].d2h = d2h;
15389 	bus->ds_trace[cnt].dsval = dsval;
15390 #ifdef PCIE_INB_DW
15391 	bus->ds_trace[cnt].inbstate = inbstate;
15392 #endif /* PCIE_INB_DW */
15393 	bus->ds_trace_count ++;
15394 }
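
/*
 * The trace above is an overwrite-oldest ring: ds_trace_count grows
 * monotonically and (count % MAX_DS_TRACE_SIZE) picks the slot, so after a
 * wrap the dump below shows at most MAX_DS_TRACE_SIZE of the most recent
 * transitions. (Indexing sketch only; after a wrap the dump walks slots in
 * array order, which is not strictly chronological.)
 */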
15395 
15396 #ifdef PCIE_INB_DW
15397 const char *
15398 dhd_convert_dsval(uint32 val, bool d2h)
15399 {
15400 	if (d2h) {
15401 		switch (val) {
15402 			case D2H_DEV_D3_ACK:
15403 				return "D2H_DEV_D3_ACK";
15404 			case D2H_DEV_DS_ENTER_REQ:
15405 				return "D2H_DEV_DS_ENTER_REQ";
15406 			case D2H_DEV_DS_EXIT_NOTE:
15407 				return "D2H_DEV_DS_EXIT_NOTE";
15408 			case D2H_DEV_FWHALT:
15409 				return "D2H_DEV_FWHALT";
15410 			case D2HMB_DS_HOST_SLEEP_EXIT_ACK:
15411 				return "D2HMB_DS_HOST_SLEEP_EXIT_ACK";
15412 			default:
15413 				return "INVALID";
15414 		}
15415 	} else {
15416 		switch (val) {
15417 			case H2DMB_DS_DEVICE_WAKE_DEASSERT:
15418 				return "H2DMB_DS_DEVICE_WAKE_DEASSERT";
15419 			case H2DMB_DS_DEVICE_WAKE_ASSERT:
15420 				return "H2DMB_DS_DEVICE_WAKE_ASSERT";
15421 			case H2D_HOST_D3_INFORM:
15422 				return "H2D_HOST_D3_INFORM";
15423 			case H2D_HOST_DS_ACK:
15424 				return "H2D_HOST_DS_ACK";
15425 			case H2D_HOST_DS_NAK:
15426 				return "H2D_HOST_DS_NAK";
15427 			case H2D_HOST_CONS_INT:
15428 				return "H2D_HOST_CONS_INT";
15429 			case H2D_FW_TRAP:
15430 				return "H2D_FW_TRAP";
15431 			default:
15432 				return "INVALID";
15433 		}
15434 	}
15435 }
15436 
15437 const char *
15438 dhd_convert_inb_state_names(enum dhd_bus_ds_state inbstate)
15439 {
	switch (inbstate) {
		case DW_DEVICE_DS_DEV_SLEEP:
			return "DW_DEVICE_DS_DEV_SLEEP";
		case DW_DEVICE_DS_DISABLED_WAIT:
			return "DW_DEVICE_DS_DISABLED_WAIT";
		case DW_DEVICE_DS_DEV_WAKE:
			return "DW_DEVICE_DS_DEV_WAKE";
		case DW_DEVICE_DS_ACTIVE:
			return "DW_DEVICE_DS_ACTIVE";
		case DW_DEVICE_HOST_SLEEP_WAIT:
			return "DW_DEVICE_HOST_SLEEP_WAIT";
		case DW_DEVICE_HOST_SLEEP:
			return "DW_DEVICE_HOST_SLEEP";
		case DW_DEVICE_HOST_WAKE_WAIT:
			return "DW_DEVICE_HOST_WAKE_WAIT";
		case DW_DEVICE_DS_D3_INFORM_WAIT:
			return "DW_DEVICE_DS_D3_INFORM_WAIT";
		default:
			return "INVALID";
	}
15468 }
15469 #endif /* PCIE_INB_DW */
15470 
15471 void
15472 dhd_dump_bus_ds_trace(dhd_bus_t *bus, struct bcmstrbuf *strbuf)
15473 {
15474 	int dumpsz;
15475 	int i;
15476 
15477 	dumpsz = bus->ds_trace_count < MAX_DS_TRACE_SIZE ?
15478 		bus->ds_trace_count : MAX_DS_TRACE_SIZE;
15479 	if (dumpsz == 0) {
15480 		bcm_bprintf(strbuf, "\nEmpty DS TRACE\n");
15481 		return;
15482 	}
15483 	bcm_bprintf(strbuf, "---- DS TRACE ------\n");
15484 #ifdef PCIE_INB_DW
15485 	bcm_bprintf(strbuf, "%s\t\t%s\t%-30s\t\t%s\n",
		"Timestamp ns", "Dir", "Value", "Inband-State");
15487 	for (i = 0; i < dumpsz; i ++) {
15488 		bcm_bprintf(strbuf, "%llu\t%s\t%-30s\t\t%s\n",
15489 		bus->ds_trace[i].timestamp,
15490 		bus->ds_trace[i].d2h ? "D2H":"H2D",
15491 		dhd_convert_dsval(bus->ds_trace[i].dsval, bus->ds_trace[i].d2h),
15492 		dhd_convert_inb_state_names(bus->ds_trace[i].inbstate));
15493 	}
15494 #else
	bcm_bprintf(strbuf, "Timestamp ns\t\tDir\tValue\n");
15496 	for (i = 0; i < dumpsz; i ++) {
15497 		bcm_bprintf(strbuf, "%llu\t%s\t%d\n",
15498 		bus->ds_trace[i].timestamp,
15499 		bus->ds_trace[i].d2h ? "D2H":"H2D",
15500 		bus->ds_trace[i].dsval);
15501 	}
15502 #endif /* PCIE_INB_DW */
15503 	bcm_bprintf(strbuf, "--------------------------\n");
15504 }
15505 
15506 void
15507 dhd_bus_dump_trap_info(dhd_bus_t *bus, struct bcmstrbuf *strbuf)
15508 {
15509 	trap_t *tr = &bus->dhd->last_trap_info;
15510 	bcm_bprintf(strbuf,
15511 		"\nTRAP type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
15512 		" lp 0x%x, rpc 0x%x"
15513 		"\nTrap offset 0x%x, r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, "
15514 		"r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x, r8 0x%x, r9 0x%x, "
15515 		"r10 0x%x, r11 0x%x, r12 0x%x\n\n",
15516 		ltoh32(tr->type), ltoh32(tr->epc), ltoh32(tr->cpsr), ltoh32(tr->spsr),
15517 		ltoh32(tr->r13), ltoh32(tr->r14), ltoh32(tr->pc),
15518 		ltoh32(bus->pcie_sh->trap_addr),
15519 		ltoh32(tr->r0), ltoh32(tr->r1), ltoh32(tr->r2), ltoh32(tr->r3),
15520 		ltoh32(tr->r4), ltoh32(tr->r5), ltoh32(tr->r6), ltoh32(tr->r7),
15521 		ltoh32(tr->r8), ltoh32(tr->r9), ltoh32(tr->r10),
15522 		ltoh32(tr->r11), ltoh32(tr->r12));
15523 }
15524 
15525 int
15526 dhd_bus_readwrite_bp_addr(dhd_pub_t *dhdp, uint addr, uint size, uint* data, bool read)
15527 {
15528 	int bcmerror = 0;
15529 	struct dhd_bus *bus = dhdp->bus;
15530 
15531 	if (serialized_backplane_access(bus, addr, size, data, read) != BCME_OK) {
15532 			DHD_ERROR(("Invalid size/addr combination \n"));
15533 			bcmerror = BCME_ERROR;
15534 	}
15535 
15536 	return bcmerror;
15537 }
15538 
15539 int
15540 dhd_get_idletime(dhd_pub_t *dhd)
15541 {
15542 	return dhd->bus->idletime;
15543 }
15544 
15545 bool
15546 dhd_get_rpm_state(dhd_pub_t *dhd)
15547 {
15548 	return dhd->bus->rpm_enabled;
15549 }
15550 
15551 void
15552 dhd_set_rpm_state(dhd_pub_t *dhd, bool state)
15553 {
15554 	DHD_ERROR(("%s: %d\n", __FUNCTION__, state));
15555 	dhd->bus->rpm_enabled = state;
15556 }
15557 
15558 static INLINE void
15559 dhd_sbreg_op(dhd_pub_t *dhd, uint addr, uint *val, bool read)
15560 {
15561 	OSL_DELAY(1);
15562 	if (serialized_backplane_access(dhd->bus, addr, sizeof(uint), val, read) != BCME_OK) {
15563 		DHD_ERROR(("sbreg: Invalid uint addr: 0x%x \n", addr));
15564 	} else {
15565 		DHD_ERROR(("sbreg: addr:0x%x val:0x%x read:%d\n", addr, *val, read));
15566 	}
15567 	return;
15568 }
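
/*
 * Usage sketch for dhd_sbreg_op() (hypothetical address value, mirroring the
 * read-modify-write pattern the SSSR helpers below rely on):
 *
 *   uint val = 0;
 *   dhd_sbreg_op(dhd, addr, &val, TRUE);    read the current value
 *   val |= SICF_CPUHALT;                    modify
 *   dhd_sbreg_op(dhd, addr, &val, FALSE);   write it back
 *
 * Every access goes through serialized_backplane_access() and is logged at
 * DHD_ERROR level, so this helper suits the low-rate SSSR/trap paths rather
 * than fast-path register I/O.
 */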
15569 
15570 #ifdef DHD_SSSR_DUMP
15571 static int
15572 dhdpcie_get_sssr_fifo_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size,
15573 	uint addr_reg, uint data_reg)
15574 {
15575 	uint addr;
15576 	uint val = 0;
15577 	int i;
15578 
15579 	DHD_ERROR(("%s\n", __FUNCTION__));
15580 
15581 	if (!buf) {
15582 		DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__));
15583 		return BCME_ERROR;
15584 	}
15585 
15586 	if (!fifo_size) {
15587 		DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__));
15588 		return BCME_ERROR;
15589 	}
15590 
15591 	/* Set the base address offset to 0 */
15592 	addr = addr_reg;
15593 	val = 0;
15594 	dhd_sbreg_op(dhd, addr, &val, FALSE);
15595 
15596 	addr = data_reg;
15597 	/* Read 4 bytes at once and loop for fifo_size / 4 */
15598 	for (i = 0; i < fifo_size / 4; i++) {
15599 		if (serialized_backplane_access(dhd->bus, addr,
15600 				sizeof(uint), &val, TRUE) != BCME_OK) {
15601 			DHD_ERROR(("%s: error in serialized_backplane_access\n", __FUNCTION__));
15602 			return BCME_ERROR;
15603 		}
15604 		buf[i] = val;
15605 		OSL_DELAY(1);
15606 	}
15607 	return BCME_OK;
15608 }
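
/*
 * Access pattern above, summarized (register semantics inferred from the
 * addr/data naming): addr_reg acts as an index register that is zeroed once,
 * after which fifo_size/4 back-to-back reads of data_reg stream the FIFO out
 * one 32-bit word at a time, with a 1 us settle delay between reads; the
 * hardware is assumed to auto-increment the index on each data read.
 */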
15609 
15610 static int
15611 dhdpcie_get_sssr_dig_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size,
15612 	uint addr_reg)
15613 {
15614 	uint addr;
15615 	uint val = 0;
15616 	int i;
15617 	si_t *sih = dhd->bus->sih;
15618 	bool vasip_enab, dig_mem_check;
15619 	uint32 ioctrl_addr = 0;
15620 
15621 	DHD_ERROR(("%s addr_reg=0x%x size=0x%x\n", __FUNCTION__, addr_reg, fifo_size));
15622 
15623 	if (!buf) {
15624 		DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__));
15625 		return BCME_ERROR;
15626 	}
15627 
15628 	if (!fifo_size) {
15629 		DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__));
15630 		return BCME_ERROR;
15631 	}
15632 
15633 	vasip_enab = FALSE;
15634 	dig_mem_check = FALSE;
	/* SSSR register information structures v0 and v1 share most fields except dig_mem */
15636 	switch (dhd->sssr_reg_info->rev2.version) {
15637 		case SSSR_REG_INFO_VER_3 :
15638 			/* intentional fall through */
15639 		case SSSR_REG_INFO_VER_2 :
15640 			if ((dhd->sssr_reg_info->rev2.length > OFFSETOF(sssr_reg_info_v2_t,
15641 			dig_mem_info)) && dhd->sssr_reg_info->rev2.dig_mem_info.dig_sr_size) {
15642 				dig_mem_check = TRUE;
15643 			}
15644 			break;
15645 		case SSSR_REG_INFO_VER_1 :
15646 			if (dhd->sssr_reg_info->rev1.vasip_regs.vasip_sr_size) {
15647 				vasip_enab = TRUE;
15648 			} else if ((dhd->sssr_reg_info->rev1.length > OFFSETOF(sssr_reg_info_v1_t,
15649 				dig_mem_info)) && dhd->sssr_reg_info->rev1.
15650 				dig_mem_info.dig_sr_size) {
15651 				dig_mem_check = TRUE;
15652 			}
15653 			ioctrl_addr = dhd->sssr_reg_info->rev1.vasip_regs.wrapper_regs.ioctrl;
15654 			break;
15655 		case SSSR_REG_INFO_VER_0 :
15656 			if (dhd->sssr_reg_info->rev0.vasip_regs.vasip_sr_size) {
15657 				vasip_enab = TRUE;
15658 			}
15659 			ioctrl_addr = dhd->sssr_reg_info->rev0.vasip_regs.wrapper_regs.ioctrl;
15660 			break;
15661 		default :
			DHD_ERROR(("invalid sssr_reg_ver\n"));
15663 			return BCME_UNSUPPORTED;
15664 	}
15665 	if (addr_reg) {
15666 		DHD_ERROR(("dig_mem_check=%d vasip_enab=%d\n", dig_mem_check, vasip_enab));
15667 		if (!vasip_enab && dig_mem_check) {
15668 			int err = dhdpcie_bus_membytes(dhd->bus, FALSE, addr_reg, (uint8 *)buf,
15669 				fifo_size);
15670 			if (err != BCME_OK) {
15671 				DHD_ERROR(("%s: Error reading dig dump from dongle !\n",
15672 					__FUNCTION__));
15673 			}
15674 		} else {
15675 			/* Check if vasip clk is disabled, if yes enable it */
15676 			addr = ioctrl_addr;
15677 			dhd_sbreg_op(dhd, addr, &val, TRUE);
15678 			if (!val) {
15679 				val = 1;
15680 				dhd_sbreg_op(dhd, addr, &val, FALSE);
15681 			}
15682 
15683 			addr = addr_reg;
15684 			/* Read 4 bytes at once and loop for fifo_size / 4 */
15685 			for (i = 0; i < fifo_size / 4; i++, addr += 4) {
15686 				if (serialized_backplane_access(dhd->bus, addr, sizeof(uint),
15687 					&val, TRUE) != BCME_OK) {
15688 					DHD_ERROR(("%s: Invalid uint addr: 0x%x \n", __FUNCTION__,
15689 						addr));
15690 					return BCME_ERROR;
15691 				}
15692 				buf[i] = val;
15693 				OSL_DELAY(1);
15694 			}
15695 		}
15696 	} else {
15697 		uint cur_coreid;
15698 		uint chipc_corerev;
15699 		chipcregs_t *chipcregs;
15700 
15701 		/* Save the current core */
15702 		cur_coreid = si_coreid(sih);
15703 
15704 		/* Switch to ChipC */
15705 		chipcregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
15706 		if (!chipcregs) {
15707 			DHD_ERROR(("%s: si_setcore returns NULL for core id %u \n",
15708 				__FUNCTION__, CC_CORE_ID));
15709 			return BCME_ERROR;
15710 		}
15711 
15712 		chipc_corerev = si_corerev(sih);
15713 
15714 		if ((chipc_corerev == 64) || (chipc_corerev == 65)) {
15715 			W_REG(si_osh(sih), &chipcregs->sr_memrw_addr, 0);
15716 
15717 			/* Read 4 bytes at once and loop for fifo_size / 4 */
15718 			for (i = 0; i < fifo_size / 4; i++) {
15719 				buf[i] = R_REG(si_osh(sih), &chipcregs->sr_memrw_data);
15720 				OSL_DELAY(1);
15721 			}
15722 		}
15723 
15724 		/* Switch back to the original core */
15725 		si_setcore(sih, cur_coreid, 0);
15726 	}
15727 
15728 	return BCME_OK;
15729 }
15730 
15731 #if defined(BCMPCIE) && defined(EWP_ETD_PRSRV_LOGS)
15732 void
15733 dhdpcie_get_etd_preserve_logs(dhd_pub_t *dhd,
15734 		uint8 *ext_trap_data, void *event_decode_data)
15735 {
15736 	hnd_ext_trap_hdr_t *hdr = NULL;
15737 	bcm_tlv_t *tlv;
15738 	eventlog_trapdata_info_t *etd_evtlog = NULL;
15739 	eventlog_trap_buf_info_t *evtlog_buf_arr = NULL;
15740 	uint arr_size = 0;
15741 	int i = 0;
15742 	int err = 0;
15743 	uint32 seqnum = 0;
15744 
15745 	if (!ext_trap_data || !event_decode_data || !dhd)
15746 		return;
15747 
15748 	if (!dhd->concise_dbg_buf)
15749 		return;
15750 
15751 	/* First word is original trap_data, skip */
15752 	ext_trap_data += sizeof(uint32);
15753 
15754 	hdr = (hnd_ext_trap_hdr_t *)ext_trap_data;
15755 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_LOG_DATA);
15756 	if (tlv) {
15757 		uint32 baseaddr = 0;
15758 		uint32 endaddr = dhd->bus->dongle_ram_base + dhd->bus->ramsize - 4;
15759 
15760 		etd_evtlog = (eventlog_trapdata_info_t *)tlv->data;
15761 		DHD_ERROR(("%s: etd_evtlog tlv found, num_elements=%x; "
15762 			"seq_num=%x; log_arr_addr=%x\n", __FUNCTION__,
15763 			(etd_evtlog->num_elements),
15764 			ntoh32(etd_evtlog->seq_num), (etd_evtlog->log_arr_addr)));
15765 		if (!etd_evtlog->num_elements ||
15766 				etd_evtlog->num_elements > MAX_EVENTLOG_BUFFERS) {
15767 			DHD_ERROR(("%s: ETD has bad 'num_elements' !\n", __FUNCTION__));
15768 			return;
15769 		}
15770 		if (!etd_evtlog->log_arr_addr) {
15771 			DHD_ERROR(("%s: ETD has bad 'log_arr_addr' !\n", __FUNCTION__));
15772 			return;
15773 		}
15774 
15775 		arr_size = (uint32)sizeof(*evtlog_buf_arr) * (etd_evtlog->num_elements);
15776 		evtlog_buf_arr = MALLOCZ(dhd->osh, arr_size);
15777 		if (!evtlog_buf_arr) {
15778 			DHD_ERROR(("%s: out of memory !\n",	__FUNCTION__));
15779 			return;
15780 		}
15781 
15782 		/* boundary check */
15783 		baseaddr = etd_evtlog->log_arr_addr;
15784 		if ((baseaddr < dhd->bus->dongle_ram_base) ||
15785 			((baseaddr + arr_size) > endaddr)) {
15786 			DHD_ERROR(("%s: Error reading invalid address\n",
15787 				__FUNCTION__));
15788 			goto err;
15789 		}
15790 
15791 		/* read the eventlog_trap_buf_info_t array from dongle memory */
15792 		err = dhdpcie_bus_membytes(dhd->bus, FALSE,
15793 				(ulong)(etd_evtlog->log_arr_addr),
15794 				(uint8 *)evtlog_buf_arr, arr_size);
15795 		if (err != BCME_OK) {
15796 			DHD_ERROR(("%s: Error reading event log array from dongle !\n",
15797 				__FUNCTION__));
15798 			goto err;
15799 		}
		/* ntoh is required only for seq_num: event logs from the info
		 * ring are sent by the dongle in network byte order, and ETD
		 * follows the same convention.
		 */
15804 		seqnum = ntoh32(etd_evtlog->seq_num);
15805 		memset(dhd->concise_dbg_buf, 0, CONCISE_DUMP_BUFLEN);
15806 		for (i = 0; i < (etd_evtlog->num_elements); ++i) {
15807 			/* boundary check */
15808 			baseaddr = evtlog_buf_arr[i].buf_addr;
15809 			if ((baseaddr < dhd->bus->dongle_ram_base) ||
15810 				((baseaddr + evtlog_buf_arr[i].len) > endaddr)) {
15811 				DHD_ERROR(("%s: Error reading invalid address\n",
15812 					__FUNCTION__));
15813 				goto err;
15814 			}
15815 			/* read each individual event log buf from dongle memory */
15816 			err = dhdpcie_bus_membytes(dhd->bus, FALSE,
15817 					((ulong)evtlog_buf_arr[i].buf_addr),
15818 					dhd->concise_dbg_buf, (evtlog_buf_arr[i].len));
15819 			if (err != BCME_OK) {
15820 				DHD_ERROR(("%s: Error reading event log buffer from dongle !\n",
15821 					__FUNCTION__));
15822 				goto err;
15823 			}
15824 			dhd_dbg_msgtrace_log_parser(dhd, dhd->concise_dbg_buf,
15825 				event_decode_data, (evtlog_buf_arr[i].len),
15826 				FALSE, hton32(seqnum));
15827 			++seqnum;
15828 		}
15829 err:
15830 		MFREE(dhd->osh, evtlog_buf_arr, arr_size);
15831 	} else {
15832 		DHD_ERROR(("%s: Error getting trap log data in ETD !\n", __FUNCTION__));
15833 	}
15834 }
#endif /* BCMPCIE && EWP_ETD_PRSRV_LOGS */
15836 
15837 static uint32
15838 dhdpcie_resume_chipcommon_powerctrl(dhd_pub_t *dhd, uint32 reg_val)
15839 {
15840 	uint addr;
15841 	uint val = 0;
15842 	uint powerctrl_mask;
15843 
15844 	DHD_ERROR(("%s\n", __FUNCTION__));
15845 
	/* SSSR register information structures v0 and v1 share most fields except dig_mem */
15847 	switch (dhd->sssr_reg_info->rev2.version) {
15848 		case SSSR_REG_INFO_VER_3 :
15849 			/* intentional fall through */
15850 		case SSSR_REG_INFO_VER_2 :
15851 			addr = dhd->sssr_reg_info->rev2.chipcommon_regs.base_regs.powerctrl;
15852 			powerctrl_mask = dhd->sssr_reg_info->rev2.
15853 				chipcommon_regs.base_regs.powerctrl_mask;
15854 			break;
15855 		case SSSR_REG_INFO_VER_1 :
15856 		case SSSR_REG_INFO_VER_0 :
15857 			addr = dhd->sssr_reg_info->rev1.chipcommon_regs.base_regs.powerctrl;
15858 			powerctrl_mask = dhd->sssr_reg_info->rev1.
15859 				chipcommon_regs.base_regs.powerctrl_mask;
15860 			break;
15861 		default :
			DHD_ERROR(("invalid sssr_reg_ver\n"));
15863 			return BCME_UNSUPPORTED;
15864 	}
15865 
	/* conditionally restore the saved PowerCtrl value (bits [11:8]) */
15867 	dhd_sbreg_op(dhd, addr, &val, TRUE);
15868 
15869 	if (!(val & powerctrl_mask)) {
15870 		dhd_sbreg_op(dhd, addr, &reg_val, FALSE);
15871 	}
15872 	return BCME_OK;
15873 }
15874 
15875 static uint32
15876 dhdpcie_suspend_chipcommon_powerctrl(dhd_pub_t *dhd)
15877 {
15878 	uint addr;
15879 	uint val = 0, reg_val = 0;
15880 	uint powerctrl_mask;
15881 
15882 	DHD_ERROR(("%s\n", __FUNCTION__));
15883 
	/* SSSR register information structures v0 and v1 share most fields except dig_mem */
15885 	switch (dhd->sssr_reg_info->rev2.version) {
15886 		case SSSR_REG_INFO_VER_3 :
15887 			/* intentional fall through */
15888 		case SSSR_REG_INFO_VER_2 :
15889 			addr = dhd->sssr_reg_info->rev2.chipcommon_regs.base_regs.powerctrl;
15890 			powerctrl_mask = dhd->sssr_reg_info->rev2.
15891 				chipcommon_regs.base_regs.powerctrl_mask;
15892 			break;
15893 		case SSSR_REG_INFO_VER_1 :
15894 		case SSSR_REG_INFO_VER_0 :
15895 			addr = dhd->sssr_reg_info->rev1.chipcommon_regs.base_regs.powerctrl;
15896 			powerctrl_mask = dhd->sssr_reg_info->rev1.
15897 				chipcommon_regs.base_regs.powerctrl_mask;
15898 			break;
15899 		default :
			DHD_ERROR(("invalid sssr_reg_ver\n"));
15901 			return BCME_UNSUPPORTED;
15902 	}
15903 
15904 	/* conditionally clear bits [11:8] of PowerCtrl */
15905 	dhd_sbreg_op(dhd, addr, &reg_val, TRUE);
15906 	if (reg_val & powerctrl_mask) {
15907 		val = 0;
15908 		dhd_sbreg_op(dhd, addr, &val, FALSE);
15909 	}
15910 	return reg_val;
15911 }
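
/*
 * The two helpers above form a save/restore pair around SSSR capture
 * (descriptive sketch): suspend reads PowerCtrl, writes 0 only if any
 * powerctrl_mask bits were set, and returns the original value; resume
 * writes that saved value back only if the mask bits are currently clear.
 * A PowerCtrl that was never touched is thus never rewritten, and callers
 * are expected to feed suspend's return value into resume unchanged.
 */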
15912 
15913 static int
15914 dhdpcie_clear_intmask_and_timer(dhd_pub_t *dhd)
15915 {
15916 	uint addr;
15917 	uint val;
15918 	uint32 cc_intmask, pmuintmask0, pmuintmask1, resreqtimer, macresreqtimer,
15919 	 macresreqtimer1, vasip_sr_size = 0;
15920 
15921 	DHD_ERROR(("%s\n", __FUNCTION__));
15922 
	/* SSSR register information structures v0 and v1 share most fields except dig_mem */
15924 	switch (dhd->sssr_reg_info->rev2.version) {
15925 		case SSSR_REG_INFO_VER_3 :
15926 			/* intentional fall through */
15927 		case SSSR_REG_INFO_VER_2 :
15928 			cc_intmask = dhd->sssr_reg_info->rev2.chipcommon_regs.base_regs.intmask;
15929 			pmuintmask0 = dhd->sssr_reg_info->rev2.pmu_regs.base_regs.pmuintmask0;
15930 			pmuintmask1 = dhd->sssr_reg_info->rev2.pmu_regs.base_regs.pmuintmask1;
15931 			resreqtimer = dhd->sssr_reg_info->rev2.pmu_regs.base_regs.resreqtimer;
15932 			macresreqtimer = dhd->sssr_reg_info->rev2.pmu_regs.base_regs.macresreqtimer;
15933 			macresreqtimer1 = dhd->sssr_reg_info->rev2.
15934 				pmu_regs.base_regs.macresreqtimer1;
15935 			break;
15936 		case SSSR_REG_INFO_VER_1 :
15937 		case SSSR_REG_INFO_VER_0 :
15938 			cc_intmask = dhd->sssr_reg_info->rev1.chipcommon_regs.base_regs.intmask;
15939 			pmuintmask0 = dhd->sssr_reg_info->rev1.pmu_regs.base_regs.pmuintmask0;
15940 			pmuintmask1 = dhd->sssr_reg_info->rev1.pmu_regs.base_regs.pmuintmask1;
15941 			resreqtimer = dhd->sssr_reg_info->rev1.pmu_regs.base_regs.resreqtimer;
15942 			macresreqtimer = dhd->sssr_reg_info->rev1.pmu_regs.base_regs.macresreqtimer;
15943 			macresreqtimer1 = dhd->sssr_reg_info->rev1.
15944 				pmu_regs.base_regs.macresreqtimer1;
15945 			vasip_sr_size = dhd->sssr_reg_info->rev1.vasip_regs.vasip_sr_size;
15946 			break;
15947 		default :
			DHD_ERROR(("invalid sssr_reg_ver\n"));
15949 			return BCME_UNSUPPORTED;
15950 	}
15951 
15952 	/* clear chipcommon intmask */
15953 	val = 0x0;
15954 	dhd_sbreg_op(dhd, cc_intmask, &val, FALSE);
15955 
15956 	/* clear PMUIntMask0 */
15957 	val = 0x0;
15958 	dhd_sbreg_op(dhd, pmuintmask0, &val, FALSE);
15959 
15960 	/* clear PMUIntMask1 */
15961 	val = 0x0;
15962 	dhd_sbreg_op(dhd, pmuintmask1, &val, FALSE);
15963 
15964 	/* clear res_req_timer */
15965 	val = 0x0;
15966 	dhd_sbreg_op(dhd, resreqtimer, &val, FALSE);
15967 
15968 	/* clear macresreqtimer */
15969 	val = 0x0;
15970 	dhd_sbreg_op(dhd, macresreqtimer, &val, FALSE);
15971 
15972 	/* clear macresreqtimer1 */
15973 	val = 0x0;
15974 	dhd_sbreg_op(dhd, macresreqtimer1, &val, FALSE);
15975 
15976 	/* clear VasipClkEn */
15977 	if (vasip_sr_size) {
15978 		addr = dhd->sssr_reg_info->rev1.vasip_regs.wrapper_regs.ioctrl;
15979 		val = 0x0;
15980 		dhd_sbreg_op(dhd, addr, &val, FALSE);
15981 	}
15982 
15983 	return BCME_OK;
15984 }
15985 
15986 static void
15987 dhdpcie_update_d11_status_from_trapdata(dhd_pub_t *dhd)
15988 {
15989 #define TRAP_DATA_MAIN_CORE_BIT_MASK	(1 << 1)
15990 #define TRAP_DATA_AUX_CORE_BIT_MASK	(1 << 4)
15991 	uint trap_data_mask[MAX_NUM_D11CORES] =
15992 		{TRAP_DATA_MAIN_CORE_BIT_MASK, TRAP_DATA_AUX_CORE_BIT_MASK};
15993 	int i;
15994 	/* Apply only for 4375 chip */
15995 	if (dhd_bus_chip_id(dhd) == BCM4375_CHIP_ID) {
15996 		for (i = 0; i < MAX_NUM_D11CORES; i++) {
15997 			if (dhd->sssr_d11_outofreset[i] &&
15998 				(dhd->dongle_trap_data & trap_data_mask[i])) {
15999 				dhd->sssr_d11_outofreset[i] = TRUE;
16000 			} else {
16001 				dhd->sssr_d11_outofreset[i] = FALSE;
16002 			}
16003 			DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d after AND with "
16004 				"trap_data:0x%x-0x%x\n",
16005 				__FUNCTION__, i, dhd->sssr_d11_outofreset[i],
16006 				dhd->dongle_trap_data, trap_data_mask[i]));
16007 		}
16008 	}
16009 }
16010 
16011 static int
16012 dhdpcie_d11_check_outofreset(dhd_pub_t *dhd)
16013 {
16014 	int i;
16015 	uint addr = 0;
16016 	uint val = 0;
16017 	uint8 num_d11cores;
16018 
16019 	DHD_ERROR(("%s\n", __FUNCTION__));
16020 
16021 	num_d11cores = dhd_d11_slices_num_get(dhd);
16022 
16023 	for (i = 0; i < num_d11cores; i++) {
16024 		/* Check if bit 0 of resetctrl is cleared */
		/* SSSR register information structures v0 and
		 * v1 share most fields except dig_mem
		 */
16028 		switch (dhd->sssr_reg_info->rev2.version) {
16029 			case SSSR_REG_INFO_VER_3 :
16030 				/* intentional fall through */
16031 			case SSSR_REG_INFO_VER_2 :
16032 				addr = dhd->sssr_reg_info->rev2.
16033 					mac_regs[i].wrapper_regs.resetctrl;
16034 				break;
16035 			case SSSR_REG_INFO_VER_1 :
16036 			case SSSR_REG_INFO_VER_0 :
16037 				addr = dhd->sssr_reg_info->rev1.
16038 					mac_regs[i].wrapper_regs.resetctrl;
16039 				break;
16040 			default :
				DHD_ERROR(("invalid sssr_reg_ver\n"));
16042 				return BCME_UNSUPPORTED;
16043 		}
16044 		if (!addr) {
16045 			DHD_ERROR(("%s: skipping for core[%d] as 'addr' is NULL\n",
16046 				__FUNCTION__, i));
16047 			continue;
16048 		}
16049 		dhd_sbreg_op(dhd, addr, &val, TRUE);
16050 		if (!(val & 1)) {
16051 			dhd->sssr_d11_outofreset[i] = TRUE;
16052 		} else {
16053 			dhd->sssr_d11_outofreset[i] = FALSE;
16054 		}
16055 		DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d\n",
16056 			__FUNCTION__, i, dhd->sssr_d11_outofreset[i]));
16057 	}
16058 	/* XXX Temporary WAR for 4375 to handle AXI errors on bad core
16059 	 * to not collect SSSR dump for the core whose bit is not set in trap_data.
16060 	 * It will be reverted once AXI errors are fixed
16061 	 */
16062 	dhdpcie_update_d11_status_from_trapdata(dhd);
16063 
16064 	return BCME_OK;
16065 }
16066 
16067 static int
16068 dhdpcie_d11_clear_clk_req(dhd_pub_t *dhd)
16069 {
16070 	int i;
16071 	uint val = 0;
16072 	uint8 num_d11cores;
16073 	uint32 clockrequeststatus, clockcontrolstatus, clockcontrolstatus_val;
16074 
16075 	DHD_ERROR(("%s\n", __FUNCTION__));
16076 
16077 	num_d11cores = dhd_d11_slices_num_get(dhd);
16078 
16079 	for (i = 0; i < num_d11cores; i++) {
16080 		if (dhd->sssr_d11_outofreset[i]) {
			/* clear the clock request only if itopoobb/extrsrcreq is non-zero */
			/* SSSR register information structures v0 and
			 * v1 share most fields except dig_mem
			 */
16085 			switch (dhd->sssr_reg_info->rev2.version) {
16086 				case SSSR_REG_INFO_VER_3 :
16087 					/* intentional fall through */
16088 				case SSSR_REG_INFO_VER_2 :
16089 					clockrequeststatus = dhd->sssr_reg_info->rev2.
16090 						mac_regs[i].wrapper_regs.extrsrcreq;
16091 					clockcontrolstatus = dhd->sssr_reg_info->rev2.
16092 						mac_regs[i].base_regs.clockcontrolstatus;
16093 					clockcontrolstatus_val = dhd->sssr_reg_info->rev2.
16094 						mac_regs[i].base_regs.clockcontrolstatus_val;
16095 					break;
16096 				case SSSR_REG_INFO_VER_1 :
16097 				case SSSR_REG_INFO_VER_0 :
16098 					clockrequeststatus = dhd->sssr_reg_info->rev1.
16099 						mac_regs[i].wrapper_regs.itopoobb;
16100 					clockcontrolstatus = dhd->sssr_reg_info->rev1.
16101 						mac_regs[i].base_regs.clockcontrolstatus;
16102 					clockcontrolstatus_val = dhd->sssr_reg_info->rev1.
16103 						mac_regs[i].base_regs.clockcontrolstatus_val;
16104 					break;
16105 				default :
					DHD_ERROR(("invalid sssr_reg_ver\n"));
16107 					return BCME_UNSUPPORTED;
16108 			}
16109 			dhd_sbreg_op(dhd, clockrequeststatus, &val, TRUE);
16110 			if (val != 0) {
16111 				/* clear clockcontrolstatus */
16112 				dhd_sbreg_op(dhd, clockcontrolstatus,
16113 				 &clockcontrolstatus_val, FALSE);
16114 			}
16115 		}
16116 	}
16117 	return BCME_OK;
16118 }
16119 
16120 static int
16121 dhdpcie_arm_clear_clk_req(dhd_pub_t *dhd)
16122 {
16123 	uint val = 0;
16124 	uint cfgval = 0;
16125 	uint32 resetctrl, clockrequeststatus, clockcontrolstatus, clockcontrolstatus_val;
16126 
16127 	DHD_ERROR(("%s\n", __FUNCTION__));
16128 
	/* SSSR register information structures v0 and v1 share most fields except dig_mem */
16130 	switch (dhd->sssr_reg_info->rev2.version) {
16131 		case SSSR_REG_INFO_VER_3 :
16132 			/* intentional fall through */
16133 		case SSSR_REG_INFO_VER_2 :
16134 			resetctrl = dhd->sssr_reg_info->rev2.
16135 				arm_regs.wrapper_regs.resetctrl;
16136 			clockrequeststatus = dhd->sssr_reg_info->rev2.
16137 				arm_regs.wrapper_regs.extrsrcreq;
16138 			clockcontrolstatus = dhd->sssr_reg_info->rev2.
16139 				arm_regs.base_regs.clockcontrolstatus;
16140 			clockcontrolstatus_val = dhd->sssr_reg_info->rev2.
16141 				arm_regs.base_regs.clockcontrolstatus_val;
16142 			break;
16143 		case SSSR_REG_INFO_VER_1 :
16144 		case SSSR_REG_INFO_VER_0 :
16145 			resetctrl = dhd->sssr_reg_info->rev1.
16146 				arm_regs.wrapper_regs.resetctrl;
16147 			clockrequeststatus = dhd->sssr_reg_info->rev1.
16148 				arm_regs.wrapper_regs.itopoobb;
16149 			clockcontrolstatus = dhd->sssr_reg_info->rev1.
16150 				arm_regs.base_regs.clockcontrolstatus;
16151 			clockcontrolstatus_val = dhd->sssr_reg_info->rev1.
16152 				arm_regs.base_regs.clockcontrolstatus_val;
16153 			break;
16154 		default :
			DHD_ERROR(("invalid sssr_reg_ver\n"));
16156 			return BCME_UNSUPPORTED;
16157 	}
16158 
16159 	/* Check if bit 0 of resetctrl is cleared */
16160 	dhd_sbreg_op(dhd, resetctrl, &val, TRUE);
16161 	if (!(val & 1)) {
		/* clear the clock request only if itopoobb/extrsrcreq is non-zero */
16163 		dhd_sbreg_op(dhd, clockrequeststatus, &val, TRUE);
16164 		if (val != 0) {
16165 			/* clear clockcontrolstatus */
16166 			dhd_sbreg_op(dhd, clockcontrolstatus, &clockcontrolstatus_val, FALSE);
16167 		}
16168 
16169 		if (MULTIBP_ENAB(dhd->bus->sih)) {
16170 			/* Clear coherent bits for CA7 because CPU is halted */
16171 			if (dhd->bus->coreid == ARMCA7_CORE_ID) {
16172 				cfgval = dhdpcie_bus_cfg_read_dword(dhd->bus,
16173 					PCIE_CFG_SUBSYSTEM_CONTROL, 4);
16174 				dhdpcie_bus_cfg_write_dword(dhd->bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4,
16175 					(cfgval & ~PCIE_BARCOHERENTACCEN_MASK));
16176 			}
16177 
16178 			/* Just halt ARM but do not reset the core */
16179 			resetctrl &= ~(SI_CORE_SIZE - 1);
16180 			resetctrl += OFFSETOF(aidmp_t, ioctrl);
16181 
16182 			dhd_sbreg_op(dhd, resetctrl, &val, TRUE);
16183 			val |= SICF_CPUHALT;
16184 			dhd_sbreg_op(dhd, resetctrl, &val, FALSE);
16185 		}
16186 	}
16187 
16188 	return BCME_OK;
16189 }
16190 
16191 static int
16192 dhdpcie_arm_resume_clk_req(dhd_pub_t *dhd)
16193 {
16194 	uint val = 0;
16195 	uint32 resetctrl;
16196 
16197 	DHD_ERROR(("%s\n", __FUNCTION__));
16198 
	/* SSSR register information structures v0 and v1 share most fields except dig_mem */
16200 	switch (dhd->sssr_reg_info->rev2.version) {
16201 		case SSSR_REG_INFO_VER_3 :
16202 			/* intentional fall through */
16203 		case SSSR_REG_INFO_VER_2 :
16204 			resetctrl = dhd->sssr_reg_info->rev2.
16205 				arm_regs.wrapper_regs.resetctrl;
16206 			break;
16207 		case SSSR_REG_INFO_VER_1 :
16208 		case SSSR_REG_INFO_VER_0 :
16209 			resetctrl = dhd->sssr_reg_info->rev1.
16210 				arm_regs.wrapper_regs.resetctrl;
16211 			break;
16212 		default :
			DHD_ERROR(("invalid sssr_reg_ver\n"));
16214 			return BCME_UNSUPPORTED;
16215 	}
16216 
16217 	/* Check if bit 0 of resetctrl is cleared */
16218 	dhd_sbreg_op(dhd, resetctrl, &val, TRUE);
16219 	if (!(val & 1)) {
16220 		if (MULTIBP_ENAB(dhd->bus->sih) && (dhd->bus->coreid != ARMCA7_CORE_ID)) {
16221 			/* Take ARM out of halt but do not reset core */
16222 			resetctrl &= ~(SI_CORE_SIZE - 1);
16223 			resetctrl += OFFSETOF(aidmp_t, ioctrl);
16224 
16225 			dhd_sbreg_op(dhd, resetctrl, &val, TRUE);
16226 			val &= ~SICF_CPUHALT;
16227 			dhd_sbreg_op(dhd, resetctrl, &val, FALSE);
16228 			dhd_sbreg_op(dhd, resetctrl, &val, TRUE);
16229 		}
16230 	}
16231 
16232 	return BCME_OK;
16233 }
16234 
16235 static int
16236 dhdpcie_pcie_clear_clk_req(dhd_pub_t *dhd)
16237 {
16238 	uint val = 0;
16239 	uint32 clockrequeststatus, clockcontrolstatus_addr, clockcontrolstatus_val;
16240 
16241 	DHD_ERROR(("%s\n", __FUNCTION__));
16242 
	/* SSSR register information structures v0 and v1 share most fields except dig_mem */
16244 	switch (dhd->sssr_reg_info->rev2.version) {
16245 		case SSSR_REG_INFO_VER_3 :
16246 			/* intentional fall through */
16247 		case SSSR_REG_INFO_VER_2 :
16248 			clockrequeststatus = dhd->sssr_reg_info->rev2.
16249 				pcie_regs.wrapper_regs.extrsrcreq;
16250 			clockcontrolstatus_addr = dhd->sssr_reg_info->rev2.
16251 				pcie_regs.base_regs.clockcontrolstatus;
16252 			clockcontrolstatus_val = dhd->sssr_reg_info->rev2.
16253 				pcie_regs.base_regs.clockcontrolstatus_val;
16254 			break;
16255 		case SSSR_REG_INFO_VER_1 :
16256 		case SSSR_REG_INFO_VER_0 :
16257 			clockrequeststatus = dhd->sssr_reg_info->rev1.
16258 				pcie_regs.wrapper_regs.itopoobb;
16259 			clockcontrolstatus_addr = dhd->sssr_reg_info->rev1.
16260 				pcie_regs.base_regs.clockcontrolstatus;
16261 			clockcontrolstatus_val = dhd->sssr_reg_info->rev1.
16262 				pcie_regs.base_regs.clockcontrolstatus_val;
16263 			break;
16264 		default :
			DHD_ERROR(("invalid sssr_reg_ver\n"));
16266 			return BCME_UNSUPPORTED;
16267 	}
16268 
	/* clear the clock request only if itopoobb/extrsrcreq is non-zero */
16270 	dhd_sbreg_op(dhd, clockrequeststatus, &val, TRUE);
16271 	if (val) {
16272 		/* clear clockcontrolstatus */
16273 		dhd_sbreg_op(dhd, clockcontrolstatus_addr, &clockcontrolstatus_val, FALSE);
16274 	}
16275 	return BCME_OK;
16276 }
16277 
16278 static int
16279 dhdpcie_pcie_send_ltrsleep(dhd_pub_t *dhd)
16280 {
16281 	uint addr;
16282 	uint val = 0;
16283 
16284 	DHD_ERROR(("%s\n", __FUNCTION__));
16285 
	/* SSSR register information structures v0 and v1 share most fields except dig_mem */
16287 	switch (dhd->sssr_reg_info->rev2.version) {
16288 		case SSSR_REG_INFO_VER_3 :
16289 			/* intentional fall through */
16290 		case SSSR_REG_INFO_VER_2 :
16291 			addr = dhd->sssr_reg_info->rev2.pcie_regs.base_regs.ltrstate;
16292 			break;
16293 		case SSSR_REG_INFO_VER_1 :
16294 		case SSSR_REG_INFO_VER_0 :
16295 			addr = dhd->sssr_reg_info->rev1.pcie_regs.base_regs.ltrstate;
16296 			break;
16297 		default :
			DHD_ERROR(("invalid sssr_reg_ver\n"));
16299 			return BCME_UNSUPPORTED;
16300 	}
16301 
16302 	val = LTR_ACTIVE;
16303 	dhd_sbreg_op(dhd, addr, &val, FALSE);
16304 
16305 	val = LTR_SLEEP;
16306 	dhd_sbreg_op(dhd, addr, &val, FALSE);
16307 
16308 	return BCME_OK;
16309 }
16310 
16311 static int
16312 dhdpcie_clear_clk_req(dhd_pub_t *dhd)
16313 {
16314 	DHD_ERROR(("%s\n", __FUNCTION__));
16315 
16316 	dhdpcie_arm_clear_clk_req(dhd);
16317 
16318 	dhdpcie_d11_clear_clk_req(dhd);
16319 
16320 	dhdpcie_pcie_clear_clk_req(dhd);
16321 
16322 	return BCME_OK;
16323 }
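
/*
 * Ordering note for the aggregate helper above (a sketch of the sequence as
 * written, not a documented hardware requirement): clock requests are
 * cleared for the ARM core first, then for each D11 core that is out of
 * reset, then for the PCIe core, each using the same probe-then-clear
 * pattern of reading extrsrcreq/itopoobb and writing clockcontrolstatus_val
 * only when a request is pending.
 */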
16324 
16325 static int
16326 dhdpcie_bring_d11_outofreset(dhd_pub_t *dhd)
16327 {
16328 	int i;
16329 	uint val = 0;
16330 	uint8 num_d11cores;
16331 	uint32 resetctrl_addr, ioctrl_addr, ioctrl_resetseq_val0, ioctrl_resetseq_val1,
16332 	 ioctrl_resetseq_val2, ioctrl_resetseq_val3, ioctrl_resetseq_val4;
16333 
16334 	DHD_ERROR(("%s\n", __FUNCTION__));
16335 
16336 	num_d11cores = dhd_d11_slices_num_get(dhd);
16337 
16338 	for (i = 0; i < num_d11cores; i++) {
16339 		if (dhd->sssr_d11_outofreset[i]) {
			/* SSSR register information structures v0 and v1 share
			 * most fields except dig_mem
			 */
16343 			switch (dhd->sssr_reg_info->rev2.version) {
16344 				case SSSR_REG_INFO_VER_3 :
16345 					/* intentional fall through */
16346 				case SSSR_REG_INFO_VER_2 :
16347 					resetctrl_addr = dhd->sssr_reg_info->rev2.mac_regs[i].
16348 						wrapper_regs.resetctrl;
16349 					ioctrl_addr = dhd->sssr_reg_info->rev2.mac_regs[i].
16350 						wrapper_regs.ioctrl;
16351 					ioctrl_resetseq_val0 = dhd->sssr_reg_info->rev2.
16352 						mac_regs[0].wrapper_regs.ioctrl_resetseq_val[0];
16353 					ioctrl_resetseq_val1 = dhd->sssr_reg_info->rev2.
16354 						mac_regs[0].wrapper_regs.ioctrl_resetseq_val[1];
16355 					ioctrl_resetseq_val2 = dhd->sssr_reg_info->rev2.
16356 						mac_regs[0].wrapper_regs.ioctrl_resetseq_val[2];
16357 					ioctrl_resetseq_val3 = dhd->sssr_reg_info->rev2.
16358 						mac_regs[0].wrapper_regs.ioctrl_resetseq_val[3];
16359 					ioctrl_resetseq_val4 = dhd->sssr_reg_info->rev2.
16360 						mac_regs[0].wrapper_regs.ioctrl_resetseq_val[4];
16361 					break;
16362 				case SSSR_REG_INFO_VER_1 :
16363 				case SSSR_REG_INFO_VER_0 :
16364 					resetctrl_addr = dhd->sssr_reg_info->rev1.mac_regs[i].
16365 						wrapper_regs.resetctrl;
16366 					ioctrl_addr = dhd->sssr_reg_info->rev1.mac_regs[i].
16367 						wrapper_regs.ioctrl;
16368 					ioctrl_resetseq_val0 = dhd->sssr_reg_info->rev1.
16369 						mac_regs[0].wrapper_regs.ioctrl_resetseq_val[0];
16370 					ioctrl_resetseq_val1 = dhd->sssr_reg_info->rev1.
16371 						mac_regs[0].wrapper_regs.ioctrl_resetseq_val[1];
16372 					ioctrl_resetseq_val2 = dhd->sssr_reg_info->rev1.
16373 						mac_regs[0].wrapper_regs.ioctrl_resetseq_val[2];
16374 					ioctrl_resetseq_val3 = dhd->sssr_reg_info->rev1.
16375 						mac_regs[0].wrapper_regs.ioctrl_resetseq_val[3];
16376 					ioctrl_resetseq_val4 = dhd->sssr_reg_info->rev1.
16377 						mac_regs[0].wrapper_regs.ioctrl_resetseq_val[4];
16378 					break;
16379 				default :
16380 					DHD_ERROR(("%s: invalid sssr_reg_ver\n", __FUNCTION__));
16381 					return BCME_UNSUPPORTED;
16382 			}
16383 			/* disable core by setting bit 0 */
16384 			val = 1;
16385 			dhd_sbreg_op(dhd, resetctrl_addr, &val, FALSE);
16386 			OSL_DELAY(6000);
16387 
16388 			dhd_sbreg_op(dhd, ioctrl_addr, &ioctrl_resetseq_val0, FALSE);
16389 
16390 			dhd_sbreg_op(dhd, ioctrl_addr, &ioctrl_resetseq_val1, FALSE);
16391 
16392 			/* enable core by clearing bit 0 */
16393 			val = 0;
16394 			dhd_sbreg_op(dhd, resetctrl_addr, &val, FALSE);
16395 
16396 			dhd_sbreg_op(dhd, ioctrl_addr, &ioctrl_resetseq_val2, FALSE);
16397 
16398 			dhd_sbreg_op(dhd, ioctrl_addr, &ioctrl_resetseq_val3, FALSE);
16399 
16400 			dhd_sbreg_op(dhd, ioctrl_addr, &ioctrl_resetseq_val4, FALSE);
16401 		}
16402 	}
16403 	return BCME_OK;
16404 }
16405 
16406 #ifdef DHD_SSSR_DUMP_BEFORE_SR
16407 static int
16408 dhdpcie_sssr_dump_get_before_sr(dhd_pub_t *dhd)
16409 {
16410 	int i;
16411 	uint32 sr_size, xmtaddress, xmtdata, dig_buf_size, dig_buf_addr;
16412 	uint8 num_d11cores;
16413 
16414 	DHD_ERROR(("%s\n", __FUNCTION__));
16415 
16416 	num_d11cores = dhd_d11_slices_num_get(dhd);
16417 
16418 	for (i = 0; i < num_d11cores; i++) {
16419 		if (dhd->sssr_d11_outofreset[i]) {
16420 			sr_size = dhd_sssr_mac_buf_size(dhd, i);
16421 			xmtaddress = dhd_sssr_mac_xmtaddress(dhd, i);
16422 			xmtdata = dhd_sssr_mac_xmtdata(dhd, i);
16423 			dhdpcie_get_sssr_fifo_dump(dhd, dhd->sssr_d11_before[i],
16424 				sr_size, xmtaddress, xmtdata);
16425 		}
16426 	}
16427 
16428 	dig_buf_size = dhd_sssr_dig_buf_size(dhd);
16429 	dig_buf_addr = dhd_sssr_dig_buf_addr(dhd);
16430 	if (dig_buf_size) {
16431 		dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_before,
16432 			dig_buf_size, dig_buf_addr);
16433 	}
16434 
16435 	return BCME_OK;
16436 }
16437 #endif /* DHD_SSSR_DUMP_BEFORE_SR */
16438 
16439 static int
16440 dhdpcie_sssr_dump_get_after_sr(dhd_pub_t *dhd)
16441 {
16442 	int i;
16443 	uint32 sr_size, xmtaddress, xmtdata, dig_buf_size, dig_buf_addr;
16444 	uint8 num_d11cores;
16445 
16446 	DHD_ERROR(("%s\n", __FUNCTION__));
16447 
16448 	num_d11cores = dhd_d11_slices_num_get(dhd);
16449 
16450 	for (i = 0; i < num_d11cores; i++) {
16451 		if (dhd->sssr_d11_outofreset[i]) {
16452 			sr_size = dhd_sssr_mac_buf_size(dhd, i);
16453 			xmtaddress = dhd_sssr_mac_xmtaddress(dhd, i);
16454 			xmtdata = dhd_sssr_mac_xmtdata(dhd, i);
16455 			dhdpcie_get_sssr_fifo_dump(dhd, dhd->sssr_d11_after[i],
16456 				sr_size, xmtaddress, xmtdata);
16457 		}
16458 	}
16459 
16460 	dig_buf_size = dhd_sssr_dig_buf_size(dhd);
16461 	dig_buf_addr = dhd_sssr_dig_buf_addr(dhd);
16462 
16463 	if (dig_buf_size) {
16464 		dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_after, dig_buf_size, dig_buf_addr);
16465 	}
16466 
16467 	return BCME_OK;
16468 }
16469 
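/* Top-level SSSR dump sequence:
 *  1. record which D11 cores are currently out of reset;
 *  2. optionally collect the pre-SR dump (DHD_SSSR_DUMP_BEFORE_SR);
 *  3. quiesce the dongle: clear interrupt masks and timers, clear backplane
 *     clock requests, drop the chipcommon powerctl setting and send LTR sleep;
 *  4. release the WL power domain so save/restore runs, wait, then
 *     re-acquire it and restore powerctl and the ARM clock request;
 *  5. bring the D11 cores back out of reset and collect the post-SR dump.
 */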
16470 int
16471 dhdpcie_sssr_dump(dhd_pub_t *dhd)
16472 {
16473 	uint32 powerctrl_val;
16474 
16475 	if (!dhd->sssr_inited) {
16476 		DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
16477 		return BCME_ERROR;
16478 	}
16479 
16480 	if (dhd->bus->is_linkdown) {
16481 		DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
16482 		return BCME_ERROR;
16483 	}
16484 
16485 	DHD_ERROR(("%s: Before WL down (powerctl: pcie:0x%x chipc:0x%x) "
16486 		"PMU rctl:0x%x res_state:0x%x\n", __FUNCTION__,
16487 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
16488 			OFFSETOF(chipcregs_t, powerctl), 0, 0),
16489 		si_corereg(dhd->bus->sih, 0, OFFSETOF(chipcregs_t, powerctl), 0, 0),
16490 		PMU_REG(dhd->bus->sih, retention_ctl, 0, 0),
16491 		PMU_REG(dhd->bus->sih, res_state, 0, 0)));
16492 
16493 	dhdpcie_d11_check_outofreset(dhd);
16494 
16495 #ifdef DHD_SSSR_DUMP_BEFORE_SR
16496 	DHD_ERROR(("%s: Collecting Dump before SR\n", __FUNCTION__));
16497 	if (dhdpcie_sssr_dump_get_before_sr(dhd) != BCME_OK) {
16498 		DHD_ERROR(("%s: dhdpcie_sssr_dump_get_before_sr failed\n", __FUNCTION__));
16499 		return BCME_ERROR;
16500 	}
16501 #endif /* DHD_SSSR_DUMP_BEFORE_SR */
16502 
16503 	dhdpcie_clear_intmask_and_timer(dhd);
16504 	dhdpcie_clear_clk_req(dhd);
16505 	powerctrl_val = dhdpcie_suspend_chipcommon_powerctrl(dhd);
16506 	dhdpcie_pcie_send_ltrsleep(dhd);
16507 
16508 	if (MULTIBP_ENAB(dhd->bus->sih)) {
16509 		dhd_bus_pcie_pwr_req_wl_domain(dhd->bus, OFFSETOF(chipcregs_t, powerctl), FALSE);
16510 	}
16511 
16512 	/* Wait for some time before Restore */
16513 	OSL_DELAY(6000);
16514 
16515 	DHD_ERROR(("%s: After WL down (powerctl: pcie:0x%x chipc:0x%x) "
16516 		"PMU rctl:0x%x res_state:0x%x\n", __FUNCTION__,
16517 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
16518 			OFFSETOF(chipcregs_t, powerctl), 0, 0),
16519 		si_corereg(dhd->bus->sih, 0, OFFSETOF(chipcregs_t, powerctl), 0, 0),
16520 		PMU_REG(dhd->bus->sih, retention_ctl, 0, 0),
16521 		PMU_REG(dhd->bus->sih, res_state, 0, 0)));
16522 
16523 	if (MULTIBP_ENAB(dhd->bus->sih)) {
16524 		dhd_bus_pcie_pwr_req_wl_domain(dhd->bus, OFFSETOF(chipcregs_t, powerctl), TRUE);
16525 		/* Add delay for WL domain to power up */
16526 		OSL_DELAY(15000);
16527 
16528 		DHD_ERROR(("%s: After WL up again (powerctl: pcie:0x%x chipc:0x%x) "
16529 			"PMU rctl:0x%x res_state:0x%x\n", __FUNCTION__,
16530 			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
16531 				OFFSETOF(chipcregs_t, powerctl), 0, 0),
16532 			si_corereg(dhd->bus->sih, 0, OFFSETOF(chipcregs_t, powerctl), 0, 0),
16533 			PMU_REG(dhd->bus->sih, retention_ctl, 0, 0),
16534 			PMU_REG(dhd->bus->sih, res_state, 0, 0)));
16535 	}
16536 
16537 	dhdpcie_resume_chipcommon_powerctrl(dhd, powerctrl_val);
16538 	dhdpcie_arm_resume_clk_req(dhd);
16539 	dhdpcie_bring_d11_outofreset(dhd);
16540 
16541 	DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__));
16542 	if (dhdpcie_sssr_dump_get_after_sr(dhd) != BCME_OK) {
16543 		DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__));
16544 		return BCME_ERROR;
16545 	}
16546 	dhd->sssr_dump_collected = TRUE;
16547 	dhd_write_sssr_dump(dhd, SSSR_DUMP_MODE_SSSR);
16548 
16549 	return BCME_OK;
16550 }
16551 
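/* FIS trigger sequence: force on all power switches through the PMU
 * fis_ctrl_status register, save the PCIe config space, then set the start
 * bit in the DAR FIS control register. Triggering FIS can bring the PCIe
 * link down, so fis_triggered suppresses the RC link-down callback while
 * the link is recovered (REG_ON toggle on Android built-in platforms) and
 * the config space is restored.
 */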
16552 #define PCIE_CFG_DSTATE_MASK	0x11u
16553 
16554 static int
16555 dhdpcie_fis_trigger(dhd_pub_t *dhd)
16556 {
16557 	uint32 fis_ctrl_status;
16558 	uint32 cfg_status_cmd;
16559 	uint32 cfg_pmcsr;
16560 
16561 	if (!dhd->sssr_inited) {
16562 		DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
16563 		return BCME_ERROR;
16564 	}
16565 
16566 	if (dhd->bus->is_linkdown) {
16567 		DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
16568 		return BCME_ERROR;
16569 	}
16570 
16571 #ifdef DHD_PCIE_RUNTIMEPM
16572 	/* Bring back to D0 */
16573 	dhdpcie_runtime_bus_wake(dhd, CAN_SLEEP(), __builtin_return_address(0));
16574 	/* Stop RPM timer so that even INB DW DEASSERT should not happen */
16575 	DHD_STOP_RPM_TIMER(dhd);
16576 #endif /* DHD_PCIE_RUNTIMEPM */
16577 
16578 	/* Set fis_triggered flag to ignore link down callback from RC */
16579 	dhd->fis_triggered = TRUE;
16580 
16581 	/* Set FIS PwrswForceOnAll */
16582 	PMU_REG(dhd->bus->sih, fis_ctrl_status, PMU_FIS_FORCEON_ALL_MASK, PMU_FIS_FORCEON_ALL_MASK);
16583 
16584 	fis_ctrl_status = PMU_REG(dhd->bus->sih, fis_ctrl_status, 0, 0);
16585 
16586 	DHD_ERROR(("%s: fis_ctrl_status=0x%x\n", __FUNCTION__, fis_ctrl_status));
16587 
16588 	cfg_status_cmd = dhd_pcie_config_read(dhd->bus, PCIECFGREG_STATUS_CMD, sizeof(uint32));
16589 	cfg_pmcsr = dhd_pcie_config_read(dhd->bus, PCIE_CFG_PMCSR, sizeof(uint32));
16590 	DHD_ERROR(("before save: Status Command(0x%x)=0x%x PCIE_CFG_PMCSR(0x%x)=0x%x\n",
16591 		PCIECFGREG_STATUS_CMD, cfg_status_cmd, PCIE_CFG_PMCSR, cfg_pmcsr));
16592 
16593 	DHD_PCIE_CONFIG_SAVE(dhd->bus);
16594 
16595 	/* Trigger FIS */
16596 	si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
16597 		DAR_FIS_CTRL(dhd->bus->sih->buscorerev), ~0, DAR_FIS_START_MASK);
16598 	OSL_DELAY(100 * 1000);
16599 
16600 #ifdef OEM_ANDROID
16601 	/*
16602 	 * For Android built-in platforms, a REG ON/OFF toggle is needed
16603 	 * to restore the PCIe link.
16604 	 * dhd_download_fw_on_driverload will be FALSE for built-in.
16605 	 */
16606 	if (!dhd_download_fw_on_driverload) {
16607 		DHD_ERROR(("%s: Toggle REG_ON and restore config space\n", __FUNCTION__));
16608 		dhdpcie_bus_stop_host_dev(dhd->bus);
16609 		dhd_wifi_platform_set_power(dhd, FALSE);
16610 		dhd_wifi_platform_set_power(dhd, TRUE);
16611 		dhdpcie_bus_start_host_dev(dhd->bus);
16612 		/* Restore inited pcie cfg from pci_load_saved_state */
16613 		dhdpcie_bus_enable_device(dhd->bus);
16614 	}
16615 #endif /* OEM_ANDROID */
16616 
16617 	cfg_status_cmd = dhd_pcie_config_read(dhd->bus, PCIECFGREG_STATUS_CMD, sizeof(uint32));
16618 	cfg_pmcsr = dhd_pcie_config_read(dhd->bus, PCIE_CFG_PMCSR, sizeof(uint32));
16619 	DHD_ERROR(("after regon-restore: Status Command(0x%x)=0x%x PCIE_CFG_PMCSR(0x%x)=0x%x\n",
16620 		PCIECFGREG_STATUS_CMD, cfg_status_cmd, PCIE_CFG_PMCSR, cfg_pmcsr));
16621 
16622 	/* To-Do: below is debug code, remove this if EP is in D0 after REG-ON restore */
16623 	DHD_PCIE_CONFIG_RESTORE(dhd->bus);
16624 
16625 	cfg_status_cmd = dhd_pcie_config_read(dhd->bus, PCIECFGREG_STATUS_CMD, sizeof(uint32));
16626 	cfg_pmcsr = dhd_pcie_config_read(dhd->bus, PCIE_CFG_PMCSR, sizeof(uint32));
16627 	DHD_ERROR(("after normal-restore: Status Command(0x%x)=0x%x PCIE_CFG_PMCSR(0x%x)=0x%x\n",
16628 		PCIECFGREG_STATUS_CMD, cfg_status_cmd, PCIE_CFG_PMCSR, cfg_pmcsr));
16629 
16630 	/*
16631 	 * To-Do: below is debug code, remove this if EP is in D0 after REG-ON restore
16632 	 * in both MSM and LSI RCs
16633 	 */
16634 	if ((cfg_pmcsr & PCIE_CFG_DSTATE_MASK) != 0) {
16635 		int ret = dhdpcie_set_master_and_d0_pwrstate(dhd->bus);
16636 		if (ret != BCME_OK) {
16637 			DHD_ERROR(("%s: Setting D0 failed, ABORT FIS collection\n", __FUNCTION__));
16638 			return ret;
16639 		}
16640 		cfg_status_cmd =
16641 			dhd_pcie_config_read(dhd->bus, PCIECFGREG_STATUS_CMD, sizeof(uint32));
16642 		cfg_pmcsr = dhd_pcie_config_read(dhd->bus, PCIE_CFG_PMCSR, sizeof(uint32));
16643 		DHD_ERROR(("after force-d0: Status Command(0x%x)=0x%x PCIE_CFG_PMCSR(0x%x)=0x%x\n",
16644 			PCIECFGREG_STATUS_CMD, cfg_status_cmd, PCIE_CFG_PMCSR, cfg_pmcsr));
16645 	}
16646 
16647 	/* Clear fis_triggered as REG OFF/ON recovered link */
16648 	dhd->fis_triggered = FALSE;
16649 
16650 	return BCME_OK;
16651 }
16652 
16653 int
16654 dhd_bus_fis_trigger(dhd_pub_t *dhd)
16655 {
16656 	return dhdpcie_fis_trigger(dhd);
16657 }
16658 
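/* Reset the HWA core using the eight-step sequence published in SSSR
 * register info v3: four writes through the wrapper ioctrl/resetctrl
 * registers to take the core through reset, followed by one write each to
 * the clkenable, clkgatingenable, clkext and clkctlstatus base registers.
 * The per-step values come from hwa_resetseq_val[].
 */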
16659 static int
16660 dhdpcie_reset_hwa(dhd_pub_t *dhd)
16661 {
16662 	int ret;
16663 	sssr_reg_info_cmn_t *sssr_reg_info_cmn = dhd->sssr_reg_info;
16664 	sssr_reg_info_v3_t *sssr_reg_info = (sssr_reg_info_v3_t *)&sssr_reg_info_cmn->rev3;
16665 
16666 	/* HWA wrapper registers */
16667 	uint32 ioctrl, resetctrl;
16668 	/* HWA base registers */
16669 	uint32 clkenable, clkgatingenable, clkext, clkctlstatus;
16670 	uint32 hwa_resetseq_val[SSSR_HWA_RESET_SEQ_STEPS];
16671 	int i = 0;
16672 
16673 	if (sssr_reg_info->version < SSSR_REG_INFO_VER_3) {
16674 		DHD_ERROR(("%s: not supported for version:%d\n",
16675 			__FUNCTION__, sssr_reg_info->version));
16676 		return BCME_UNSUPPORTED;
16677 	}
16678 
16679 	if (sssr_reg_info->hwa_regs.base_regs.clkenable == 0) {
16680 		DHD_ERROR(("%s: hwa regs are not set\n", __FUNCTION__));
16681 		return BCME_UNSUPPORTED;
16682 	}
16683 
16684 	DHD_ERROR(("%s: version:%d\n", __FUNCTION__, sssr_reg_info->version));
16685 
16686 	ioctrl = sssr_reg_info->hwa_regs.wrapper_regs.ioctrl;
16687 	resetctrl = sssr_reg_info->hwa_regs.wrapper_regs.resetctrl;
16688 
16689 	clkenable = sssr_reg_info->hwa_regs.base_regs.clkenable;
16690 	clkgatingenable = sssr_reg_info->hwa_regs.base_regs.clkgatingenable;
16691 	clkext = sssr_reg_info->hwa_regs.base_regs.clkext;
16692 	clkctlstatus = sssr_reg_info->hwa_regs.base_regs.clkctlstatus;
16693 
16694 	ret = memcpy_s(hwa_resetseq_val, sizeof(hwa_resetseq_val),
16695 		sssr_reg_info->hwa_regs.hwa_resetseq_val,
16696 		sizeof(sssr_reg_info->hwa_regs.hwa_resetseq_val));
16697 	if (ret) {
16698 		DHD_ERROR(("%s: hwa_resetseq_val memcpy_s failed: %d\n",
16699 			__FUNCTION__, ret));
16700 		return ret;
16701 	}
16702 
16703 	dhd_sbreg_op(dhd, ioctrl, &hwa_resetseq_val[i++], FALSE);
16704 	dhd_sbreg_op(dhd, resetctrl, &hwa_resetseq_val[i++], FALSE);
16705 	dhd_sbreg_op(dhd, resetctrl, &hwa_resetseq_val[i++], FALSE);
16706 	dhd_sbreg_op(dhd, ioctrl, &hwa_resetseq_val[i++], FALSE);
16707 
16708 	dhd_sbreg_op(dhd, clkenable, &hwa_resetseq_val[i++], FALSE);
16709 	dhd_sbreg_op(dhd, clkgatingenable, &hwa_resetseq_val[i++], FALSE);
16710 	dhd_sbreg_op(dhd, clkext, &hwa_resetseq_val[i++], FALSE);
16711 	dhd_sbreg_op(dhd, clkctlstatus, &hwa_resetseq_val[i++], FALSE);
16712 
16713 	return BCME_OK;
16714 }
16715 
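/* Collect an SSSR dump after FIS has fired: bring up all PMU resources
 * (min_res_mask is loaded from max_res_mask), mark every D11 core as
 * out-of-reset and re-initialize it, clear the FIS-done status, reset the
 * HWA core, then gather the post-SR dump and write it out in FIS mode.
 */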
16716 static int
16717 dhdpcie_fis_dump(dhd_pub_t *dhd)
16718 {
16719 	int i;
16720 	uint8 num_d11cores;
16721 
16722 	DHD_ERROR(("%s\n", __FUNCTION__));
16723 
16724 	if (!dhd->sssr_inited) {
16725 		DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
16726 		return BCME_ERROR;
16727 	}
16728 
16729 	if (dhd->bus->is_linkdown) {
16730 		DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
16731 		return BCME_ERROR;
16732 	}
16733 
16734 	/* bring up all pmu resources */
16735 	PMU_REG(dhd->bus->sih, min_res_mask, ~0,
16736 		PMU_REG(dhd->bus->sih, max_res_mask, 0, 0));
16737 	OSL_DELAY(10 * 1000);
16738 
16739 	num_d11cores = dhd_d11_slices_num_get(dhd);
16740 
16741 	for (i = 0; i < num_d11cores; i++) {
16742 		dhd->sssr_d11_outofreset[i] = TRUE;
16743 	}
16744 
16745 	dhdpcie_bring_d11_outofreset(dhd);
16746 	OSL_DELAY(6000);
16747 
16748 	/* clear FIS Done */
16749 	PMU_REG(dhd->bus->sih, fis_ctrl_status, PMU_CLEAR_FIS_DONE_MASK, PMU_CLEAR_FIS_DONE_MASK);
16750 
16751 	if (dhdpcie_reset_hwa(dhd) != BCME_OK) {
16752 		DHD_ERROR(("%s: dhdpcie_reset_hwa failed\n", __FUNCTION__));
16753 		return BCME_ERROR;
16754 	}
16755 
16756 	dhdpcie_d11_check_outofreset(dhd);
16757 
16758 	DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__));
16759 	if (dhdpcie_sssr_dump_get_after_sr(dhd) != BCME_OK) {
16760 		DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__));
16761 		return BCME_ERROR;
16762 	}
16763 	dhd->sssr_dump_collected = TRUE;
16764 	dhd_write_sssr_dump(dhd, SSSR_DUMP_MODE_FIS);
16765 
16766 	return BCME_OK;
16767 }
16768 
16769 int
16770 dhd_bus_fis_dump(dhd_pub_t *dhd)
16771 {
16772 	return dhdpcie_fis_dump(dhd);
16773 }
16774 #endif /* DHD_SSSR_DUMP */
16775 
16776 #ifdef DHD_SDTC_ETB_DUMP
16777 int
16778 dhd_bus_get_etb_info(dhd_pub_t *dhd, uint32 etbinfo_addr, etb_info_t *etb_info)
16779 {
16781 	int ret = 0;
16782 
16783 	if ((ret = dhdpcie_bus_membytes(dhd->bus, FALSE, etbinfo_addr,
16784 		(unsigned char *)etb_info, sizeof(*etb_info)))) {
16785 		DHD_ERROR(("%s: Read Error membytes %d\n", __FUNCTION__, ret));
16786 		return BCME_ERROR;
16787 	}
16788 
16789 	return BCME_OK;
16790 }
16791 
16792 int
16793 dhd_bus_get_sdtc_etb(dhd_pub_t *dhd, uint8 *sdtc_etb_mempool, uint addr, uint read_bytes)
16794 {
16795 	int ret = 0;
16796 
16797 	if ((ret = dhdpcie_bus_membytes(dhd->bus, FALSE, addr,
16798 		(unsigned char *)sdtc_etb_mempool, read_bytes))) {
16799 		DHD_ERROR(("%s: Read Error membytes %d\n", __FUNCTION__, ret));
16800 		return BCME_ERROR;
16801 	}
16802 	return BCME_OK;
16803 }
16804 #endif /* DHD_SDTC_ETB_DUMP */
16805 
16806 #ifdef BTLOG
16807 void
16808 BCMFASTPATH(dhd_bus_rx_bt_log)(struct dhd_bus *bus, void* pkt)
16809 {
16810 	dhd_rx_bt_log(bus->dhd, pkt);
16811 }
16812 #endif	/* BTLOG */
16813 
16814 #ifdef DHD_WAKE_STATUS
16815 wake_counts_t*
16816 dhd_bus_get_wakecount(dhd_pub_t *dhd)
16817 {
16818 	return &dhd->bus->wake_counts;
16819 }
16820 int
16821 dhd_bus_get_bus_wake(dhd_pub_t *dhd)
16822 {
16823 	return bcmpcie_set_get_wake(dhd->bus, 0);
16824 }
16825 #endif /* DHD_WAKE_STATUS */
16826 
16827 /* Writes random number(s) to the TCM. On initialization the firmware reads this
16828  * TCM location to fetch the random number and uses it to randomize its heap
16829  * address space layout. */
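/* A sketch of the resulting layout at the top of dongle RAM, as implied by
 * the address arithmetic below (sizes not to scale):
 *
 *   ramtop_addr -> +---------------------------+
 *                  | random bytes (count)      |
 *                  +---------------------------+
 *                  | bcm_rand_metadata_t       |  signature + count
 *                  +---------------------------+
 *                  | compressed NVRAM image    |
 *                  +---------------------------+ <- dongle_ram_base + ramsize
 *                                                   - BCM_NVRAM_OFFSET_TCM
 */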
16830 static int
16831 dhdpcie_wrt_rnd(struct dhd_bus *bus)
16832 {
16833 	bcm_rand_metadata_t rnd_data;
16834 	uint8 rand_buf[BCM_ENTROPY_HOST_NBYTES];
16835 	uint32 count = BCM_ENTROPY_HOST_NBYTES;
16836 	int ret = 0;
16837 	uint32 addr = bus->dongle_ram_base + (bus->ramsize - BCM_NVRAM_OFFSET_TCM) -
16838 		((bus->nvram_csm & 0xffff) * BCM_NVRAM_IMG_COMPRS_FACTOR + sizeof(rnd_data));
16839 
16840 	memset(rand_buf, 0, BCM_ENTROPY_HOST_NBYTES);
16841 	rnd_data.signature = htol32(BCM_NVRAM_RNG_SIGNATURE);
16842 	rnd_data.count = htol32(count);
16843 	/* write the metadata describing the random number */
16844 	dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&rnd_data, sizeof(rnd_data));
16845 	/* move the write address down by the number of random bytes */
16846 	addr -= count;
16847 
16848 	bus->ramtop_addr = addr;
16849 
16850 #ifdef DHD_RND_DEBUG
16851 	bus->dhd->rnd_buf = NULL;
16852 	/* get random contents from file */
16853 	ret = dhd_get_rnd_info(bus->dhd);
16854 	if (bus->dhd->rnd_buf) {
16855 		/* write file contents to TCM */
16856 		DHD_ERROR(("%s: use stored .rnd.in content\n", __FUNCTION__));
16857 		dhdpcie_bus_membytes(bus, TRUE, addr, bus->dhd->rnd_buf, bus->dhd->rnd_len);
16858 
16859 		/* Dump random content to out file */
16860 		dhd_dump_rnd_info(bus->dhd, bus->dhd->rnd_buf, bus->dhd->rnd_len);
16861 
16862 		/* bus->dhd->rnd_buf is allocated in dhd_get_rnd_info, free here */
16863 		MFREE(bus->dhd->osh, bus->dhd->rnd_buf, bus->dhd->rnd_len);
16864 		bus->dhd->rnd_buf = NULL;
16865 		return BCME_OK;
16866 	}
16867 #endif /* DHD_RND_DEBUG */
16868 
16869 	/* Now write the random number(s) */
16870 	ret = dhd_get_random_bytes(rand_buf, count);
16871 	if (ret != BCME_OK) {
16872 		return ret;
16873 	}
16874 	dhdpcie_bus_membytes(bus, TRUE, addr, rand_buf, count);
16875 
16876 #ifdef DHD_RND_DEBUG
16877 	/* Dump random content to out file */
16878 	dhd_dump_rnd_info(bus->dhd, rand_buf, count);
16879 #endif /* DHD_RND_DEBUG */
16880 
16881 	bus->next_tlv = addr;
16882 
16883 	return BCME_OK;
16884 }
16885 
16886 #ifdef D2H_MINIDUMP
16887 bool
16888 dhd_bus_is_minidump_enabled(dhd_pub_t *dhdp)
16889 {
16890 	return dhdp->bus->d2h_minidump;
16891 }
16892 #endif /* D2H_MINIDUMP */
16893 
16894 void
16895 dhd_pcie_intr_count_dump(dhd_pub_t *dhd)
16896 {
16897 	struct dhd_bus *bus = dhd->bus;
16898 	uint64 current_time;
16899 
16900 	DHD_ERROR(("\n ------- DUMPING INTR enable/disable counters  ------- \r\n"));
16901 	DHD_ERROR(("resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n",
16902 		bus->resume_intr_enable_count, bus->dpc_intr_enable_count));
16903 	DHD_ERROR(("isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n",
16904 		bus->isr_intr_disable_count, bus->suspend_intr_disable_count));
16905 #ifdef BCMPCIE_OOB_HOST_WAKE
16906 	DHD_ERROR(("oob_intr_count=%lu oob_intr_enable_count=%lu oob_intr_disable_count=%lu\n",
16907 		bus->oob_intr_count, bus->oob_intr_enable_count,
16908 		bus->oob_intr_disable_count));
16909 	DHD_ERROR(("oob_irq_num=%d last_oob_irq_times="SEC_USEC_FMT":"SEC_USEC_FMT"\n",
16910 		dhdpcie_get_oob_irq_num(bus),
16911 		GET_SEC_USEC(bus->last_oob_irq_isr_time),
16912 		GET_SEC_USEC(bus->last_oob_irq_thr_time)));
16913 	DHD_ERROR(("last_oob_irq_enable_time="SEC_USEC_FMT
16914 		" last_oob_irq_disable_time="SEC_USEC_FMT"\n",
16915 		GET_SEC_USEC(bus->last_oob_irq_enable_time),
16916 		GET_SEC_USEC(bus->last_oob_irq_disable_time)));
16917 	DHD_ERROR(("oob_irq_enabled=%d oob_gpio_level=%d\n",
16918 		dhdpcie_get_oob_irq_status(bus),
16919 		dhdpcie_get_oob_irq_level()));
16920 #endif /* BCMPCIE_OOB_HOST_WAKE */
16921 	DHD_ERROR(("dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n",
16922 		bus->dpc_return_busdown_count, bus->non_ours_irq_count));
16923 
16924 	current_time = OSL_LOCALTIME_NS();
16925 	DHD_ERROR(("\ncurrent_time="SEC_USEC_FMT"\n",
16926 		GET_SEC_USEC(current_time)));
16927 	DHD_ERROR(("isr_entry_time="SEC_USEC_FMT
16928 		" isr_exit_time="SEC_USEC_FMT"\n",
16929 		GET_SEC_USEC(bus->isr_entry_time),
16930 		GET_SEC_USEC(bus->isr_exit_time)));
16931 	DHD_ERROR(("isr_sched_dpc_time="SEC_USEC_FMT
16932 		" rpm_sched_dpc_time="SEC_USEC_FMT
16933 		" last_non_ours_irq_time="SEC_USEC_FMT"\n",
16934 		GET_SEC_USEC(bus->isr_sched_dpc_time),
16935 		GET_SEC_USEC(bus->rpm_sched_dpc_time),
16936 		GET_SEC_USEC(bus->last_non_ours_irq_time)));
16937 	DHD_ERROR(("dpc_entry_time="SEC_USEC_FMT
16938 		" last_process_ctrlbuf_time="SEC_USEC_FMT"\n",
16939 		GET_SEC_USEC(bus->dpc_entry_time),
16940 		GET_SEC_USEC(bus->last_process_ctrlbuf_time)));
16941 	DHD_ERROR(("last_process_flowring_time="SEC_USEC_FMT
16942 		" last_process_txcpl_time="SEC_USEC_FMT"\n",
16943 		GET_SEC_USEC(bus->last_process_flowring_time),
16944 		GET_SEC_USEC(bus->last_process_txcpl_time)));
16945 	DHD_ERROR(("last_process_rxcpl_time="SEC_USEC_FMT
16946 		" last_process_infocpl_time="SEC_USEC_FMT
16947 		" last_process_edl_time="SEC_USEC_FMT"\n",
16948 		GET_SEC_USEC(bus->last_process_rxcpl_time),
16949 		GET_SEC_USEC(bus->last_process_infocpl_time),
16950 		GET_SEC_USEC(bus->last_process_edl_time)));
16951 	DHD_ERROR(("dpc_exit_time="SEC_USEC_FMT
16952 		" resched_dpc_time="SEC_USEC_FMT"\n",
16953 		GET_SEC_USEC(bus->dpc_exit_time),
16954 		GET_SEC_USEC(bus->resched_dpc_time)));
16955 	DHD_ERROR(("last_d3_inform_time="SEC_USEC_FMT"\n",
16956 		GET_SEC_USEC(bus->last_d3_inform_time)));
16957 
16958 	DHD_ERROR(("\nlast_suspend_start_time="SEC_USEC_FMT
16959 		" last_suspend_end_time="SEC_USEC_FMT"\n",
16960 		GET_SEC_USEC(bus->last_suspend_start_time),
16961 		GET_SEC_USEC(bus->last_suspend_end_time)));
16962 	DHD_ERROR(("last_resume_start_time="SEC_USEC_FMT
16963 		" last_resume_end_time="SEC_USEC_FMT"\n",
16964 		GET_SEC_USEC(bus->last_resume_start_time),
16965 		GET_SEC_USEC(bus->last_resume_end_time)));
16966 
16967 #if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE)
16968 	DHD_ERROR(("logtrace_thread_entry_time="SEC_USEC_FMT
16969 		" logtrace_thread_sem_down_time="SEC_USEC_FMT
16970 		"\nlogtrace_thread_flush_time="SEC_USEC_FMT
16971 		" logtrace_thread_unexpected_break_time="SEC_USEC_FMT
16972 		"\nlogtrace_thread_complete_time="SEC_USEC_FMT"\n",
16973 		GET_SEC_USEC(dhd->logtrace_thr_ts.entry_time),
16974 		GET_SEC_USEC(dhd->logtrace_thr_ts.sem_down_time),
16975 		GET_SEC_USEC(dhd->logtrace_thr_ts.flush_time),
16976 		GET_SEC_USEC(dhd->logtrace_thr_ts.unexpected_break_time),
16977 		GET_SEC_USEC(dhd->logtrace_thr_ts.complete_time)));
16978 #endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */
16979 }
16980 
16981 void
16982 dhd_bus_intr_count_dump(dhd_pub_t *dhd)
16983 {
16984 	dhd_pcie_intr_count_dump(dhd);
16985 }
16986 
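/* Dump AI wrapper registers (OOB select, reset status/control and ITIP OOB)
 * for the PCIe and ARM CR4 wrappers, the CR4 core registers, and the OOBR
 * interrupt status words, the latter either through the mapped OOBR base
 * address(es) or directly from the HND_OOBR core. CA7 register dumps are
 * compiled out (NOT_YET) because reading them currently causes a CTO.
 */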
16987 int
16988 dhd_pcie_dump_wrapper_regs(dhd_pub_t *dhd)
16989 {
16990 	uint32 save_idx, val;
16991 	si_t *sih = dhd->bus->sih;
16992 	uint32 oob_base, oob_base1;
16993 	uint32 wrapper_dump_list[] = {
16994 		AI_OOBSELOUTA30, AI_OOBSELOUTA74, AI_OOBSELOUTB30, AI_OOBSELOUTB74,
16995 		AI_OOBSELOUTC30, AI_OOBSELOUTC74, AI_OOBSELOUTD30, AI_OOBSELOUTD74,
16996 		AI_RESETSTATUS, AI_RESETCTRL,
16997 		AI_ITIPOOBA, AI_ITIPOOBB, AI_ITIPOOBC, AI_ITIPOOBD,
16998 		AI_ITIPOOBAOUT, AI_ITIPOOBBOUT, AI_ITIPOOBCOUT, AI_ITIPOOBDOUT
16999 	};
17000 	uint32 i;
17001 	hndoobr_reg_t *reg;
17002 	cr4regs_t *cr4regs;
17003 	ca7regs_t *ca7regs;
17004 
17005 	save_idx = si_coreidx(sih);
17006 
17007 	DHD_ERROR(("%s: Master wrapper Reg\n", __FUNCTION__));
17008 
17009 	if (si_setcore(sih, PCIE2_CORE_ID, 0) != NULL) {
17010 		for (i = 0; i < (uint32)sizeof(wrapper_dump_list) / 4; i++) {
17011 			val = si_wrapperreg(sih, wrapper_dump_list[i], 0, 0);
17012 			DHD_ERROR(("sbreg: addr:0x%x val:0x%x\n", wrapper_dump_list[i], val));
17013 		}
17014 	}
17015 
17016 	if ((cr4regs = si_setcore(sih, ARMCR4_CORE_ID, 0)) != NULL) {
17017 		DHD_ERROR(("%s: ARM CR4 wrapper Reg\n", __FUNCTION__));
17018 		for (i = 0; i < (uint32)sizeof(wrapper_dump_list) / 4; i++) {
17019 			val = si_wrapperreg(sih, wrapper_dump_list[i], 0, 0);
17020 			DHD_ERROR(("sbreg: addr:0x%x val:0x%x\n", wrapper_dump_list[i], val));
17021 		}
17022 		DHD_ERROR(("%s: ARM CR4 core Reg\n", __FUNCTION__));
17023 		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corecontrol));
17024 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, corecontrol), val));
17025 		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corecapabilities));
17026 		DHD_ERROR(("reg:0x%x val:0x%x\n",
17027 			(uint)OFFSETOF(cr4regs_t, corecapabilities), val));
17028 		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corestatus));
17029 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, corestatus), val));
17030 		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, nmiisrst));
17031 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, nmiisrst), val));
17032 		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, nmimask));
17033 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, nmimask), val));
17034 		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, isrmask));
17035 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, isrmask), val));
17036 		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, swintreg));
17037 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, swintreg), val));
17038 		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, intstatus));
17039 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, intstatus), val));
17040 		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, cyclecnt));
17041 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, cyclecnt), val));
17042 		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, inttimer));
17043 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, inttimer), val));
17044 		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, clk_ctl_st));
17045 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, clk_ctl_st), val));
17046 		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, powerctl));
17047 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, powerctl), val));
17048 	}
17049 	/* XXX: Currently dumping CA7 registers causing CTO, temporarily disabling it */
17050 	BCM_REFERENCE(ca7regs);
17051 #ifdef NOT_YET
17052 	if ((ca7regs = si_setcore(sih, ARMCA7_CORE_ID, 0)) != NULL) {
17053 		DHD_ERROR(("%s: ARM CA7 core Reg\n", __FUNCTION__));
17054 		val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corecontrol));
17055 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, corecontrol), val));
17056 		val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corecapabilities));
17057 		DHD_ERROR(("reg:0x%x val:0x%x\n",
17058 			(uint)OFFSETOF(ca7regs_t, corecapabilities), val));
17059 		val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corestatus));
17060 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, corestatus), val));
17061 		val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, tracecontrol));
17062 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, tracecontrol), val));
17063 		val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, clk_ctl_st));
17064 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, clk_ctl_st), val));
17065 		val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, powerctl));
17066 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, powerctl), val));
17067 	}
17068 #endif /* NOT_YET */
17069 
17070 	DHD_ERROR(("%s: OOBR Reg\n", __FUNCTION__));
17071 
17072 	oob_base = si_oobr_baseaddr(sih, FALSE);
17073 	oob_base1 = si_oobr_baseaddr(sih, TRUE);
17074 	if (oob_base) {
17075 		dhd_sbreg_op(dhd, oob_base + OOB_STATUSA, &val, TRUE);
17076 		dhd_sbreg_op(dhd, oob_base + OOB_STATUSB, &val, TRUE);
17077 		dhd_sbreg_op(dhd, oob_base + OOB_STATUSC, &val, TRUE);
17078 		dhd_sbreg_op(dhd, oob_base + OOB_STATUSD, &val, TRUE);
17079 	} else if ((reg = si_setcore(sih, HND_OOBR_CORE_ID, 0)) != NULL) {
17080 		val = R_REG(dhd->osh, &reg->intstatus[0]);
17081 		DHD_ERROR(("reg: addr:%p val:0x%x\n", &reg->intstatus[0], val));
17082 		val = R_REG(dhd->osh, &reg->intstatus[1]);
17083 		DHD_ERROR(("reg: addr:%p val:0x%x\n", &reg->intstatus[1], val));
17084 		val = R_REG(dhd->osh, &reg->intstatus[2]);
17085 		DHD_ERROR(("reg: addr:%p val:0x%x\n", &reg->intstatus[2], val));
17086 		val = R_REG(dhd->osh, &reg->intstatus[3]);
17087 		DHD_ERROR(("reg: addr:%p val:0x%x\n", &reg->intstatus[3], val));
17088 	}
17089 
17090 	if (oob_base1) {
17091 		DHD_ERROR(("%s: Second OOBR Reg\n", __FUNCTION__));
17092 
17093 		dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSA, &val, TRUE);
17094 		dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSB, &val, TRUE);
17095 		dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSC, &val, TRUE);
17096 		dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSD, &val, TRUE);
17097 	}
17098 
17099 	si_setcoreidx(dhd->bus->sih, save_idx);
17100 
17101 	return 0;
17102 }
17103 
17104 static void
17105 dhdpcie_hw_war_regdump(dhd_bus_t *bus)
17106 {
17107 	uint32 save_idx, val;
17108 	volatile uint32 *reg;
17109 
17110 	save_idx = si_coreidx(bus->sih);
17111 	if ((reg = si_setcore(bus->sih, CC_CORE_ID, 0)) != NULL) {
17112 		val = R_REG(bus->osh, reg + REG_WORK_AROUND);
17113 		DHD_ERROR(("CC HW_WAR :0x%x\n", val));
17114 	}
17115 
17116 	if ((reg = si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) != NULL) {
17117 		val = R_REG(bus->osh, reg + REG_WORK_AROUND);
17118 		DHD_ERROR(("ARM HW_WAR:0x%x\n", val));
17119 	}
17120 
17121 	if ((reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0)) != NULL) {
17122 		val = R_REG(bus->osh, reg + REG_WORK_AROUND);
17123 		DHD_ERROR(("PCIE HW_WAR :0x%x\n", val));
17124 	}
17125 	si_setcoreidx(bus->sih, save_idx);
17126 
17127 	val = PMU_REG_NEW(bus->sih, min_res_mask, 0, 0);
17128 	DHD_ERROR(("MINRESMASK :0x%x\n", val));
17129 }
17130 
17131 int
17132 dhd_pcie_dma_info_dump(dhd_pub_t *dhd)
17133 {
17134 	if (dhd->bus->is_linkdown) {
17135 		DHD_ERROR(("\n ------- SKIP DUMPING DMA Registers "
17136 			"due to PCIe link down ------- \r\n"));
17137 		return 0;
17138 	}
17139 
17140 	DHD_ERROR(("\n ------- DUMPING DMA Registers ------- \r\n"));
17141 
17142 	/* HostToDev */
17143 	DHD_ERROR(("HostToDev TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
17144 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x200, 0, 0),
17145 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x204, 0, 0)));
17146 	DHD_ERROR(("            : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
17147 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x208, 0, 0),
17148 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x20C, 0, 0)));
17149 	DHD_ERROR(("            : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
17150 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x210, 0, 0),
17151 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x214, 0, 0)));
17152 
17153 	DHD_ERROR(("HostToDev RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
17154 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x220, 0, 0),
17155 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x224, 0, 0)));
17156 	DHD_ERROR(("            : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
17157 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x228, 0, 0),
17158 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x22C, 0, 0)));
17159 	DHD_ERROR(("            : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
17160 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x230, 0, 0),
17161 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x234, 0, 0)));
17162 
17163 	/* DevToHost */
17164 	DHD_ERROR(("DevToHost TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
17165 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x240, 0, 0),
17166 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x244, 0, 0)));
17167 	DHD_ERROR(("            : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
17168 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x248, 0, 0),
17169 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x24C, 0, 0)));
17170 	DHD_ERROR(("            : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
17171 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x250, 0, 0),
17172 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x254, 0, 0)));
17173 
17174 	DHD_ERROR(("DevToHost RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
17175 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x260, 0, 0),
17176 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x264, 0, 0)));
17177 	DHD_ERROR(("            : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
17178 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x268, 0, 0),
17179 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x26C, 0, 0)));
17180 	DHD_ERROR(("            : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
17181 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x270, 0, 0),
17182 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x274, 0, 0)));
17183 
17184 	return 0;
17185 }
17186 
17187 bool
17188 dhd_pcie_dump_int_regs(dhd_pub_t *dhd)
17189 {
17190 	uint32 intstatus = 0;
17191 	uint32 intmask = 0;
17192 	uint32 d2h_db0 = 0;
17193 	uint32 d2h_mb_data = 0;
17194 
17195 	DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n"));
17196 	intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
17197 		dhd->bus->pcie_mailbox_int, 0, 0);
17198 	if (intstatus == (uint32)-1) {
17199 		DHD_ERROR(("intstatus=0x%x \n", intstatus));
17200 		return FALSE;
17201 	}
17202 
17203 	intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
17204 		dhd->bus->pcie_mailbox_mask, 0, 0);
17205 	if (intmask == (uint32)-1) {
17206 		DHD_ERROR(("intstatus=0x%x intmask=0x%x \n", intstatus, intmask));
17207 		return FALSE;
17208 	}
17209 
17210 	d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
17211 		PCID2H_MailBox, 0, 0);
17212 	if (d2h_db0 == (uint32)-1) {
17213 		DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
17214 		intstatus, intmask, d2h_db0));
17215 		return FALSE;
17216 	}
17217 
17218 	DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
17219 		intstatus, intmask, d2h_db0));
17220 	dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
17221 	DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data,
17222 		dhd->bus->def_intmask));
17223 
17224 	return TRUE;
17225 }
17226 
17227 void
17228 dhd_pcie_dump_rc_conf_space_cap(dhd_pub_t *dhd)
17229 {
17230 	DHD_ERROR(("\n ------- DUMPING PCIE RC config space Registers ------- \r\n"));
17231 	DHD_ERROR(("Pcie RC Uncorrectable Error Status Val=0x%x\n",
17232 		dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
17233 		PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0)));
17234 #ifdef EXTENDED_PCIE_DEBUG_DUMP
17235 	DHD_ERROR(("hdrlog0 =0x%08x hdrlog1 =0x%08x hdrlog2 =0x%08x hdrlog3 =0x%08x\n",
17236 		dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
17237 		PCIE_EXTCAP_ERR_HEADER_LOG_0, TRUE, FALSE, 0),
17238 		dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
17239 		PCIE_EXTCAP_ERR_HEADER_LOG_1, TRUE, FALSE, 0),
17240 		dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
17241 		PCIE_EXTCAP_ERR_HEADER_LOG_2, TRUE, FALSE, 0),
17242 		dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
17243 		PCIE_EXTCAP_ERR_HEADER_LOG_3, TRUE, FALSE, 0)));
17244 #endif /* EXTENDED_PCIE_DEBUG_DUMP */
17245 }
17246 
17247 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
17248 #define MAX_RC_REG_INFO_VAL 8
17249 #define PCIE_EXTCAP_ERR_HD_SZ 4
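/* Each record appended to dhd->hang_info is an 8-hex-digit register value:
 * the first six fields (link control/status, device control/status,
 * uncorrectable error status, correctable error status, and two reserved
 * zeros for HG05/06) are each followed by HANG_KEY_DEL, while the four raw
 * AER header-log words are each prefixed with HANG_RAW_DEL.
 */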
17250 void
17251 dhd_dump_pcie_rc_regs_for_linkdown(dhd_pub_t *dhd, int *bytes_written)
17252 {
17253 	int i;
17254 	int remain_len;
17255 
17256 	/* dump link control & status */
17257 	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
17258 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
17259 		*bytes_written += scnprintf(&dhd->hang_info[*bytes_written], remain_len, "%08x%c",
17260 			dhdpcie_rc_access_cap(dhd->bus, PCIE_CAP_ID_EXP,
17261 				PCIE_CAP_LINKCTRL_OFFSET, FALSE, FALSE, 0), HANG_KEY_DEL);
17262 		dhd->hang_info_cnt++;
17263 	}
17264 
17265 	/* dump device control & status */
17266 	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
17267 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
17268 		*bytes_written += scnprintf(&dhd->hang_info[*bytes_written], remain_len, "%08x%c",
17269 			dhdpcie_rc_access_cap(dhd->bus, PCIE_CAP_ID_EXP,
17270 				PCIE_CAP_DEVCTRL_OFFSET, FALSE, FALSE, 0), HANG_KEY_DEL);
17271 		dhd->hang_info_cnt++;
17272 	}
17273 
17274 	/* dump uncorrectable error */
17275 	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
17276 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
17277 		*bytes_written += scnprintf(&dhd->hang_info[*bytes_written], remain_len, "%08x%c",
17278 			dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
17279 			PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0), HANG_KEY_DEL);
17280 		dhd->hang_info_cnt++;
17281 	}
17282 
17283 	/* dump correctable error */
17284 	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
17285 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
17286 		*bytes_written += scnprintf(&dhd->hang_info[*bytes_written], remain_len, "%08x%c",
17287 			dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
17288 			/* XXX: use definition in linux/pcie_regs.h */
17289 			PCI_ERR_COR_STATUS, TRUE, FALSE, 0), HANG_KEY_DEL);
17290 		dhd->hang_info_cnt++;
17291 	}
17292 
17293 	/* HG05/06 reserved */
17294 	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
17295 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
17296 		*bytes_written += scnprintf(&dhd->hang_info[*bytes_written], remain_len, "%08x%c",
17297 			0, HANG_KEY_DEL);
17298 		dhd->hang_info_cnt++;
17299 	}
17300 
17301 	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
17302 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
17303 		*bytes_written += scnprintf(&dhd->hang_info[*bytes_written], remain_len, "%08x%c",
17304 			0, HANG_KEY_DEL);
17305 		dhd->hang_info_cnt++;
17306 	}
17307 
17308 	/* dump error header log in RAW */
17309 	for (i = 0; i < PCIE_EXTCAP_ERR_HD_SZ; i++) {
17310 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
17311 		*bytes_written += scnprintf(&dhd->hang_info[*bytes_written], remain_len,
17312 			"%c%08x", HANG_RAW_DEL, dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
17313 			PCIE_EXTCAP_ERR_HEADER_LOG_0 + i * PCIE_EXTCAP_ERR_HD_SZ,
17314 			TRUE, FALSE, 0));
17315 	}
17316 	dhd->hang_info_cnt++;
17317 }
17318 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
17319 
17320 int
17321 dhd_pcie_debug_info_dump(dhd_pub_t *dhd)
17322 {
17323 	int host_irq_disabled;
17324 
17325 	DHD_ERROR(("bus->bus_low_power_state = %d\n", dhd->bus->bus_low_power_state));
17326 	host_irq_disabled = dhdpcie_irq_disabled(dhd->bus);
17327 	DHD_ERROR(("host pcie_irq disabled = %d\n", host_irq_disabled));
17328 	dhd_print_tasklet_status(dhd);
17329 	dhd_pcie_intr_count_dump(dhd);
17330 
17331 #if defined(LINUX) || defined(linux)
17332 	DHD_ERROR(("\n ------- DUMPING PCIE EP Resource Info ------- \r\n"));
17333 	dhdpcie_dump_resource(dhd->bus);
17334 #endif /* LINUX || linux */
17335 
17336 	dhd_pcie_dump_rc_conf_space_cap(dhd);
17337 
17338 	DHD_ERROR(("RootPort PCIe linkcap=0x%08x\n",
17339 		dhd_debug_get_rc_linkcap(dhd->bus)));
17340 #ifdef CUSTOMER_HW4_DEBUG
17341 	if (dhd->bus->is_linkdown) {
17342 		DHD_ERROR(("Skip dumping the PCIe Config and Core registers. "
17343 			"link may be DOWN\n"));
17344 		return 0;
17345 	}
17346 #endif /* CUSTOMER_HW4_DEBUG */
17347 	DHD_ERROR(("\n ------- DUMPING PCIE EP config space Registers ------- \r\n"));
17348 	/* XXX: hwnbu-twiki.sj.broadcom.com/bin/view/Mwgroup/CurrentPcieGen2ProgramGuide */
17349 	dhd_bus_dump_imp_cfg_registers(dhd->bus);
17350 #ifdef EXTENDED_PCIE_DEBUG_DUMP
17351 	DHD_ERROR(("Pcie EP Uncorrectable Error Status Val=0x%x\n",
17352 		dhdpcie_ep_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
17353 		PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0)));
17354 	DHD_ERROR(("hdrlog0(0x%x)=0x%08x hdrlog1(0x%x)=0x%08x hdrlog2(0x%x)=0x%08x "
17355 		"hdrlog3(0x%x)=0x%08x\n", PCI_TLP_HDR_LOG1,
17356 		dhd_pcie_config_read(dhd->bus, PCI_TLP_HDR_LOG1, sizeof(uint32)),
17357 		PCI_TLP_HDR_LOG2,
17358 		dhd_pcie_config_read(dhd->bus, PCI_TLP_HDR_LOG2, sizeof(uint32)),
17359 		PCI_TLP_HDR_LOG3,
17360 		dhd_pcie_config_read(dhd->bus, PCI_TLP_HDR_LOG3, sizeof(uint32)),
17361 		PCI_TLP_HDR_LOG4,
17362 		dhd_pcie_config_read(dhd->bus, PCI_TLP_HDR_LOG4, sizeof(uint32))));
17363 	if (dhd->bus->sih->buscorerev >= 24) {
17364 		DHD_ERROR(("DeviceStatusControl(0x%x)=0x%x SubsystemControl(0x%x)=0x%x "
17365 			"L1SSControl2(0x%x)=0x%x\n", PCIECFGREG_DEV_STATUS_CTRL,
17366 			dhd_pcie_config_read(dhd->bus, PCIECFGREG_DEV_STATUS_CTRL,
17367 			sizeof(uint32)), PCIE_CFG_SUBSYSTEM_CONTROL,
17368 			dhd_pcie_config_read(dhd->bus, PCIE_CFG_SUBSYSTEM_CONTROL,
17369 			sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL2,
17370 			dhd_pcie_config_read(dhd->bus, PCIECFGREG_PML1_SUB_CTRL2,
17371 			sizeof(uint32))));
17372 		dhd_bus_dump_dar_registers(dhd->bus);
17373 	}
17374 #endif /* EXTENDED_PCIE_DEBUG_DUMP */
17375 
17376 	if (dhd->bus->is_linkdown) {
17377 		DHD_ERROR(("Skip dumping the PCIe Core registers. link may be DOWN\n"));
17378 		return 0;
17379 	}
17380 
17381 	if (MULTIBP_ENAB(dhd->bus->sih)) {
17382 		dhd_bus_pcie_pwr_req(dhd->bus);
17383 	}
17384 
17385 	DHD_ERROR(("\n ------- DUMPING PCIE core Registers ------- \r\n"));
17386 	/* XXX: hwnbu-twiki.sj.broadcom.com/twiki/pub/Mwgroup/
17387 	 * CurrentPcieGen2ProgramGuide/pcie_ep.htm
17388 	 */
17389 
17390 	DHD_ERROR(("ClkReq0(0x%x)=0x%x ClkReq1(0x%x)=0x%x ClkReq2(0x%x)=0x%x "
17391 		"ClkReq3(0x%x)=0x%x\n", PCIECFGREG_PHY_DBG_CLKREQ0,
17392 		dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ0),
17393 		PCIECFGREG_PHY_DBG_CLKREQ1,
17394 		dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ1),
17395 		PCIECFGREG_PHY_DBG_CLKREQ2,
17396 		dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ2),
17397 		PCIECFGREG_PHY_DBG_CLKREQ3,
17398 		dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ3)));
17399 
17400 #ifdef EXTENDED_PCIE_DEBUG_DUMP
17401 	if (dhd->bus->sih->buscorerev >= 24) {
17402 
17403 		DHD_ERROR(("ltssm_hist_0(0x%x)=0x%x ltssm_hist_1(0x%x)=0x%x "
17404 			"ltssm_hist_2(0x%x)=0x%x "
17405 			"ltssm_hist_3(0x%x)=0x%x\n", PCIECFGREG_PHY_LTSSM_HIST_0,
17406 			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_0),
17407 			PCIECFGREG_PHY_LTSSM_HIST_1,
17408 			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_1),
17409 			PCIECFGREG_PHY_LTSSM_HIST_2,
17410 			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_2),
17411 			PCIECFGREG_PHY_LTSSM_HIST_3,
17412 			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_3)));
17413 
17414 		DHD_ERROR(("trefup(0x%x)=0x%x trefup_ext(0x%x)=0x%x\n",
17415 			PCIECFGREG_TREFUP,
17416 			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP),
17417 			PCIECFGREG_TREFUP_EXT,
17418 			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP_EXT)));
17419 		DHD_ERROR(("errlog(0x%x)=0x%x errlog_addr(0x%x)=0x%x "
17420 			"Function_Intstatus(0x%x)=0x%x "
17421 			"Function_Intmask(0x%x)=0x%x Power_Intstatus(0x%x)=0x%x "
17422 			"Power_Intmask(0x%x)=0x%x\n",
17423 			PCIE_CORE_REG_ERRLOG,
17424 			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
17425 			PCIE_CORE_REG_ERRLOG, 0, 0),
17426 			PCIE_CORE_REG_ERR_ADDR,
17427 			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
17428 				PCIE_CORE_REG_ERR_ADDR, 0, 0),
17429 			PCIFunctionIntstatus(dhd->bus->sih->buscorerev),
17430 			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
17431 				PCIFunctionIntstatus(dhd->bus->sih->buscorerev), 0, 0),
17432 			PCIFunctionIntmask(dhd->bus->sih->buscorerev),
17433 			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
17434 				PCIFunctionIntmask(dhd->bus->sih->buscorerev), 0, 0),
17435 			PCIPowerIntstatus(dhd->bus->sih->buscorerev),
17436 			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
17437 				PCIPowerIntstatus(dhd->bus->sih->buscorerev), 0, 0),
17438 			PCIPowerIntmask(dhd->bus->sih->buscorerev),
17439 			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
17440 				PCIPowerIntmask(dhd->bus->sih->buscorerev), 0, 0)));
17441 		DHD_ERROR(("err_hdrlog1(0x%x)=0x%x err_hdrlog2(0x%x)=0x%x "
17442 			"err_hdrlog3(0x%x)=0x%x err_hdrlog4(0x%x)=0x%x\n",
17443 			(uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1),
17444 			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
17445 				OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1), 0, 0),
17446 			(uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2),
17447 			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
17448 				OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2), 0, 0),
17449 			(uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3),
17450 			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
17451 				OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3), 0, 0),
17452 			(uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4),
17453 			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
17454 				OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4), 0, 0)));
17455 		DHD_ERROR(("err_code(0x%x)=0x%x\n",
17456 			(uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg),
17457 			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
17458 				OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg), 0, 0)));
17459 
17460 		dhd_pcie_dump_wrapper_regs(dhd);
17461 		dhdpcie_hw_war_regdump(dhd->bus);
17462 	}
17463 #endif /* EXTENDED_PCIE_DEBUG_DUMP */
17464 
17465 	dhd_pcie_dma_info_dump(dhd);
17466 
17467 	if (MULTIBP_ENAB(dhd->bus->sih)) {
17468 		dhd_bus_pcie_pwr_req_clear(dhd->bus);
17469 	}
17470 
17471 	return 0;
17472 }
17473 
17474 bool
17475 dhd_bus_force_bt_quiesce_enabled(struct dhd_bus *bus)
17476 {
17477 	return bus->force_bt_quiesce;
17478 }
17479 #ifdef BCMINTERNAL
17480 #ifdef DHD_FWTRACE
17481 uint32 dhd_bus_get_bp_base(dhd_pub_t *dhdp)
17482 {
17483 	return (dhdp->bus->bp_base);
17484 }
17485 #endif /* DHD_FWTRACE */
17486 #endif /* BCMINTERNAL */
17487 #ifdef DHD_HP2P
17488 uint16
17489 dhd_bus_get_hp2p_ring_max_size(struct dhd_bus *bus, bool tx)
17490 {
17491 	if (tx)
17492 		return bus->hp2p_txcpl_max_items;
17493 	else
17494 		return bus->hp2p_rxcpl_max_items;
17495 }
17496 
17497 static uint16
17498 dhd_bus_set_hp2p_ring_max_size(struct dhd_bus *bus, bool tx, uint16 val)
17499 {
17500 	if (tx)
17501 		bus->hp2p_txcpl_max_items = val;
17502 	else
17503 		bus->hp2p_rxcpl_max_items = val;
17504 	return val;
17505 }
17506 #endif /* DHD_HP2P */
17507 
17508 uint8
17509 dhd_d11_slices_num_get(dhd_pub_t *dhdp)
17510 {
17511 	return si_scan_core_present(dhdp->bus->sih) ?
17512 		MAX_NUM_D11_CORES_WITH_SCAN : MAX_NUM_D11CORES;
17513 }
17514 
17515 #if defined(linux) || defined(LINUX)
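/* Sanity test of dongle TCM: for each bit pattern in init_val[], write the
 * pattern across the whole RAM in MEMBLOCK-sized chunks, read it back and
 * memcmp-verify. Returns TRUE only if every pattern round-trips; on a
 * mismatch both buffers are hex-dumped and the test aborts. When
 * DHD_FW_MEM_CORRUPTION is defined only the all-ones/all-zeros patterns run.
 */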
17516 static bool
17517 dhd_bus_tcm_test(struct dhd_bus *bus)
17518 {
17519 	int ret = 0;
17520 	int size; /* Full mem size */
17521 	int start; /* Start address */
17522 	int read_size = 0; /* Read size of each iteration */
17523 	int num = 0;
17524 	uint8 *read_buf, *write_buf;
17525 	uint8 init_val[NUM_PATTERNS] = {
17526 		0xFFu, /* 11111111 */
17527 		0x00u, /* 00000000 */
17528 #if !defined(DHD_FW_MEM_CORRUPTION)
17529 		0x77u, /* 01110111 */
17530 		0x22u, /* 00100010 */
17531 		0x27u, /* 00100111 */
17532 		0x72u, /* 01110010 */
17533 #endif /* !DHD_FW_MEM_CORRUPTION */
17534 	};
17535 
17536 	if (!bus) {
17537 		DHD_ERROR(("%s: bus is NULL !\n", __FUNCTION__));
17538 		return FALSE;
17539 	}
17540 
17541 	read_buf = MALLOCZ(bus->dhd->osh, MEMBLOCK);
17542 
17543 	if (!read_buf) {
17544 		DHD_ERROR(("%s: MALLOC of read_buf failed\n", __FUNCTION__));
17545 		return FALSE;
17546 	}
17547 
17548 	write_buf = MALLOCZ(bus->dhd->osh, MEMBLOCK);
17549 
17550 	if (!write_buf) {
17551 		MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
17552 		DHD_ERROR(("%s: MALLOC of write_buf failed\n", __FUNCTION__));
17553 		return FALSE;
17554 	}
17555 
17556 	DHD_ERROR(("%s: start %x,  size: %x\n", __FUNCTION__, bus->dongle_ram_base, bus->ramsize));
17557 	DHD_ERROR(("%s: memblock size %d,  #pattern %d\n", __FUNCTION__, MEMBLOCK, NUM_PATTERNS));
17558 
17559 	while (num < NUM_PATTERNS) {
17560 		start = bus->dongle_ram_base;
17561 		/* Get full mem size */
17562 		size = bus->ramsize;
17563 
17564 		memset(write_buf, init_val[num], MEMBLOCK);
17565 		while (size > 0) {
17566 			read_size = MIN(MEMBLOCK, size);
17567 			memset(read_buf, 0, read_size);
17568 
17569 			/* Write */
17570 			if ((ret = dhdpcie_bus_membytes(bus, TRUE, start, write_buf, read_size))) {
17571 				DHD_ERROR(("%s: Write Error membytes %d\n", __FUNCTION__, ret));
17572 				MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
17573 				MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
17574 				return FALSE;
17575 			}
17576 
17577 			/* Read */
17578 			if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, read_buf, read_size))) {
17579 				DHD_ERROR(("%s: Read Error membytes %d\n", __FUNCTION__, ret));
17580 				MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
17581 				MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
17582 				return FALSE;
17583 			}
17584 
17585 			/* Compare */
17586 			if (memcmp(read_buf, write_buf, read_size)) {
17587 				DHD_ERROR(("%s: Mismatch at %x, iter : %d\n",
17588 					__FUNCTION__, start, num));
17589 				prhex("Readbuf", read_buf, read_size);
17590 				prhex("Writebuf", write_buf, read_size);
17591 				MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
17592 				MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
17593 				return FALSE;
17594 			}
17595 
17596 			/* Decrement size and increment start address */
17597 			size -= read_size;
17598 			start += read_size;
17599 		}
17600 		num++;
17601 	}
17602 
17603 	MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
17604 	MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
17605 
17606 	DHD_ERROR(("%s: Success iter : %d\n", __FUNCTION__, num));
17607 	return TRUE;
17608 }
17609 #endif /* LINUX || linux */
17610 
17611 #define PCI_CFG_LINK_SPEED_SHIFT	16
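/* Returns the raw "Current Link Speed" field from the PCIe LinkStatus
 * register (the status half of the link control/status config word, hence
 * the shift by 16). Per the PCIe spec encoding: 1 = 2.5 GT/s (Gen1),
 * 2 = 5.0 GT/s (Gen2), 3 = 8.0 GT/s (Gen3), 4 = 16.0 GT/s (Gen4).
 */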
17612 int
17613 dhd_get_pcie_linkspeed(dhd_pub_t *dhd)
17614 {
17615 	uint32 pcie_lnkst;
17616 	uint32 pcie_lnkspeed;
17617 	pcie_lnkst = OSL_PCI_READ_CONFIG(dhd->osh, PCIECFGREG_LINK_STATUS_CTRL,
17618 		sizeof(pcie_lnkst));
17619 
17620 	pcie_lnkspeed = (pcie_lnkst >> PCI_CFG_LINK_SPEED_SHIFT) & PCI_LINK_SPEED_MASK;
17621 	DHD_INFO(("%s: Link speed: %d\n", __FUNCTION__, pcie_lnkspeed));
17622 	return pcie_lnkspeed;
17623 }
17624 
17625 int
17626 dhd_bus_checkdied(struct dhd_bus *bus, char *data, uint size)
17627 {
17628 	return dhdpcie_checkdied(bus, data, size);
17629 }
17630 
17631 /* The common backplane can be hung by putting the APB2 bridge in reset */
17632 void
17633 dhdpcie_induce_cbp_hang(dhd_pub_t *dhd)
17634 {
17635 	uint32 addr, val;
17636 	uint32 apb2_wrapper_reg = 0x18106000;
17637 	uint32 apb2_reset_ctrl_offset = 0x800;
17638 	addr = apb2_wrapper_reg + apb2_reset_ctrl_offset;
17639 	val = 1;
17640 	dhd_sbreg_op(dhd, addr, &val, FALSE);
17641 }
17642