/*
 * DHD Bus Module for PCIE
 *
 * Copyright (C) 1999-2017, Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: dhd_pcie.c 710862 2017-07-14 07:43:59Z $
 */


/* include files */
#include <typedefs.h>
#include <bcmutils.h>
#include <bcmdevs.h>
#include <siutils.h>
#include <hndsoc.h>
#include <hndpmu.h>
#include <hnd_debug.h>
#include <sbchipc.h>
#include <hnd_armtrap.h>
#if defined(DHD_DEBUG)
#include <hnd_cons.h>
#endif /* defined(DHD_DEBUG) */
#include <dngl_stats.h>
#include <pcie_core.h>
#include <dhd.h>
#include <dhd_bus.h>
#include <dhd_flowring.h>
#include <dhd_proto.h>
#include <dhd_dbg.h>
#include <dhd_daemon.h>
#include <dhdioctl.h>
#include <sdiovar.h>
#include <bcmmsgbuf.h>
#include <pcicfg.h>
#include <dhd_pcie.h>
#include <bcmpcie.h>
#include <bcmendian.h>
#ifdef DHDTCPACK_SUPPRESS
#include <dhd_ip.h>
#endif /* DHDTCPACK_SUPPRESS */
#include <bcmevent.h>
#include <dhd_config.h>

#ifdef DHD_TIMESYNC
#include <dhd_timesync.h>
#endif /* DHD_TIMESYNC */

#if defined(BCMEMBEDIMAGE)
#ifndef DHD_EFI
#include BCMEMBEDIMAGE
#else
#include <rtecdc_4364.h>
#endif /* !DHD_EFI */
#endif /* BCMEMBEDIMAGE */

#define MEMBLOCK	2048	/* Block size used for downloading of dongle image */
#define MAX_WKLK_IDLE_CHECK	3	/* times wake_lock checked before deciding not to suspend */

#define ARMCR4REG_BANKIDX	(0x40/sizeof(uint32))
#define ARMCR4REG_BANKPDA	(0x4C/sizeof(uint32))
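/*
 * The 0x40/0x4C byte offsets are divided by sizeof(uint32) because these
 * constants are meant to index a (uint32 *) view of the ARM CR4 core register
 * space. A minimal usage sketch (illustrative only; 'cr4regs' is a
 * hypothetical pointer to the mapped core registers):
 *
 *	volatile uint32 *cr4regs = (volatile uint32 *)bus->regs;
 *	W_REG(osh, cr4regs + ARMCR4REG_BANKIDX, bank);	// select a SOCRAM bank
 *	val = R_REG(osh, cr4regs + ARMCR4REG_BANKPDA);	// read that bank's PDA
 */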
/* Temporary WAR to fix precommit until the sync issue between trunk & precommit branches is resolved */

/* CTO Prevention Recovery */
#define CTO_TO_CLEAR_WAIT_MS		1000
#define CTO_TO_CLEAR_WAIT_MAX_CNT	10

#if defined(SUPPORT_MULTIPLE_BOARD_REV)
extern unsigned int system_rev;
#endif /* SUPPORT_MULTIPLE_BOARD_REV */

int dhd_dongle_memsize;
int dhd_dongle_ramsize;
static int dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size);
static int dhdpcie_bus_readconsole(dhd_bus_t *bus);
#if defined(DHD_FW_COREDUMP)
struct dhd_bus *g_dhd_bus = NULL;
static int dhdpcie_mem_dump(dhd_bus_t *bus);
#endif /* DHD_FW_COREDUMP */

static int dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size);
static int dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid,
	const char *name, void *params,
	int plen, void *arg, int len, int val_size);
static int dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 intval);
static int dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus,
	uint32 len, uint32 srcdelay, uint32 destdelay, uint32 d11_lpbk);
static int dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter);
static int _dhdpcie_download_firmware(struct dhd_bus *bus);
static int dhdpcie_download_firmware(dhd_bus_t *bus, osl_t *osh);
static int dhdpcie_bus_write_vars(dhd_bus_t *bus);
static bool dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus);
static bool dhdpci_bus_read_frames(dhd_bus_t *bus);
static int dhdpcie_readshared(dhd_bus_t *bus);
static void dhdpcie_init_shared_addr(dhd_bus_t *bus);
static bool dhdpcie_dongle_attach(dhd_bus_t *bus);
static void dhdpcie_bus_dongle_setmemsize(dhd_bus_t *bus, int mem_size);
static void dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh,
	bool dongle_isolation, bool reset_flag);
static void dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh);
static int dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len);
static uint8 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset);
static void dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data);
static void dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data);
static uint16 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset);
static void dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data);
static uint32 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset);
#ifdef DHD_SUPPORT_64BIT
static void dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data);
static uint64 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset);
#endif /* DHD_SUPPORT_64BIT */
static void dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data);
static void dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size);
static int dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b);
static void dhdpcie_fw_trap(dhd_bus_t *bus);
static void dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info);
extern void dhd_dpc_enable(dhd_pub_t *dhdp);
extern void dhd_dpc_kill(dhd_pub_t *dhdp);

#ifdef IDLE_TX_FLOW_MGMT
static void dhd_bus_check_idle_scan(dhd_bus_t *bus);
static void dhd_bus_idle_scan(dhd_bus_t *bus);
#endif /* IDLE_TX_FLOW_MGMT */

#ifdef BCMEMBEDIMAGE
static int dhdpcie_download_code_array(dhd_bus_t *bus);
#endif /* BCMEMBEDIMAGE */


#ifdef EXYNOS_PCIE_DEBUG
extern void exynos_pcie_register_dump(int ch_num);
#endif /* EXYNOS_PCIE_DEBUG */

#define PCI_VENDOR_ID_BROADCOM	0x14e4

#define DHD_DEFAULT_DOORBELL_TIMEOUT	200	/* ms */
#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
static uint dhd_doorbell_timeout = DHD_DEFAULT_DOORBELL_TIMEOUT;
#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
static bool dhdpcie_check_firmware_compatible(uint32 f_api_version, uint32 h_api_version);
static void dhdpcie_cto_error_recovery(struct dhd_bus *bus);

#ifdef BCM_ASLR_HEAP
static void dhdpcie_wrt_rnd(struct dhd_bus *bus);
#endif /* BCM_ASLR_HEAP */

extern uint16 dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd);
extern void dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost);

/* IOVar table */
enum {
	IOV_INTR = 1,
	IOV_MEMSIZE,
	IOV_SET_DOWNLOAD_STATE,
	IOV_DEVRESET,
	IOV_VARS,
	IOV_MSI_SIM,
	IOV_PCIE_LPBK,
	IOV_CC_NVMSHADOW,
	IOV_RAMSIZE,
	IOV_RAMSTART,
	IOV_SLEEP_ALLOWED,
	IOV_PCIE_DMAXFER,
	IOV_PCIE_SUSPEND,
	IOV_DONGLEISOLATION,
	IOV_LTRSLEEPON_UNLOOAD,
	IOV_METADATA_DBG,
	IOV_RX_METADATALEN,
	IOV_TX_METADATALEN,
	IOV_TXP_THRESHOLD,
	IOV_BUZZZ_DUMP,
	IOV_DUMP_RINGUPD_BLOCK,
	IOV_DMA_RINGINDICES,
	IOV_FORCE_FW_TRAP,
	IOV_DB1_FOR_MB,
	IOV_FLOW_PRIO_MAP,
#ifdef DHD_PCIE_RUNTIMEPM
	IOV_IDLETIME,
#endif /* DHD_PCIE_RUNTIMEPM */
	IOV_RXBOUND,
	IOV_TXBOUND,
	IOV_HANGREPORT,
	IOV_H2D_MAILBOXDATA,
	IOV_INFORINGS,
	IOV_H2D_PHASE,
	IOV_H2D_ENABLE_TRAP_BADPHASE,
	IOV_H2D_TXPOST_MAX_ITEM,
	IOV_TRAPDATA,
	IOV_TRAPDATA_RAW,
	IOV_CTO_PREVENTION,
#ifdef PCIE_OOB
	IOV_OOB_BT_REG_ON,
	IOV_OOB_ENABLE,
#endif /* PCIE_OOB */
	IOV_PCIE_WD_RESET,
	IOV_CTO_THRESHOLD,
#ifdef DHD_EFI
	IOV_CONTROL_SIGNAL,
#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
	IOV_DEEP_SLEEP,
#endif /* PCIE_OOB || PCIE_INB_DW */
#endif /* DHD_EFI */
#ifdef DEVICE_TX_STUCK_DETECT
	IOV_DEVICE_TX_STUCK_DETECT,
#endif /* DEVICE_TX_STUCK_DETECT */
	IOV_INB_DW_ENABLE,
	IOV_IDMA_ENABLE,
	IOV_IFRM_ENABLE,
	IOV_CLEAR_RING,
#ifdef DHD_EFI
	IOV_WIFI_PROPERTIES,
	IOV_OTP_DUMP
#endif
};


const bcm_iovar_t dhdpcie_iovars[] = {
	{"intr", IOV_INTR, 0, 0, IOVT_BOOL, 0 },
	{"memsize", IOV_MEMSIZE, 0, 0, IOVT_UINT32, 0 },
	{"dwnldstate", IOV_SET_DOWNLOAD_STATE, 0, 0, IOVT_BOOL, 0 },
	{"vars", IOV_VARS, 0, 0, IOVT_BUFFER, 0 },
	{"devreset", IOV_DEVRESET, 0, 0, IOVT_BOOL, 0 },
	{"pcie_device_trap", IOV_FORCE_FW_TRAP, 0, 0, 0, 0 },
	{"pcie_lpbk", IOV_PCIE_LPBK, 0, 0, IOVT_UINT32, 0 },
	{"cc_nvmshadow", IOV_CC_NVMSHADOW, 0, 0, IOVT_BUFFER, 0 },
	{"ramsize", IOV_RAMSIZE, 0, 0, IOVT_UINT32, 0 },
	{"ramstart", IOV_RAMSTART, 0, 0, IOVT_UINT32, 0 },
	{"pcie_dmaxfer", IOV_PCIE_DMAXFER, 0, 0, IOVT_BUFFER, 3 * sizeof(int32) },
	{"pcie_suspend", IOV_PCIE_SUSPEND, 0, 0, IOVT_UINT32, 0 },
#ifdef PCIE_OOB
	{"oob_bt_reg_on", IOV_OOB_BT_REG_ON, 0, 0, IOVT_UINT32, 0 },
	{"oob_enable", IOV_OOB_ENABLE, 0, 0, IOVT_UINT32, 0 },
#endif /* PCIE_OOB */
	{"sleep_allowed", IOV_SLEEP_ALLOWED, 0, 0, IOVT_BOOL, 0 },
	{"dngl_isolation", IOV_DONGLEISOLATION, 0, 0, IOVT_UINT32, 0 },
	{"ltrsleep_on_unload", IOV_LTRSLEEPON_UNLOOAD, 0, 0, IOVT_UINT32, 0 },
	{"dump_ringupdblk", IOV_DUMP_RINGUPD_BLOCK, 0, 0, IOVT_BUFFER, 0 },
	{"dma_ring_indices", IOV_DMA_RINGINDICES, 0, 0, IOVT_UINT32, 0},
	{"metadata_dbg", IOV_METADATA_DBG, 0, 0, IOVT_BOOL, 0 },
	{"rx_metadata_len", IOV_RX_METADATALEN, 0, 0, IOVT_UINT32, 0 },
	{"tx_metadata_len", IOV_TX_METADATALEN, 0, 0, IOVT_UINT32, 0 },
	{"db1_for_mb", IOV_DB1_FOR_MB, 0, 0, IOVT_UINT32, 0 },
	{"txp_thresh", IOV_TXP_THRESHOLD, 0, 0, IOVT_UINT32, 0 },
	{"buzzz_dump", IOV_BUZZZ_DUMP, 0, 0, IOVT_UINT32, 0 },
	{"flow_prio_map", IOV_FLOW_PRIO_MAP, 0, 0, IOVT_UINT32, 0 },
#ifdef DHD_PCIE_RUNTIMEPM
	{"idletime", IOV_IDLETIME, 0, 0, IOVT_INT32, 0 },
#endif /* DHD_PCIE_RUNTIMEPM */
	{"rxbound", IOV_RXBOUND, 0, 0, IOVT_UINT32, 0 },
	{"txbound", IOV_TXBOUND, 0, 0, IOVT_UINT32, 0 },
	{"fw_hang_report", IOV_HANGREPORT, 0, 0, IOVT_BOOL, 0 },
	{"h2d_mb_data", IOV_H2D_MAILBOXDATA, 0, 0, IOVT_UINT32, 0 },
	{"inforings", IOV_INFORINGS, 0, 0, IOVT_UINT32, 0 },
	{"h2d_phase", IOV_H2D_PHASE, 0, 0, IOVT_UINT32, 0 },
	{"force_trap_bad_h2d_phase", IOV_H2D_ENABLE_TRAP_BADPHASE, 0, 0,
		IOVT_UINT32, 0 },
	{"h2d_max_txpost", IOV_H2D_TXPOST_MAX_ITEM, 0, 0, IOVT_UINT32, 0 },
	{"trap_data", IOV_TRAPDATA, 0, 0, IOVT_BUFFER, 0 },
	{"trap_data_raw", IOV_TRAPDATA_RAW, 0, 0, IOVT_BUFFER, 0 },
	{"cto_prevention", IOV_CTO_PREVENTION, 0, 0, IOVT_UINT32, 0 },
	{"pcie_wd_reset", IOV_PCIE_WD_RESET, 0, 0, IOVT_BOOL, 0 },
	{"cto_threshold", IOV_CTO_THRESHOLD, 0, 0, IOVT_UINT32, 0 },
#ifdef DHD_EFI
	{"control_signal", IOV_CONTROL_SIGNAL, 0, 0, IOVT_UINT32, 0},
#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
	{"deep_sleep", IOV_DEEP_SLEEP, 0, 0, IOVT_UINT32, 0},
#endif /* PCIE_OOB || PCIE_INB_DW */
#endif /* DHD_EFI */
	{"inb_dw_enable", IOV_INB_DW_ENABLE, 0, 0, IOVT_UINT32, 0 },
#ifdef DEVICE_TX_STUCK_DETECT
	{"dev_tx_stuck_monitor", IOV_DEVICE_TX_STUCK_DETECT, 0, 0, IOVT_UINT32, 0 },
#endif /* DEVICE_TX_STUCK_DETECT */
	{"idma_enable", IOV_IDMA_ENABLE, 0, 0, IOVT_UINT32, 0 },
	{"ifrm_enable", IOV_IFRM_ENABLE, 0, 0, IOVT_UINT32, 0 },
	{"clear_ring", IOV_CLEAR_RING, 0, 0, IOVT_UINT32, 0 },
#ifdef DHD_EFI
	{"properties", IOV_WIFI_PROPERTIES, 0, 0, IOVT_BUFFER, 0},
	{"otp_dump", IOV_OTP_DUMP, 0, 0, IOVT_BUFFER, 0},
#endif
	{NULL, 0, 0, 0, 0, 0 }
};
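
/*
 * These iovars are dispatched from dhdpcie_bus_doiovar() (declared above). A
 * host-side usage sketch (hypothetical 'dhd' utility invocations; interface
 * name is an example):
 *
 *	dhd -i wlan0 memsize	# IOV_MEMSIZE  -> expected to report bus->ramsize
 *	dhd -i wlan0 ramstart	# IOV_RAMSTART -> expected to report bus->dongle_ram_base
 */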


#define MAX_READ_TIMEOUT	(5 * 1000 * 1000)

#ifndef DHD_RXBOUND
#define DHD_RXBOUND	64
#endif
#ifndef DHD_TXBOUND
#define DHD_TXBOUND	64
#endif

#define DHD_INFORING_BOUND	32

uint dhd_rxbound = DHD_RXBOUND;
uint dhd_txbound = DHD_TXBOUND;

/**
 * Register/Unregister functions are called by the main DHD entry point (e.g. module
 * insertion) to link with the bus driver, in order to look for or await the device.
 */
int
dhd_bus_register(void)
{
	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	return dhdpcie_bus_register();
}

void
dhd_bus_unregister(void)
{
	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	dhdpcie_bus_unregister();
	return;
}


/** returns a host virtual address */
uint32 *
dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size)
{
	return (uint32 *)REG_MAP(addr, size);
}

void
dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size)
{
	REG_UNMAP(addr);
	return;
}

/**
 * 'regs' is the host virtual address that maps to the start of the PCIe BAR0 window. The first 4096
 * bytes in this window are mapped to the backplane address in the PCIEBAR0Window register. The
 * precondition is that the PCIEBAR0Window register 'points' at the PCIe core.
 *
 * 'tcm' is the *host* virtual address at which tcm is mapped.
 */
dhd_bus_t* dhdpcie_bus_attach(osl_t *osh,
	volatile char *regs, volatile char *tcm, void *pci_dev)
{
	dhd_bus_t *bus;

	DHD_TRACE(("%s: ENTER\n", __FUNCTION__));

	do {
		if (!(bus = MALLOCZ(osh, sizeof(dhd_bus_t)))) {
			DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
			break;
		}

		bus->regs = regs;
		bus->tcm = tcm;
		bus->osh = osh;
		/* Save pci_dev into dhd_bus, as it may be needed in dhd_attach */
		bus->dev = (struct pci_dev *)pci_dev;


		dll_init(&bus->flowring_active_list);
#ifdef IDLE_TX_FLOW_MGMT
		bus->active_list_last_process_ts = OSL_SYSUPTIME();
#endif /* IDLE_TX_FLOW_MGMT */

#ifdef DEVICE_TX_STUCK_DETECT
		/* Enable the Device stuck detection feature by default */
		bus->dev_tx_stuck_monitor = TRUE;
		bus->device_tx_stuck_check = OSL_SYSUPTIME();
#endif /* DEVICE_TX_STUCK_DETECT */

		/* Attach pcie shared structure */
		if (!(bus->pcie_sh = MALLOCZ(osh, sizeof(pciedev_shared_t)))) {
			DHD_ERROR(("%s: MALLOC of bus->pcie_sh failed\n", __FUNCTION__));
			break;
		}

		if (dhdpcie_dongle_attach(bus)) {
			DHD_ERROR(("%s: dhdpcie_dongle_attach failed\n", __FUNCTION__));
			break;
		}

		/* software resources */
		if (!(bus->dhd = dhd_attach(osh, bus, PCMSGBUF_HDRLEN))) {
			DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));

			break;
		}
		bus->dhd->busstate = DHD_BUS_DOWN;
		bus->db1_for_mb = TRUE;
		bus->dhd->hang_report = TRUE;
		bus->use_mailbox = FALSE;
		bus->use_d0_inform = FALSE;
#ifdef IDLE_TX_FLOW_MGMT
		bus->enable_idle_flowring_mgmt = FALSE;
#endif /* IDLE_TX_FLOW_MGMT */
		bus->irq_registered = FALSE;

		DHD_TRACE(("%s: EXIT SUCCESS\n",
			__FUNCTION__));
#ifdef DHD_FW_COREDUMP
		g_dhd_bus = bus;
#endif
		return bus;
	} while (0);

	DHD_TRACE(("%s: EXIT FAILURE\n", __FUNCTION__));

	if (bus && bus->pcie_sh) {
		MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
	}

	if (bus) {
		MFREE(osh, bus, sizeof(dhd_bus_t));
	}
	return NULL;
}

uint
dhd_bus_chip(struct dhd_bus *bus)
{
	ASSERT(bus->sih != NULL);
	return bus->sih->chip;
}

uint
dhd_bus_chiprev(struct dhd_bus *bus)
{
	ASSERT(bus);
	ASSERT(bus->sih != NULL);
	return bus->sih->chiprev;
}

void *
dhd_bus_pub(struct dhd_bus *bus)
{
	return bus->dhd;
}

const void *
dhd_bus_sih(struct dhd_bus *bus)
{
	return (const void *)bus->sih;
}

void *
dhd_bus_txq(struct dhd_bus *bus)
{
	return &bus->txq;
}

/** Get Chip ID version */
uint dhd_bus_chip_id(dhd_pub_t *dhdp)
{
	dhd_bus_t *bus = dhdp->bus;
	return bus->sih->chip;
}

/** Get Chip Rev ID version */
uint dhd_bus_chiprev_id(dhd_pub_t *dhdp)
{
	dhd_bus_t *bus = dhdp->bus;
	return bus->sih->chiprev;
}

/** Get Chip Pkg ID version */
uint dhd_bus_chippkg_id(dhd_pub_t *dhdp)
{
	dhd_bus_t *bus = dhdp->bus;
	return bus->sih->chippkg;
}

/** Read and clear intstatus. This should be called with interrupts disabled or inside isr */
uint32
dhdpcie_bus_intstatus(dhd_bus_t *bus)
{
	uint32 intstatus = 0;
#ifndef DHD_READ_INTSTATUS_IN_DPC
	uint32 intmask = 0;
#endif /* DHD_READ_INTSTATUS_IN_DPC */

	if ((bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) &&
		bus->wait_for_d3_ack) {
#ifdef DHD_EFI
		DHD_INFO(("%s: trying to clear intstatus during suspend (%d)"
			" or suspend in progress %d\n",
			__FUNCTION__, bus->dhd->busstate, bus->d3_suspend_pending));
#else
		DHD_ERROR(("%s: trying to clear intstatus during suspend (%d)"
			" or suspend in progress %d\n",
			__FUNCTION__, bus->dhd->busstate, bus->d3_suspend_pending));
#endif /* !DHD_EFI */
		return intstatus;
	}
	if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) ||
		(bus->sih->buscorerev == 2)) {
		intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
		dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus);
		intstatus &= I_MB;
	} else {
		/* this is a PCIE core register..not a config register... */
		intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);

#ifndef DHD_READ_INTSTATUS_IN_DPC
		/* this is a PCIE core register..not a config register... */
		intmask = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask, 0, 0);

		intstatus &= intmask;
#endif /* DHD_READ_INTSTATUS_IN_DPC */
		/* If the device has been removed, intstatus and intmask read back as 0xffffffff */
		if (intstatus == (uint32)-1) {
			DHD_ERROR(("%s: Device is removed or Link is down.\n", __FUNCTION__));
#ifdef CUSTOMER_HW4_DEBUG
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
			bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
			dhd_os_send_hang_message(bus->dhd);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
#endif /* CUSTOMER_HW4_DEBUG */
			return intstatus;
		}


		/*
		 * The fourth argument to si_corereg is the "mask" of the register fields to
		 * update and the fifth is the "value" to update them with. If we are interested
		 * in only a few fields of the "mask" bit map, we should not write back everything
		 * we read, since doing so might clear/ack interrupts that have not been handled yet.
		 */
		si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, bus->def_intmask,
			intstatus);

		intstatus &= bus->def_intmask;
	}

	return intstatus;
}
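
/*
 * ISR-side usage sketch for dhdpcie_bus_intstatus() (a condensed view of what
 * dhdpcie_bus_isr() below actually does; illustrative only):
 *
 *	intstatus = dhdpcie_bus_intstatus(bus);	// read and ack in one step
 *	if (intstatus) {
 *		bus->intstatus = intstatus;	// stash for the DPC
 *		dhdpcie_bus_intr_disable(bus);	// stay masked until the DPC runs
 *		dhd_sched_dpc(bus->dhd);
 *	}
 */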

/**
 * Name: dhdpcie_bus_isr
 * Parameters:
 * 1: IN int irq   -- interrupt vector
 * 2: IN void *arg -- handle to private data structure
 * Return value:
 *	Status (TRUE or FALSE)
 *
 * Description:
 *	Interrupt service routine: checks the status register and, if mailbox
 *	interrupts are raised, disables further interrupts and queues the DPC.
 */
int32
dhdpcie_bus_isr(dhd_bus_t *bus)
{
	uint32 intstatus = 0;

	do {
		DHD_TRACE(("%s: Enter\n", __FUNCTION__));
		/* verify argument */
		if (!bus) {
			DHD_ERROR(("%s : bus is null pointer, exit \n", __FUNCTION__));
			break;
		}

		if (bus->dhd->dongle_reset) {
			break;
		}

		if (bus->dhd->busstate == DHD_BUS_DOWN) {
			break;
		}


		if (PCIECTO_ENAB(bus->dhd)) {
			/* read pci_intstatus */
			intstatus = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_STATUS, 4);

			if (intstatus & PCI_CTO_INT_MASK) {
				/* reset backplane and cto,
				 * then access through pcie is recovered.
				 */
				dhdpcie_cto_error_recovery(bus);
				return TRUE;
			}
		}

#ifndef DHD_READ_INTSTATUS_IN_DPC
		intstatus = dhdpcie_bus_intstatus(bus);

		/* Check if the interrupt is ours or not */
		if (intstatus == 0) {
			break;
		}

		/* save the intstatus */
		/* read interrupt status register!! Status bits will be cleared in DPC !! */
		bus->intstatus = intstatus;

		/* return error for 0xFFFFFFFF */
		if (intstatus == (uint32)-1) {
			dhdpcie_disable_irq_nosync(bus);
			bus->is_linkdown = TRUE;
			return BCME_ERROR;
		}

		/* Overall operation:
		 * - Mask further interrupts
		 * - Read/ack intstatus
		 * - Take action based on bits and state
		 * - Reenable interrupts (as per state)
		 */

		/* Count the interrupt call */
		bus->intrcount++;
#endif /* DHD_READ_INTSTATUS_IN_DPC */

		bus->ipend = TRUE;

		bus->isr_intr_disable_count++;
		dhdpcie_bus_intr_disable(bus);	/* Disable interrupt using IntMask!! */

		bus->intdis = TRUE;

#if defined(PCIE_ISR_THREAD)

		DHD_TRACE(("Calling dhd_bus_dpc() from %s\n", __FUNCTION__));
		DHD_OS_WAKE_LOCK(bus->dhd);
		while (dhd_bus_dpc(bus));
		DHD_OS_WAKE_UNLOCK(bus->dhd);
#else
		bus->dpc_sched = TRUE;
		dhd_sched_dpc(bus->dhd);	/* queue DPC now!! */
#endif /* PCIE_ISR_THREAD */

		DHD_TRACE(("%s: Exit Success DPC Queued\n", __FUNCTION__));
		return TRUE;
	} while (0);

	DHD_TRACE(("%s: Exit Failure\n", __FUNCTION__));
	return FALSE;
}

int
dhdpcie_set_pwr_state(dhd_bus_t *bus, uint state)
{
	uint32 cur_state = 0;
	uint32 pm_csr = 0;
	osl_t *osh = bus->osh;

	pm_csr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
	cur_state = pm_csr & PCIECFGREG_PM_CSR_STATE_MASK;

	if (cur_state == state) {
		DHD_ERROR(("%s: Already in state %u \n", __FUNCTION__, cur_state));
		return BCME_OK;
	}

	if (state > PCIECFGREG_PM_CSR_STATE_D3_HOT)
		return BCME_ERROR;

	/* Validate the state transition
	 * if already in a lower power state, return error
	 */
	if (state != PCIECFGREG_PM_CSR_STATE_D0 &&
		cur_state <= PCIECFGREG_PM_CSR_STATE_D3_COLD &&
		cur_state > state) {
		DHD_ERROR(("%s: Invalid power state transition !\n", __FUNCTION__));
		return BCME_ERROR;
	}

	pm_csr &= ~PCIECFGREG_PM_CSR_STATE_MASK;
	pm_csr |= state;

	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32), pm_csr);

	/* need to wait for the specified mandatory pcie power transition delay time */
	if (state == PCIECFGREG_PM_CSR_STATE_D3_HOT ||
		cur_state == PCIECFGREG_PM_CSR_STATE_D3_HOT)
		OSL_DELAY(DHDPCIE_PM_D3_DELAY);
	else if (state == PCIECFGREG_PM_CSR_STATE_D2 ||
		cur_state == PCIECFGREG_PM_CSR_STATE_D2)
		OSL_DELAY(DHDPCIE_PM_D2_DELAY);

	/* read back the power state and verify */
	pm_csr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
	cur_state = pm_csr & PCIECFGREG_PM_CSR_STATE_MASK;
	if (cur_state != state) {
		DHD_ERROR(("%s: power transition failed ! Current state is %u \n",
			__FUNCTION__, cur_state));
		return BCME_ERROR;
	} else {
		DHD_ERROR(("%s: power transition to %u success \n",
			__FUNCTION__, cur_state));
	}

	return BCME_OK;
}
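
/*
 * Usage sketch (illustrative only): cycle the dongle to D3hot and back to D0,
 * assuming the PCIECFGREG_PM_CSR_STATE_* encodings used above:
 *
 *	if (dhdpcie_set_pwr_state(bus, PCIECFGREG_PM_CSR_STATE_D3_HOT) == BCME_OK) {
 *		...
 *		dhdpcie_set_pwr_state(bus, PCIECFGREG_PM_CSR_STATE_D0);
 *	}
 */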

int
dhdpcie_config_check(dhd_bus_t *bus)
{
	uint32 i, val;
	int ret = BCME_ERROR;

	for (i = 0; i < DHDPCIE_CONFIG_CHECK_RETRY_COUNT; i++) {
		val = OSL_PCI_READ_CONFIG(bus->osh, PCI_CFG_VID, sizeof(uint32));
		if ((val & 0xFFFF) == VENDOR_BROADCOM) {
			ret = BCME_OK;
			break;
		}
		OSL_DELAY(DHDPCIE_CONFIG_CHECK_DELAY_MS * 1000);
	}

	return ret;
}

int
dhdpcie_config_restore(dhd_bus_t *bus, bool restore_pmcsr)
{
	uint32 i;
	osl_t *osh = bus->osh;

	if (BCME_OK != dhdpcie_config_check(bus)) {
		return BCME_ERROR;
	}

	for (i = PCI_CFG_REV >> 2; i < DHDPCIE_CONFIG_HDR_SIZE; i++) {
		OSL_PCI_WRITE_CONFIG(osh, i << 2, sizeof(uint32), bus->saved_config.header[i]);
	}
	OSL_PCI_WRITE_CONFIG(osh, PCI_CFG_CMD, sizeof(uint32), bus->saved_config.header[1]);

	if (restore_pmcsr)
		OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PM_CSR,
			sizeof(uint32), bus->saved_config.pmcsr);

	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_CAP, sizeof(uint32), bus->saved_config.msi_cap);
	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_ADDR_L, sizeof(uint32),
		bus->saved_config.msi_addr0);
	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_ADDR_H,
		sizeof(uint32), bus->saved_config.msi_addr1);
	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_DATA,
		sizeof(uint32), bus->saved_config.msi_data);

	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_DEV_STATUS_CTRL,
		sizeof(uint32), bus->saved_config.exp_dev_ctrl_stat);
	OSL_PCI_WRITE_CONFIG(osh, PCIECFGGEN_DEV_STATUS_CTRL2,
		sizeof(uint32), bus->saved_config.exp_dev_ctrl_stat2);
	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL,
		sizeof(uint32), bus->saved_config.exp_link_ctrl_stat);
	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL2,
		sizeof(uint32), bus->saved_config.exp_link_ctrl_stat2);

	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL1,
		sizeof(uint32), bus->saved_config.l1pm0);
	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2,
		sizeof(uint32), bus->saved_config.l1pm1);

	OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN,
		sizeof(uint32), bus->saved_config.bar0_win);
	OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR1_WIN,
		sizeof(uint32), bus->saved_config.bar1_win);

	return BCME_OK;
}

int
dhdpcie_config_save(dhd_bus_t *bus)
{
	uint32 i;
	osl_t *osh = bus->osh;

	if (BCME_OK != dhdpcie_config_check(bus)) {
		return BCME_ERROR;
	}

	for (i = 0; i < DHDPCIE_CONFIG_HDR_SIZE; i++) {
		bus->saved_config.header[i] = OSL_PCI_READ_CONFIG(osh, i << 2, sizeof(uint32));
	}

	bus->saved_config.pmcsr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));

	bus->saved_config.msi_cap = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_CAP,
		sizeof(uint32));
	bus->saved_config.msi_addr0 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_L,
		sizeof(uint32));
	bus->saved_config.msi_addr1 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_H,
		sizeof(uint32));
	bus->saved_config.msi_data = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_DATA,
		sizeof(uint32));

	bus->saved_config.exp_dev_ctrl_stat = OSL_PCI_READ_CONFIG(osh,
		PCIECFGREG_DEV_STATUS_CTRL, sizeof(uint32));
	bus->saved_config.exp_dev_ctrl_stat2 = OSL_PCI_READ_CONFIG(osh,
		PCIECFGGEN_DEV_STATUS_CTRL2, sizeof(uint32));
	bus->saved_config.exp_link_ctrl_stat = OSL_PCI_READ_CONFIG(osh,
		PCIECFGREG_LINK_STATUS_CTRL, sizeof(uint32));
	bus->saved_config.exp_link_ctrl_stat2 = OSL_PCI_READ_CONFIG(osh,
		PCIECFGREG_LINK_STATUS_CTRL2, sizeof(uint32));

	bus->saved_config.l1pm0 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL1,
		sizeof(uint32));
	bus->saved_config.l1pm1 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2,
		sizeof(uint32));

	bus->saved_config.bar0_win = OSL_PCI_READ_CONFIG(osh, PCI_BAR0_WIN,
		sizeof(uint32));
	bus->saved_config.bar1_win = OSL_PCI_READ_CONFIG(osh, PCI_BAR1_WIN,
		sizeof(uint32));
	return BCME_OK;
}
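
/*
 * Typical pairing (sketch, not the only call site): save the config space
 * while the link is known-good, then restore it once the device is back after
 * a reset or power cycle:
 *
 *	dhdpcie_config_save(bus);
 *	... device reset / power cycle ...
 *	if (dhdpcie_config_restore(bus, TRUE) != BCME_OK)
 *		DHD_ERROR(("config space restore failed\n"));
 */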

#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
dhd_pub_t *link_recovery = NULL;
#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
static bool
dhdpcie_dongle_attach(dhd_bus_t *bus)
{
	osl_t *osh = bus->osh;
	volatile void *regsva = (volatile void*)bus->regs;
	uint16 devid = bus->cl_devid;
	uint32 val;
	sbpcieregs_t *sbpcieregs;

	DHD_TRACE(("%s: ENTER\n", __FUNCTION__));

#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
	link_recovery = bus->dhd;
#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */

	bus->alp_only = TRUE;
	bus->sih = NULL;

	/* Set bar0 window to si_enum_base */
	dhdpcie_bus_cfg_set_bar0_win(bus, SI_ENUM_BASE);

	/* Checking PCIe bus status with reading configuration space */
	val = OSL_PCI_READ_CONFIG(osh, PCI_CFG_VID, sizeof(uint32));
	if ((val & 0xFFFF) != VENDOR_BROADCOM) {
		DHD_ERROR(("%s : failed to read PCI configuration space!\n", __FUNCTION__));
		goto fail;
	}

	/*
	 * Checking PCI_SPROM_CONTROL register for preventing invalid address access
	 * due to switch address space from PCI_BUS to SI_BUS.
	 */
	val = OSL_PCI_READ_CONFIG(osh, PCI_SPROM_CONTROL, sizeof(uint32));
	if (val == 0xffffffff) {
		DHD_ERROR(("%s : failed to read SPROM control register\n", __FUNCTION__));
		goto fail;
	}

#ifdef DHD_EFI
	/* Save good copy of PCIe config space */
	if (BCME_OK != dhdpcie_config_save(bus)) {
		DHD_ERROR(("%s : failed to save PCI configuration space!\n", __FUNCTION__));
		goto fail;
	}
#endif /* DHD_EFI */

	/* si_attach() will provide an SI handle and scan the backplane */
	if (!(bus->sih = si_attach((uint)devid, osh, regsva, PCI_BUS, bus,
		&bus->vars, &bus->varsz))) {
		DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__));
		goto fail;
	}

	/* Olympic EFI requirement: stop the driver load if FW is already running.
	 * This must be done here, before pcie_watchdog_reset(), because
	 * pcie_watchdog_reset() will put the ARM back into the halt state.
	 */
	if (!dhdpcie_is_arm_halted(bus)) {
		DHD_ERROR(("%s: ARM is not halted; FW is already running! Abort.\n",
			__FUNCTION__));
		goto fail;
	}

	/* Enable CLKREQ# */
	dhdpcie_clkreq(bus->osh, 1, 1);

#ifndef DONGLE_ENABLE_ISOLATION
	/*
	 * Issue CC watchdog to reset all the cores on the chip - similar to rmmod dhd
	 * This is required to avoid spurious interrupts to the Host and bring back
	 * dongle to a sane state (on host soft-reboot / watchdog-reboot).
	 */
	pcie_watchdog_reset(bus->osh, bus->sih, (sbpcieregs_t *) bus->regs);
#endif /* !DONGLE_ENABLE_ISOLATION */

#ifdef DHD_EFI
	dhdpcie_dongle_pwr_toggle(bus);
#endif

	si_setcore(bus->sih, PCIE2_CORE_ID, 0);
	sbpcieregs = (sbpcieregs_t*)(bus->regs);

	/* WAR where the BAR1 window may not be sized properly */
	W_REG(osh, &sbpcieregs->configaddr, 0x4e0);
	val = R_REG(osh, &sbpcieregs->configdata);
	W_REG(osh, &sbpcieregs->configdata, val);

	/* Get info on the ARM and SOCRAM cores... */
	/* Should really be qualified by device id */
	if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
		(si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) ||
		(si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) ||
		(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
		bus->armrev = si_corerev(bus->sih);
	} else {
		DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
		goto fail;
	}

	if (si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
		/* Only set dongle RAMSIZE to default value when ramsize is not adjusted */
		if (!bus->ramsize_adjusted) {
			if (!(bus->orig_ramsize = si_sysmem_size(bus->sih))) {
				DHD_ERROR(("%s: failed to find SYSMEM memory!\n", __FUNCTION__));
				goto fail;
			}
			/* also populate base address */
			bus->dongle_ram_base = CA7_4365_RAM_BASE;
			/* Default reserve 1.75MB for CA7 */
			bus->orig_ramsize = 0x1c0000;
		}
	} else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
		if (!(bus->orig_ramsize = si_socram_size(bus->sih))) {
			DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__));
			goto fail;
		}
	} else {
		/* CR4 has a different way to find the RAM size, from the TCMs */
		if (!(bus->orig_ramsize = si_tcm_size(bus->sih))) {
			DHD_ERROR(("%s: failed to find CR4-TCM memory!\n", __FUNCTION__));
			goto fail;
		}
		/* also populate base address */
		switch ((uint16)bus->sih->chip) {
		case BCM4339_CHIP_ID:
		case BCM4335_CHIP_ID:
			bus->dongle_ram_base = CR4_4335_RAM_BASE;
			break;
		case BCM4358_CHIP_ID:
		case BCM4354_CHIP_ID:
		case BCM43567_CHIP_ID:
		case BCM43569_CHIP_ID:
		case BCM4350_CHIP_ID:
		case BCM43570_CHIP_ID:
			bus->dongle_ram_base = CR4_4350_RAM_BASE;
			break;
		case BCM4360_CHIP_ID:
			bus->dongle_ram_base = CR4_4360_RAM_BASE;
			break;

		case BCM4364_CHIP_ID:
			bus->dongle_ram_base = CR4_4364_RAM_BASE;
			break;

		CASE_BCM4345_CHIP:
			bus->dongle_ram_base = (bus->sih->chiprev < 6) /* changed at 4345C0 */
				? CR4_4345_LT_C0_RAM_BASE : CR4_4345_GE_C0_RAM_BASE;
			break;
		CASE_BCM43602_CHIP:
			bus->dongle_ram_base = CR4_43602_RAM_BASE;
			break;
		case BCM4349_CHIP_GRPID:
			/* RAM base changed from 4349c0 (revid = 9) onwards */
			bus->dongle_ram_base = ((bus->sih->chiprev < 9) ?
				CR4_4349_RAM_BASE : CR4_4349_RAM_BASE_FROM_REV_9);
			break;
		case BCM4347_CHIP_GRPID:
			bus->dongle_ram_base = CR4_4347_RAM_BASE;
			break;
		case BCM4362_CHIP_ID:
			bus->dongle_ram_base = CR4_4362_RAM_BASE;
			break;
		default:
			bus->dongle_ram_base = 0;
			DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n",
				__FUNCTION__, bus->dongle_ram_base));
		}
	}
	bus->ramsize = bus->orig_ramsize;
	if (dhd_dongle_memsize)
		dhdpcie_bus_dongle_setmemsize(bus, dhd_dongle_memsize);

	DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n",
		bus->ramsize, bus->orig_ramsize, bus->dongle_ram_base));

	bus->srmemsize = si_socram_srmem_size(bus->sih);


	bus->def_intmask = PCIE_MB_D2H_MB_MASK | PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1;

	/* Set the poll and/or interrupt flags */
	bus->intr = (bool)dhd_intr;
	if ((bus->poll = (bool)dhd_poll))
		bus->pollrate = 1;

	bus->wait_for_d3_ack = 1;
#ifdef PCIE_OOB
	dhdpcie_oob_init(bus);
#endif /* PCIE_OOB */
#ifdef PCIE_INB_DW
	bus->inb_enabled = TRUE;
#endif /* PCIE_INB_DW */
	bus->dongle_in_ds = FALSE;
	bus->idma_enabled = TRUE;
	bus->ifrm_enabled = TRUE;
#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
	bus->ds_enabled = TRUE;
#endif
	DHD_TRACE(("%s: EXIT: SUCCESS\n", __FUNCTION__));
	return 0;

fail:
	if (bus->sih != NULL) {
		si_detach(bus->sih);
		bus->sih = NULL;
	}
	DHD_TRACE(("%s: EXIT: FAILURE\n", __FUNCTION__));
	return -1;
}

int
dhpcie_bus_unmask_interrupt(dhd_bus_t *bus)
{
	dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, I_MB);
	return 0;
}
int
dhpcie_bus_mask_interrupt(dhd_bus_t *bus)
{
	dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, 0x0);
	return 0;
}

void
dhdpcie_bus_intr_enable(dhd_bus_t *bus)
{
	DHD_TRACE(("%s Enter\n", __FUNCTION__));
	if (bus && bus->sih && !bus->is_linkdown) {
		if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
			(bus->sih->buscorerev == 4)) {
			dhpcie_bus_unmask_interrupt(bus);
		} else {
			/* Skip after receiving D3 ACK */
			if ((bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) &&
				bus->wait_for_d3_ack) {
				return;
			}
			si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask,
				bus->def_intmask, bus->def_intmask);
		}
	}
	DHD_TRACE(("%s Exit\n", __FUNCTION__));
}

void
dhdpcie_bus_intr_disable(dhd_bus_t *bus)
{
	DHD_TRACE(("%s Enter\n", __FUNCTION__));
	if (bus && bus->sih && !bus->is_linkdown) {
		if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
			(bus->sih->buscorerev == 4)) {
			dhpcie_bus_mask_interrupt(bus);
		} else {
			/* Skip after receiving D3 ACK */
			if ((bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) &&
				bus->wait_for_d3_ack) {
				return;
			}
			si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask,
				bus->def_intmask, 0);
		}
	}
	DHD_TRACE(("%s Exit\n", __FUNCTION__));
}

/*
 * dhdpcie_advertise_bus_cleanup advertises to other bus-user contexts (Tx, Rx,
 * IOVAR, WD, etc.) that cleanup is in progress, and waits for those contexts to
 * exit gracefully. Before marking dhd_bus_busy_state as BUSY, every bus-usage
 * context checks whether busstate is DHD_BUS_DOWN or DHD_BUS_DOWN_IN_PROGRESS;
 * if so, it exits right there without marking dhd_bus_busy_state as BUSY.
 */
static void
dhdpcie_advertise_bus_cleanup(dhd_pub_t *dhdp)
{
	unsigned long flags;
	int timeleft;

	DHD_GENERAL_LOCK(dhdp, flags);
	dhdp->busstate = DHD_BUS_DOWN_IN_PROGRESS;
	DHD_GENERAL_UNLOCK(dhdp, flags);

	timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state);
	if ((timeleft == 0) || (timeleft == 1)) {
		DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n",
			__FUNCTION__, dhdp->dhd_bus_busy_state));
		ASSERT(0);
	}

	return;
}

static void
dhdpcie_advertise_bus_remove(dhd_pub_t *dhdp)
{
	unsigned long flags;
	int timeleft;

	DHD_GENERAL_LOCK(dhdp, flags);
	dhdp->busstate = DHD_BUS_REMOVE;
	DHD_GENERAL_UNLOCK(dhdp, flags);

	timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state);
	if ((timeleft == 0) || (timeleft == 1)) {
		DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n",
			__FUNCTION__, dhdp->dhd_bus_busy_state));
		ASSERT(0);
	}

	return;
}


static void
dhdpcie_bus_remove_prep(dhd_bus_t *bus)
{
	unsigned long flags;
	DHD_TRACE(("%s Enter\n", __FUNCTION__));

	DHD_GENERAL_LOCK(bus->dhd, flags);
	bus->dhd->busstate = DHD_BUS_DOWN;
	DHD_GENERAL_UNLOCK(bus->dhd, flags);

#ifdef PCIE_INB_DW
	/* De-Initialize the lock to serialize Device Wake Inband activities */
	if (bus->inb_lock) {
		dhd_os_spin_lock_deinit(bus->dhd->osh, bus->inb_lock);
		bus->inb_lock = NULL;
	}
#endif


	dhd_os_sdlock(bus->dhd);

	if (bus->sih && !bus->dhd->dongle_isolation) {
		/* WAR for the issue where insmod fails after rmmod on Brix Android */
		/* if the pcie link is down, watchdog reset should not be done, as it may hang */
		if (!bus->is_linkdown)
			pcie_watchdog_reset(bus->osh, bus->sih, (sbpcieregs_t *) bus->regs);
		else
			DHD_ERROR(("%s: skipping watchdog reset, due to pcie link down ! \n",
				__FUNCTION__));

		bus->dhd->is_pcie_watchdog_reset = TRUE;
	}

	dhd_os_sdunlock(bus->dhd);

	DHD_TRACE(("%s Exit\n", __FUNCTION__));
}

/** Detach and free everything */
void
dhdpcie_bus_release(dhd_bus_t *bus)
{
	bool dongle_isolation = FALSE;
	osl_t *osh = NULL;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (bus) {
		osh = bus->osh;
		ASSERT(osh);

		if (bus->dhd) {
			dhdpcie_advertise_bus_remove(bus->dhd);
			dongle_isolation = bus->dhd->dongle_isolation;
			bus->dhd->is_pcie_watchdog_reset = FALSE;
			dhdpcie_bus_remove_prep(bus);

			if (bus->intr) {
				dhdpcie_bus_intr_disable(bus);
				dhdpcie_free_irq(bus);
			}
			dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
			dhd_detach(bus->dhd);
			dhd_free(bus->dhd);
			bus->dhd = NULL;
		}

		/* unmap the regs and tcm here!! */
		if (bus->regs) {
			dhdpcie_bus_reg_unmap(osh, bus->regs, DONGLE_REG_MAP_SIZE);
			bus->regs = NULL;
		}
		if (bus->tcm) {
			dhdpcie_bus_reg_unmap(osh, bus->tcm, DONGLE_TCM_MAP_SIZE);
			bus->tcm = NULL;
		}

		dhdpcie_bus_release_malloc(bus, osh);
		/* Detach pcie shared structure */
		if (bus->pcie_sh) {
			MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
			bus->pcie_sh = NULL;
		}

		if (bus->console.buf != NULL) {
			MFREE(osh, bus->console.buf, bus->console.bufsize);
		}


		/* Finally free bus info */
		MFREE(osh, bus, sizeof(dhd_bus_t));
	}

	DHD_TRACE(("%s: Exit\n", __FUNCTION__));
} /* dhdpcie_bus_release */


void
dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag)
{
	DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__,
		bus->dhd, bus->dhd->dongle_reset));

	if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag) {
		DHD_TRACE(("%s Exit\n", __FUNCTION__));
		return;
	}

	if (bus->sih) {
		if (!dongle_isolation &&
			(bus->dhd && !bus->dhd->is_pcie_watchdog_reset))
			pcie_watchdog_reset(bus->osh, bus->sih,
				(sbpcieregs_t *) bus->regs);
#ifdef DHD_EFI
		dhdpcie_dongle_pwr_toggle(bus);
#endif
		if (bus->ltrsleep_on_unload) {
			si_corereg(bus->sih, bus->sih->buscoreidx,
				OFFSETOF(sbpcieregs_t, u.pcie2.ltr_state), ~0, 0);
		}

		if (bus->sih->buscorerev == 13) {
			pcie_serdes_iddqdisable(bus->osh, bus->sih, (sbpcieregs_t *) bus->regs);
		}

		/* Disable CLKREQ# */
		dhdpcie_clkreq(bus->osh, 1, 0);

		if (bus->sih != NULL) {
			si_detach(bus->sih);
			bus->sih = NULL;
		}
		if (bus->vars && bus->varsz)
			MFREE(osh, bus->vars, bus->varsz);
		bus->vars = NULL;
	}

	DHD_TRACE(("%s Exit\n", __FUNCTION__));
}

uint32
dhdpcie_bus_cfg_read_dword(dhd_bus_t *bus, uint32 addr, uint32 size)
{
	uint32 data = OSL_PCI_READ_CONFIG(bus->osh, addr, size);
	return data;
}

/** 32 bit config write */
void
dhdpcie_bus_cfg_write_dword(dhd_bus_t *bus, uint32 addr, uint32 size, uint32 data)
{
	OSL_PCI_WRITE_CONFIG(bus->osh, addr, size, data);
}

void
dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data)
{
	OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, 4, data);
}
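
/*
 * Usage sketch: point the 4 KB BAR0 window at the backplane region of interest
 * before accessing it through 'bus->regs' (this is what dhdpcie_dongle_attach()
 * above does with SI_ENUM_BASE; illustrative only):
 *
 *	dhdpcie_bus_cfg_set_bar0_win(bus, SI_ENUM_BASE);
 */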

void
dhdpcie_bus_dongle_setmemsize(struct dhd_bus *bus, int mem_size)
{
	int32 min_size = DONGLE_MIN_MEMSIZE;
	/* Restrict the memsize to user specified limit */
	DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n",
		dhd_dongle_memsize, min_size));
	if ((dhd_dongle_memsize > min_size) &&
		(dhd_dongle_memsize < (int32)bus->orig_ramsize))
		bus->ramsize = dhd_dongle_memsize;
}

void
dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh)
{
	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (bus->dhd && bus->dhd->dongle_reset)
		return;

	if (bus->vars && bus->varsz) {
		MFREE(osh, bus->vars, bus->varsz);
		bus->vars = NULL;
	}

	DHD_TRACE(("%s: Exit\n", __FUNCTION__));
	return;
}

/** Stop bus module: clear pending frames, disable data flow */
void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
{
	uint32 status;
	unsigned long flags;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (!bus->dhd)
		return;

	if (bus->dhd->busstate == DHD_BUS_DOWN) {
		DHD_ERROR(("%s: already down by net_dev_reset\n", __FUNCTION__));
		goto done;
	}

	DHD_DISABLE_RUNTIME_PM(bus->dhd);

	DHD_GENERAL_LOCK(bus->dhd, flags);
	bus->dhd->busstate = DHD_BUS_DOWN;
	DHD_GENERAL_UNLOCK(bus->dhd, flags);

	dhdpcie_bus_intr_disable(bus);
	status = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
	dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, status);

	if (!dhd_download_fw_on_driverload) {
		dhd_dpc_kill(bus->dhd);
	}

	/* Clear rx control and wake any waiters */
	dhd_os_set_ioctl_resp_timeout(IOCTL_DISABLE_TIMEOUT);
	dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_BUS_STOP);

done:
	return;
}

#ifdef DEVICE_TX_STUCK_DETECT
void
dhd_bus_send_msg_to_daemon(int reason)
{
	bcm_to_info_t to_info;

	to_info.magic = BCM_TO_MAGIC;
	to_info.reason = reason;

	dhd_send_msg_to_daemon(NULL, (void *)&to_info, sizeof(bcm_to_info_t));
	return;
}

/**
 * scan the flow rings in active list to check if stuck and notify application
 * The conditions for warn/stuck detection are
 * 1. Flow ring is active
 * 2. There are packets to be consumed by the consumer (wr != rd)
 * If 1 and 2 are true, then
 * 3. Warn, if Tx completion is not received for a duration of DEVICE_TX_STUCK_WARN_DURATION
 * 4. Trap FW, if Tx completion is not received for a duration of DEVICE_TX_STUCK_DURATION
 */
static void
dhd_bus_device_tx_stuck_scan(dhd_bus_t *bus)
{
	uint32 tx_cmpl;
	unsigned long list_lock_flags;
	unsigned long ring_lock_flags;
	dll_t *item, *prev;
	flow_ring_node_t *flow_ring_node;
	bool ring_empty;
	bool active;

	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, list_lock_flags);

	for (item = dll_tail_p(&bus->flowring_active_list);
		!dll_end(&bus->flowring_active_list, item); item = prev) {
		prev = dll_prev_p(item);

		flow_ring_node = dhd_constlist_to_flowring(item);
		DHD_FLOWRING_LOCK(flow_ring_node->lock, ring_lock_flags);
		tx_cmpl = flow_ring_node->tx_cmpl;
		active = flow_ring_node->active;
		ring_empty = dhd_prot_is_cmpl_ring_empty(bus->dhd, flow_ring_node->prot_info);
		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, ring_lock_flags);

		if (ring_empty) {
			/* reset counters, etc. */
			flow_ring_node->stuck_count = 0;
			flow_ring_node->tx_cmpl_prev = tx_cmpl;
			continue;
		}
		/**
		 * DEVICE_TX_STUCK_WARN_DURATION, DEVICE_TX_STUCK_DURATION are integer
		 * representation of time, to decide if a flow is in warn state or stuck.
		 *
		 * flow_ring_node->stuck_count is an integer counter representing how long
		 * tx_cmpl is not received though there are pending packets in the ring
		 * to be consumed by the dongle for that particular flow.
		 *
		 * This method of determining time elapsed is helpful in sleep/wake scenarios.
		 * If host sleeps and wakes up, that sleep time is not considered into
		 * stuck duration.
		 */
		if ((tx_cmpl == flow_ring_node->tx_cmpl_prev) && active) {
			flow_ring_node->stuck_count++;

			DHD_ERROR(("%s: flowid: %d tx_cmpl: %u tx_cmpl_prev: %u stuck_count: %d\n",
				__func__, flow_ring_node->flowid, tx_cmpl,
				flow_ring_node->tx_cmpl_prev, flow_ring_node->stuck_count));

			switch (flow_ring_node->stuck_count) {
			case DEVICE_TX_STUCK_WARN_DURATION:
				/**
				 * Notify Device Tx Stuck Notification App about the
				 * device Tx stuck warning for this flowid.
				 * App will collect the logs required.
				 */
				DHD_ERROR(("stuck warning for flowid: %d sent to app\n",
					flow_ring_node->flowid));
				dhd_bus_send_msg_to_daemon(REASON_DEVICE_TX_STUCK_WARNING);
				break;
			case DEVICE_TX_STUCK_DURATION:
				/**
				 * Notify Device Tx Stuck Notification App about the
				 * device Tx stuck info for this flowid.
				 * App will collect the logs required.
				 */
				DHD_ERROR(("stuck information for flowid: %d sent to app\n",
					flow_ring_node->flowid));
				dhd_bus_send_msg_to_daemon(REASON_DEVICE_TX_STUCK);
				break;
			default:
				break;
			}
		} else {
			flow_ring_node->tx_cmpl_prev = tx_cmpl;
			flow_ring_node->stuck_count = 0;
		}
	}
	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, list_lock_flags);
}
/**
 * schedules dhd_bus_device_tx_stuck_scan after DEVICE_TX_STUCK_CKECK_TIMEOUT,
 * to determine if any flowid is stuck.
 */
static void
dhd_bus_device_stuck_scan(dhd_bus_t *bus)
{
	uint32 time_stamp;	/* in millisec */
	uint32 diff;

	/* No need to run the algorithm if the dongle has trapped */
	if (bus->dhd->dongle_trap_occured) {
		return;
	}
	time_stamp = OSL_SYSUPTIME();
	diff = time_stamp - bus->device_tx_stuck_check;
	if (diff > DEVICE_TX_STUCK_CKECK_TIMEOUT) {
		dhd_bus_device_tx_stuck_scan(bus);
		bus->device_tx_stuck_check = OSL_SYSUPTIME();
	}
	return;
}
#endif /* DEVICE_TX_STUCK_DETECT */

/** Watchdog timer function */
bool dhd_bus_watchdog(dhd_pub_t *dhd)
{
	unsigned long flags;
	dhd_bus_t *bus;
	bus = dhd->bus;

	DHD_GENERAL_LOCK(dhd, flags);
	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd) ||
		DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd)) {
		DHD_GENERAL_UNLOCK(dhd, flags);
		return FALSE;
	}
	DHD_BUS_BUSY_SET_IN_WD(dhd);
	DHD_GENERAL_UNLOCK(dhd, flags);

#ifdef DHD_PCIE_RUNTIMEPM
	dhdpcie_runtime_bus_wake(dhd, TRUE, __builtin_return_address(0));
#endif /* DHD_PCIE_RUNTIMEPM */



	/* Poll for console output periodically */
	if (dhd->busstate == DHD_BUS_DATA &&
		dhd_console_ms != 0 && !bus->d3_suspend_pending) {
		bus->console.count += dhd_watchdog_ms;
		if (bus->console.count >= dhd_console_ms) {
			bus->console.count -= dhd_console_ms;
			/* Make sure backplane clock is on */
			if (dhdpcie_bus_readconsole(bus) < 0)
				dhd_console_ms = 0;	/* On error, stop trying */
		}
	}

#ifdef DHD_READ_INTSTATUS_IN_DPC
	if (bus->poll) {
		bus->ipend = TRUE;
		bus->dpc_sched = TRUE;
		dhd_sched_dpc(bus->dhd);	/* queue DPC now!! */
	}
#endif /* DHD_READ_INTSTATUS_IN_DPC */

#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
	/* If we haven't communicated with the device for a while, deassert the Device_Wake GPIO */
	if (dhd_doorbell_timeout != 0 && dhd->busstate == DHD_BUS_DATA &&
		dhd->up && dhd_timeout_expired(&bus->doorbell_timer)) {
		dhd_bus_set_device_wake(bus, FALSE);
	}
#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
#ifdef PCIE_INB_DW
	if (INBAND_DW_ENAB(bus)) {
		if (bus->ds_exit_timeout) {
			bus->ds_exit_timeout--;
			if (bus->ds_exit_timeout == 1) {
				DHD_ERROR(("DS-EXIT TIMEOUT\n"));
				bus->ds_exit_timeout = 0;
				bus->inband_ds_exit_to_cnt++;
			}
		}
		if (bus->host_sleep_exit_timeout) {
			bus->host_sleep_exit_timeout--;
			if (bus->host_sleep_exit_timeout == 1) {
				DHD_ERROR(("HOST_SLEEP-EXIT TIMEOUT\n"));
				bus->host_sleep_exit_timeout = 0;
				bus->inband_host_sleep_exit_to_cnt++;
			}
		}
	}
#endif /* PCIE_INB_DW */

#ifdef DEVICE_TX_STUCK_DETECT
	if (dhd->bus->dev_tx_stuck_monitor == TRUE) {
		dhd_bus_device_stuck_scan(dhd->bus);
	}
#endif /* DEVICE_TX_STUCK_DETECT */

	DHD_GENERAL_LOCK(dhd, flags);
	DHD_BUS_BUSY_CLEAR_IN_WD(dhd);
	dhd_os_busbusy_wake(dhd);
	DHD_GENERAL_UNLOCK(dhd, flags);
	return TRUE;
} /* dhd_bus_watchdog */


uint16
dhd_get_chipid(dhd_pub_t *dhd)
{
	dhd_bus_t *bus = dhd->bus;

	if (bus && bus->sih)
		return (uint16)si_chipid(bus->sih);
	else
		return 0;
}

/* Download firmware image and nvram image */
int
dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
	char *pfw_path, char *pnv_path,
	char *pclm_path, char *pconf_path)
{
	int ret;

	bus->fw_path = pfw_path;
	bus->nv_path = pnv_path;
	bus->dhd->clm_path = pclm_path;
	bus->dhd->conf_path = pconf_path;


#if defined(DHD_BLOB_EXISTENCE_CHECK)
	dhd_set_blob_support(bus->dhd, bus->fw_path);
#endif /* DHD_BLOB_EXISTENCE_CHECK */

	DHD_ERROR(("%s: firmware path=%s, nvram path=%s\n",
		__FUNCTION__, bus->fw_path, bus->nv_path));

	ret = dhdpcie_download_firmware(bus, osh);

	return ret;
}

void
dhd_set_path_params(struct dhd_bus *bus)
{
	/* External conf takes precedence if specified */
	dhd_conf_preinit(bus->dhd);

	if (bus->dhd->clm_path[0] == '\0') {
		dhd_conf_set_path(bus->dhd, "clm.blob", bus->dhd->clm_path, bus->fw_path);
	}
	dhd_conf_set_clm_name_by_chip(bus->dhd, bus->dhd->clm_path);
	if (bus->dhd->conf_path[0] == '\0') {
		dhd_conf_set_path(bus->dhd, "config.txt", bus->dhd->conf_path, bus->nv_path);
	}
#ifdef CONFIG_PATH_AUTO_SELECT
	dhd_conf_set_conf_name_by_chip(bus->dhd, bus->dhd->conf_path);
#endif

	dhd_conf_read_config(bus->dhd, bus->dhd->conf_path);

	dhd_conf_set_fw_name_by_chip(bus->dhd, bus->fw_path);
	dhd_conf_set_nv_name_by_chip(bus->dhd, bus->nv_path);
	dhd_conf_set_clm_name_by_chip(bus->dhd, bus->dhd->clm_path);

	printf("Final fw_path=%s\n", bus->fw_path);
	printf("Final nv_path=%s\n", bus->nv_path);
	printf("Final clm_path=%s\n", bus->dhd->clm_path);
	printf("Final conf_path=%s\n", bus->dhd->conf_path);
}

void
dhd_set_bus_params(struct dhd_bus *bus)
{
	if (bus->dhd->conf->dhd_poll >= 0) {
		bus->poll = bus->dhd->conf->dhd_poll;
		if (!bus->pollrate)
			bus->pollrate = 1;
		printf("%s: set polling mode %d\n", __FUNCTION__, bus->dhd->conf->dhd_poll);
	}
}

static int
dhdpcie_download_firmware(struct dhd_bus *bus, osl_t *osh)
{
	int ret = 0;
#if defined(BCM_REQUEST_FW)
	uint chipid = bus->sih->chip;
	uint revid = bus->sih->chiprev;
	char fw_path[64] = "/lib/firmware/brcm/bcm";	/* path to firmware image */
	char nv_path[64];	/* path to nvram vars file */
	bus->fw_path = fw_path;
	bus->nv_path = nv_path;
	switch (chipid) {
	case BCM43570_CHIP_ID:
		bcmstrncat(fw_path, "43570", 5);
		switch (revid) {
		case 0:
			bcmstrncat(fw_path, "a0", 2);
			break;
		case 2:
			bcmstrncat(fw_path, "a2", 2);
			break;
		default:
			DHD_ERROR(("%s: revid is not found %x\n", __FUNCTION__,
				revid));
			break;
		}
		break;
	default:
		DHD_ERROR(("%s: unsupported device %x\n", __FUNCTION__,
			chipid));
		return 0;
	}
	/* load board specific nvram file */
	snprintf(bus->nv_path, sizeof(nv_path), "%s.nvm", fw_path);
	/* load firmware */
	snprintf(bus->fw_path, sizeof(fw_path), "%s-firmware.bin", fw_path);
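	/* e.g. for chipid 43570 revid 2, the paths built above come out as:
	 *	nv_path = "/lib/firmware/brcm/bcm43570a2.nvm"
	 *	fw_path = "/lib/firmware/brcm/bcm43570a2-firmware.bin"
	 */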
#endif /* BCM_REQUEST_FW */

	DHD_OS_WAKE_LOCK(bus->dhd);

	dhd_set_path_params(bus);
	dhd_set_bus_params(bus);

	ret = _dhdpcie_download_firmware(bus);

	DHD_OS_WAKE_UNLOCK(bus->dhd);
	return ret;
}

static int
dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path)
{
	int bcmerror = BCME_ERROR;
	int offset = 0;
	int len = 0;
	bool store_reset;
	char *imgbuf = NULL;
	uint8 *memblock = NULL, *memptr;
	uint8 *memptr_tmp = NULL;	/* scratch buffer used to read back and verify the downloaded image */

	int offset_end = bus->ramsize;

#ifndef DHD_EFI
	DHD_ERROR(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
#endif /* DHD_EFI */

	/* Should succeed in opening image if it is actually given through registry
	 * entry or in module param.
	 */
	imgbuf = dhd_os_open_image(pfw_path);
	if (imgbuf == NULL) {
		printf("%s: Open firmware file failed %s\n", __FUNCTION__, pfw_path);
		goto err;
	}

	memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
	if (memblock == NULL) {
		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
		goto err;
	}
	if (dhd_msg_level & DHD_TRACE_VAL) {
		memptr_tmp = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
		if (memptr_tmp == NULL) {
			DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
			goto err;
		}
	}
1747 if ((uint32)(uintptr)memblock % DHD_SDALIGN) {
1748 memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
1749 }
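/* The adjustment above rounds memptr up to the next DHD_SDALIGN boundary
 * inside the over-allocated block. For example (alignment value hypothetical),
 * with DHD_SDALIGN == 32 and memblock ending in 0x...14, memptr advances by
 * 32 - 20 = 12 bytes, so every bus transfer below starts from an aligned
 * address.
 */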
1750
1751
1752 /* check if CR4/CA7 */
1753 store_reset = (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
1754 si_setcore(bus->sih, ARMCA7_CORE_ID, 0));
1755
1756 /* Download image with MEMBLOCK size */
1757 while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, imgbuf))) {
1758 if (len < 0) {
1759 DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len));
1760 bcmerror = BCME_ERROR;
1761 goto err;
1762 }
1763 /* if address is 0, store the reset instruction to be written in 0 */
1764 if (store_reset) {
1765 ASSERT(offset == 0);
1766 bus->resetinstr = *(((uint32*)memptr));
1767 /* Add start of RAM address to the address given by user */
1768 offset += bus->dongle_ram_base;
1769 offset_end += offset;
1770 store_reset = FALSE;
1771 }
1772
1773 bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len);
1774 if (bcmerror) {
1775 DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
1776 __FUNCTION__, bcmerror, MEMBLOCK, offset));
1777 goto err;
1778 }
1779
1780 if (dhd_msg_level & DHD_TRACE_VAL) {
1781 bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset, memptr_tmp, len);
1782 if (bcmerror) {
1783 DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
1784 __FUNCTION__, bcmerror, MEMBLOCK, offset));
1785 goto err;
1786 }
1787 if (memcmp(memptr_tmp, memptr, len)) {
1788 DHD_ERROR(("%s: Downloaded image is corrupted.\n", __FUNCTION__));
1789 goto err;
1790 } else
1791 DHD_INFO(("%s: Download, Upload and compare succeeded.\n", __FUNCTION__));
1792 }
1793 offset += MEMBLOCK;
1794
1795 if (offset >= offset_end) {
1796 DHD_ERROR(("%s: invalid address access to %x (offset end: %x)\n",
1797 __FUNCTION__, offset, offset_end));
1798 bcmerror = BCME_ERROR;
1799 goto err;
1800 }
1801 }
1802
1803 err:
1804 if (memblock) {
1805 MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
1806 if (dhd_msg_level & DHD_TRACE_VAL) {
1807 if (memptr_tmp)
1808 MFREE(bus->dhd->osh, memptr_tmp, MEMBLOCK + DHD_SDALIGN);
1809 }
1810 }
1811
1812 if (imgbuf) {
1813 dhd_os_close_image(imgbuf);
1814 }
1815
1816 return bcmerror;
1817 } /* dhdpcie_download_code_file */
1818
1819 #ifdef CUSTOMER_HW4_DEBUG
1820 #define MIN_NVRAMVARS_SIZE 128
1821 #endif /* CUSTOMER_HW4_DEBUG */
1822
1823 static int
1824 dhdpcie_download_nvram(struct dhd_bus *bus)
1825 {
1826 int bcmerror = BCME_ERROR;
1827 uint len;
1828 char * memblock = NULL;
1829 char *bufp;
1830 char *pnv_path;
1831 bool nvram_file_exists;
1832 bool nvram_uefi_exists = FALSE;
1833 bool local_alloc = FALSE;
1834 pnv_path = bus->nv_path;
1835
1836 #ifdef BCMEMBEDIMAGE
1837 nvram_file_exists = TRUE;
1838 #else
1839 nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0'));
1840 #endif
1841
1842 /* First try UEFI */
1843 len = MAX_NVRAMBUF_SIZE;
1844 dhd_get_download_buffer(bus->dhd, NULL, NVRAM, &memblock, (int *)&len);
1845
1846 /* If UEFI empty, then read from file system */
1847 if ((len <= 0) || (memblock == NULL)) {
1848 if (nvram_file_exists) {
1849 len = MAX_NVRAMBUF_SIZE;
1850 dhd_get_download_buffer(bus->dhd, pnv_path, NVRAM, &memblock, (int *)&len);
1851 if ((len <= 0 || len > MAX_NVRAMBUF_SIZE)) {
1852 goto err;
1853 }
1854 }
1855 else {
1856 /* For SROM OTP no external file or UEFI required */
1857 bcmerror = BCME_OK;
1858 }
1859 } else {
1860 nvram_uefi_exists = TRUE;
1861 }
1862
1863 DHD_ERROR(("%s: dhd_get_download_buffer len %d\n", __FUNCTION__, len));
1864
1865 if (len > 0 && len <= MAX_NVRAMBUF_SIZE && memblock != NULL) {
1866 bufp = (char *) memblock;
1867
1868 #ifdef CACHE_FW_IMAGES
1869 if (bus->processed_nvram_params_len) {
1870 len = bus->processed_nvram_params_len;
1871 }
1872
1873 if (!bus->processed_nvram_params_len) {
1874 bufp[len] = 0;
1875 if (nvram_uefi_exists || nvram_file_exists) {
1876 len = process_nvram_vars(bufp, len);
1877 bus->processed_nvram_params_len = len;
1878 }
1879 } else
1880 #else
1881 {
1882 bufp[len] = 0;
1883 if (nvram_uefi_exists || nvram_file_exists) {
1884 len = process_nvram_vars(bufp, len);
1885 }
1886 }
1887 #endif /* CACHE_FW_IMAGES */
1888
1889 DHD_ERROR(("%s: process_nvram_vars len %d\n", __FUNCTION__, len));
1890 #ifdef CUSTOMER_HW4_DEBUG
1891 if (len < MIN_NVRAMVARS_SIZE) {
1892 DHD_ERROR(("%s: invalid nvram size in process_nvram_vars \n",
1893 __FUNCTION__));
1894 bcmerror = BCME_ERROR;
1895 goto err;
1896 }
1897 #endif /* CUSTOMER_HW4_DEBUG */
1898
1899 if (len % 4) {
1900 len += 4 - (len % 4);
1901 }
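/* Worked example: a 9-byte vars blob is padded to len = 12 above; the
 * terminating NUL is then appended below, so dhdpcie_downloadvars() is
 * handed len + 1 = 13 bytes ending in a zero byte.
 */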
1902 bufp += len;
1903 *bufp++ = 0;
1904 if (len)
1905 bcmerror = dhdpcie_downloadvars(bus, memblock, len + 1);
1906 if (bcmerror) {
1907 DHD_ERROR(("%s: error downloading vars: %d\n",
1908 __FUNCTION__, bcmerror));
1909 }
1910 }
1911
1912
1913 err:
1914 if (memblock) {
1915 if (local_alloc) {
1916 MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE);
1917 } else {
1918 dhd_free_download_buffer(bus->dhd, memblock, MAX_NVRAMBUF_SIZE);
1919 }
1920 }
1921
1922 return bcmerror;
1923 }
1924
1925
1926 #ifdef BCMEMBEDIMAGE
1927 int
1928 dhdpcie_download_code_array(struct dhd_bus *bus)
1929 {
1930 int bcmerror = -1;
1931 int offset = 0;
1932 unsigned char *p_dlarray = NULL;
1933 unsigned int dlarray_size = 0;
1934 unsigned int downloded_len, remaining_len, len;
1935 char *p_dlimagename, *p_dlimagever, *p_dlimagedate;
1936 uint8 *memblock = NULL, *memptr;
1937
1938 downloded_len = 0;
1939 remaining_len = 0;
1940 len = 0;
1941
1942 #ifdef DHD_EFI
1943 p_dlarray = rtecdc_fw_arr;
1944 dlarray_size = sizeof(rtecdc_fw_arr);
1945 #else
1946 p_dlarray = dlarray;
1947 dlarray_size = sizeof(dlarray);
1948 p_dlimagename = dlimagename;
1949 p_dlimagever = dlimagever;
1950 p_dlimagedate = dlimagedate;
1951 #endif /* DHD_EFI */
1952
1953 #ifndef DHD_EFI
1954 if ((p_dlarray == 0) || (dlarray_size == 0) || (dlarray_size > bus->ramsize) ||
1955 (p_dlimagename == 0) || (p_dlimagever == 0) || (p_dlimagedate == 0))
1956 goto err;
1957 #endif /* DHD_EFI */
1958
1959 memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
1960 if (memblock == NULL) {
1961 DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
1962 goto err;
1963 }
1964 if ((uint32)(uintptr)memblock % DHD_SDALIGN)
1965 memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
1966
1967 while (downloded_len < dlarray_size) {
1968 remaining_len = dlarray_size - downloded_len;
1969 if (remaining_len >= MEMBLOCK)
1970 len = MEMBLOCK;
1971 else
1972 len = remaining_len;
1973
1974 memcpy(memptr, (p_dlarray + downloded_len), len);
1975 /* check if CR4/CA7 */
1976 if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
1977 si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
1978 /* if address is 0, store the reset instruction to be written in 0 */
1979 if (offset == 0) {
1980 bus->resetinstr = *(((uint32*)memptr));
1981 /* Add start of RAM address to the address given by user */
1982 offset += bus->dongle_ram_base;
1983 }
1984 }
1985 bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len);
1986 downloded_len += len;
1987 if (bcmerror) {
1988 DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
1989 __FUNCTION__, bcmerror, len, offset));
1990 goto err;
1991 }
1992 offset += MEMBLOCK;
1993 }
1994
1995 #ifdef DHD_DEBUG
1996 /* Upload and compare the downloaded code */
1997 {
1998 unsigned char *ularray = NULL;
1999 unsigned int uploded_len;
2000 uploded_len = 0;
2001 bcmerror = -1;
2002 ularray = MALLOC(bus->dhd->osh, dlarray_size);
2003 if (ularray == NULL)
2004 goto upload_err;
2005 /* Upload image to verify downloaded contents. */
2006 offset = bus->dongle_ram_base;
2007 memset(ularray, 0xaa, dlarray_size);
2008 while (uploded_len < dlarray_size) {
2009 remaining_len = dlarray_size - uploded_len;
2010 if (remaining_len >= MEMBLOCK)
2011 len = MEMBLOCK;
2012 else
2013 len = remaining_len;
2014 bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset,
2015 (uint8 *)(ularray + uploded_len), len);
2016 if (bcmerror) {
2017 DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
2018 __FUNCTION__, bcmerror, len, offset));
2019 goto upload_err;
2020 }
2021
2022 uploded_len += len;
2023 offset += MEMBLOCK;
2024 }
2025 #ifdef DHD_EFI
2026 if (memcmp(p_dlarray, ularray, dlarray_size)) {
2027 DHD_ERROR(("%s: Downloaded image is corrupted!\n", __FUNCTION__));
2028 goto upload_err;
2029 } else
2030 DHD_ERROR(("%s: Download, Upload and compare succeeded.\n", __FUNCTION__));
2031 #else
2032 if (memcmp(p_dlarray, ularray, dlarray_size)) {
2033 DHD_ERROR(("%s: Downloaded image is corrupted (%s, %s, %s).\n",
2034 __FUNCTION__, p_dlimagename, p_dlimagever, p_dlimagedate));
2035 goto upload_err;
2036 } else
2037 DHD_ERROR(("%s: Download, Upload and compare succeeded (%s, %s, %s).\n",
2038 __FUNCTION__, p_dlimagename, p_dlimagever, p_dlimagedate));
2039 #endif /* DHD_EFI */
2040
2041 upload_err:
2042 if (ularray)
2043 MFREE(bus->dhd->osh, ularray, dlarray_size);
2044 }
2045 #endif /* DHD_DEBUG */
2046 err:
2047
2048 if (memblock)
2049 MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
2050
2051 return bcmerror;
2052 } /* dhdpcie_download_code_array */
2053 #endif /* BCMEMBEDIMAGE */
2054
2055
2056 static int
2057 dhdpcie_ramsize_read_image(struct dhd_bus *bus, char *buf, int len)
2058 {
2059 int bcmerror = BCME_ERROR;
2060 char *imgbuf = NULL;
2061
2062 if (buf == NULL || len == 0)
2063 goto err;
2064
2065 /* External image takes precedence if specified */
2066 if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
2067 imgbuf = dhd_os_open_image(bus->fw_path);
2068 if (imgbuf == NULL) {
2069 DHD_ERROR(("%s: Failed to open firmware file\n", __FUNCTION__));
2070 goto err;
2071 }
2072
2073 /* Read it */
2074 if (len != dhd_os_get_image_block(buf, len, imgbuf)) {
2075 DHD_ERROR(("%s: Failed to read %d bytes data\n", __FUNCTION__, len));
2076 goto err;
2077 }
2078
2079 bcmerror = BCME_OK;
2080 }
2081
2082 err:
2083 if (imgbuf)
2084 dhd_os_close_image(imgbuf);
2085
2086 return bcmerror;
2087 }
2088
2089
2090 /* The ramsize can be changed in the dongle image; for example, the 4365 chip shares its sysmem
2091 * with the BMC, and how much sysmem belongs to the CA7 can be adjusted during dongle compilation.
2092 * So in DHD we need to detect this case and update the dongle RAMSIZE accordingly.
2093 */
2094 static void
2095 dhdpcie_ramsize_adj(struct dhd_bus *bus)
2096 {
2097 int i, search_len = 0;
2098 uint8 *memptr = NULL;
2099 uint8 *ramsizeptr = NULL;
2100 uint ramsizelen;
2101 uint32 ramsize_ptr_ptr[] = {RAMSIZE_PTR_PTR_LIST};
2102 hnd_ramsize_ptr_t ramsize_info;
2103
2104 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2105
2106 /* The dongle RAMSIZE adjustment was already done. */
2107 if (bus->ramsize_adjusted) {
2108 return;
2109 }
2110
2111 /* success or failure, we don't want to be here
2112 * more than once.
2113 */
2114 bus->ramsize_adjusted = TRUE;
2115
2116 /* Skip the adjustment if the user has restricted the dongle ram size */
2117 if (dhd_dongle_memsize) {
2118 DHD_ERROR(("%s: user restricted dongle ram size to %d.\n", __FUNCTION__,
2119 dhd_dongle_memsize));
2120 return;
2121 }
2122
2123 #ifndef BCMEMBEDIMAGE
2124 /* Out immediately if no image to download */
2125 if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
2126 DHD_ERROR(("%s: no fimrware file\n", __FUNCTION__));
2127 return;
2128 }
2129 #endif /* !BCMEMBEDIMAGE */
2130
2131 /* Get maximum RAMSIZE info search length */
2132 for (i = 0; ; i++) {
2133 if (ramsize_ptr_ptr[i] == RAMSIZE_PTR_PTR_END)
2134 break;
2135
2136 if (search_len < (int)ramsize_ptr_ptr[i])
2137 search_len = (int)ramsize_ptr_ptr[i];
2138 }
2139
2140 if (!search_len)
2141 return;
2142
2143 search_len += sizeof(hnd_ramsize_ptr_t);
2144
2145 memptr = MALLOC(bus->dhd->osh, search_len);
2146 if (memptr == NULL) {
2147 DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, search_len));
2148 return;
2149 }
2150
2151 /* External image takes precedence if specified */
2152 if (dhdpcie_ramsize_read_image(bus, (char *)memptr, search_len) != BCME_OK) {
2153 #if defined(BCMEMBEDIMAGE) && !defined(DHD_EFI)
2154 unsigned char *p_dlarray = NULL;
2155 unsigned int dlarray_size = 0;
2156 char *p_dlimagename, *p_dlimagever, *p_dlimagedate;
2157
2158 p_dlarray = dlarray;
2159 dlarray_size = sizeof(dlarray);
2160 p_dlimagename = dlimagename;
2161 p_dlimagever = dlimagever;
2162 p_dlimagedate = dlimagedate;
2163
2164 if ((p_dlarray == 0) || (dlarray_size == 0) || (p_dlimagename == 0) ||
2165 (p_dlimagever == 0) || (p_dlimagedate == 0))
2166 goto err;
2167
2168 ramsizeptr = p_dlarray;
2169 ramsizelen = dlarray_size;
2170 #else
2171 goto err;
2172 #endif /* BCMEMBEDIMAGE && !DHD_EFI */
2173 }
2174 else {
2175 ramsizeptr = memptr;
2176 ramsizelen = search_len;
2177 }
2178
2179 if (ramsizeptr) {
2180 /* Check Magic */
2181 for (i = 0; ; i++) {
2182 if (ramsize_ptr_ptr[i] == RAMSIZE_PTR_PTR_END)
2183 break;
2184
2185 if (ramsize_ptr_ptr[i] + sizeof(hnd_ramsize_ptr_t) > ramsizelen)
2186 continue;
2187
2188 memcpy((char *)&ramsize_info, ramsizeptr + ramsize_ptr_ptr[i],
2189 sizeof(hnd_ramsize_ptr_t));
2190
2191 if (ramsize_info.magic == HTOL32(HND_RAMSIZE_PTR_MAGIC)) {
2192 bus->orig_ramsize = LTOH32(ramsize_info.ram_size);
2193 bus->ramsize = LTOH32(ramsize_info.ram_size);
2194 DHD_ERROR(("%s: Adjust dongle RAMSIZE to 0x%x\n", __FUNCTION__,
2195 bus->ramsize));
2196 break;
2197 }
2198 }
2199 }
2200
2201 err:
2202 if (memptr)
2203 MFREE(bus->dhd->osh, memptr, search_len);
2204
2205 return;
2206 } /* dhdpcie_ramsize_adj */
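/* Illustrative walk-through (the offsets are hypothetical, not taken from
 * RAMSIZE_PTR_PTR_LIST): if the candidate offset list were { 0x6c, 0x87c },
 * search_len becomes 0x87c + sizeof(hnd_ramsize_ptr_t); the first search_len
 * bytes of the image are fetched once, then a hnd_ramsize_ptr_t is read at
 * each candidate offset until one carries HND_RAMSIZE_PTR_MAGIC, whose
 * ram_size replaces bus->ramsize.
 */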
2207
2208 static int
2209 _dhdpcie_download_firmware(struct dhd_bus *bus)
2210 {
2211 int bcmerror = -1;
2212
2213 bool embed = FALSE; /* download embedded firmware */
2214 bool dlok = FALSE; /* download firmware succeeded */
2215
2216 /* Out immediately if no image to download */
2217 if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
2218 #ifdef BCMEMBEDIMAGE
2219 embed = TRUE;
2220 #else
2221 DHD_ERROR(("%s: no fimrware file\n", __FUNCTION__));
2222 return 0;
2223 #endif
2224 }
2225 /* Adjust ram size */
2226 dhdpcie_ramsize_adj(bus);
2227
2228 /* Keep arm in reset */
2229 if (dhdpcie_bus_download_state(bus, TRUE)) {
2230 DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__));
2231 goto err;
2232 }
2233
2234 /* External image takes precedence if specified */
2235 if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
2236 if (dhdpcie_download_code_file(bus, bus->fw_path)) {
2237 DHD_ERROR(("%s: dongle image file download failed\n", __FUNCTION__));
2238 #ifdef BCMEMBEDIMAGE
2239 embed = TRUE;
2240 #else
2241 goto err;
2242 #endif
2243 } else {
2244 embed = FALSE;
2245 dlok = TRUE;
2246 }
2247 }
2248
2249 #ifdef BCMEMBEDIMAGE
2250 if (embed) {
2251 if (dhdpcie_download_code_array(bus)) {
2252 DHD_ERROR(("%s: dongle image array download failed\n", __FUNCTION__));
2253 goto err;
2254 } else {
2255 dlok = TRUE;
2256 }
2257 }
2258 #else
2259 BCM_REFERENCE(embed);
2260 #endif
2261 if (!dlok) {
2262 DHD_ERROR(("%s: dongle image download failed\n", __FUNCTION__));
2263 goto err;
2264 }
2265
2266 /* EXAMPLE: nvram_array */
2267 /* If a valid nvram_array is specified as above, it can be passed down to the dongle */
2268
2269
2270 /* External nvram takes precedence if specified */
2271 if (dhdpcie_download_nvram(bus)) {
2272 DHD_ERROR(("%s: dongle nvram file download failed\n", __FUNCTION__));
2273 goto err;
2274 }
2275
2276 /* Take arm out of reset */
2277 if (dhdpcie_bus_download_state(bus, FALSE)) {
2278 DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__));
2279 goto err;
2280 }
2281
2282 bcmerror = 0;
2283
2284 err:
2285 return bcmerror;
2286 } /* _dhdpcie_download_firmware */
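/* Recap of the sequence implemented above:
 *   1. dhdpcie_ramsize_adj()             - honour a RAM size embedded in the image
 *   2. dhdpcie_bus_download_state(TRUE)  - hold the ARM core in reset
 *   3. dhdpcie_download_code_file() or
 *      dhdpcie_download_code_array()     - write the image into dongle RAM
 *   4. dhdpcie_download_nvram()          - append the NVRAM vars
 *   5. dhdpcie_bus_download_state(FALSE) - release reset and let the firmware boot
 */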
2287
2288 #define CONSOLE_LINE_MAX 192
2289
2290 static int
2291 dhdpcie_bus_readconsole(dhd_bus_t *bus)
2292 {
2293 dhd_console_t *c = &bus->console;
2294 uint8 line[CONSOLE_LINE_MAX], ch;
2295 uint32 n, idx, addr;
2296 int rv;
2297
2298 /* Don't do anything until FWREADY updates console address */
2299 if (bus->console_addr == 0)
2300 return -1;
2301
2302 /* Read console log struct */
2303 addr = bus->console_addr + OFFSETOF(hnd_cons_t, log);
2304
2305 if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0)
2306 return rv;
2307
2308 /* Allocate console buffer (one time only) */
2309 if (c->buf == NULL) {
2310 c->bufsize = ltoh32(c->log.buf_size);
2311 if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL)
2312 return BCME_NOMEM;
2313 }
2314 idx = ltoh32(c->log.idx);
2315
2316 /* Protect against corrupt value */
2317 if (idx > c->bufsize)
2318 return BCME_ERROR;
2319
2320 /* Skip reading the console buffer if the index pointer has not moved */
2321 if (idx == c->last)
2322 return BCME_OK;
2323
2324 /* Read the console buffer */
2325 addr = ltoh32(c->log.buf);
2326 if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, c->buf, c->bufsize)) < 0)
2327 return rv;
2328
2329 while (c->last != idx) {
2330 for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
2331 if (c->last == idx) {
2332 /* This would output a partial line. Instead, back up
2333 * the buffer pointer and output this line next time around.
2334 */
2335 if (c->last >= n)
2336 c->last -= n;
2337 else
2338 c->last = c->bufsize - n;
2339 goto break2;
2340 }
2341 ch = c->buf[c->last];
2342 c->last = (c->last + 1) % c->bufsize;
2343 if (ch == '\n')
2344 break;
2345 line[n] = ch;
2346 }
2347
2348 if (n > 0) {
2349 if (line[n - 1] == '\r')
2350 n--;
2351 line[n] = 0;
2352 DHD_FWLOG(("CONSOLE: %s\n", line));
2353 }
2354 }
2355 break2:
2356
2357 return BCME_OK;
2358 } /* dhdpcie_bus_readconsole */
2359
2360 void
2361 dhd_bus_dump_console_buffer(dhd_bus_t *bus)
2362 {
2363 uint32 n, i;
2364 uint32 addr;
2365 char *console_buffer = NULL;
2366 uint32 console_ptr, console_size, console_index;
2367 uint8 line[CONSOLE_LINE_MAX], ch;
2368 int rv;
2369
2370 DHD_ERROR(("%s: Dump Complete Console Buffer\n", __FUNCTION__));
2371
2372 if (bus->is_linkdown) {
2373 DHD_ERROR(("%s: Skip dump Console Buffer due to PCIe link down\n", __FUNCTION__));
2374 return;
2375 }
2376
2377 addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log);
2378 if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
2379 (uint8 *)&console_ptr, sizeof(console_ptr))) < 0) {
2380 goto exit;
2381 }
2382
2383 addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.buf_size);
2384 if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
2385 (uint8 *)&console_size, sizeof(console_size))) < 0) {
2386 goto exit;
2387 }
2388
2389 addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.idx);
2390 if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
2391 (uint8 *)&console_index, sizeof(console_index))) < 0) {
2392 goto exit;
2393 }
2394
2395 console_ptr = ltoh32(console_ptr);
2396 console_size = ltoh32(console_size);
2397 console_index = ltoh32(console_index);
2398
2399 if (console_size > CONSOLE_BUFFER_MAX ||
2400 !(console_buffer = MALLOC(bus->dhd->osh, console_size))) {
2401 goto exit;
2402 }
2403
2404 if ((rv = dhdpcie_bus_membytes(bus, FALSE, console_ptr,
2405 (uint8 *)console_buffer, console_size)) < 0) {
2406 goto exit;
2407 }
2408
2409 for (i = 0, n = 0; i < console_size; i += n + 1) {
2410 for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
2411 ch = console_buffer[(console_index + i + n) % console_size];
2412 if (ch == '\n')
2413 break;
2414 line[n] = ch;
2415 }
2416
2417
2418 if (n > 0) {
2419 if (line[n - 1] == '\r')
2420 n--;
2421 line[n] = 0;
2422 /* Don't use DHD_ERROR macro since we print
2423 * a lot of information quickly. The macro
2424 * will truncate a lot of the printfs
2425 */
2426
2427 DHD_FWLOG(("CONSOLE: %s\n", line));
2428 }
2429 }
2430
2431 exit:
2432 if (console_buffer)
2433 MFREE(bus->dhd->osh, console_buffer, console_size);
2434 return;
2435 }
2436
2437 static int
2438 dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size)
2439 {
2440 int bcmerror = 0;
2441 uint msize = 512;
2442 char *mbuffer = NULL;
2443 uint maxstrlen = 256;
2444 char *str = NULL;
2445 pciedev_shared_t *local_pciedev_shared = bus->pcie_sh;
2446 struct bcmstrbuf strbuf;
2447 unsigned long flags;
2448
2449 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2450
2451 if (DHD_NOCHECKDIED_ON()) {
2452 return 0;
2453 }
2454
2455 if (data == NULL) {
2456 /*
2457 * Called after an rx ctrl timeout, so "data" is NULL.
2458 * Allocate memory to trace the trap or assert.
2459 */
2460 size = msize;
2461 mbuffer = data = MALLOC(bus->dhd->osh, msize);
2462
2463 if (mbuffer == NULL) {
2464 DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize));
2465 bcmerror = BCME_NOMEM;
2466 goto done;
2467 }
2468 }
2469
2470 if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) {
2471 DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen));
2472 bcmerror = BCME_NOMEM;
2473 goto done;
2474 }
2475 DHD_GENERAL_LOCK(bus->dhd, flags);
2476 DHD_BUS_BUSY_SET_IN_CHECKDIED(bus->dhd);
2477 DHD_GENERAL_UNLOCK(bus->dhd, flags);
2478
2479 if ((bcmerror = dhdpcie_readshared(bus)) < 0) {
2480 goto done;
2481 }
2482
2483 bcm_binit(&strbuf, data, size);
2484
2485 bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address : 0x%08X\n",
2486 local_pciedev_shared->msgtrace_addr, local_pciedev_shared->console_addr);
2487
2488 if ((local_pciedev_shared->flags & PCIE_SHARED_ASSERT_BUILT) == 0) {
2489 /* NOTE: Misspelled assert is intentional - DO NOT FIX.
2490 * (Avoids conflict with real asserts for programmatic parsing of output.)
2491 */
2492 bcm_bprintf(&strbuf, "Assrt not built in dongle\n");
2493 }
2494
2495 if ((bus->pcie_sh->flags & (PCIE_SHARED_ASSERT|PCIE_SHARED_TRAP)) == 0) {
2496 /* NOTE: Misspelled assert is intentional - DO NOT FIX.
2497 * (Avoids conflict with real asserts for programmatic parsing of output.)
2498 */
2499 bcm_bprintf(&strbuf, "No trap%s in dongle",
2500 (bus->pcie_sh->flags & PCIE_SHARED_ASSERT_BUILT)
2501 ?"/assrt" :"");
2502 } else {
2503 if (bus->pcie_sh->flags & PCIE_SHARED_ASSERT) {
2504 /* Download assert */
2505 bcm_bprintf(&strbuf, "Dongle assert");
2506 if (bus->pcie_sh->assert_exp_addr != 0) {
2507 str[0] = '\0';
2508 if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
2509 bus->pcie_sh->assert_exp_addr,
2510 (uint8 *)str, maxstrlen)) < 0) {
2511 goto done;
2512 }
2513
2514 str[maxstrlen - 1] = '\0';
2515 bcm_bprintf(&strbuf, " expr \"%s\"", str);
2516 }
2517
2518 if (bus->pcie_sh->assert_file_addr != 0) {
2519 str[0] = '\0';
2520 if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
2521 bus->pcie_sh->assert_file_addr,
2522 (uint8 *)str, maxstrlen)) < 0) {
2523 goto done;
2524 }
2525
2526 str[maxstrlen - 1] = '\0';
2527 bcm_bprintf(&strbuf, " file \"%s\"", str);
2528 }
2529
2530 bcm_bprintf(&strbuf, " line %d ", bus->pcie_sh->assert_line);
2531 }
2532
2533 if (bus->pcie_sh->flags & PCIE_SHARED_TRAP) {
2534 trap_t *tr = &bus->dhd->last_trap_info;
2535 bus->dhd->dongle_trap_occured = TRUE;
2536 if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
2537 bus->pcie_sh->trap_addr, (uint8*)tr, sizeof(trap_t))) < 0) {
2538 goto done;
2539 }
2540 dhd_bus_dump_trap_info(bus, &strbuf);
2541
2542 dhd_bus_dump_console_buffer(bus);
2543 }
2544 }
2545
2546 if (bus->pcie_sh->flags & (PCIE_SHARED_ASSERT | PCIE_SHARED_TRAP)) {
2547 printf("%s: %s\n", __FUNCTION__, strbuf.origbuf);
2548 #ifdef REPORT_FATAL_TIMEOUTS
2549 /**
2550 * stop the timers as FW trapped
2551 */
2552 if (dhd_stop_scan_timer(bus->dhd)) {
2553 DHD_ERROR(("dhd_stop_scan_timer failed\n"));
2554 ASSERT(0);
2555 }
2556 if (dhd_stop_bus_timer(bus->dhd)) {
2557 DHD_ERROR(("dhd_stop_bus_timer failed\n"));
2558 ASSERT(0);
2559 }
2560 if (dhd_stop_cmd_timer(bus->dhd)) {
2561 DHD_ERROR(("dhd_stop_cmd_timer failed\n"));
2562 ASSERT(0);
2563 }
2564 if (dhd_stop_join_timer(bus->dhd)) {
2565 DHD_ERROR(("dhd_stop_join_timer failed\n"));
2566 ASSERT(0);
2567 }
2568 #endif /* REPORT_FATAL_TIMEOUTS */
2569
2570 dhd_prot_debug_info_print(bus->dhd);
2571
2572 #if defined(DHD_FW_COREDUMP)
2573 /* save core dump or write to a file */
2574 if (bus->dhd->memdump_enabled) {
2575 bus->dhd->memdump_type = DUMP_TYPE_DONGLE_TRAP;
2576 dhdpcie_mem_dump(bus);
2577 }
2578 #endif /* DHD_FW_COREDUMP */
2579
2580 /* wake up IOCTL wait event */
2581 dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_TRAP);
2582
2583 dhd_schedule_reset(bus->dhd);
2584 }
2585
2586 DHD_GENERAL_LOCK(bus->dhd, flags);
2587 DHD_BUS_BUSY_CLEAR_IN_CHECKDIED(bus->dhd);
2588 dhd_os_busbusy_wake(bus->dhd);
2589 DHD_GENERAL_UNLOCK(bus->dhd, flags);
2590
2591 done:
2592 if (mbuffer)
2593 MFREE(bus->dhd->osh, mbuffer, msize);
2594 if (str)
2595 MFREE(bus->dhd->osh, str, maxstrlen);
2596
2597 return bcmerror;
2598 } /* dhdpcie_checkdied */
2599
2600
2601 /* Custom copy of dhdpcie_mem_dump() that can be called at interrupt level */
2602 void dhdpcie_mem_dump_bugcheck(dhd_bus_t *bus, uint8 *buf)
2603 {
2604 int ret = 0;
2605 int size; /* Full mem size */
2606 int start; /* Start address */
2607 int read_size = 0; /* Read size of each iteration */
2608 uint8 *databuf = buf;
2609
2610 if (bus == NULL) {
2611 return;
2612 }
2613
2614 start = bus->dongle_ram_base;
2615 read_size = 4;
2616 /* check for dead bus */
2617 {
2618 uint test_word = 0;
2619 ret = dhdpcie_bus_membytes(bus, FALSE, start, (uint8*)&test_word, read_size);
2620 /* if read error or bus timeout */
2621 if (ret || (test_word == 0xFFFFFFFF)) {
2622 return;
2623 }
2624 }
2625
2626 /* Get full mem size */
2627 size = bus->ramsize;
2628 /* Read mem content */
2629 while (size)
2630 {
2631 read_size = MIN(MEMBLOCK, size);
2632 if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size))) {
2633 return;
2634 }
2635
2636 /* Decrement size and increment start address */
2637 size -= read_size;
2638 start += read_size;
2639 databuf += read_size;
2640 }
2641 bus->dhd->soc_ram = buf;
2642 bus->dhd->soc_ram_length = bus->ramsize;
2643 return;
2644 }
2645
2646
2647 #if defined(DHD_FW_COREDUMP)
2648 static int
2649 dhdpcie_mem_dump(dhd_bus_t *bus)
2650 {
2651 int ret = 0;
2652 int size; /* Full mem size */
2653 int start = bus->dongle_ram_base; /* Start address */
2654 int read_size = 0; /* Read size of each iteration */
2655 uint8 *buf = NULL, *databuf = NULL;
2656
2657 #ifdef EXYNOS_PCIE_DEBUG
2658 exynos_pcie_register_dump(1);
2659 #endif /* EXYNOS_PCIE_DEBUG */
2660
2661 #ifdef SUPPORT_LINKDOWN_RECOVERY
2662 if (bus->is_linkdown) {
2663 DHD_ERROR(("%s: PCIe link is down so skip\n", __FUNCTION__));
2664 return BCME_ERROR;
2665 }
2666 #endif /* SUPPORT_LINKDOWN_RECOVERY */
2667
2668 /* Get full mem size */
2669 size = bus->ramsize;
2670 buf = dhd_get_fwdump_buf(bus->dhd, size);
2671 if (!buf) {
2672 DHD_ERROR(("%s: Out of memory (%d bytes)\n", __FUNCTION__, size));
2673 return BCME_ERROR;
2674 }
2675
2676 /* Read mem content */
2677 DHD_TRACE_HW4(("Dump dongle memory\n"));
2678 databuf = buf;
2679 while (size)
2680 {
2681 read_size = MIN(MEMBLOCK, size);
2682 if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size)))
2683 {
2684 DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__, ret));
2685 bus->dhd->memdump_success = FALSE;
2686 return BCME_ERROR;
2687 }
2688 DHD_TRACE(("."));
2689
2690 /* Decrement size and increment start address */
2691 size -= read_size;
2692 start += read_size;
2693 databuf += read_size;
2694 }
2695 bus->dhd->memdump_success = TRUE;
2696
2697 dhd_schedule_memdump(bus->dhd, buf, bus->ramsize);
2698 /* buf, actually soc_ram free handled in dhd_{free,clear} */
2699
2700 return ret;
2701 }
2702
2703 int
2704 dhd_bus_mem_dump(dhd_pub_t *dhdp)
2705 {
2706 dhd_bus_t *bus = dhdp->bus;
2707
2708 if (dhdp->busstate == DHD_BUS_DOWN) {
2709 DHD_ERROR(("%s bus is down\n", __FUNCTION__));
2710 return BCME_ERROR;
2711 }
2712 #ifdef DHD_PCIE_RUNTIMEPM
2713 if (dhdp->memdump_type == DUMP_TYPE_BY_SYSDUMP) {
2714 DHD_ERROR(("%s : bus wakeup by SYSDUMP\n", __FUNCTION__));
2715 dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0));
2716 }
2717 #endif /* DHD_PCIE_RUNTIMEPM */
2718
2719 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) {
2720 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
2721 __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
2722 return BCME_ERROR;
2723 }
2724
2725 return dhdpcie_mem_dump(bus);
2726 }
2727
2728 int
2729 dhd_dongle_mem_dump(void)
2730 {
2731 if (!g_dhd_bus) {
2732 DHD_ERROR(("%s: Bus is NULL\n", __FUNCTION__));
2733 return -ENODEV;
2734 }
2735
2736 dhd_bus_dump_console_buffer(g_dhd_bus);
2737 dhd_prot_debug_info_print(g_dhd_bus->dhd);
2738
2739 g_dhd_bus->dhd->memdump_enabled = DUMP_MEMFILE_BUGON;
2740 g_dhd_bus->dhd->memdump_type = DUMP_TYPE_AP_ABNORMAL_ACCESS;
2741
2742 #ifdef DHD_PCIE_RUNTIMEPM
2743 dhdpcie_runtime_bus_wake(g_dhd_bus->dhd, TRUE, __builtin_return_address(0));
2744 #endif /* DHD_PCIE_RUNTIMEPM */
2745
2746 DHD_OS_WAKE_LOCK(g_dhd_bus->dhd);
2747 dhd_bus_mem_dump(g_dhd_bus->dhd);
2748 DHD_OS_WAKE_UNLOCK(g_dhd_bus->dhd);
2749 return 0;
2750 }
2751 EXPORT_SYMBOL(dhd_dongle_mem_dump);
2752 #endif /* DHD_FW_COREDUMP */
2753
2754 int
2755 dhd_socram_dump(dhd_bus_t *bus)
2756 {
2757 #ifdef DHD_PCIE_RUNTIMEPM
2758 dhdpcie_runtime_bus_wake(bus->dhd, TRUE, __builtin_return_address(0));
2759 #endif /* DHD_PCIE_RUNTIMEPM */
2760
2761 #if defined(DHD_FW_COREDUMP)
2762 DHD_OS_WAKE_LOCK(bus->dhd);
2763 dhd_bus_mem_dump(bus->dhd);
2764 DHD_OS_WAKE_UNLOCK(bus->dhd);
2765 return 0;
2766 #else
2767 return -1;
2768 #endif
2769 }
2770
2771 /**
2772 * Transfers bytes from host to dongle using pio mode.
2773 * Parameter 'address' is a backplane address.
2774 */
2775 static int
2776 dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size)
2777 {
2778 uint dsize;
2779 int detect_endian_flag = 0x01;
2780 bool little_endian;
2781
2782 if (write && bus->is_linkdown) {
2783 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
2784 return BCME_ERROR;
2785 }
2786
2787
2788 /* Detect endianness. */
2789 little_endian = *(char *)&detect_endian_flag;
2790
2791 /* In remap mode, adjust address beyond socram and redirect
2792 * to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize
2793 * is not backplane accessible
2794 */
2795
2796 /* Determine initial transfer parameters */
2797 #ifdef DHD_SUPPORT_64BIT
2798 dsize = sizeof(uint64);
2799 #else /* !DHD_SUPPORT_64BIT */
2800 dsize = sizeof(uint32);
2801 #endif /* DHD_SUPPORT_64BIT */
2802
2803 /* Do the transfer(s) */
2804 DHD_INFO(("%s: %s %d bytes in window 0x%08lx\n",
2805 __FUNCTION__, (write ? "write" : "read"), size, address));
2806 if (write) {
2807 while (size) {
2808 #ifdef DHD_SUPPORT_64BIT
2809 if (size >= sizeof(uint64) && little_endian && !(address % 8)) {
2810 dhdpcie_bus_wtcm64(bus, address, *((uint64 *)data));
2811 }
2812 #else /* !DHD_SUPPORT_64BIT */
2813 if (size >= sizeof(uint32) && little_endian && !(address % 4)) {
2814 dhdpcie_bus_wtcm32(bus, address, *((uint32*)data));
2815 }
2816 #endif /* DHD_SUPPORT_64BIT */
2817 else {
2818 dsize = sizeof(uint8);
2819 dhdpcie_bus_wtcm8(bus, address, *data);
2820 }
2821
2822 /* Adjust for next transfer (if any) */
2823 if ((size -= dsize)) {
2824 data += dsize;
2825 address += dsize;
2826 }
2827 }
2828 } else {
2829 while (size) {
2830 #ifdef DHD_SUPPORT_64BIT
2831 if (size >= sizeof(uint64) && little_endian && !(address % 8))
2832 {
2833 *(uint64 *)data = dhdpcie_bus_rtcm64(bus, address);
2834 }
2835 #else /* !DHD_SUPPORT_64BIT */
2836 if (size >= sizeof(uint32) && little_endian && !(address % 4))
2837 {
2838 *(uint32 *)data = dhdpcie_bus_rtcm32(bus, address);
2839 }
2840 #endif /* DHD_SUPPORT_64BIT */
2841 else {
2842 dsize = sizeof(uint8);
2843 *data = dhdpcie_bus_rtcm8(bus, address);
2844 }
2845
2846 /* Adjust for next transfer (if any) */
2847 if ((size -= dsize) > 0) {
2848 data += dsize;
2849 address += dsize;
2850 }
2851 }
2852 }
2853 return BCME_OK;
2854 } /* dhdpcie_bus_membytes */
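/* A minimal caller sketch (illustrative only; example_read_dongle_word() is
 * hypothetical and not part of the driver): dongle shared memory is
 * little-endian, so a word fetched via dhdpcie_bus_membytes() still needs an
 * ltoh32() before the host consumes it.
 */
#if 0
static int
example_read_dongle_word(dhd_bus_t *bus, ulong address, uint32 *out)
{
uint32 raw = 0;
int rv;

/* read 4 raw bytes from the backplane address */
rv = dhdpcie_bus_membytes(bus, FALSE, address, (uint8 *)&raw, sizeof(raw));
if (rv < 0)
return rv;

*out = ltoh32(raw); /* convert from dongle (LE) to host byte order */
return BCME_OK;
}
#endif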
2855
2856 /**
2857 * Transfers one transmit (ethernet) packet that was queued in the (flow controlled) flow ring queue
2858 * to the (non flow controlled) flow ring.
2859 */
2860 int BCMFASTPATH
2861 dhd_bus_schedule_queue(struct dhd_bus *bus, uint16 flow_id, bool txs)
2862 {
2863 flow_ring_node_t *flow_ring_node;
2864 int ret = BCME_OK;
2865 #ifdef DHD_LOSSLESS_ROAMING
2866 dhd_pub_t *dhdp = bus->dhd;
2867 #endif
2868 DHD_INFO(("%s: flow_id is %d\n", __FUNCTION__, flow_id));
2869
2870 /* Sanity check on flow_id */
2871 if (flow_id >= bus->max_submission_rings) {
2872 DHD_ERROR(("%s: flow_id is invalid %d, max %d\n", __FUNCTION__,
2873 flow_id, bus->max_submission_rings));
2874 return 0;
2875 }
2876
2877 flow_ring_node = DHD_FLOW_RING(bus->dhd, flow_id);
2878
2879 #ifdef DHD_LOSSLESS_ROAMING
2880 if ((dhdp->dequeue_prec_map & (1 << flow_ring_node->flow_info.tid)) == 0) {
2881 DHD_INFO(("%s: tid %d is not in precedence map. block scheduling\n",
2882 __FUNCTION__, flow_ring_node->flow_info.tid));
2883 return BCME_OK;
2884 }
2885 #endif /* DHD_LOSSLESS_ROAMING */
2886
2887 {
2888 unsigned long flags;
2889 void *txp = NULL;
2890 flow_queue_t *queue;
2891 #ifdef DHD_LOSSLESS_ROAMING
2892 struct ether_header *eh;
2893 uint8 *pktdata;
2894 #endif /* DHD_LOSSLESS_ROAMING */
2895
2896 queue = &flow_ring_node->queue; /* queue associated with flow ring */
2897
2898 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
2899
2900 if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) {
2901 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
2902 return BCME_NOTREADY;
2903 }
2904
2905 while ((txp = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
2906 PKTORPHAN(txp, bus->dhd->conf->tsq);
2907
2908 /*
2909 * Modifying the packet length caused P2P cert failures.
2910 * Specifically, on test cases where a 52-byte packet was
2911 * injected, the sniffer capture showed 62 bytes, which made
2912 * the cert tests fail. So the change below is made
2913 * Router specific only.
2914 */
2915
2916 #ifdef DHDTCPACK_SUPPRESS
2917 if (bus->dhd->tcpack_sup_mode != TCPACK_SUP_HOLD) {
2918 ret = dhd_tcpack_check_xmit(bus->dhd, txp);
2919 if (ret != BCME_OK) {
2920 DHD_ERROR(("%s: dhd_tcpack_check_xmit() error.\n",
2921 __FUNCTION__));
2922 }
2923 }
2924 #endif /* DHDTCPACK_SUPPRESS */
2925 #ifdef DHD_LOSSLESS_ROAMING
2926 pktdata = (uint8 *)PKTDATA(OSH_NULL, txp);
2927 eh = (struct ether_header *) pktdata;
2928 if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) {
2929 uint8 prio = (uint8)PKTPRIO(txp);
2930
2931 /* Restore to original priority for 802.1X packet */
2932 if (prio == PRIO_8021D_NC) {
2933 PKTSETPRIO(txp, dhdp->prio_8021x);
2934 }
2935 }
2936 #endif /* DHD_LOSSLESS_ROAMING */
2937
2938 /* Attempt to transfer packet over flow ring */
2939 ret = dhd_prot_txdata(bus->dhd, txp, flow_ring_node->flow_info.ifindex);
2940 if (ret != BCME_OK) { /* may not have resources in flow ring */
2941 DHD_INFO(("%s: Reinserrt %d\n", __FUNCTION__, ret));
2942 dhd_prot_txdata_write_flush(bus->dhd, flow_id, FALSE);
2943 /* reinsert at head */
2944 dhd_flow_queue_reinsert(bus->dhd, queue, txp);
2945 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
2946
2947 /* If we are able to requeue back, return success */
2948 return BCME_OK;
2949 }
2950 }
2951
2952 dhd_prot_txdata_write_flush(bus->dhd, flow_id, FALSE);
2953
2954 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
2955 }
2956
2957 return ret;
2958 } /* dhd_bus_schedule_queue */
2959
2960 /** Sends an (ethernet) data frame (in 'txp') to the dongle. Callee disposes of txp. */
2961 int BCMFASTPATH
2962 dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx)
2963 {
2964 uint16 flowid;
2965 #ifdef IDLE_TX_FLOW_MGMT
2966 uint8 node_status;
2967 #endif /* IDLE_TX_FLOW_MGMT */
2968 flow_queue_t *queue;
2969 flow_ring_node_t *flow_ring_node;
2970 unsigned long flags;
2971 int ret = BCME_OK;
2972 void *txp_pend = NULL;
2973
2974 if (!bus->dhd->flowid_allocator) {
2975 DHD_ERROR(("%s: Flow ring not intited yet \n", __FUNCTION__));
2976 goto toss;
2977 }
2978
2979 flowid = DHD_PKT_GET_FLOWID(txp);
2980
2981 flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
2982
2983 DHD_TRACE(("%s: pkt flowid %d, status %d active %d\n",
2984 __FUNCTION__, flowid, flow_ring_node->status, flow_ring_node->active));
2985
2986 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
2987 if ((flowid >= bus->dhd->num_flow_rings) ||
2988 #ifdef IDLE_TX_FLOW_MGMT
2989 (!flow_ring_node->active))
2990 #else
2991 (!flow_ring_node->active) ||
2992 (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) ||
2993 (flow_ring_node->status == FLOW_RING_STATUS_STA_FREEING))
2994 #endif /* IDLE_TX_FLOW_MGMT */
2995 {
2996 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
2997 DHD_INFO(("%s: Dropping pkt flowid %d, status %d active %d\n",
2998 __FUNCTION__, flowid, flow_ring_node->status,
2999 flow_ring_node->active));
3000 ret = BCME_ERROR;
3001 goto toss;
3002 }
3003
3004 #ifdef IDLE_TX_FLOW_MGMT
3005 node_status = flow_ring_node->status;
3006
3007 /* handle the different status states here */
3008 switch (node_status)
3009 {
3010 case FLOW_RING_STATUS_OPEN:
3011
3012 if (bus->enable_idle_flowring_mgmt) {
3013 /* Move the node to the head of active list */
3014 dhd_flow_ring_move_to_active_list_head(bus, flow_ring_node);
3015 }
3016 break;
3017
3018 case FLOW_RING_STATUS_SUSPENDED:
3019 DHD_INFO(("Need to Initiate TX Flow resume\n"));
3020 /* Issue resume_ring request */
3021 dhd_bus_flow_ring_resume_request(bus,
3022 flow_ring_node);
3023 break;
3024
3025 case FLOW_RING_STATUS_CREATE_PENDING:
3026 case FLOW_RING_STATUS_RESUME_PENDING:
3027 /* Don't do anything here */
3028 DHD_INFO(("Waiting for Flow create/resume! status is %u\n",
3029 node_status));
3030 break;
3031
3032 case FLOW_RING_STATUS_DELETE_PENDING:
3033 default:
3034 DHD_ERROR(("Dropping packet!! flowid %u status is %u\n",
3035 flowid, node_status));
3036 /* error here!! */
3037 ret = BCME_ERROR;
3038 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
3039 goto toss;
3040 }
3041 /* Now queue the packet */
3042 #endif /* IDLE_TX_FLOW_MGMT */
3043
3044 queue = &flow_ring_node->queue; /* queue associated with flow ring */
3045
3046 if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK)
3047 txp_pend = txp;
3048
3049 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
3050
3051 if (flow_ring_node->status) {
3052 DHD_INFO(("%s: Enq pkt flowid %d, status %d active %d\n",
3053 __FUNCTION__, flowid, flow_ring_node->status,
3054 flow_ring_node->active));
3055 if (txp_pend) {
3056 txp = txp_pend;
3057 goto toss;
3058 }
3059 return BCME_OK;
3060 }
3061 ret = dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */
3062
3063 /* If we have anything pending, try to push into q */
3064 if (txp_pend) {
3065 DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
3066
3067 if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp_pend)) != BCME_OK) {
3068 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
3069 txp = txp_pend;
3070 goto toss;
3071 }
3072
3073 DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
3074 }
3075
3076 return ret;
3077
3078 toss:
3079 DHD_INFO(("%s: Toss %d\n", __FUNCTION__, ret));
3080 /* for EFI, pass the 'send' flag as false, to avoid enqueuing the failed tx pkt
3081 * into the Tx done queue
3082 */
3083 #ifdef DHD_EFI
3084 PKTCFREE(bus->dhd->osh, txp, FALSE);
3085 #else
3086 PKTCFREE(bus->dhd->osh, txp, TRUE);
3087 #endif
3088 return ret;
3089 } /* dhd_bus_txdata */
3090
3091
3092 void
3093 dhd_bus_stop_queue(struct dhd_bus *bus)
3094 {
3095 dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
3096 bus->bus_flowctrl = TRUE;
3097 }
3098
3099 void
3100 dhd_bus_start_queue(struct dhd_bus *bus)
3101 {
3102 dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
3103 bus->bus_flowctrl = FALSE;
3104 }
3105
3106 /* Device console input function */
3107 int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen)
3108 {
3109 dhd_bus_t *bus = dhd->bus;
3110 uint32 addr, val;
3111 int rv;
3112 /* Address could be zero if CONSOLE := 0 in dongle Makefile */
3113 if (bus->console_addr == 0)
3114 return BCME_UNSUPPORTED;
3115
3116 /* Don't allow input if dongle is in reset */
3117 if (bus->dhd->dongle_reset) {
3118 return BCME_NOTREADY;
3119 }
3120
3121 /* Zero cbuf_index */
3122 addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf_idx);
3123 val = htol32(0);
3124 if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
3125 goto done;
3126
3127 /* Write message into cbuf */
3128 addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf);
3129 if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0)
3130 goto done;
3131
3132 /* Write length into vcons_in */
3133 addr = bus->console_addr + OFFSETOF(hnd_cons_t, vcons_in);
3134 val = htol32(msglen);
3135 if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
3136 goto done;
3137
3138 /* generate an interrupt to dongle to indicate that it needs to process cons command */
3139 dhdpcie_send_mb_data(bus, H2D_HOST_CONS_INT);
3140 done:
3141 return rv;
3142 } /* dhd_bus_console_in */
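/* Usage sketch (illustrative only; both the wrapper and the "mu" command
 * string are hypothetical): a console command is a plain text line, and the
 * three membytes writes above plus the H2D_HOST_CONS_INT doorbell are all it
 * takes to hand it to the dongle.
 */
#if 0
static int
example_send_console_cmd(dhd_pub_t *dhd)
{
char cmd[] = "mu\n"; /* hypothetical dongle console command */

return dhd_bus_console_in(dhd, (uchar *)cmd, sizeof(cmd) - 1);
}
#endif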
3143
3144 /**
3145 * Called on frame reception, the frame was received from the dongle on interface 'ifidx' and is
3146 * contained in 'pkt'. Processes rx frame, forwards up the layer to netif.
3147 */
3148 void BCMFASTPATH
3149 dhd_bus_rx_frame(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count)
3150 {
3151 dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, 0);
3152 }
3153
3154 /** 'offset' is a backplane address */
3155 void
3156 dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data)
3157 {
3158 W_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset), data);
3159 }
3160
3161 uint8
3162 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset)
3163 {
3164 volatile uint8 data;
3165 data = R_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset));
3166 return data;
3167 }
3168
3169 void
3170 dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data)
3171 {
3172 W_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset), data);
3173 }
3174 void
3175 dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data)
3176 {
3177 W_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset), data);
3178 }
3179 #ifdef DHD_SUPPORT_64BIT
3180 void
3181 dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data)
3182 {
3183 W_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset), data);
3184 }
3185 #endif /* DHD_SUPPORT_64BIT */
3186
3187 uint16
3188 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset)
3189 {
3190 volatile uint16 data;
3191 data = R_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset));
3192 return data;
3193 }
3194
3195 uint32
3196 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset)
3197 {
3198 volatile uint32 data;
3199 data = R_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset));
3200 return data;
3201 }
3202
3203 #ifdef DHD_SUPPORT_64BIT
3204 uint64
3205 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset)
3206 {
3207 volatile uint64 data;
3208 data = R_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset));
3209 return data;
3210 }
3211 #endif /* DHD_SUPPORT_64BIT */
3212
3213 /** A snippet of dongle memory is shared between host and dongle */
3214 void
3215 dhd_bus_cmn_writeshared(dhd_bus_t *bus, void *data, uint32 len, uint8 type, uint16 ringid)
3216 {
3217 uint64 long_data;
3218 uintptr tcm_offset;
3219
3220 DHD_INFO(("%s: writing to dongle type %d len %d\n", __FUNCTION__, type, len));
3221
3222 if (bus->is_linkdown) {
3223 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
3224 return;
3225 }
3226
3227 switch (type) {
3228 case D2H_DMA_SCRATCH_BUF:
3229 {
3230 pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr;
3231 long_data = HTOL64(*(uint64 *)data);
3232 tcm_offset = (uintptr)&(sh->host_dma_scratch_buffer);
3233 dhdpcie_bus_membytes(bus, TRUE,
3234 (ulong)tcm_offset, (uint8*) &long_data, len);
3235 if (dhd_msg_level & DHD_INFO_VAL) {
3236 prhex(__FUNCTION__, data, len);
3237 }
3238 break;
3239 }
3240
3241 case D2H_DMA_SCRATCH_BUF_LEN :
3242 {
3243 pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr;
3244 tcm_offset = (uintptr)&(sh->host_dma_scratch_buffer_len);
3245 dhdpcie_bus_wtcm32(bus,
3246 (ulong)tcm_offset, (uint32) HTOL32(*(uint32 *)data));
3247 if (dhd_msg_level & DHD_INFO_VAL) {
3248 prhex(__FUNCTION__, data, len);
3249 }
3250 break;
3251 }
3252
3253 case H2D_DMA_INDX_WR_BUF:
3254 {
3255 pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh;
3256
3257 long_data = HTOL64(*(uint64 *)data);
3258 tcm_offset = (uintptr)shmem->rings_info_ptr;
3259 tcm_offset += OFFSETOF(ring_info_t, h2d_w_idx_hostaddr);
3260 dhdpcie_bus_membytes(bus, TRUE,
3261 (ulong)tcm_offset, (uint8*) &long_data, len);
3262 if (dhd_msg_level & DHD_INFO_VAL) {
3263 prhex(__FUNCTION__, data, len);
3264 }
3265 break;
3266 }
3267
3268 case H2D_DMA_INDX_RD_BUF:
3269 {
3270 pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh;
3271 long_data = HTOL64(*(uint64 *)data);
3272 tcm_offset = (uintptr)shmem->rings_info_ptr;
3273 tcm_offset += OFFSETOF(ring_info_t, h2d_r_idx_hostaddr);
3274 dhdpcie_bus_membytes(bus, TRUE,
3275 (ulong)tcm_offset, (uint8*) &long_data, len);
3276 if (dhd_msg_level & DHD_INFO_VAL) {
3277 prhex(__FUNCTION__, data, len);
3278 }
3279 break;
3280 }
3281
3282 case D2H_DMA_INDX_WR_BUF:
3283 {
3284 pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh;
3285 long_data = HTOL64(*(uint64 *)data);
3286 tcm_offset = (uintptr)shmem->rings_info_ptr;
3287 tcm_offset += OFFSETOF(ring_info_t, d2h_w_idx_hostaddr);
3288 dhdpcie_bus_membytes(bus, TRUE,
3289 (ulong)tcm_offset, (uint8*) &long_data, len);
3290 if (dhd_msg_level & DHD_INFO_VAL) {
3291 prhex(__FUNCTION__, data, len);
3292 }
3293 break;
3294 }
3295
3296 case D2H_DMA_INDX_RD_BUF:
3297 {
3298 pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh;
3299 long_data = HTOL64(*(uint64 *)data);
3300 tcm_offset = (uintptr)shmem->rings_info_ptr;
3301 tcm_offset += OFFSETOF(ring_info_t, d2h_r_idx_hostaddr);
3302 dhdpcie_bus_membytes(bus, TRUE,
3303 (ulong)tcm_offset, (uint8*) &long_data, len);
3304 if (dhd_msg_level & DHD_INFO_VAL) {
3305 prhex(__FUNCTION__, data, len);
3306 }
3307 break;
3308 }
3309
3310 case H2D_IFRM_INDX_WR_BUF:
3311 {
3312 pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh;
3313
3314 long_data = HTOL64(*(uint64 *)data);
3315 tcm_offset = (uintptr)shmem->rings_info_ptr;
3316 tcm_offset += OFFSETOF(ring_info_t, ifrm_w_idx_hostaddr);
3317 dhdpcie_bus_membytes(bus, TRUE,
3318 (ulong)tcm_offset, (uint8*) &long_data, len);
3319 if (dhd_msg_level & DHD_INFO_VAL) {
3320 prhex(__FUNCTION__, data, len);
3321 }
3322 break;
3323 }
3324
3325 case RING_ITEM_LEN :
3326 tcm_offset = bus->ring_sh[ringid].ring_mem_addr;
3327 tcm_offset += OFFSETOF(ring_mem_t, len_items);
3328 dhdpcie_bus_wtcm16(bus,
3329 (ulong)tcm_offset, (uint16) HTOL16(*(uint16 *)data));
3330 break;
3331
3332 case RING_MAX_ITEMS :
3333 tcm_offset = bus->ring_sh[ringid].ring_mem_addr;
3334 tcm_offset += OFFSETOF(ring_mem_t, max_item);
3335 dhdpcie_bus_wtcm16(bus,
3336 (ulong)tcm_offset, (uint16) HTOL16(*(uint16 *)data));
3337 break;
3338
3339 case RING_BUF_ADDR :
3340 long_data = HTOL64(*(uint64 *)data);
3341 tcm_offset = bus->ring_sh[ringid].ring_mem_addr;
3342 tcm_offset += OFFSETOF(ring_mem_t, base_addr);
3343 dhdpcie_bus_membytes(bus, TRUE,
3344 (ulong)tcm_offset, (uint8 *) &long_data, len);
3345 if (dhd_msg_level & DHD_INFO_VAL) {
3346 prhex(__FUNCTION__, data, len);
3347 }
3348 break;
3349
3350 case RING_WR_UPD :
3351 tcm_offset = bus->ring_sh[ringid].ring_state_w;
3352 dhdpcie_bus_wtcm16(bus,
3353 (ulong)tcm_offset, (uint16) HTOL16(*(uint16 *)data));
3354 break;
3355
3356 case RING_RD_UPD :
3357 tcm_offset = bus->ring_sh[ringid].ring_state_r;
3358 dhdpcie_bus_wtcm16(bus,
3359 (ulong)tcm_offset, (uint16) HTOL16(*(uint16 *)data));
3360 break;
3361
3362 case D2H_MB_DATA:
3363 dhdpcie_bus_wtcm32(bus, bus->d2h_mb_data_ptr_addr,
3364 (uint32) HTOL32(*(uint32 *)data));
3365 break;
3366
3367 case H2D_MB_DATA:
3368 dhdpcie_bus_wtcm32(bus, bus->h2d_mb_data_ptr_addr,
3369 (uint32) HTOL32(*(uint32 *)data));
3370 break;
3371
3372 case HOST_API_VERSION:
3373 {
3374 pciedev_shared_t *sh = (pciedev_shared_t*) bus->shared_addr;
3375 tcm_offset = (uintptr)sh + OFFSETOF(pciedev_shared_t, host_cap);
3376 dhdpcie_bus_wtcm32(bus,
3377 (ulong)tcm_offset, (uint32) HTOL32(*(uint32 *)data));
3378 break;
3379 }
3380
3381 case DNGL_TO_HOST_TRAP_ADDR:
3382 {
3383 pciedev_shared_t *sh = (pciedev_shared_t*) bus->shared_addr;
3384 long_data = HTOL64(*(uint64 *)data);
3385 tcm_offset = (uintptr)&(sh->host_trap_addr);
3386 dhdpcie_bus_membytes(bus, TRUE,
3387 (ulong)tcm_offset, (uint8*) &long_data, len);
3388 break;
3389 }
3390
3391 #ifdef HOFFLOAD_MODULES
3392 case WRT_HOST_MODULE_ADDR:
3393 {
3394 pciedev_shared_t *sh = (pciedev_shared_t*) bus->shared_addr;
3395 long_data = HTOL64(*(uint64 *)data);
3396 tcm_offset = (uintptr)&(sh->hoffload_addr);
3397 dhdpcie_bus_membytes(bus, TRUE,
3398 (ulong)tcm_offset, (uint8*) &long_data, len);
3399 break;
3400 }
3401 #endif
3402 default:
3403 break;
3404 }
3405 } /* dhd_bus_cmn_writeshared */
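/* Usage sketch (illustrative only; the helper is hypothetical): publishing a
 * new write index for a flow ring goes through the RING_WR_UPD case above,
 * which converts to little-endian and writes it at ring_state_w in TCM.
 */
#if 0
static void
example_publish_wr_index(dhd_bus_t *bus, uint16 ringid, uint16 wr_idx)
{
/* 16-bit index; dhd_bus_cmn_writeshared() handles the HTOL16 swap */
dhd_bus_cmn_writeshared(bus, &wr_idx, sizeof(wr_idx), RING_WR_UPD, ringid);
}
#endif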
3406
3407 /** A snippet of dongle memory is shared between host and dongle */
3408 void
3409 dhd_bus_cmn_readshared(dhd_bus_t *bus, void* data, uint8 type, uint16 ringid)
3410 {
3411 ulong tcm_offset;
3412
3413 switch (type) {
3414 case RING_WR_UPD :
3415 tcm_offset = bus->ring_sh[ringid].ring_state_w;
3416 *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, tcm_offset));
3417 break;
3418 case RING_RD_UPD :
3419 tcm_offset = bus->ring_sh[ringid].ring_state_r;
3420 *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, tcm_offset));
3421 break;
3422 case TOTAL_LFRAG_PACKET_CNT :
3423 {
3424 pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr;
3425 *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus,
3426 (ulong)(uintptr) &sh->total_lfrag_pkt_cnt));
3427 break;
3428 }
3429 case H2D_MB_DATA:
3430 *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, bus->h2d_mb_data_ptr_addr));
3431 break;
3432 case D2H_MB_DATA:
3433 *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, bus->d2h_mb_data_ptr_addr));
3434 break;
3435 case MAX_HOST_RXBUFS :
3436 {
3437 pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr;
3438 *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus,
3439 (ulong)(uintptr) &sh->max_host_rxbufs));
3440 break;
3441 }
3442 default :
3443 break;
3444 }
3445 }
3446
3447 uint32 dhd_bus_get_sharedflags(dhd_bus_t *bus)
3448 {
3449 return ((pciedev_shared_t*)bus->pcie_sh)->flags;
3450 }
3451
3452 void
3453 dhd_bus_clearcounts(dhd_pub_t *dhdp)
3454 {
3455 }
3456
3457 int
3458 dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
3459 void *params, int plen, void *arg, int len, bool set)
3460 {
3461 dhd_bus_t *bus = dhdp->bus;
3462 const bcm_iovar_t *vi = NULL;
3463 int bcmerror = BCME_UNSUPPORTED;
3464 int val_size;
3465 uint32 actionid;
3466
3467 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3468
3469 ASSERT(name);
3470 ASSERT(len >= 0);
3471
3472 /* Get MUST have return space */
3473 ASSERT(set || (arg && len));
3474
3475 /* Set does NOT take qualifiers */
3476 ASSERT(!set || (!params && !plen));
3477
3478 DHD_INFO(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
3479 name, (set ? "set" : "get"), len, plen));
3480
3481 /* Look up var locally; if not found pass to host driver */
3482 if ((vi = bcm_iovar_lookup(dhdpcie_iovars, name)) == NULL) {
3483 goto exit;
3484 }
3485
3486
3487 /* set up 'params' pointer in case this is a set command so that
3488 * the convenience int and bool code can be common to set and get
3489 */
3490 if (params == NULL) {
3491 params = arg;
3492 plen = len;
3493 }
3494
3495 if (vi->type == IOVT_VOID)
3496 val_size = 0;
3497 else if (vi->type == IOVT_BUFFER)
3498 val_size = len;
3499 else
3500 /* all other types are integer sized */
3501 val_size = sizeof(int);
3502
3503 actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
3504 bcmerror = dhdpcie_bus_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size);
3505
3506 exit:
3507 return bcmerror;
3508 } /* dhd_bus_iovar_op */
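/* Caller sketch (illustrative only; "example_iovar" is a hypothetical name):
 * a GET of an integer-typed bus iovar passes a result buffer and set=FALSE,
 * matching the ASSERT(set || (arg && len)) contract above.
 */
#if 0
static int
example_get_bus_int_iovar(dhd_pub_t *dhdp)
{
int val = 0;
int err;

err = dhd_bus_iovar_op(dhdp, "example_iovar", NULL, 0,
&val, sizeof(val), FALSE);
return (err == BCME_OK) ? val : err;
}
#endif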
3509
3510 #ifdef BCM_BUZZZ
3511 #include <bcm_buzzz.h>
3512
3513 int
3514 dhd_buzzz_dump_cntrs(char *p, uint32 *core, uint32 *log,
3515 const int num_counters)
3516 {
3517 int bytes = 0;
3518 uint32 ctr;
3519 uint32 curr[BCM_BUZZZ_COUNTERS_MAX], prev[BCM_BUZZZ_COUNTERS_MAX];
3520 uint32 delta[BCM_BUZZZ_COUNTERS_MAX];
3521
3522 /* Compute elapsed counter values per counter event type */
3523 for (ctr = 0U; ctr < num_counters; ctr++) {
3524 prev[ctr] = core[ctr];
3525 curr[ctr] = *log++;
3526 core[ctr] = curr[ctr]; /* saved for next log */
3527
3528 if (curr[ctr] < prev[ctr])
3529 delta[ctr] = curr[ctr] + (~0U - prev[ctr]);
3530 else
3531 delta[ctr] = (curr[ctr] - prev[ctr]);
3532
3533 bytes += sprintf(p + bytes, "%12u ", delta[ctr]);
3534 }
3535
3536 return bytes;
3537 }
3538
3539 typedef union cm3_cnts { /* export this in bcm_buzzz.h */
3540 uint32 u32;
3541 uint8 u8[4];
3542 struct {
3543 uint8 cpicnt;
3544 uint8 exccnt;
3545 uint8 sleepcnt;
3546 uint8 lsucnt;
3547 };
3548 } cm3_cnts_t;
3549
3550 int
3551 dhd_bcm_buzzz_dump_cntrs6(char *p, uint32 *core, uint32 *log)
3552 {
3553 int bytes = 0;
3554
3555 uint32 cyccnt, instrcnt;
3556 cm3_cnts_t cm3_cnts;
3557 uint8 foldcnt;
3558
3559 { /* 32bit cyccnt */
3560 uint32 curr, prev, delta;
3561 prev = core[0]; curr = *log++; core[0] = curr;
3562 if (curr < prev)
3563 delta = curr + (~0U - prev);
3564 else
3565 delta = (curr - prev);
3566
3567 bytes += sprintf(p + bytes, "%12u ", delta);
3568 cyccnt = delta;
3569 }
3570
3571 { /* Extract the 4 cnts: cpi, exc, sleep and lsu */
3572 int i;
3573 uint8 max8 = ~0;
3574 cm3_cnts_t curr, prev, delta;
3575 prev.u32 = core[1]; curr.u32 = * log++; core[1] = curr.u32;
3576 for (i = 0; i < 4; i++) {
3577 if (curr.u8[i] < prev.u8[i])
3578 delta.u8[i] = curr.u8[i] + (max8 - prev.u8[i]);
3579 else
3580 delta.u8[i] = (curr.u8[i] - prev.u8[i]);
3581 bytes += sprintf(p + bytes, "%4u ", delta.u8[i]);
3582 }
3583 cm3_cnts.u32 = delta.u32;
3584 }
3585
3586 { /* Extract the foldcnt from arg0 */
3587 uint8 curr, prev, delta, max8 = ~0;
3588 bcm_buzzz_arg0_t arg0; arg0.u32 = *log;
3589 prev = core[2]; curr = arg0.klog.cnt; core[2] = curr;
3590 if (curr < prev)
3591 delta = curr + (max8 - prev);
3592 else
3593 delta = (curr - prev);
3594 bytes += sprintf(p + bytes, "%4u ", delta);
3595 foldcnt = delta;
3596 }
3597
3598 instrcnt = cyccnt - (cm3_cnts.u8[0] + cm3_cnts.u8[1] + cm3_cnts.u8[2]
3599 + cm3_cnts.u8[3]) + foldcnt;
3600 if (instrcnt > 0xFFFFFF00)
3601 bytes += sprintf(p + bytes, "[%10s] ", "~");
3602 else
3603 bytes += sprintf(p + bytes, "[%10u] ", instrcnt);
3604 return bytes;
3605 }
3606
3607 int
3608 dhd_buzzz_dump_log(char *p, uint32 *core, uint32 *log, bcm_buzzz_t *buzzz)
3609 {
3610 int bytes = 0;
3611 bcm_buzzz_arg0_t arg0;
3612 static uint8 * fmt[] = BCM_BUZZZ_FMT_STRINGS;
3613
3614 if (buzzz->counters == 6) {
3615 bytes += dhd_bcm_buzzz_dump_cntrs6(p, core, log);
3616 log += 2; /* 32bit cyccnt + (4 x 8bit) CM3 */
3617 } else {
3618 bytes += dhd_buzzz_dump_cntrs(p, core, log, buzzz->counters);
3619 log += buzzz->counters; /* (N x 32bit) CR4=3, CA7=4 */
3620 }
3621
3622 /* Dump the logged arguments using the registered formats */
3623 arg0.u32 = *log++;
3624
3625 switch (arg0.klog.args) {
3626 case 0:
3627 bytes += sprintf(p + bytes, fmt[arg0.klog.id]);
3628 break;
3629 case 1:
3630 {
3631 uint32 arg1 = *log++;
3632 bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1);
3633 break;
3634 }
3635 case 2:
3636 {
3637 uint32 arg1, arg2;
3638 arg1 = *log++; arg2 = *log++;
3639 bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2);
3640 break;
3641 }
3642 case 3:
3643 {
3644 uint32 arg1, arg2, arg3;
3645 arg1 = *log++; arg2 = *log++; arg3 = *log++;
3646 bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3);
3647 break;
3648 }
3649 case 4:
3650 {
3651 uint32 arg1, arg2, arg3, arg4;
3652 arg1 = *log++; arg2 = *log++;
3653 arg3 = *log++; arg4 = *log++;
3654 bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3, arg4);
3655 break;
3656 }
3657 default:
3658 			printf("%s: Maximum of 4 arguments supported\n", __FUNCTION__);
3659 break;
3660 }
3661
3662 bytes += sprintf(p + bytes, "\n");
3663
3664 return bytes;
3665 }
3666
3667 void dhd_buzzz_dump(bcm_buzzz_t *buzzz_p, void *buffer_p, char *p)
3668 {
3669 int i;
3670 uint32 total, part1, part2, log_sz, core[BCM_BUZZZ_COUNTERS_MAX];
3671 void * log;
3672
3673 for (i = 0; i < BCM_BUZZZ_COUNTERS_MAX; i++) {
3674 core[i] = 0;
3675 }
3676
3677 log_sz = buzzz_p->log_sz;
3678
3679 part1 = ((uint32)buzzz_p->cur - (uint32)buzzz_p->log) / log_sz;
3680
3681 if (buzzz_p->wrap == TRUE) {
3682 part2 = ((uint32)buzzz_p->end - (uint32)buzzz_p->cur) / log_sz;
3683 total = (buzzz_p->buffer_sz - BCM_BUZZZ_LOGENTRY_MAXSZ) / log_sz;
3684 } else {
3685 part2 = 0U;
3686 total = buzzz_p->count;
3687 }
3688
3689 if (total == 0U) {
3690 printf("%s: bcm_buzzz_dump total<%u> done\n", __FUNCTION__, total);
3691 return;
3692 } else {
3693 printf("%s: bcm_buzzz_dump total<%u> : part2<%u> + part1<%u>\n", __FUNCTION__,
3694 total, part2, part1);
3695 }
3696
3697 if (part2) { /* with wrap */
3698 log = (void*)((size_t)buffer_p + (buzzz_p->cur - buzzz_p->log));
3699 while (part2--) { /* from cur to end : part2 */
3700 p[0] = '\0';
3701 dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
3702 printf("%s", p);
3703 log = (void*)((size_t)log + buzzz_p->log_sz);
3704 }
3705 }
3706
3707 log = (void*)buffer_p;
3708 while (part1--) {
3709 p[0] = '\0';
3710 dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
3711 printf("%s", p);
3712 log = (void*)((size_t)log + buzzz_p->log_sz);
3713 }
3714
3715 printf("%s: bcm_buzzz_dump done.\n", __FUNCTION__);
3716 }
3717
3718 int dhd_buzzz_dump_dngl(dhd_bus_t *bus)
3719 {
3720 bcm_buzzz_t * buzzz_p = NULL;
3721 void * buffer_p = NULL;
3722 char * page_p = NULL;
3723 pciedev_shared_t *sh;
3724 int ret = 0;
3725
3726 if (bus->dhd->busstate != DHD_BUS_DATA) {
3727 return BCME_UNSUPPORTED;
3728 }
3729 if ((page_p = (char *)MALLOC(bus->dhd->osh, 4096)) == NULL) {
3730 printf("%s: Page memory allocation failure\n", __FUNCTION__);
3731 goto done;
3732 }
3733 if ((buzzz_p = MALLOC(bus->dhd->osh, sizeof(bcm_buzzz_t))) == NULL) {
3734 printf("%s: BCM BUZZZ memory allocation failure\n", __FUNCTION__);
3735 goto done;
3736 }
3737
3738 ret = dhdpcie_readshared(bus);
3739 if (ret < 0) {
3740 		DHD_ERROR(("%s: Shared area read failed\n", __FUNCTION__));
3741 goto done;
3742 }
3743
3744 sh = bus->pcie_sh;
3745
3746 DHD_INFO(("%s buzzz:%08x\n", __FUNCTION__, sh->buzz_dbg_ptr));
3747
3748 if (sh->buzz_dbg_ptr != 0U) { /* Fetch and display dongle BUZZZ Trace */
3749 dhdpcie_bus_membytes(bus, FALSE, (ulong)sh->buzz_dbg_ptr,
3750 (uint8 *)buzzz_p, sizeof(bcm_buzzz_t));
3751
3752 printf("BUZZZ[0x%08x]: log<0x%08x> cur<0x%08x> end<0x%08x> "
3753 "count<%u> status<%u> wrap<%u>\n"
3754 "cpu<0x%02X> counters<%u> group<%u> buffer_sz<%u> log_sz<%u>\n",
3755 (int)sh->buzz_dbg_ptr,
3756 (int)buzzz_p->log, (int)buzzz_p->cur, (int)buzzz_p->end,
3757 buzzz_p->count, buzzz_p->status, buzzz_p->wrap,
3758 buzzz_p->cpu_idcode, buzzz_p->counters, buzzz_p->group,
3759 buzzz_p->buffer_sz, buzzz_p->log_sz);
3760
3761 if (buzzz_p->count == 0) {
3762 printf("%s: Empty dongle BUZZZ trace\n\n", __FUNCTION__);
3763 goto done;
3764 }
3765
3766 /* Allocate memory for trace buffer and format strings */
3767 buffer_p = MALLOC(bus->dhd->osh, buzzz_p->buffer_sz);
3768 if (buffer_p == NULL) {
3769 printf("%s: Buffer memory allocation failure\n", __FUNCTION__);
3770 goto done;
3771 }
3772
3773 /* Fetch the trace. format strings are exported via bcm_buzzz.h */
3774 dhdpcie_bus_membytes(bus, FALSE, (uint32)buzzz_p->log, /* Trace */
3775 (uint8 *)buffer_p, buzzz_p->buffer_sz);
3776
3777 /* Process and display the trace using formatted output */
3778
3779 {
3780 int ctr;
3781 for (ctr = 0; ctr < buzzz_p->counters; ctr++) {
3782 printf("<Evt[%02X]> ", buzzz_p->eventid[ctr]);
3783 }
3784 printf("<code execution point>\n");
3785 }
3786
3787 dhd_buzzz_dump(buzzz_p, buffer_p, page_p);
3788
3789 printf("%s: ----- End of dongle BCM BUZZZ Trace -----\n\n", __FUNCTION__);
3790
3791 MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz); buffer_p = NULL;
3792 }
3793
3794 done:
3795
3796 	if (page_p) MFREE(bus->dhd->osh, page_p, 4096);
3797 	if (buffer_p) MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz);
3798 	if (buzzz_p) MFREE(bus->dhd->osh, buzzz_p, sizeof(bcm_buzzz_t));
3799
3800 return BCME_OK;
3801 }
3802 #endif /* BCM_BUZZZ */
3803
3804 #define PCIE_GEN2(sih) ((BUSTYPE((sih)->bustype) == PCI_BUS) && \
3805 ((sih)->buscoretype == PCIE2_CORE_ID))
3806
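/* Power the WLAN dongle off (flag TRUE) or back on (flag FALSE). Power-off
 * tears down interrupts, the protocol layer and bus resources; power-on
 * re-enables the device, reattaches the dongle and restarts the bus.
 */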
3807 int
3808 dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
3809 {
3810 dhd_bus_t *bus = dhdp->bus;
3811 int bcmerror = 0;
3812 unsigned long flags;
3813 #ifdef CONFIG_ARCH_MSM
3814 int retry = POWERUP_MAX_RETRY;
3815 #endif /* CONFIG_ARCH_MSM */
3816
3817 if (dhd_download_fw_on_driverload) {
3818 bcmerror = dhd_bus_start(dhdp);
3819 } else {
3820 if (flag == TRUE) { /* Turn off WLAN */
3821 /* Removing Power */
3822 DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__));
3823
3824 bus->dhd->up = FALSE;
3825
3826 if (bus->dhd->busstate != DHD_BUS_DOWN) {
3827 dhdpcie_advertise_bus_cleanup(bus->dhd);
3828 if (bus->intr) {
3829 dhdpcie_bus_intr_disable(bus);
3830 dhdpcie_free_irq(bus);
3831 }
3832 #ifdef BCMPCIE_OOB_HOST_WAKE
3833 /* Clean up any pending host wake IRQ */
3834 dhd_bus_oob_intr_set(bus->dhd, FALSE);
3835 dhd_bus_oob_intr_unregister(bus->dhd);
3836 #endif /* BCMPCIE_OOB_HOST_WAKE */
3837 dhd_os_wd_timer(dhdp, 0);
3838 dhd_bus_stop(bus, TRUE);
3839 dhd_prot_reset(dhdp);
3840 dhd_clear(dhdp);
3841 dhd_bus_release_dongle(bus);
3842 dhdpcie_bus_free_resource(bus);
3843 bcmerror = dhdpcie_bus_disable_device(bus);
3844 if (bcmerror) {
3845 DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
3846 __FUNCTION__, bcmerror));
3847 goto done;
3848 }
3849 #ifdef CONFIG_ARCH_MSM
3850 bcmerror = dhdpcie_bus_clock_stop(bus);
3851 if (bcmerror) {
3852 DHD_ERROR(("%s: host clock stop failed: %d\n",
3853 __FUNCTION__, bcmerror));
3854 goto done;
3855 }
3856 #endif /* CONFIG_ARCH_MSM */
3857 DHD_GENERAL_LOCK(bus->dhd, flags);
3858 bus->dhd->busstate = DHD_BUS_DOWN;
3859 DHD_GENERAL_UNLOCK(bus->dhd, flags);
3860 } else {
3861 if (bus->intr) {
3862 dhdpcie_free_irq(bus);
3863 }
3864 #ifdef BCMPCIE_OOB_HOST_WAKE
3865 /* Clean up any pending host wake IRQ */
3866 dhd_bus_oob_intr_set(bus->dhd, FALSE);
3867 dhd_bus_oob_intr_unregister(bus->dhd);
3868 #endif /* BCMPCIE_OOB_HOST_WAKE */
3869 dhd_dpc_kill(bus->dhd);
3870 dhd_prot_reset(dhdp);
3871 dhd_clear(dhdp);
3872 dhd_bus_release_dongle(bus);
3873 dhdpcie_bus_free_resource(bus);
3874 bcmerror = dhdpcie_bus_disable_device(bus);
3875 if (bcmerror) {
3876 DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
3877 __FUNCTION__, bcmerror));
3878 goto done;
3879 }
3880
3881 #ifdef CONFIG_ARCH_MSM
3882 bcmerror = dhdpcie_bus_clock_stop(bus);
3883 if (bcmerror) {
3884 DHD_ERROR(("%s: host clock stop failed: %d\n",
3885 __FUNCTION__, bcmerror));
3886 goto done;
3887 }
3888 #endif /* CONFIG_ARCH_MSM */
3889 }
3890
3891 bus->dhd->dongle_reset = TRUE;
3892 DHD_ERROR(("%s: WLAN OFF Done\n", __FUNCTION__));
3893 } else { /* Turn on WLAN */
3894 if (bus->dhd->busstate == DHD_BUS_DOWN) {
3895 /* Powering On */
3896 DHD_ERROR(("%s: == Power ON ==\n", __FUNCTION__));
3897 #ifdef CONFIG_ARCH_MSM
3898 while (--retry) {
3899 bcmerror = dhdpcie_bus_clock_start(bus);
3900 if (!bcmerror) {
3901 DHD_ERROR(("%s: dhdpcie_bus_clock_start OK\n",
3902 __FUNCTION__));
3903 break;
3904 } else {
3905 OSL_SLEEP(10);
3906 }
3907 }
3908
3909 if (bcmerror && !retry) {
3910 DHD_ERROR(("%s: host pcie clock enable failed: %d\n",
3911 __FUNCTION__, bcmerror));
3912 goto done;
3913 }
3914 #endif /* CONFIG_ARCH_MSM */
3915 bus->is_linkdown = 0;
3916 #ifdef SUPPORT_LINKDOWN_RECOVERY
3917 bus->read_shm_fail = FALSE;
3918 #endif /* SUPPORT_LINKDOWN_RECOVERY */
3919 bcmerror = dhdpcie_bus_enable_device(bus);
3920 if (bcmerror) {
3921 DHD_ERROR(("%s: host configuration restore failed: %d\n",
3922 __FUNCTION__, bcmerror));
3923 goto done;
3924 }
3925
3926 bcmerror = dhdpcie_bus_alloc_resource(bus);
3927 if (bcmerror) {
3928 DHD_ERROR(("%s: dhdpcie_bus_resource_alloc failed: %d\n",
3929 __FUNCTION__, bcmerror));
3930 goto done;
3931 }
3932
3933 bcmerror = dhdpcie_bus_dongle_attach(bus);
3934 if (bcmerror) {
3935 DHD_ERROR(("%s: dhdpcie_bus_dongle_attach failed: %d\n",
3936 __FUNCTION__, bcmerror));
3937 goto done;
3938 }
3939
3940 bcmerror = dhd_bus_request_irq(bus);
3941 if (bcmerror) {
3942 DHD_ERROR(("%s: dhd_bus_request_irq failed: %d\n",
3943 __FUNCTION__, bcmerror));
3944 goto done;
3945 }
3946
3947 bus->dhd->dongle_reset = FALSE;
3948
3949 bcmerror = dhd_bus_start(dhdp);
3950 if (bcmerror) {
3951 DHD_ERROR(("%s: dhd_bus_start: %d\n",
3952 __FUNCTION__, bcmerror));
3953 goto done;
3954 }
3955
3956 bus->dhd->up = TRUE;
3957 DHD_ERROR(("%s: WLAN Power On Done\n", __FUNCTION__));
3958 } else {
3959 DHD_ERROR(("%s: what should we do here\n", __FUNCTION__));
3960 goto done;
3961 }
3962 }
3963 }
3964
3965 done:
3966 if (bcmerror) {
3967 DHD_GENERAL_LOCK(bus->dhd, flags);
3968 bus->dhd->busstate = DHD_BUS_DOWN;
3969 DHD_GENERAL_UNLOCK(bus->dhd, flags);
3970 }
3971
3972 return bcmerror;
3973 }
3974
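/* Dispatch a single bus-level iovar. 'actionid' encodes both the iovar id and
 * the get/set direction; up to three leading int arguments are pre-decoded
 * into int_val/int_val2/int_val3 from the params buffer.
 */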
3975 static int
3976 dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
3977 void *params, int plen, void *arg, int len, int val_size)
3978 {
3979 int bcmerror = 0;
3980 int32 int_val = 0;
3981 int32 int_val2 = 0;
3982 int32 int_val3 = 0;
3983 bool bool_val = 0;
3984
3985 DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n",
3986 __FUNCTION__, actionid, name, params, plen, arg, len, val_size));
3987
3988 if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
3989 goto exit;
3990
3991 if (plen >= (int)sizeof(int_val))
3992 bcopy(params, &int_val, sizeof(int_val));
3993
3994 if (plen >= (int)sizeof(int_val) * 2)
3995 bcopy((void*)((uintptr)params + sizeof(int_val)), &int_val2, sizeof(int_val2));
3996
3997 if (plen >= (int)sizeof(int_val) * 3)
3998 bcopy((void*)((uintptr)params + 2 * sizeof(int_val)), &int_val3, sizeof(int_val3));
3999
4000 bool_val = (int_val != 0) ? TRUE : FALSE;
4001
4002 /* Check if dongle is in reset. If so, only allow DEVRESET iovars */
4003 if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) ||
4004 actionid == IOV_GVAL(IOV_DEVRESET))) {
4005 bcmerror = BCME_NOTREADY;
4006 goto exit;
4007 }
4008
4009 switch (actionid) {
4010 case IOV_SVAL(IOV_VARS):
4011 bcmerror = dhdpcie_downloadvars(bus, arg, len);
4012 break;
4013 case IOV_SVAL(IOV_PCIE_LPBK):
4014 bcmerror = dhdpcie_bus_lpback_req(bus, int_val);
4015 break;
4016
4017 case IOV_SVAL(IOV_PCIE_DMAXFER): {
4018 int int_val4 = 0;
4019 if (plen >= (int)sizeof(int_val) * 4) {
4020 bcopy((void*)((uintptr)params + 3 * sizeof(int_val)),
4021 &int_val4, sizeof(int_val4));
4022 }
4023 bcmerror = dhdpcie_bus_dmaxfer_req(bus, int_val, int_val2, int_val3, int_val4);
4024 break;
4025 }
4026
4027 #ifdef DEVICE_TX_STUCK_DETECT
4028 case IOV_GVAL(IOV_DEVICE_TX_STUCK_DETECT):
4029 int_val = bus->dev_tx_stuck_monitor;
4030 bcopy(&int_val, arg, val_size);
4031 break;
4032 case IOV_SVAL(IOV_DEVICE_TX_STUCK_DETECT):
4033 bus->dev_tx_stuck_monitor = (bool)int_val;
4034 break;
4035 #endif /* DEVICE_TX_STUCK_DETECT */
4036 case IOV_GVAL(IOV_PCIE_SUSPEND):
4037 int_val = (bus->dhd->busstate == DHD_BUS_SUSPEND) ? 1 : 0;
4038 bcopy(&int_val, arg, val_size);
4039 break;
4040
4041 case IOV_SVAL(IOV_PCIE_SUSPEND):
4042 if (bool_val) { /* Suspend */
4043 int ret;
4044 unsigned long flags;
4045
4046 /*
4047 * If some other context is busy, wait until they are done,
4048 * before starting suspend
4049 */
4050 ret = dhd_os_busbusy_wait_condition(bus->dhd,
4051 &bus->dhd->dhd_bus_busy_state, DHD_BUS_BUSY_IN_DHD_IOVAR);
4052 if (ret == 0) {
4053 DHD_ERROR(("%s:Wait Timedout, dhd_bus_busy_state = 0x%x\n",
4054 __FUNCTION__, bus->dhd->dhd_bus_busy_state));
4055 return BCME_BUSY;
4056 }
4057
4058 DHD_GENERAL_LOCK(bus->dhd, flags);
4059 DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd);
4060 DHD_GENERAL_UNLOCK(bus->dhd, flags);
4061
4062 dhdpcie_bus_suspend(bus, TRUE);
4063
4064 DHD_GENERAL_LOCK(bus->dhd, flags);
4065 DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
4066 dhd_os_busbusy_wake(bus->dhd);
4067 DHD_GENERAL_UNLOCK(bus->dhd, flags);
4068 } else { /* Resume */
4069 unsigned long flags;
4070 DHD_GENERAL_LOCK(bus->dhd, flags);
4071 DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd);
4072 DHD_GENERAL_UNLOCK(bus->dhd, flags);
4073
4074 dhdpcie_bus_suspend(bus, FALSE);
4075
4076 DHD_GENERAL_LOCK(bus->dhd, flags);
4077 DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd);
4078 dhd_os_busbusy_wake(bus->dhd);
4079 DHD_GENERAL_UNLOCK(bus->dhd, flags);
4080 }
4081 break;
4082
4083 case IOV_GVAL(IOV_MEMSIZE):
4084 int_val = (int32)bus->ramsize;
4085 bcopy(&int_val, arg, val_size);
4086 break;
4087
4088 #ifdef BCM_BUZZZ
4089 /* Dump dongle side buzzz trace to console */
4090 case IOV_GVAL(IOV_BUZZZ_DUMP):
4091 bcmerror = dhd_buzzz_dump_dngl(bus);
4092 break;
4093 #endif /* BCM_BUZZZ */
4094
4095 case IOV_SVAL(IOV_SET_DOWNLOAD_STATE):
4096 bcmerror = dhdpcie_bus_download_state(bus, bool_val);
4097 break;
4098
4099 case IOV_GVAL(IOV_RAMSIZE):
4100 int_val = (int32)bus->ramsize;
4101 bcopy(&int_val, arg, val_size);
4102 break;
4103
4104 case IOV_SVAL(IOV_RAMSIZE):
4105 bus->ramsize = int_val;
4106 bus->orig_ramsize = int_val;
4107 break;
4108
4109 case IOV_GVAL(IOV_RAMSTART):
4110 int_val = (int32)bus->dongle_ram_base;
4111 bcopy(&int_val, arg, val_size);
4112 break;
4113
4114 case IOV_GVAL(IOV_CC_NVMSHADOW):
4115 {
4116 struct bcmstrbuf dump_b;
4117
4118 bcm_binit(&dump_b, arg, len);
4119 bcmerror = dhdpcie_cc_nvmshadow(bus, &dump_b);
4120 break;
4121 }
4122
4123 case IOV_GVAL(IOV_SLEEP_ALLOWED):
4124 bool_val = bus->sleep_allowed;
4125 bcopy(&bool_val, arg, val_size);
4126 break;
4127
4128 case IOV_SVAL(IOV_SLEEP_ALLOWED):
4129 bus->sleep_allowed = bool_val;
4130 break;
4131
4132 case IOV_GVAL(IOV_DONGLEISOLATION):
4133 int_val = bus->dhd->dongle_isolation;
4134 bcopy(&int_val, arg, val_size);
4135 break;
4136
4137 case IOV_SVAL(IOV_DONGLEISOLATION):
4138 bus->dhd->dongle_isolation = bool_val;
4139 break;
4140
4141 case IOV_GVAL(IOV_LTRSLEEPON_UNLOOAD):
4142 int_val = bus->ltrsleep_on_unload;
4143 bcopy(&int_val, arg, val_size);
4144 break;
4145
4146 case IOV_SVAL(IOV_LTRSLEEPON_UNLOOAD):
4147 bus->ltrsleep_on_unload = bool_val;
4148 break;
4149
4150 case IOV_GVAL(IOV_DUMP_RINGUPD_BLOCK):
4151 {
4152 struct bcmstrbuf dump_b;
4153 bcm_binit(&dump_b, arg, len);
4154 bcmerror = dhd_prot_ringupd_dump(bus->dhd, &dump_b);
4155 break;
4156 }
4157 case IOV_GVAL(IOV_DMA_RINGINDICES):
4158 { int h2d_support, d2h_support;
4159 d2h_support = bus->dhd->dma_d2h_ring_upd_support ? 1 : 0;
4160 h2d_support = bus->dhd->dma_h2d_ring_upd_support ? 1 : 0;
4161 int_val = d2h_support | (h2d_support << 1);
4162 bcopy(&int_val, arg, sizeof(int_val));
4163 break;
4164 }
4165 case IOV_SVAL(IOV_DMA_RINGINDICES):
4166 /* Can change it only during initialization/FW download */
4167 if (bus->dhd->busstate == DHD_BUS_DOWN) {
4168 if ((int_val > 3) || (int_val < 0)) {
4169 DHD_ERROR(("%s: Bad argument. Possible values: 0, 1, 2 & 3\n", __FUNCTION__));
4170 bcmerror = BCME_BADARG;
4171 } else {
4172 bus->dhd->dma_d2h_ring_upd_support = (int_val & 1) ? TRUE : FALSE;
4173 bus->dhd->dma_h2d_ring_upd_support = (int_val & 2) ? TRUE : FALSE;
4174 bus->dhd->dma_ring_upd_overwrite = TRUE;
4175 }
4176 } else {
4177 DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
4178 __FUNCTION__));
4179 bcmerror = BCME_NOTDOWN;
4180 }
4181 break;
4182
4183 case IOV_GVAL(IOV_METADATA_DBG):
4184 int_val = dhd_prot_metadata_dbg_get(bus->dhd);
4185 bcopy(&int_val, arg, val_size);
4186 break;
4187 case IOV_SVAL(IOV_METADATA_DBG):
4188 dhd_prot_metadata_dbg_set(bus->dhd, (int_val != 0));
4189 break;
4190
4191 case IOV_GVAL(IOV_RX_METADATALEN):
4192 int_val = dhd_prot_metadatalen_get(bus->dhd, TRUE);
4193 bcopy(&int_val, arg, val_size);
4194 break;
4195
4196 case IOV_SVAL(IOV_RX_METADATALEN):
4197 if (int_val > 64) {
4198 bcmerror = BCME_BUFTOOLONG;
4199 break;
4200 }
4201 dhd_prot_metadatalen_set(bus->dhd, int_val, TRUE);
4202 break;
4203
4204 case IOV_SVAL(IOV_TXP_THRESHOLD):
4205 dhd_prot_txp_threshold(bus->dhd, TRUE, int_val);
4206 break;
4207
4208 case IOV_GVAL(IOV_TXP_THRESHOLD):
4209 int_val = dhd_prot_txp_threshold(bus->dhd, FALSE, int_val);
4210 bcopy(&int_val, arg, val_size);
4211 break;
4212
4213 case IOV_SVAL(IOV_DB1_FOR_MB):
4214 if (int_val)
4215 bus->db1_for_mb = TRUE;
4216 else
4217 bus->db1_for_mb = FALSE;
4218 break;
4219
4220 case IOV_GVAL(IOV_DB1_FOR_MB):
4221 if (bus->db1_for_mb)
4222 int_val = 1;
4223 else
4224 int_val = 0;
4225 bcopy(&int_val, arg, val_size);
4226 break;
4227
4228 case IOV_GVAL(IOV_TX_METADATALEN):
4229 int_val = dhd_prot_metadatalen_get(bus->dhd, FALSE);
4230 bcopy(&int_val, arg, val_size);
4231 break;
4232
4233 case IOV_SVAL(IOV_TX_METADATALEN):
4234 if (int_val > 64) {
4235 bcmerror = BCME_BUFTOOLONG;
4236 break;
4237 }
4238 dhd_prot_metadatalen_set(bus->dhd, int_val, FALSE);
4239 break;
4240
4241 case IOV_SVAL(IOV_DEVRESET):
4242 dhd_bus_devreset(bus->dhd, (uint8)bool_val);
4243 break;
4244 case IOV_SVAL(IOV_FORCE_FW_TRAP):
4245 if (bus->dhd->busstate == DHD_BUS_DATA)
4246 dhdpcie_fw_trap(bus);
4247 else {
4248 DHD_ERROR(("%s: Bus is NOT up\n", __FUNCTION__));
4249 bcmerror = BCME_NOTUP;
4250 }
4251 break;
4252 case IOV_GVAL(IOV_FLOW_PRIO_MAP):
4253 int_val = bus->dhd->flow_prio_map_type;
4254 bcopy(&int_val, arg, val_size);
4255 break;
4256
4257 case IOV_SVAL(IOV_FLOW_PRIO_MAP):
4258 int_val = (int32)dhd_update_flow_prio_map(bus->dhd, (uint8)int_val);
4259 bcopy(&int_val, arg, val_size);
4260 break;
4261
4262 #ifdef DHD_PCIE_RUNTIMEPM
4263 case IOV_GVAL(IOV_IDLETIME):
4264 int_val = bus->idletime;
4265 bcopy(&int_val, arg, val_size);
4266 break;
4267
4268 case IOV_SVAL(IOV_IDLETIME):
4269 if (int_val < 0) {
4270 bcmerror = BCME_BADARG;
4271 } else {
4272 bus->idletime = int_val;
4273 if (bus->idletime) {
4274 DHD_ENABLE_RUNTIME_PM(bus->dhd);
4275 } else {
4276 DHD_DISABLE_RUNTIME_PM(bus->dhd);
4277 }
4278 }
4279 break;
4280 #endif /* DHD_PCIE_RUNTIMEPM */
4281
4282 case IOV_GVAL(IOV_TXBOUND):
4283 int_val = (int32)dhd_txbound;
4284 bcopy(&int_val, arg, val_size);
4285 break;
4286
4287 case IOV_SVAL(IOV_TXBOUND):
4288 dhd_txbound = (uint)int_val;
4289 break;
4290
4291 case IOV_SVAL(IOV_H2D_MAILBOXDATA):
4292 dhdpcie_send_mb_data(bus, (uint)int_val);
4293 break;
4294
4295 case IOV_SVAL(IOV_INFORINGS):
4296 dhd_prot_init_info_rings(bus->dhd);
4297 break;
4298
4299 case IOV_SVAL(IOV_H2D_PHASE):
4300 if (bus->dhd->busstate != DHD_BUS_DOWN) {
4301 DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
4302 __FUNCTION__));
4303 bcmerror = BCME_NOTDOWN;
4304 break;
4305 }
4306 if (int_val)
4307 bus->dhd->h2d_phase_supported = TRUE;
4308 else
4309 bus->dhd->h2d_phase_supported = FALSE;
4310 break;
4311
4312 case IOV_GVAL(IOV_H2D_PHASE):
4313 int_val = (int32) bus->dhd->h2d_phase_supported;
4314 bcopy(&int_val, arg, val_size);
4315 break;
4316
4317 case IOV_SVAL(IOV_H2D_ENABLE_TRAP_BADPHASE):
4318 if (bus->dhd->busstate != DHD_BUS_DOWN) {
4319 DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
4320 __FUNCTION__));
4321 bcmerror = BCME_NOTDOWN;
4322 break;
4323 }
4324 if (int_val)
4325 bus->dhd->force_dongletrap_on_bad_h2d_phase = TRUE;
4326 else
4327 bus->dhd->force_dongletrap_on_bad_h2d_phase = FALSE;
4328 break;
4329
4330 case IOV_GVAL(IOV_H2D_ENABLE_TRAP_BADPHASE):
4331 int_val = (int32) bus->dhd->force_dongletrap_on_bad_h2d_phase;
4332 bcopy(&int_val, arg, val_size);
4333 break;
4334
4335 case IOV_SVAL(IOV_H2D_TXPOST_MAX_ITEM):
4336 if (bus->dhd->busstate != DHD_BUS_DOWN) {
4337 DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
4338 __FUNCTION__));
4339 bcmerror = BCME_NOTDOWN;
4340 break;
4341 }
4342 dhd_prot_set_h2d_max_txpost(bus->dhd, (uint16)int_val);
4343 break;
4344
4345 case IOV_GVAL(IOV_H2D_TXPOST_MAX_ITEM):
4346 int_val = dhd_prot_get_h2d_max_txpost(bus->dhd);
4347 bcopy(&int_val, arg, val_size);
4348 break;
4349
4350 case IOV_GVAL(IOV_RXBOUND):
4351 int_val = (int32)dhd_rxbound;
4352 bcopy(&int_val, arg, val_size);
4353 break;
4354
4355 case IOV_SVAL(IOV_RXBOUND):
4356 dhd_rxbound = (uint)int_val;
4357 break;
4358
4359 case IOV_GVAL(IOV_TRAPDATA):
4360 {
4361 struct bcmstrbuf dump_b;
4362 bcm_binit(&dump_b, arg, len);
4363 bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, FALSE);
4364 break;
4365 }
4366
4367 case IOV_GVAL(IOV_TRAPDATA_RAW):
4368 {
4369 struct bcmstrbuf dump_b;
4370 bcm_binit(&dump_b, arg, len);
4371 bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, TRUE);
4372 break;
4373 }
4374 case IOV_SVAL(IOV_HANGREPORT):
4375 bus->dhd->hang_report = bool_val;
4376 DHD_ERROR(("%s: Set hang_report as %d\n",
4377 __FUNCTION__, bus->dhd->hang_report));
4378 break;
4379
4380 case IOV_GVAL(IOV_HANGREPORT):
4381 int_val = (int32)bus->dhd->hang_report;
4382 bcopy(&int_val, arg, val_size);
4383 break;
4384
4385 case IOV_SVAL(IOV_CTO_PREVENTION):
4386 {
4387 uint32 pcie_lnkst;
4388
4389 if (bus->sih->buscorerev < 19) {
4390 bcmerror = BCME_UNSUPPORTED;
4391 break;
4392 }
4393 si_corereg(bus->sih, bus->sih->buscoreidx,
4394 OFFSETOF(sbpcieregs_t, configaddr), ~0, PCI_LINK_STATUS);
4395
4396 pcie_lnkst = si_corereg(bus->sih, bus->sih->buscoreidx,
4397 OFFSETOF(sbpcieregs_t, configdata), 0, 0);
4398
4399 /* 4347A0 in PCIEGEN1 doesn't support CTO prevention due to
4400 * 4347A0 DAR Issue : JIRA:CRWLPCIEGEN2-443: Issue in DAR write
4401 */
4402 if ((bus->sih->buscorerev == 19) &&
4403 (((pcie_lnkst >> PCI_LINK_SPEED_SHIFT) &
4404 PCI_LINK_SPEED_MASK) == PCIE_LNK_SPEED_GEN1)) {
4405 bcmerror = BCME_UNSUPPORTED;
4406 break;
4407 }
4408 bus->dhd->cto_enable = bool_val;
4409 dhdpcie_cto_init(bus, bus->dhd->cto_enable);
4410 DHD_ERROR(("%s: set CTO prevention and recovery enable/disable %d\n",
4411 __FUNCTION__, bus->dhd->cto_enable));
4412 }
4413 break;
4414
4415 case IOV_GVAL(IOV_CTO_PREVENTION):
4416 if (bus->sih->buscorerev < 19) {
4417 bcmerror = BCME_UNSUPPORTED;
4418 break;
4419 }
4420 int_val = (int32)bus->dhd->cto_enable;
4421 bcopy(&int_val, arg, val_size);
4422 break;
4423
4424 case IOV_SVAL(IOV_CTO_THRESHOLD):
4425 {
4426 if (bus->sih->buscorerev < 19) {
4427 bcmerror = BCME_UNSUPPORTED;
4428 break;
4429 }
4430 bus->dhd->cto_threshold = (uint32)int_val;
4431 }
4432 break;
4433
4434 case IOV_GVAL(IOV_CTO_THRESHOLD):
4435 if (bus->sih->buscorerev < 19) {
4436 bcmerror = BCME_UNSUPPORTED;
4437 break;
4438 }
4439 if (bus->dhd->cto_threshold)
4440 int_val = (int32)bus->dhd->cto_threshold;
4441 else
4442 int_val = (int32)PCIE_CTO_TO_THRESH_DEFAULT;
4443
4444 bcopy(&int_val, arg, val_size);
4445 break;
4446
4447 case IOV_SVAL(IOV_PCIE_WD_RESET):
4448 if (bool_val) {
4449 pcie_watchdog_reset(bus->osh, bus->sih, (sbpcieregs_t *) bus->regs);
4450 }
4451 break;
4452 #ifdef DHD_EFI
4453 case IOV_SVAL(IOV_CONTROL_SIGNAL):
4454 {
4455 bcmerror = dhd_control_signal(bus, arg, TRUE);
4456 break;
4457 }
4458
4459 case IOV_GVAL(IOV_CONTROL_SIGNAL):
4460 {
4461 bcmerror = dhd_control_signal(bus, params, FALSE);
4462 break;
4463 }
4464 #if defined(PCIE_OOB) || defined(PCIE_INB_DW)
4465 case IOV_GVAL(IOV_DEEP_SLEEP):
4466 int_val = bus->ds_enabled;
4467 bcopy(&int_val, arg, val_size);
4468 break;
4469
4470 case IOV_SVAL(IOV_DEEP_SLEEP):
4471 if (int_val == 1) {
4472 bus->ds_enabled = TRUE;
4473 /* Deassert */
4474 if (dhd_bus_set_device_wake(bus, FALSE) == BCME_OK) {
4475 #ifdef PCIE_INB_DW
4476 int timeleft;
4477 timeleft = dhd_os_ds_enter_wait(bus->dhd, NULL);
4478 if (timeleft == 0) {
4479 DHD_ERROR(("DS-ENTER timeout\n"));
4480 bus->ds_enabled = FALSE;
4481 break;
4482 }
4483 #endif /* PCIE_INB_DW */
4484 }
4485 else {
4486 DHD_ERROR(("%s: Enable Deep Sleep failed !\n", __FUNCTION__));
4487 bus->ds_enabled = FALSE;
4488 }
4489 }
4490 else if (int_val == 0) {
4491 /* Assert */
4492 if (dhd_bus_set_device_wake(bus, TRUE) == BCME_OK)
4493 bus->ds_enabled = FALSE;
4494 else
4495 DHD_ERROR(("%s: Disable Deep Sleep failed !\n", __FUNCTION__));
4496 }
4497 else
4498 DHD_ERROR(("%s: Invalid number, allowed only 0|1\n", __FUNCTION__));
4499
4500 break;
4501 #endif /* PCIE_OOB || PCIE_INB_DW */
4502
4503 case IOV_GVAL(IOV_WIFI_PROPERTIES):
4504 bcmerror = dhd_wifi_properties(bus, params);
4505 break;
4506
4507 case IOV_GVAL(IOV_OTP_DUMP):
4508 bcmerror = dhd_otp_dump(bus, params);
4509 break;
4510 #endif /* DHD_EFI */
4511
4512 case IOV_GVAL(IOV_IDMA_ENABLE):
4513 int_val = bus->idma_enabled;
4514 bcopy(&int_val, arg, val_size);
4515 break;
4516 case IOV_SVAL(IOV_IDMA_ENABLE):
4517 bus->idma_enabled = (bool)int_val;
4518 break;
4519 case IOV_GVAL(IOV_IFRM_ENABLE):
4520 int_val = bus->ifrm_enabled;
4521 bcopy(&int_val, arg, val_size);
4522 break;
4523 case IOV_SVAL(IOV_IFRM_ENABLE):
4524 bus->ifrm_enabled = (bool)int_val;
4525 break;
4526 case IOV_GVAL(IOV_CLEAR_RING):
4527 bcopy(&int_val, arg, val_size);
4528 dhd_flow_rings_flush(bus->dhd, 0);
4529 break;
4530 default:
4531 bcmerror = BCME_UNSUPPORTED;
4532 break;
4533 }
4534
4535 exit:
4536 return bcmerror;
4537 } /* dhdpcie_bus_doiovar */
4538
4539 /** Requests a loopback transfer of 'len' bytes over the message buffer protocol */
4540 static int
4541 dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 len)
4542 {
4543 if (bus->dhd == NULL) {
4544 DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
4545 return 0;
4546 }
4547 if (bus->dhd->prot == NULL) {
4548 DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
4549 return 0;
4550 }
4551 if (bus->dhd->busstate != DHD_BUS_DATA) {
4552 		DHD_ERROR(("%s: not in a ready state for LPBK\n", __FUNCTION__));
4553 return 0;
4554 }
4555 dhdmsgbuf_lpbk_req(bus->dhd, len);
4556 return 0;
4557 }
4558
4559 /* Ring DoorBell1 to indicate Hostready i.e. D3 Exit */
4560 void
4561 dhd_bus_hostready(struct dhd_bus *bus)
4562 {
4563 if (!bus->dhd->d2h_hostrdy_supported) {
4564 return;
4565 }
4566
4567 if (bus->is_linkdown) {
4568 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
4569 return;
4570 }
4571
4572 DHD_INFO_HW4(("%s : Read PCICMD Reg: 0x%08X\n", __FUNCTION__,
4573 dhd_pcie_config_read(bus->osh, PCI_CFG_CMD, sizeof(uint32))));
4574 si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_DB1, ~0, 0x12345678);
4575 bus->hostready_count ++;
4576 DHD_INFO_HW4(("%s: Ring Hostready:%d\n", __FUNCTION__, bus->hostready_count));
4577 }
4578
4579 /* Clear INTSTATUS */
4580 void
4581 dhdpcie_bus_clear_intstatus(struct dhd_bus *bus)
4582 {
4583 uint32 intstatus = 0;
4584 if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) ||
4585 (bus->sih->buscorerev == 2)) {
4586 intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
4587 dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus);
4588 } else {
4589 /* this is a PCIE core register..not a config register... */
4590 intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
4591 si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, bus->def_intmask,
4592 intstatus);
4593 }
4594 }
4595
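/* Suspend (state TRUE) or resume (state FALSE) the PCIe bus. Suspend stops
 * the network queues, sends H2D_HOST_D3_INFORM and waits for the dongle's
 * D3 ACK before handing control to the host suspend path; resume restores
 * the BAR0 window, re-enables interrupts and restarts the queues.
 */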
4596 int
4597 dhdpcie_bus_suspend(struct dhd_bus *bus, bool state)
4598 {
4599 int timeleft;
4600 int rc = 0;
4601 unsigned long flags;
4602
4603 printf("%s: state=%d\n", __FUNCTION__, state);
4604 if (bus->dhd == NULL) {
4605 DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
4606 return BCME_ERROR;
4607 }
4608 if (bus->dhd->prot == NULL) {
4609 DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
4610 return BCME_ERROR;
4611 }
4612
4613 if (dhd_query_bus_erros(bus->dhd)) {
4614 return BCME_ERROR;
4615 }
4616
4617 DHD_GENERAL_LOCK(bus->dhd, flags);
4618 if (!(bus->dhd->busstate == DHD_BUS_DATA || bus->dhd->busstate == DHD_BUS_SUSPEND)) {
4619 		DHD_ERROR(("%s: not in a ready state\n", __FUNCTION__));
4620 DHD_GENERAL_UNLOCK(bus->dhd, flags);
4621 return BCME_ERROR;
4622 }
4623 DHD_GENERAL_UNLOCK(bus->dhd, flags);
4624 if (bus->dhd->dongle_reset) {
4625 DHD_ERROR(("Dongle is in reset state.\n"));
4626 return -EIO;
4627 }
4628
4629 /* Check whether we are already in the requested state.
4630 * state=TRUE means Suspend
4631 	 * state=FALSE means Resume
4632 */
4633 if (state == TRUE && bus->dhd->busstate == DHD_BUS_SUSPEND) {
4634 DHD_ERROR(("Bus is already in SUSPEND state.\n"));
4635 return BCME_OK;
4636 } else if (state == FALSE && bus->dhd->busstate == DHD_BUS_DATA) {
4637 DHD_ERROR(("Bus is already in RESUME state.\n"));
4638 return BCME_OK;
4639 }
4640
4641 if (bus->d3_suspend_pending) {
4642 DHD_ERROR(("Suspend pending ...\n"));
4643 return BCME_ERROR;
4644 }
4645
4646
4647 if (state) {
4648 int idle_retry = 0;
4649 int active;
4650
4651 if (bus->is_linkdown) {
4652 DHD_ERROR(("%s: PCIe link was down, state=%d\n",
4653 __FUNCTION__, state));
4654 return BCME_ERROR;
4655 }
4656
4657 /* Suspend */
4658 DHD_ERROR(("%s: Entering suspend state\n", __FUNCTION__));
4659
4660 DHD_GENERAL_LOCK(bus->dhd, flags);
4661 if (DHD_BUS_BUSY_CHECK_IN_TX(bus->dhd)) {
4662 DHD_ERROR(("Tx Request is not ended\n"));
4663 bus->dhd->busstate = DHD_BUS_DATA;
4664 DHD_GENERAL_UNLOCK(bus->dhd, flags);
4665 #ifndef DHD_EFI
4666 return -EBUSY;
4667 #else
4668 return BCME_ERROR;
4669 #endif
4670 }
4671
4672 /* stop all interface network queue. */
4673 dhd_bus_stop_queue(bus);
4674 DHD_GENERAL_UNLOCK(bus->dhd, flags);
4675
4676 DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
4677 #ifdef DHD_TIMESYNC
4678 	/* disable time sync mechanism, if configured */
4679 dhd_timesync_control(bus->dhd, TRUE);
4680 #endif /* DHD_TIMESYNC */
4681 #if defined(PCIE_OOB) || defined(PCIE_INB_DW)
4682 dhd_bus_set_device_wake(bus, TRUE);
4683 #endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
4684 #ifdef PCIE_OOB
4685 bus->oob_presuspend = TRUE;
4686 #endif
4687 #ifdef PCIE_INB_DW
4688 /* De-assert at this point for In-band device_wake */
4689 if (INBAND_DW_ENAB(bus)) {
4690 dhd_bus_set_device_wake(bus, FALSE);
4691 dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_HOST_SLEEP_WAIT);
4692 }
4693 #endif /* PCIE_INB_DW */
4694
4695 /* Clear wait_for_d3_ack */
4696 bus->wait_for_d3_ack = 0;
4697 /*
4698 * Send H2D_HOST_D3_INFORM to dongle and mark
4699 * bus->d3_suspend_pending to TRUE in dhdpcie_send_mb_data
4700 * inside atomic context, so that no more DBs will be
4701 * rung after sending D3_INFORM
4702 */
4703 dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
4704
4705 /* Wait for D3 ACK for D3_ACK_RESP_TIMEOUT seconds */
4706 dhd_os_set_ioctl_resp_timeout(D3_ACK_RESP_TIMEOUT);
4707 timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
4708
4709 #ifdef DHD_RECOVER_TIMEOUT
4710 if (bus->wait_for_d3_ack == 0) {
4711 /* If wait_for_d3_ack was not updated because D2H MB was not received */
4712 uint32 intstatus = 0;
4713 uint32 intmask = 0;
4714 intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
4715 intmask = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask, 0, 0);
4716 if ((intstatus) && (!intmask) && (timeleft == 0) &&
4717 (!dhd_query_bus_erros(bus->dhd))) {
4718 DHD_ERROR(("%s: D3 ACK trying again intstatus=%x intmask=%x\n",
4719 __FUNCTION__, intstatus, intmask));
4720 DHD_ERROR(("\n ------- DUMPING INTR enable/disable counters\r\n"));
4721 DHD_ERROR(("resume_intr_enable_count=%lu dpc_intr_en_count=%lu\n"
4722 "isr_intr_disable_count=%lu suspend_intr_dis_count=%lu\n"
4723 "dpc_return_busdown_count=%lu\n",
4724 bus->resume_intr_enable_count, bus->dpc_intr_enable_count,
4725 bus->isr_intr_disable_count,
4726 bus->suspend_intr_disable_count,
4727 bus->dpc_return_busdown_count));
4728
4729 dhd_prot_process_ctrlbuf(bus->dhd);
4730
4731 timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
4732
4733 /* Enable Back Interrupts using IntMask */
4734 dhdpcie_bus_intr_enable(bus);
4735 }
4736 } /* bus->wait_for_d3_ack was 0 */
4737 #endif /* DHD_RECOVER_TIMEOUT */
4738
4739 DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
4740
4741 /* To allow threads that got pre-empted to complete.
4742 */
4743 while ((active = dhd_os_check_wakelock_all(bus->dhd)) &&
4744 (idle_retry < MAX_WKLK_IDLE_CHECK)) {
4745 OSL_SLEEP(1);
4746 idle_retry++;
4747 }
4748
4749 if (bus->wait_for_d3_ack) {
4750 DHD_ERROR(("%s: Got D3 Ack \n", __FUNCTION__));
4751
4752 /* Got D3 Ack. Suspend the bus */
4753 if (active) {
4754 				DHD_ERROR(("%s(): Suspend failed because of wakelock, "
4755 					"restoring Dongle to D0\n", __FUNCTION__));
4756
4757 /*
4758 * Dongle still thinks that it has to be in D3 state until
4759 * it gets a D0 Inform, but we are backing off from suspend.
4760 * Ensure that Dongle is brought back to D0.
4761 *
4762 * Bringing back Dongle from D3 Ack state to D0 state is a
4763 * 2 step process. Dongle would want to know that D0 Inform
4764 * would be sent as a MB interrupt to bring it out of D3 Ack
4765 				 * state to D0 state. So we have to send both of these messages.
4766 */
4767
4768 /* Clear wait_for_d3_ack to send D0_INFORM or host_ready */
4769 bus->wait_for_d3_ack = 0;
4770
4771 /* Enable back the intmask which was cleared in DPC
4772 * after getting D3_ACK.
4773 */
4774 bus->resume_intr_enable_count++;
4775 dhdpcie_bus_intr_enable(bus);
4776
4777 if (bus->use_d0_inform) {
4778 DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
4779 dhdpcie_send_mb_data(bus,
4780 (H2D_HOST_D0_INFORM_IN_USE | H2D_HOST_D0_INFORM));
4781 DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
4782 }
4783 /* ring doorbell 1 (hostready) */
4784 dhd_bus_hostready(bus);
4785
4786 DHD_GENERAL_LOCK(bus->dhd, flags);
4787 bus->d3_suspend_pending = FALSE;
4788 bus->dhd->busstate = DHD_BUS_DATA;
4789 /* resume all interface network queue. */
4790 dhd_bus_start_queue(bus);
4791 DHD_GENERAL_UNLOCK(bus->dhd, flags);
4792 rc = BCME_ERROR;
4793 } else {
4794 #ifdef PCIE_OOB
4795 bus->oob_presuspend = FALSE;
4796 if (OOB_DW_ENAB(bus)) {
4797 dhd_bus_set_device_wake(bus, FALSE);
4798 }
4799 #endif /* PCIE_OOB */
4800 #if defined(PCIE_OOB) || defined(BCMPCIE_OOB_HOST_WAKE)
4801 bus->oob_presuspend = TRUE;
4802 #endif /* PCIE_OOB || BCMPCIE_OOB_HOST_WAKE */
4803 #ifdef PCIE_INB_DW
4804 if (INBAND_DW_ENAB(bus)) {
4805 if (dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
4806 DW_DEVICE_HOST_SLEEP_WAIT) {
4807 dhdpcie_bus_set_pcie_inband_dw_state(bus,
4808 DW_DEVICE_HOST_SLEEP);
4809 }
4810 }
4811 #endif /* PCIE_INB_DW */
4812 if (bus->use_d0_inform &&
4813 (bus->api.fw_rev < PCIE_SHARED_VERSION_6)) {
4814 DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
4815 dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM_IN_USE));
4816 DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
4817 }
4818 #if defined(BCMPCIE_OOB_HOST_WAKE)
4819 dhdpcie_oob_intr_set(bus, TRUE);
4820 #endif /* BCMPCIE_OOB_HOST_WAKE */
4821
4822 DHD_GENERAL_LOCK(bus->dhd, flags);
4823 /* The Host cannot process interrupts now so disable the same.
4824 * No need to disable the dongle INTR using intmask, as we are
4825 * already calling dhdpcie_bus_intr_disable from DPC context after
4826 * getting D3_ACK. Code may not look symmetric between Suspend and
4827 * Resume paths but this is done to close down the timing window
4828 * between DPC and suspend context.
4829 */
4830 /* Disable interrupt from host side!! */
4831 dhdpcie_disable_irq_nosync(bus);
4832
4833 bus->dhd->d3ackcnt_timeout = 0;
4834 bus->d3_suspend_pending = FALSE;
4835 bus->dhd->busstate = DHD_BUS_SUSPEND;
4836 DHD_GENERAL_UNLOCK(bus->dhd, flags);
4837 /* Handle Host Suspend */
4838 rc = dhdpcie_pci_suspend_resume(bus, state);
4839 }
4840 } else if (timeleft == 0) {
4841 bus->dhd->d3ack_timeout_occured = TRUE;
4842 /* If the D3 Ack has timeout */
4843 bus->dhd->d3ackcnt_timeout++;
4844 DHD_ERROR(("%s: resumed on timeout for D3 ACK d3_inform_cnt %d \n",
4845 __FUNCTION__, bus->dhd->d3ackcnt_timeout));
4846 DHD_GENERAL_LOCK(bus->dhd, flags);
4847 bus->d3_suspend_pending = FALSE;
4848 bus->dhd->busstate = DHD_BUS_DATA;
4849 /* resume all interface network queue. */
4850 dhd_bus_start_queue(bus);
4851 DHD_GENERAL_UNLOCK(bus->dhd, flags);
4852 if (!bus->dhd->dongle_trap_occured) {
4853 uint32 intstatus = 0;
4854
4855 /* Check if PCIe bus status is valid */
4856 intstatus = si_corereg(bus->sih,
4857 bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
4858 if (intstatus == (uint32)-1) {
4859 /* Invalidate PCIe bus status */
4860 bus->is_linkdown = 1;
4861 }
4862
4863 dhd_bus_dump_console_buffer(bus);
4864 dhd_prot_debug_info_print(bus->dhd);
4865 #ifdef DHD_FW_COREDUMP
4866 if (bus->dhd->memdump_enabled) {
4867 /* write core dump to file */
4868 bus->dhd->memdump_type = DUMP_TYPE_D3_ACK_TIMEOUT;
4869 dhdpcie_mem_dump(bus);
4870 }
4871 #endif /* DHD_FW_COREDUMP */
4872 DHD_ERROR(("%s: Event HANG send up due to D3_ACK timeout\n",
4873 __FUNCTION__));
4874 #ifdef SUPPORT_LINKDOWN_RECOVERY
4875 #ifdef CONFIG_ARCH_MSM
4876 bus->no_cfg_restore = 1;
4877 #endif /* CONFIG_ARCH_MSM */
4878 #endif /* SUPPORT_LINKDOWN_RECOVERY */
4879 dhd_os_check_hang(bus->dhd, 0, -ETIMEDOUT);
4880 }
4881 rc = -ETIMEDOUT;
4882 }
4883 bus->wait_for_d3_ack = 1;
4884
4885 #ifdef PCIE_OOB
4886 bus->oob_presuspend = FALSE;
4887 #endif /* PCIE_OOB */
4888 } else {
4889 /* Resume */
4890 /**
4891 * PCIE2_BAR0_CORE2_WIN gets reset after D3 cold.
4892 * si_backplane_access(function to read/write backplane)
4893 * updates the window(PCIE2_BAR0_CORE2_WIN) only if
4894 	 * window being accessed is different from the window
4895 * being pointed by second_bar0win.
4896 * Since PCIE2_BAR0_CORE2_WIN is already reset because of D3 cold,
4897 * invalidating second_bar0win after resume updates
4898 * PCIE2_BAR0_CORE2_WIN with right window.
4899 */
4900 si_invalidate_second_bar0win(bus->sih);
4901 #if defined(BCMPCIE_OOB_HOST_WAKE)
4902 DHD_OS_OOB_IRQ_WAKE_UNLOCK(bus->dhd);
4903 #endif /* BCMPCIE_OOB_HOST_WAKE */
4904 #ifdef PCIE_INB_DW
4905 if (INBAND_DW_ENAB(bus)) {
4906 if (dhdpcie_bus_get_pcie_inband_dw_state(bus) == DW_DEVICE_HOST_SLEEP) {
4907 dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_HOST_WAKE_WAIT);
4908 }
4909 }
4910 #endif /* PCIE_INB_DW */
4911 rc = dhdpcie_pci_suspend_resume(bus, state);
4912
4913 #ifdef BCMPCIE_OOB_HOST_WAKE
4914 bus->oob_presuspend = FALSE;
4915 #endif /* BCMPCIE_OOB_HOST_WAKE */
4916
4917 if (bus->dhd->busstate == DHD_BUS_SUSPEND) {
4918 if (bus->use_d0_inform) {
4919 DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
4920 dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM));
4921 DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
4922 }
4923 /* ring doorbell 1 (hostready) */
4924 dhd_bus_hostready(bus);
4925 }
4926
4927 DHD_GENERAL_LOCK(bus->dhd, flags);
4928 bus->dhd->busstate = DHD_BUS_DATA;
4929 #ifdef DHD_PCIE_RUNTIMEPM
4930 if (DHD_BUS_BUSY_CHECK_RPM_SUSPEND_DONE(bus->dhd)) {
4931 bus->bus_wake = 1;
4932 OSL_SMP_WMB();
4933 wake_up_interruptible(&bus->rpm_queue);
4934 }
4935 #endif /* DHD_PCIE_RUNTIMEPM */
4936 #ifdef PCIE_OOB
4937 /*
4938 * Assert & Deassert the Device Wake. The following is the explanation for doing so.
4939 * 0) At this point,
4940 * Host is in suspend state, Link is in L2/L3, Dongle is in D3 Cold
4941 * Device Wake is enabled.
4942 * 1) When the Host comes out of Suspend, it first sends PERST# in the Link.
4943 * Looking at this the Dongle moves from D3 Cold to NO DS State
4944 * 2) Now The Host OS calls the "resume" function of DHD. From here the DHD first
4945 * Asserts the Device Wake.
4946 * From the defn, when the Device Wake is asserted, The dongle FW will ensure
4947 * that the Dongle is out of deep sleep IF the device is already in deep sleep.
4948 * But note that now the Dongle is NOT in Deep sleep and is actually in
4949 * NO DS state. So just driving the Device Wake high does not trigger any state
4950 * transitions. The Host should actually "Toggle" the Device Wake to ensure
4951 * that Dongle synchronizes with the Host and starts the State Transition to D0.
4952 * 4) Note that the above explanation is applicable Only when the Host comes out of
4953 * suspend and the Dongle comes out of D3 Cold
4954 */
4955 /* This logic is not required when hostready is enabled */
4956
4957 if (!bus->dhd->d2h_hostrdy_supported) {
4958 if (OOB_DW_ENAB(bus)) {
4959 dhd_bus_set_device_wake(bus, TRUE);
4960 OSL_DELAY(1000);
4961 dhd_bus_set_device_wake(bus, FALSE);
4962 }
4963 }
4964 #endif /* PCIE_OOB */
4965 /* resume all interface network queue. */
4966 dhd_bus_start_queue(bus);
4967 /* The Host is ready to process interrupts now so enable the same. */
4968
4969 /* TODO: for NDIS also we need to use enable_irq in future */
4970 bus->resume_intr_enable_count++;
4971 dhdpcie_bus_intr_enable(bus); /* Enable back interrupt using Intmask!! */
4972 dhdpcie_enable_irq(bus); /* Enable back interrupt from Host side!! */
4973 DHD_GENERAL_UNLOCK(bus->dhd, flags);
4974 #ifdef DHD_TIMESYNC
4975 DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
4976 		/* enable time sync mechanism, if configured */
4977 dhd_timesync_control(bus->dhd, FALSE);
4978 DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
4979 #endif /* DHD_TIMESYNC */
4980 }
4981 return rc;
4982 }
4983
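/* Force the PCIe core clock to ALP (enable TRUE) or release the force
 * (enable FALSE) via the clk_ctl_st register.
 */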
4984 uint32
4985 dhdpcie_force_alp(struct dhd_bus *bus, bool enable)
4986 {
4987 ASSERT(bus && bus->sih);
4988 if (enable) {
4989 si_corereg(bus->sih, bus->sih->buscoreidx,
4990 OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP, CCS_FORCEALP);
4991 } else {
4992 si_corereg(bus->sih, bus->sih->buscoreidx,
4993 OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP, 0);
4994 }
4995 return 0;
4996 }
4997
4998 /* set pcie l1 entry time: dhd pciereg 0x1004[22:16] */
4999 uint32
5000 dhdpcie_set_l1_entry_time(struct dhd_bus *bus, int l1_entry_time)
5001 {
5002 uint reg_val;
5003
5004 ASSERT(bus && bus->sih);
5005
5006 si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0,
5007 0x1004);
5008 reg_val = si_corereg(bus->sih, bus->sih->buscoreidx,
5009 OFFSETOF(sbpcieregs_t, configdata), 0, 0);
5010 reg_val = (reg_val & ~(0x7f << 16)) | ((l1_entry_time & 0x7f) << 16);
5011 si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), ~0,
5012 reg_val);
5013
5014 return 0;
5015 }
5016
5017 /** Transfers bytes from host to dongle and to host again using DMA */
5018 static int
5019 dhdpcie_bus_dmaxfer_req(
5020 struct dhd_bus *bus, uint32 len, uint32 srcdelay, uint32 destdelay, uint32 d11_lpbk)
5021 {
5022 if (bus->dhd == NULL) {
5023 DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
5024 return BCME_ERROR;
5025 }
5026 if (bus->dhd->prot == NULL) {
5027 DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
5028 return BCME_ERROR;
5029 }
5030 if (bus->dhd->busstate != DHD_BUS_DATA) {
5031 		DHD_ERROR(("%s: not in a ready state for DMA transfer\n", __FUNCTION__));
5032 return BCME_ERROR;
5033 }
5034
5035 if (len < 5 || len > 4194296) {
5036 DHD_ERROR(("%s: len is too small or too large\n", __FUNCTION__));
5037 return BCME_ERROR;
5038 }
5039 return dhdmsgbuf_dmaxfer_req(bus->dhd, len, srcdelay, destdelay, d11_lpbk);
5040 }
5041
5042
5043
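/* Enter (enter TRUE) or exit (enter FALSE) firmware download state. Entering
 * halts the ARM core and prepares RAM for download; exiting writes the NVRAM
 * vars and the reset vector, then releases the ARM so the new image boots.
 */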
5044 static int
5045 dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter)
5046 {
5047 int bcmerror = 0;
5048 volatile uint32 *cr4_regs;
5049
5050 if (!bus->sih) {
5051 DHD_ERROR(("%s: NULL sih!!\n", __FUNCTION__));
5052 return BCME_ERROR;
5053 }
5054 /* To enter download state, disable ARM and reset SOCRAM.
5055 * To exit download state, simply reset ARM (default is RAM boot).
5056 */
5057 if (enter) {
5058 /* Make sure BAR1 maps to backplane address 0 */
5059 dhdpcie_bus_cfg_write_dword(bus, PCI_BAR1_WIN, 4, 0x00000000);
5060 bus->alp_only = TRUE;
5061
5062 		/* some chips (e.g. 43602) have two ARM cores; the CR4 receives the firmware. */
5063 cr4_regs = si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
5064
5065 if (cr4_regs == NULL && !(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
5066 !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) &&
5067 !(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
5068 DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
5069 bcmerror = BCME_ERROR;
5070 goto fail;
5071 }
5072
5073 if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
5074 /* Halt ARM & remove reset */
5075 si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
5076 if (!(si_setcore(bus->sih, SYSMEM_CORE_ID, 0))) {
5077 DHD_ERROR(("%s: Failed to find SYSMEM core!\n", __FUNCTION__));
5078 bcmerror = BCME_ERROR;
5079 goto fail;
5080 }
5081 si_core_reset(bus->sih, 0, 0);
5082 			/* Reset the last 4 bytes of RAM address, to be used for the shared area */
5083 dhdpcie_init_shared_addr(bus);
5084 } else if (cr4_regs == NULL) { /* no CR4 present on chip */
5085 si_core_disable(bus->sih, 0);
5086
5087 if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
5088 DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
5089 bcmerror = BCME_ERROR;
5090 goto fail;
5091 }
5092
5093 si_core_reset(bus->sih, 0, 0);
5094
5095 /* Clear the top bit of memory */
5096 if (bus->ramsize) {
5097 uint32 zeros = 0;
5098 if (dhdpcie_bus_membytes(bus, TRUE, bus->ramsize - 4,
5099 (uint8*)&zeros, 4) < 0) {
5100 bcmerror = BCME_ERROR;
5101 goto fail;
5102 }
5103 }
5104 } else {
5105 /* For CR4,
5106 * Halt ARM
5107 * Remove ARM reset
5108 * Read RAM base address [0x18_0000]
5109 * [next] Download firmware
5110 * [done at else] Populate the reset vector
5111 * [done at else] Remove ARM halt
5112 */
5113 /* Halt ARM & remove reset */
5114 si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
5115 if (BCM43602_CHIP(bus->sih->chip)) {
5116 W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 5);
5117 W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
5118 W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 7);
5119 W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
5120 }
5121 			/* Reset the last 4 bytes of RAM address, to be used for the shared area */
5122 dhdpcie_init_shared_addr(bus);
5123 }
5124 } else {
5125 if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
5126 /* write vars */
5127 if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
5128 DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
5129 goto fail;
5130 }
5131 /* switch back to arm core again */
5132 if (!(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
5133 DHD_ERROR(("%s: Failed to find ARM CA7 core!\n", __FUNCTION__));
5134 bcmerror = BCME_ERROR;
5135 goto fail;
5136 }
5137 /* write address 0 with reset instruction */
5138 bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
5139 (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
5140 /* now remove reset and halt and continue to run CA7 */
5141 } else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
5142 if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
5143 DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
5144 bcmerror = BCME_ERROR;
5145 goto fail;
5146 }
5147
5148 if (!si_iscoreup(bus->sih)) {
5149 DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__));
5150 bcmerror = BCME_ERROR;
5151 goto fail;
5152 }
5153
5154 /* Enable remap before ARM reset but after vars.
5155 * No backplane access in remap mode
5156 */
5157 if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
5158 !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
5159 DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
5160 bcmerror = BCME_ERROR;
5161 goto fail;
5162 }
5163
5164
5165 if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
5166 !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
5167 DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
5168 bcmerror = BCME_ERROR;
5169 goto fail;
5170 }
5171 } else {
5172 if (BCM43602_CHIP(bus->sih->chip)) {
5173 /* Firmware crashes on SOCSRAM access when core is in reset */
5174 if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
5175 DHD_ERROR(("%s: Failed to find SOCRAM core!\n",
5176 __FUNCTION__));
5177 bcmerror = BCME_ERROR;
5178 goto fail;
5179 }
5180 si_core_reset(bus->sih, 0, 0);
5181 si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
5182 }
5183
5184 /* write vars */
5185 if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
5186 DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
5187 goto fail;
5188 }
5189
5190 #ifdef BCM_ASLR_HEAP
5191 /* write a random number to TCM for the purpose of
5192 * randomizing heap address space.
5193 */
5194 dhdpcie_wrt_rnd(bus);
5195 #endif /* BCM_ASLR_HEAP */
5196
5197 /* switch back to arm core again */
5198 if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
5199 DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__));
5200 bcmerror = BCME_ERROR;
5201 goto fail;
5202 }
5203
5204 /* write address 0 with reset instruction */
5205 bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
5206 (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
5207
5208 if (bcmerror == BCME_OK) {
5209 uint32 tmp;
5210
5211 bcmerror = dhdpcie_bus_membytes(bus, FALSE, 0,
5212 (uint8 *)&tmp, sizeof(tmp));
5213
5214 if (bcmerror == BCME_OK && tmp != bus->resetinstr) {
5215 DHD_ERROR(("%s: Failed to write 0x%08x to addr 0\n",
5216 __FUNCTION__, bus->resetinstr));
5217 DHD_ERROR(("%s: contents of addr 0 is 0x%08x\n",
5218 __FUNCTION__, tmp));
5219 bcmerror = BCME_ERROR;
5220 goto fail;
5221 }
5222 }
5223
5224 /* now remove reset and halt and continue to run CR4 */
5225 }
5226
5227 si_core_reset(bus->sih, 0, 0);
5228
5229 /* Allow HT Clock now that the ARM is running. */
5230 bus->alp_only = FALSE;
5231
5232 bus->dhd->busstate = DHD_BUS_LOAD;
5233 }
5234
5235 fail:
5236 /* Always return to PCIE core */
5237 si_setcore(bus->sih, PCIE2_CORE_ID, 0);
5238
5239 return bcmerror;
5240 } /* dhdpcie_bus_download_state */
5241
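/* Copy the NVRAM vars to the end of dongle RAM and write the length/checksum
 * token into the last word of usable memory.
 */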
5242 static int
5243 dhdpcie_bus_write_vars(dhd_bus_t *bus)
5244 {
5245 int bcmerror = 0;
5246 uint32 varsize, phys_size;
5247 uint32 varaddr;
5248 uint8 *vbuffer;
5249 uint32 varsizew;
5250 #ifdef DHD_DEBUG
5251 uint8 *nvram_ularray;
5252 #endif /* DHD_DEBUG */
5253
5254 	/* Even if there are no vars to be written, we still need to set the ramsize. */
5255 varsize = bus->varsz ? ROUNDUP(bus->varsz, 4) : 0;
5256 varaddr = (bus->ramsize - 4) - varsize;
5257
5258 varaddr += bus->dongle_ram_base;
5259
5260 if (bus->vars) {
5261 vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize);
5262 if (!vbuffer)
5263 return BCME_NOMEM;
5264
5265 bzero(vbuffer, varsize);
5266 bcopy(bus->vars, vbuffer, bus->varsz);
5267 /* Write the vars list */
5268 bcmerror = dhdpcie_bus_membytes(bus, TRUE, varaddr, vbuffer, varsize);
5269
5270 /* Implement read back and verify later */
5271 #ifdef DHD_DEBUG
5272 /* Verify NVRAM bytes */
5273 DHD_INFO(("%s: Compare NVRAM dl & ul; varsize=%d\n", __FUNCTION__, varsize));
5274 nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize);
5275 if (!nvram_ularray)
5276 return BCME_NOMEM;
5277
5278 /* Upload image to verify downloaded contents. */
5279 memset(nvram_ularray, 0xaa, varsize);
5280
5281 /* Read the vars list to temp buffer for comparison */
5282 bcmerror = dhdpcie_bus_membytes(bus, FALSE, varaddr, nvram_ularray, varsize);
5283 if (bcmerror) {
5284 DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n",
5285 __FUNCTION__, bcmerror, varsize, varaddr));
5286 }
5287
5288 /* Compare the org NVRAM with the one read from RAM */
5289 if (memcmp(vbuffer, nvram_ularray, varsize)) {
5290 DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__));
5291 } else
5292 DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n",
5293 __FUNCTION__));
5294
5295 MFREE(bus->dhd->osh, nvram_ularray, varsize);
5296 #endif /* DHD_DEBUG */
5297
5298 MFREE(bus->dhd->osh, vbuffer, varsize);
5299 }
5300
5301 phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize;
5302
5303 phys_size += bus->dongle_ram_base;
5304
5305 /* adjust to the user specified RAM */
5306 DHD_INFO(("%s: Physical memory size: %d, usable memory size: %d\n", __FUNCTION__,
5307 phys_size, bus->ramsize));
5308 DHD_INFO(("%s: Vars are at %d, orig varsize is %d\n", __FUNCTION__,
5309 varaddr, varsize));
5310 varsize = ((phys_size - 4) - varaddr);
5311
5312 /*
5313 * Determine the length token:
5314 * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits.
5315 */
5316 if (bcmerror) {
5317 varsizew = 0;
5318 bus->nvram_csm = varsizew;
5319 } else {
5320 varsizew = varsize / 4;
5321 varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
5322 bus->nvram_csm = varsizew;
5323 varsizew = htol32(varsizew);
5324 }
5325
5326 DHD_INFO(("%s: New varsize is %d, length token=0x%08x\n", __FUNCTION__, varsize, varsizew));
5327
5328 /* Write the length token to the last word */
5329 bcmerror = dhdpcie_bus_membytes(bus, TRUE, (phys_size - 4),
5330 (uint8*)&varsizew, 4);
5331
5332 return bcmerror;
5333 } /* dhdpcie_bus_write_vars */
5334
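/* Replace the cached NVRAM vars with the caller-supplied buffer; only
 * permitted while the bus is down, i.e. before the firmware is started.
 */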
5335 int
5336 dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len)
5337 {
5338 int bcmerror = BCME_OK;
5339
5340 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5341
5342 /* Basic sanity checks */
5343 if (bus->dhd->up) {
5344 bcmerror = BCME_NOTDOWN;
5345 goto err;
5346 }
5347 if (!len) {
5348 bcmerror = BCME_BUFTOOSHORT;
5349 goto err;
5350 }
5351
5352 /* Free the old ones and replace with passed variables */
5353 if (bus->vars)
5354 MFREE(bus->dhd->osh, bus->vars, bus->varsz);
5355
5356 bus->vars = MALLOC(bus->dhd->osh, len);
5357 bus->varsz = bus->vars ? len : 0;
5358 if (bus->vars == NULL) {
5359 bcmerror = BCME_NOMEM;
5360 goto err;
5361 }
5362
5363 /* Copy the passed variables, which should include the terminating double-null */
5364 bcopy(arg, bus->vars, bus->varsz);
5365
5366 #ifdef DHD_USE_SINGLE_NVRAM_FILE
5367 if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
5368 char *sp = NULL;
5369 char *ep = NULL;
5370 int i;
5371 char tag[2][8] = {"ccode=", "regrev="};
5372
5373 /* Find ccode and regrev info */
5374 for (i = 0; i < 2; i++) {
5375 sp = strnstr(bus->vars, tag[i], bus->varsz);
5376 if (!sp) {
5377 DHD_ERROR(("%s: Could not find ccode info from the nvram %s\n",
5378 __FUNCTION__, bus->nv_path));
5379 bcmerror = BCME_ERROR;
5380 goto err;
5381 }
5382 sp = strchr(sp, '=');
5383 ep = strchr(sp, '\0');
5384 			/* We assume that the string length of both the ccode and
5385 			 * regrev values does not exceed WLC_CNTRY_BUF_SZ
5386 			 */
5387 if (sp && ep && ((ep - sp) <= WLC_CNTRY_BUF_SZ)) {
5388 sp++;
5389 while (*sp != '\0') {
5390 DHD_INFO(("%s: parse '%s', current sp = '%c'\n",
5391 __FUNCTION__, tag[i], *sp));
5392 *sp++ = '0';
5393 }
5394 } else {
5395 DHD_ERROR(("%s: Invalid parameter format when parsing for %s\n",
5396 __FUNCTION__, tag[i]));
5397 bcmerror = BCME_ERROR;
5398 goto err;
5399 }
5400 }
5401 }
5402 #endif /* DHD_USE_SINGLE_NVRAM_FILE */
5403
5404
5405 err:
5406 return bcmerror;
5407 }
5408
5409 /* loop through the capability list and see if the pcie capabilty exists */
5410 uint8
dhdpcie_find_pci_capability(osl_t * osh,uint8 req_cap_id)5411 dhdpcie_find_pci_capability(osl_t *osh, uint8 req_cap_id)
5412 {
5413 uint8 cap_id;
5414 uint8 cap_ptr = 0;
5415 uint8 byte_val;
5416
5417 /* check for Header type 0 */
5418 byte_val = read_pci_cfg_byte(PCI_CFG_HDR);
5419 if ((byte_val & 0x7f) != PCI_HEADER_NORMAL) {
5420 DHD_ERROR(("%s : PCI config header not normal.\n", __FUNCTION__));
5421 goto end;
5422 }
5423
5424 /* check if the capability pointer field exists */
5425 byte_val = read_pci_cfg_byte(PCI_CFG_STAT);
5426 if (!(byte_val & PCI_CAPPTR_PRESENT)) {
5427 DHD_ERROR(("%s : PCI CAP pointer not present.\n", __FUNCTION__));
5428 goto end;
5429 }
5430
5431 cap_ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR);
5432 /* check if the capability pointer is 0x00 */
5433 if (cap_ptr == 0x00) {
5434 DHD_ERROR(("%s : PCI CAP pointer is 0x00.\n", __FUNCTION__));
5435 goto end;
5436 }
5437
5438 /* loop thr'u the capability list and see if the pcie capabilty exists */
5439
5440 cap_id = read_pci_cfg_byte(cap_ptr);
5441
5442 while (cap_id != req_cap_id) {
5443 cap_ptr = read_pci_cfg_byte((cap_ptr + 1));
5444 if (cap_ptr == 0x00) break;
5445 cap_id = read_pci_cfg_byte(cap_ptr);
5446 }
5447
5448 end:
5449 return cap_ptr;
5450 }
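
/*
 * Minimal usage sketch (assumption: PCI_CAP_PCIECAP_ID is the PCIe capability
 * ID used elsewhere in this file). The walk above returns the config-space
 * offset of the matching capability header, or 0 if the header/status checks
 * fail or the list ends without a match.
 */
#if 0 /* illustrative only */
{
	uint8 pcie_off = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID);

	if (pcie_off != 0) {
		/* pcie_off points at the capability header; registers such as
		 * Link Control live at fixed offsets from it.
		 */
	}
}
#endif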

void
dhdpcie_pme_active(osl_t *osh, bool enable)
{
	uint8 cap_ptr;
	uint32 pme_csr;

	cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);

	if (!cap_ptr) {
		DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__));
		return;
	}

	pme_csr = OSL_PCI_READ_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32));
	DHD_ERROR(("%s : pme_sts_ctrl 0x%x\n", __FUNCTION__, pme_csr));

	pme_csr |= PME_CSR_PME_STAT;
	if (enable) {
		pme_csr |= PME_CSR_PME_EN;
	} else {
		pme_csr &= ~PME_CSR_PME_EN;
	}

	OSL_PCI_WRITE_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32), pme_csr);
}

bool
dhdpcie_pme_cap(osl_t *osh)
{
	uint8 cap_ptr;
	uint32 pme_cap;

	cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);

	if (!cap_ptr) {
		DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__));
		return FALSE;
	}

	pme_cap = OSL_PCI_READ_CONFIG(osh, cap_ptr, sizeof(uint32));

	DHD_ERROR(("%s : pme_cap 0x%x\n", __FUNCTION__, pme_cap));

	return ((pme_cap & PME_CAP_PM_STATES) != 0);
}

uint32
dhdpcie_lcreg(osl_t *osh, uint32 mask, uint32 val)
{
	uint8 pcie_cap;
	uint8 lcreg_offset;	/* PCIe capability LCreg offset in the config space */
	uint32 reg_val;


	pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID);

	if (!pcie_cap) {
		DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__));
		return 0;
	}

	lcreg_offset = pcie_cap + PCIE_CAP_LINKCTRL_OFFSET;

	/* set operation */
	if (mask) {
		/* read */
		reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));

		/* modify */
		reg_val &= ~mask;
		reg_val |= (mask & val);

		/* write */
		OSL_PCI_WRITE_CONFIG(osh, lcreg_offset, sizeof(uint32), reg_val);
	}
	return OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
}
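
/*
 * Usage sketch for the mask/val convention above: bits set in 'mask' are
 * rewritten from 'val', everything else is preserved, and the (possibly
 * updated) Link Control register is returned. With mask == 0 the call is a
 * pure read. PCIE_ASPM_L1_ENAB is assumed here to be the L1 enable bit of
 * the Link Control register.
 */
#if 0 /* illustrative only */
{
	/* Pure read of the current Link Control value */
	uint32 lc = dhdpcie_lcreg(osh, 0, 0);

	/* Disable ASPM L1 while leaving the other bits untouched */
	dhdpcie_lcreg(osh, PCIE_ASPM_L1_ENAB, 0);

	BCM_REFERENCE(lc);
}
#endif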


uint8
dhdpcie_clkreq(osl_t *osh, uint32 mask, uint32 val)
{
	uint8 pcie_cap;
	uint32 reg_val;
	uint8 lcreg_offset;	/* PCIe capability LCreg offset in the config space */

	pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID);

	if (!pcie_cap) {
		DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__));
		return 0;
	}

	lcreg_offset = pcie_cap + PCIE_CAP_LINKCTRL_OFFSET;

	reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
	/* set operation */
	if (mask) {
		if (val)
			reg_val |= PCIE_CLKREQ_ENAB;
		else
			reg_val &= ~PCIE_CLKREQ_ENAB;
		OSL_PCI_WRITE_CONFIG(osh, lcreg_offset, sizeof(uint32), reg_val);
		reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
	}
	if (reg_val & PCIE_CLKREQ_ENAB)
		return 1;
	else
		return 0;
}

void
dhd_dump_intr_registers(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
{
	uint32 intstatus = 0;
	uint32 intmask = 0;
	uint32 mbintstatus = 0;
	uint32 d2h_mb_data = 0;

	intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
	intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxMask, 0, 0);
	mbintstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCID2H_MailBox, 0, 0);
	dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);

	bcm_bprintf(strbuf, "intstatus=0x%x intmask=0x%x mbintstatus=0x%x\n",
		intstatus, intmask, mbintstatus);
	bcm_bprintf(strbuf, "d2h_mb_data=0x%x def_intmask=0x%x\n",
		d2h_mb_data, dhd->bus->def_intmask);
	bcm_bprintf(strbuf, "\n ------- DUMPING INTR enable/disable counters-------\n");
	bcm_bprintf(strbuf, "resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n"
		"isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n"
		"dpc_return_busdown_count=%lu\n",
		dhd->bus->resume_intr_enable_count, dhd->bus->dpc_intr_enable_count,
		dhd->bus->isr_intr_disable_count, dhd->bus->suspend_intr_disable_count,
		dhd->bus->dpc_return_busdown_count);
}

/** Add bus dump output to a buffer */
void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
{
	uint16 flowid;
	int ix = 0;
	flow_ring_node_t *flow_ring_node;
	flow_info_t *flow_info;
	char eabuf[ETHER_ADDR_STR_LEN];

	if (dhdp->busstate != DHD_BUS_DATA)
		return;

#ifdef DHD_WAKE_STATUS
	bcm_bprintf(strbuf, "wake %u rxwake %u readctrlwake %u\n",
		bcmpcie_get_total_wake(dhdp->bus), dhdp->bus->wake_counts.rxwake,
		dhdp->bus->wake_counts.rcwake);
#ifdef DHD_WAKE_RX_STATUS
	bcm_bprintf(strbuf, " unicast %u multicast %u broadcast %u arp %u\n",
		dhdp->bus->wake_counts.rx_ucast, dhdp->bus->wake_counts.rx_mcast,
		dhdp->bus->wake_counts.rx_bcast, dhdp->bus->wake_counts.rx_arp);
	bcm_bprintf(strbuf, " multi4 %u multi6 %u icmp6 %u multiother %u\n",
		dhdp->bus->wake_counts.rx_multi_ipv4, dhdp->bus->wake_counts.rx_multi_ipv6,
		dhdp->bus->wake_counts.rx_icmpv6, dhdp->bus->wake_counts.rx_multi_other);
	bcm_bprintf(strbuf, " icmp6_ra %u, icmp6_na %u, icmp6_ns %u\n",
		dhdp->bus->wake_counts.rx_icmpv6_ra, dhdp->bus->wake_counts.rx_icmpv6_na,
		dhdp->bus->wake_counts.rx_icmpv6_ns);
#endif /* DHD_WAKE_RX_STATUS */
#ifdef DHD_WAKE_EVENT_STATUS
	for (flowid = 0; flowid < WLC_E_LAST; flowid++)
		if (dhdp->bus->wake_counts.rc_event[flowid] != 0)
			bcm_bprintf(strbuf, " %s = %u\n", bcmevent_get_name(flowid),
				dhdp->bus->wake_counts.rc_event[flowid]);
	bcm_bprintf(strbuf, "\n");
#endif /* DHD_WAKE_EVENT_STATUS */
#endif /* DHD_WAKE_STATUS */

	dhd_prot_print_info(dhdp, strbuf);
	dhd_dump_intr_registers(dhdp, strbuf);
	bcm_bprintf(strbuf, "h2d_mb_data_ptr_addr 0x%x, d2h_mb_data_ptr_addr 0x%x\n",
		dhdp->bus->h2d_mb_data_ptr_addr, dhdp->bus->d2h_mb_data_ptr_addr);
	bcm_bprintf(strbuf, "dhd cumm_ctr %d\n", DHD_CUMM_CTR_READ(&dhdp->cumm_ctr));
	bcm_bprintf(strbuf,
		"%s %4s %2s %4s %17s %4s %4s %6s %10s %4s %4s ",
		"Num:", "Flow", "If", "Prio", ":Dest_MacAddress:", "Qlen", "CLen", "L2CLen",
		"Overflows", "RD", "WR");
	bcm_bprintf(strbuf, "%5s %6s %5s \n", "Acked", "tossed", "noack");

	for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) {
		flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
		if (!flow_ring_node->active)
			continue;

		flow_info = &flow_ring_node->flow_info;
		bcm_bprintf(strbuf,
			"%3d. %4d %2d %4d %17s %4d %4d %6d %10u ", ix++,
			flow_ring_node->flowid, flow_info->ifindex, flow_info->tid,
			bcm_ether_ntoa((struct ether_addr *)&flow_info->da, eabuf),
			DHD_FLOW_QUEUE_LEN(&flow_ring_node->queue),
			DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_CLEN_PTR(&flow_ring_node->queue)),
			DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_L2CLEN_PTR(&flow_ring_node->queue)),
			DHD_FLOW_QUEUE_FAILURES(&flow_ring_node->queue));
		dhd_prot_print_flow_ring(dhdp, flow_ring_node->prot_info, strbuf,
			"%4d %4d ");
		bcm_bprintf(strbuf,
			"%5s %6s %5s\n", "NA", "NA", "NA");
	}
	bcm_bprintf(strbuf, "D3 inform cnt %d\n", dhdp->bus->d3_inform_cnt);
	bcm_bprintf(strbuf, "D0 inform cnt %d\n", dhdp->bus->d0_inform_cnt);
	bcm_bprintf(strbuf, "D0 inform in use cnt %d\n", dhdp->bus->d0_inform_in_use_cnt);
	if (dhdp->d2h_hostrdy_supported) {
		bcm_bprintf(strbuf, "hostready count:%d\n", dhdp->bus->hostready_count);
	}
#ifdef PCIE_INB_DW
	/* Inband device wake counters */
	if (INBAND_DW_ENAB(dhdp->bus)) {
		bcm_bprintf(strbuf, "Inband device_wake assert count: %d\n",
			dhdp->bus->inband_dw_assert_cnt);
		bcm_bprintf(strbuf, "Inband device_wake deassert count: %d\n",
			dhdp->bus->inband_dw_deassert_cnt);
		bcm_bprintf(strbuf, "Inband DS-EXIT <host initiated> count: %d\n",
			dhdp->bus->inband_ds_exit_host_cnt);
		bcm_bprintf(strbuf, "Inband DS-EXIT <device initiated> count: %d\n",
			dhdp->bus->inband_ds_exit_device_cnt);
		bcm_bprintf(strbuf, "Inband DS-EXIT Timeout count: %d\n",
			dhdp->bus->inband_ds_exit_to_cnt);
		bcm_bprintf(strbuf, "Inband HOST_SLEEP-EXIT Timeout count: %d\n",
			dhdp->bus->inband_host_sleep_exit_to_cnt);
	}
#endif /* PCIE_INB_DW */
}

/**
 * Brings transmit packets on all flow rings closer to the dongle, by moving (a subset) from their
 * flow queue to their flow ring.
 */
static void
dhd_update_txflowrings(dhd_pub_t *dhd)
{
	unsigned long flags;
	dll_t *item, *next;
	flow_ring_node_t *flow_ring_node;
	struct dhd_bus *bus = dhd->bus;

	/* Hold flowring_list_lock to ensure no race condition while accessing the List */
	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
	for (item = dll_head_p(&bus->flowring_active_list);
		(!dhd_is_device_removed(dhd) && !dll_end(&bus->flowring_active_list, item));
		item = next) {
		if (dhd->hang_was_sent) {
			break;
		}

		next = dll_next_p(item);
		flow_ring_node = dhd_constlist_to_flowring(item);

		/* Ensure that flow_ring_node in the list is not NULL */
		ASSERT(flow_ring_node != NULL);

		/* Ensure that the flowring node has valid contents */
		ASSERT(flow_ring_node->prot_info != NULL);

		dhd_prot_update_txflowring(dhd, flow_ring_node->flowid, flow_ring_node->prot_info);
	}
	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
}

/** Mailbox ringbell function */
static void
dhd_bus_gen_devmb_intr(struct dhd_bus *bus)
{
	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
		(bus->sih->buscorerev == 4)) {
		DHD_ERROR(("%s: mailbox communication not supported\n", __FUNCTION__));
		return;
	}
	if (bus->db1_for_mb) {
		/* this is a pcie core register, not the config register */
		DHD_INFO(("%s: writing a mailbox interrupt to the device, through doorbell 1\n",
			__FUNCTION__));
		si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_DB1, ~0, 0x12345678);
	} else {
		DHD_INFO(("%s: writing a mailbox interrupt to the device, through config space\n",
			__FUNCTION__));
		dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
		dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
	}
}

/* Upon receiving a mailbox interrupt,
 * if the H2D_FW_TRAP bit is set in the mailbox location,
 * the device traps
 */
static void
dhdpcie_fw_trap(dhd_bus_t *bus)
{
	/* Send the mailbox data and generate mailbox intr. */
	dhdpcie_send_mb_data(bus, H2D_FW_TRAP);
}

#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
void
dhd_bus_doorbell_timeout_reset(struct dhd_bus *bus)
{
	if (dhd_doorbell_timeout)
		dhd_timeout_start(&bus->doorbell_timer,
			(dhd_doorbell_timeout * 1000) / dhd_watchdog_ms);
	else if (!(bus->dhd->busstate == DHD_BUS_SUSPEND)) {
		dhd_bus_set_device_wake(bus, FALSE);
	}
}
#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */

#ifdef PCIE_INB_DW

void
dhd_bus_inb_ack_pending_ds_req(dhd_bus_t *bus)
{
	/* The DHD_BUS_INB_DW_LOCK must be held before
	 * calling this function !!
	 */
	if ((dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
		DW_DEVICE_DS_DEV_SLEEP_PEND) &&
		(bus->host_active_cnt == 0)) {
		dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_DS_DEV_SLEEP);
		dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
	}
}

int
dhd_bus_inb_set_device_wake(struct dhd_bus *bus, bool val)
{
	int timeleft;
	unsigned long flags;
	int ret;

	if (!INBAND_DW_ENAB(bus)) {
		return BCME_ERROR;
	}

	if (val) {
		DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);

		/*
		 * Reset the Door Bell Timeout value, so that the Watchdog
		 * doesn't try to deassert Device Wake while we are still
		 * in the process of asserting it.
		 */
		if (dhd_doorbell_timeout) {
			dhd_timeout_start(&bus->doorbell_timer,
				(dhd_doorbell_timeout * 1000) / dhd_watchdog_ms);
		}

		if (dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
			DW_DEVICE_DS_DEV_SLEEP) {
			/* Clear wait_for_ds_exit */
			bus->wait_for_ds_exit = 0;
			ret = dhdpcie_send_mb_data(bus, H2DMB_DS_DEVICE_WAKE_ASSERT);
			if (ret != BCME_OK) {
				DHD_ERROR(("Failed: assert Inband device_wake\n"));
				bus->wait_for_ds_exit = 1;
				DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
				ret = BCME_ERROR;
				goto exit;
			}
			dhdpcie_bus_set_pcie_inband_dw_state(bus,
				DW_DEVICE_DS_DISABLED_WAIT);
			bus->inband_dw_assert_cnt++;
		} else {
			DHD_INFO(("Not in DS SLEEP state \n"));
			DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
			ret = BCME_OK;
			goto exit;
		}

		/*
		 * Since we are going to wait/sleep .. release the lock.
		 * The Device Wake sanity is still valid, because
		 * a) If there is another context that comes in and tries
		 *    to assert DS again and if it gets the lock, since
		 *    ds_state would be now != DW_DEVICE_DS_DEV_SLEEP the
		 *    context would return saying Not in DS Sleep.
		 * b) If there is another context that comes in and tries
		 *    to de-assert DS and gets the lock,
		 *    since the ds_state is != DW_DEVICE_DS_DEV_WAKE
		 *    that context would return too. This cannot happen
		 *    since the watchdog is the only context that can
		 *    De-Assert Device Wake and as the first step of
		 *    Asserting the Device Wake, we have pushed out the
		 *    Door Bell Timeout.
		 *
		 */
		DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);

		if (!CAN_SLEEP()) {
			/* Called from context that cannot sleep */
			OSL_DELAY(1000);
			bus->wait_for_ds_exit = 1;
		} else {
			/* Wait for DS EXIT for DS_EXIT_TIMEOUT seconds */
			timeleft = dhd_os_ds_exit_wait(bus->dhd, &bus->wait_for_ds_exit);
			if (!bus->wait_for_ds_exit && timeleft == 0) {
				DHD_ERROR(("DS-EXIT timeout\n"));
				bus->inband_ds_exit_to_cnt++;
				bus->ds_exit_timeout = 0;
				ret = BCME_ERROR;
				goto exit;
			}
		}

		DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
		dhdpcie_bus_set_pcie_inband_dw_state(bus,
			DW_DEVICE_DS_DEV_WAKE);
		DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);

		ret = BCME_OK;
	} else {
		DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
		if ((dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
			DW_DEVICE_DS_DEV_WAKE)) {
			ret = dhdpcie_send_mb_data(bus, H2DMB_DS_DEVICE_WAKE_DEASSERT);
			if (ret != BCME_OK) {
				DHD_ERROR(("Failed: deassert Inband device_wake\n"));
				DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
				goto exit;
			}
			dhdpcie_bus_set_pcie_inband_dw_state(bus,
				DW_DEVICE_DS_ACTIVE);
			bus->inband_dw_deassert_cnt++;
		} else if ((dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
			DW_DEVICE_DS_DEV_SLEEP_PEND) &&
			(bus->host_active_cnt == 0)) {
			dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_DS_DEV_SLEEP);
			dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
		}

		ret = BCME_OK;
		DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
	}

exit:
	return ret;
}
#endif /* PCIE_INB_DW */
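
#ifdef PCIE_INB_DW
/*
 * Debug aid sketch: map the inband device-wake states used above to
 * printable names. The DW_DEVICE_* values are the ones referenced in this
 * file; the helper itself is hypothetical and not part of the driver.
 */
#if 0 /* illustrative only */
static const char *
dhd_bus_inb_dw_state_str(uint32 state)
{
	switch (state) {
	case DW_DEVICE_DS_ACTIVE:		return "ACTIVE";
	case DW_DEVICE_DS_DEV_SLEEP_PEND:	return "DEV_SLEEP_PEND";
	case DW_DEVICE_DS_DEV_SLEEP:		return "DEV_SLEEP";
	case DW_DEVICE_DS_DISABLED_WAIT:	return "DISABLED_WAIT";
	case DW_DEVICE_DS_DEV_WAKE:		return "DEV_WAKE";
	case DW_DEVICE_HOST_WAKE_WAIT:		return "HOST_WAKE_WAIT";
	default:				return "UNKNOWN";
	}
}
#endif
#endif /* PCIE_INB_DW */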


#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
int
dhd_bus_set_device_wake(struct dhd_bus *bus, bool val)
{
	if (bus->ds_enabled) {
#ifdef PCIE_INB_DW
		if (INBAND_DW_ENAB(bus)) {
			return dhd_bus_inb_set_device_wake(bus, val);
		}
#endif /* PCIE_INB_DW */
#ifdef PCIE_OOB
		if (OOB_DW_ENAB(bus)) {
			return dhd_os_oob_set_device_wake(bus, val);
		}
#endif /* PCIE_OOB */
	}
	return BCME_OK;
}
#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */

/** mailbox doorbell ring function */
void
dhd_bus_ringbell(struct dhd_bus *bus, uint32 value)
{
	/* Skip after sending D3_INFORM */
	if (bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) {
		DHD_ERROR(("%s: trying to ring the doorbell when in suspend state :"
			"busstate=%d, d3_suspend_pending=%d\n",
			__FUNCTION__, bus->dhd->busstate, bus->d3_suspend_pending));
		return;
	}
	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
		(bus->sih->buscorerev == 4)) {
		si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, PCIE_INTB, PCIE_INTB);
	} else {
		/* this is a pcie core register, not the config register */
		DHD_INFO(("%s: writing a door bell to the device\n", __FUNCTION__));
		if (IDMA_ACTIVE(bus->dhd)) {
			si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_MailBox_2,
				~0, value);
		} else {
			si_corereg(bus->sih, bus->sih->buscoreidx,
				PCIH2D_MailBox, ~0, 0x12345678);
		}
	}
}

/** mailbox doorbell ring function for IDMA/IFRM using dma channel2 */
void
dhd_bus_ringbell_2(struct dhd_bus *bus, uint32 value, bool devwake)
{
	/* this is a pcie core register, not the config register */
	/* Skip after sending D3_INFORM */
	if (bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) {
		DHD_ERROR(("%s: trying to ring the doorbell when in suspend state :"
			"busstate=%d, d3_suspend_pending=%d\n",
			__FUNCTION__, bus->dhd->busstate, bus->d3_suspend_pending));
		return;
	}
	DHD_INFO(("writing a door bell 2 to the device\n"));
	si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_MailBox_2,
		~0, value);
}

void
dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value)
{
	/* Skip after sending D3_INFORM */
	if (bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) {
		DHD_ERROR(("%s: trying to ring the doorbell when in suspend state :"
			"busstate=%d, d3_suspend_pending=%d\n",
			__FUNCTION__, bus->dhd->busstate, bus->d3_suspend_pending));
		return;
	}
#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
	if (OOB_DW_ENAB(bus)) {
		dhd_bus_set_device_wake(bus, TRUE);
	}
	dhd_bus_doorbell_timeout_reset(bus);
#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
	W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, value);
}

void
dhdpcie_bus_ringbell_2_fast(struct dhd_bus *bus, uint32 value, bool devwake)
{
	/* Skip after sending D3_INFORM */
	if (bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) {
		DHD_ERROR(("%s: trying to ring the doorbell when in suspend state :"
			"busstate=%d, d3_suspend_pending=%d\n",
			__FUNCTION__, bus->dhd->busstate, bus->d3_suspend_pending));
		return;
	}
#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
	if (devwake) {
		if (OOB_DW_ENAB(bus)) {
			dhd_bus_set_device_wake(bus, TRUE);
		}
	}
	dhd_bus_doorbell_timeout_reset(bus);
#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */

	W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_2_addr, value);
}

static void
dhd_bus_ringbell_oldpcie(struct dhd_bus *bus, uint32 value)
{
	uint32 w;
	/* Skip after sending D3_INFORM */
	if (bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) {
		DHD_ERROR(("%s: trying to ring the doorbell when in suspend state :"
			"busstate=%d, d3_suspend_pending=%d\n",
			__FUNCTION__, bus->dhd->busstate, bus->d3_suspend_pending));
		return;
	}
	w = (R_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr) & ~PCIE_INTB) | PCIE_INTB;
	W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, w);
}

dhd_mb_ring_t
dhd_bus_get_mbintr_fn(struct dhd_bus *bus)
{
	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
		(bus->sih->buscorerev == 4)) {
		bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
			PCIMailBoxInt);
		if (bus->pcie_mb_intr_addr) {
			bus->pcie_mb_intr_osh = si_osh(bus->sih);
			return dhd_bus_ringbell_oldpcie;
		}
	} else {
		bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
			PCIH2D_MailBox);
		if (bus->pcie_mb_intr_addr) {
			bus->pcie_mb_intr_osh = si_osh(bus->sih);
			return dhdpcie_bus_ringbell_fast;
		}
	}
	return dhd_bus_ringbell;
}

dhd_mb_ring_2_t
dhd_bus_get_mbintr_2_fn(struct dhd_bus *bus)
{
	bus->pcie_mb_intr_2_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
		PCIH2D_MailBox_2);
	if (bus->pcie_mb_intr_2_addr) {
		bus->pcie_mb_intr_osh = si_osh(bus->sih);
		return dhdpcie_bus_ringbell_2_fast;
	}
	return dhd_bus_ringbell_2;
}
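
/*
 * Usage sketch: the selectors above cache the doorbell register address and
 * hand back the matching ring routine, so the hot path can ring the doorbell
 * through one indirect call without re-resolving the core register each
 * time. 'ring_fn' is a hypothetical local used only for illustration.
 */
#if 0 /* illustrative only */
{
	dhd_mb_ring_t ring_fn = dhd_bus_get_mbintr_fn(bus);

	/* Ring the H2D doorbell with an arbitrary payload value */
	ring_fn(bus, 0x12345678);
}
#endif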

bool BCMFASTPATH
dhd_bus_dpc(struct dhd_bus *bus)
{
	bool resched = FALSE;	  /* Flag indicating resched wanted */
	unsigned long flags;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	DHD_GENERAL_LOCK(bus->dhd, flags);
	/* Check for only DHD_BUS_DOWN and not for DHD_BUS_DOWN_IN_PROGRESS
	 * to avoid IOCTL Resumed On timeout when ioctl is waiting for response
	 * and rmmod is fired in parallel, which will make DHD_BUS_DOWN_IN_PROGRESS
	 * and if we return from here, then IOCTL response will never be handled
	 */
	if (bus->dhd->busstate == DHD_BUS_DOWN) {
		DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__));
		bus->intstatus = 0;
		DHD_GENERAL_UNLOCK(bus->dhd, flags);
		bus->dpc_return_busdown_count++;
		return FALSE;
	}
#ifdef DHD_PCIE_RUNTIMEPM
	bus->idlecount = 0;
#endif /* DHD_PCIE_RUNTIMEPM */
	DHD_BUS_BUSY_SET_IN_DPC(bus->dhd);
	DHD_GENERAL_UNLOCK(bus->dhd, flags);

#ifdef DHD_READ_INTSTATUS_IN_DPC
	if (bus->ipend) {
		bus->ipend = FALSE;
		bus->intstatus = dhdpcie_bus_intstatus(bus);
		/* Check if the interrupt is ours or not */
		if (bus->intstatus == 0) {
			goto INTR_ON;
		}
		bus->intrcount++;
	}
#endif /* DHD_READ_INTSTATUS_IN_DPC */

	resched = dhdpcie_bus_process_mailbox_intr(bus, bus->intstatus);
	if (!resched) {
		bus->intstatus = 0;
#ifdef DHD_READ_INTSTATUS_IN_DPC
INTR_ON:
#endif /* DHD_READ_INTSTATUS_IN_DPC */
		bus->dpc_intr_enable_count++;
		dhdpcie_bus_intr_enable(bus); /* Enable back interrupt using Intmask!! */
	}

	DHD_GENERAL_LOCK(bus->dhd, flags);
	DHD_BUS_BUSY_CLEAR_IN_DPC(bus->dhd);
	dhd_os_busbusy_wake(bus->dhd);
	DHD_GENERAL_UNLOCK(bus->dhd, flags);

	return resched;
}


int
dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data)
{
	uint32 cur_h2d_mb_data = 0;
	unsigned long flags;

	DHD_INFO_HW4(("%s: H2D_MB_DATA: 0x%08X\n", __FUNCTION__, h2d_mb_data));

	if (bus->is_linkdown) {
		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
		return BCME_ERROR;
	}

	DHD_GENERAL_LOCK(bus->dhd, flags);

	if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !bus->use_mailbox) {
		DHD_INFO(("API rev is 6, sending mb data as H2D Ctrl message to dongle, 0x%04x\n",
			h2d_mb_data));
		/* Prevent asserting device_wake during doorbell ring for mb data to avoid loop. */
#ifdef PCIE_OOB
		bus->oob_enabled = FALSE;
#endif /* PCIE_OOB */
		if (dhd_prot_h2d_mbdata_send_ctrlmsg(bus->dhd, h2d_mb_data)) {
			DHD_ERROR(("failure sending the H2D Mailbox message to firmware\n"));
			goto fail;
		}
#ifdef PCIE_OOB
		bus->oob_enabled = TRUE;
#endif /* PCIE_OOB */
		goto done;
	}

	dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0);

	if (cur_h2d_mb_data != 0) {
		uint32 i = 0;
		DHD_INFO(("%s: GRRRRRRR: MB transaction is already pending 0x%04x\n",
			__FUNCTION__, cur_h2d_mb_data));
		while ((i++ < 100) && cur_h2d_mb_data) {
			OSL_DELAY(10);
			dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0);
		}
		if (i >= 100) {
			DHD_ERROR(("%s : waited 1ms for the dngl "
				"to ack the previous mb transaction\n", __FUNCTION__));
			DHD_ERROR(("%s : MB transaction is still pending 0x%04x\n",
				__FUNCTION__, cur_h2d_mb_data));
		}
	}

	dhd_bus_cmn_writeshared(bus, &h2d_mb_data, sizeof(uint32), H2D_MB_DATA, 0);
	dhd_bus_gen_devmb_intr(bus);

done:
	if (h2d_mb_data == H2D_HOST_D3_INFORM) {
		DHD_INFO_HW4(("%s: send H2D_HOST_D3_INFORM to dongle\n", __FUNCTION__));
		/* Mark D3_INFORM in the atomic context to
		 * skip ringing H2D DB after D3_INFORM
		 */
		bus->d3_suspend_pending = TRUE;
		bus->d3_inform_cnt++;
	}
	if (h2d_mb_data == H2D_HOST_D0_INFORM_IN_USE) {
		DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM_IN_USE to dongle\n", __FUNCTION__));
		bus->d0_inform_in_use_cnt++;
	}
	if (h2d_mb_data == H2D_HOST_D0_INFORM) {
		DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM to dongle\n", __FUNCTION__));
		bus->d0_inform_cnt++;
	}
	DHD_GENERAL_UNLOCK(bus->dhd, flags);
	return BCME_OK;

fail:
	DHD_GENERAL_UNLOCK(bus->dhd, flags);
	return BCME_ERROR;
}

void
dhd_bus_handle_mb_data(dhd_bus_t *bus, uint32 d2h_mb_data)
{
#ifdef PCIE_INB_DW
	unsigned long flags = 0;
#endif
	DHD_INFO(("D2H_MB_DATA: 0x%04x\n", d2h_mb_data));

	if (d2h_mb_data & D2H_DEV_FWHALT) {
		DHD_ERROR(("FW trap has happened\n"));
		dhdpcie_checkdied(bus, NULL, 0);
#ifdef SUPPORT_LINKDOWN_RECOVERY
#ifdef CONFIG_ARCH_MSM
		bus->no_cfg_restore = 1;
#endif /* CONFIG_ARCH_MSM */
#endif /* SUPPORT_LINKDOWN_RECOVERY */
		dhd_os_check_hang(bus->dhd, 0, -EREMOTEIO);
		return;
	}
	if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ) {
		if ((bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) &&
			bus->wait_for_d3_ack) {
			DHD_ERROR(("DS-ENTRY AFTER D3-ACK!!!!! QUITTING\n"));
			bus->dhd->busstate = DHD_BUS_DOWN;
			return;
		}
		/* what should we do */
		DHD_INFO(("D2H_MB_DATA: DEEP SLEEP REQ\n"));
#ifdef PCIE_INB_DW
		if (INBAND_DW_ENAB(bus)) {
			DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
			if (dhdpcie_bus_get_pcie_inband_dw_state(bus) == DW_DEVICE_DS_ACTIVE) {
				dhdpcie_bus_set_pcie_inband_dw_state(bus,
					DW_DEVICE_DS_DEV_SLEEP_PEND);
				if (bus->host_active_cnt == 0) {
					dhdpcie_bus_set_pcie_inband_dw_state(bus,
						DW_DEVICE_DS_DEV_SLEEP);
					dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
				}
			}
			DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
			dhd_os_ds_enter_wake(bus->dhd);
		} else
#endif /* PCIE_INB_DW */
		{
			dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
		}
		if (IDMA_DS_ENAB(bus->dhd)) {
			bus->dongle_in_ds = TRUE;
		}
		DHD_INFO(("D2H_MB_DATA: sent DEEP SLEEP ACK\n"));
	}
	if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE) {
		/* what should we do */
		bus->dongle_in_ds = FALSE;
		DHD_INFO(("D2H_MB_DATA: DEEP SLEEP EXIT\n"));
#ifdef PCIE_INB_DW
		if (INBAND_DW_ENAB(bus)) {
			bus->inband_ds_exit_device_cnt++;
			DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
			if (dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
				DW_DEVICE_DS_DISABLED_WAIT) {
				/* wake up only if someone is waiting in
				 * DW_DEVICE_DS_DISABLED_WAIT state;
				 * in this case the waiter will change the state
				 * to DW_DEVICE_DS_DEV_WAKE
				 */
				bus->wait_for_ds_exit = 1;
				DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
				dhd_os_ds_exit_wake(bus->dhd);
			} else {
				DHD_INFO(("D2H_MB_DATA: not in DW_DEVICE_DS_DISABLED_WAIT!\n"));
				/*
				 * If there is no one waiting, then update the state from here
				 */
				bus->wait_for_ds_exit = 1;
				dhdpcie_bus_set_pcie_inband_dw_state(bus,
					DW_DEVICE_DS_DEV_WAKE);
				DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
			}
		}
#endif /* PCIE_INB_DW */
	}
	if (d2h_mb_data & D2HMB_DS_HOST_SLEEP_EXIT_ACK) {
		/* what should we do */
		DHD_INFO(("D2H_MB_DATA: D0 ACK\n"));
#ifdef PCIE_INB_DW
		if (INBAND_DW_ENAB(bus)) {
			DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
			if (dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
				DW_DEVICE_HOST_WAKE_WAIT) {
				dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_DS_ACTIVE);
			}
			DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
		}
#endif /* PCIE_INB_DW */
	}
	if (d2h_mb_data & D2H_DEV_D3_ACK) {
		/* what should we do */
		DHD_INFO_HW4(("D2H_MB_DATA: D3 ACK\n"));
		if (!bus->wait_for_d3_ack) {
			/* Disable dongle interrupts immediately after D3 */
			bus->suspend_intr_disable_count++;
			dhdpcie_bus_intr_disable(bus);
#if defined(DHD_HANG_SEND_UP_TEST)
			if (bus->dhd->req_hang_type == HANG_REASON_D3_ACK_TIMEOUT) {
				DHD_ERROR(("TEST HANG: Skip to process D3 ACK\n"));
			} else {
				bus->wait_for_d3_ack = 1;
				dhd_os_d3ack_wake(bus->dhd);
			}
#else /* DHD_HANG_SEND_UP_TEST */
			bus->wait_for_d3_ack = 1;
			dhd_os_d3ack_wake(bus->dhd);
#endif /* DHD_HANG_SEND_UP_TEST */
		}
	}
}

static void
dhdpcie_handle_mb_data(dhd_bus_t *bus)
{
	uint32 d2h_mb_data = 0;
	uint32 zero = 0;
	dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0);
	if (D2H_DEV_MB_INVALIDATED(d2h_mb_data)) {
		DHD_ERROR(("%s: Invalid D2H_MB_DATA: 0x%08x\n",
			__FUNCTION__, d2h_mb_data));
		return;
	}

	dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0);

	DHD_INFO_HW4(("%s: D2H_MB_DATA: 0x%04x\n", __FUNCTION__, d2h_mb_data));
	if (d2h_mb_data & D2H_DEV_FWHALT) {
		DHD_ERROR(("FW trap has happened\n"));
		dhdpcie_checkdied(bus, NULL, 0);
		/* not ready yet dhd_os_ind_firmware_stall(bus->dhd); */
		return;
	}
	if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ) {
		/* what should we do */
		DHD_INFO(("%s: D2H_MB_DATA: DEEP SLEEP REQ\n", __FUNCTION__));
		dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
		if (IDMA_DS_ENAB(bus->dhd)) {
			bus->dongle_in_ds = TRUE;
		}
		DHD_INFO(("%s: D2H_MB_DATA: sent DEEP SLEEP ACK\n", __FUNCTION__));
	}
	if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE) {
		/* what should we do */
		DHD_INFO(("%s: D2H_MB_DATA: DEEP SLEEP EXIT\n", __FUNCTION__));
		bus->dongle_in_ds = FALSE;
	}
	if (d2h_mb_data & D2H_DEV_D3_ACK) {
		/* what should we do */
		DHD_INFO_HW4(("%s: D2H_MB_DATA: D3 ACK\n", __FUNCTION__));
		if (!bus->wait_for_d3_ack) {
#if defined(DHD_HANG_SEND_UP_TEST)
			if (bus->dhd->req_hang_type == HANG_REASON_D3_ACK_TIMEOUT) {
				DHD_ERROR(("TEST HANG: Skip to process D3 ACK\n"));
			} else {
				bus->wait_for_d3_ack = 1;
				dhd_os_d3ack_wake(bus->dhd);
			}
#else /* DHD_HANG_SEND_UP_TEST */
			bus->wait_for_d3_ack = 1;
			dhd_os_d3ack_wake(bus->dhd);
#endif /* DHD_HANG_SEND_UP_TEST */
		}
	}
}

static void
dhdpcie_read_handle_mb_data(dhd_bus_t *bus)
{
	uint32 d2h_mb_data = 0;
	uint32 zero = 0;

	dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0);
	if (!d2h_mb_data)
		return;

	dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0);

	dhd_bus_handle_mb_data(bus, d2h_mb_data);
}

static bool
dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus)
{
	bool resched = FALSE;

	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
		(bus->sih->buscorerev == 4)) {
		/* Msg stream interrupt */
		if (intstatus & I_BIT1) {
			resched = dhdpci_bus_read_frames(bus);
		} else if (intstatus & I_BIT0) {
			/* do nothing for now */
		}
	} else {
		if (intstatus & (PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1))
			bus->api.handle_mb_data(bus);

		if (bus->dhd->busstate == DHD_BUS_SUSPEND) {
			goto exit;
		}

		if (intstatus & PCIE_MB_D2H_MB_MASK) {
			resched = dhdpci_bus_read_frames(bus);
		}
	}

exit:
	return resched;
}

static bool
dhdpci_bus_read_frames(dhd_bus_t *bus)
{
	bool more = FALSE;

	/* First check if there is a FW trap */
	if ((bus->api.fw_rev >= PCIE_SHARED_VERSION_6) &&
		(bus->dhd->dongle_trap_data = dhd_prot_process_trapbuf(bus->dhd))) {
		dhd_bus_handle_mb_data(bus, D2H_DEV_FWHALT);
		return FALSE;
	}

	/* There may be frames in both ctrl buf and data buf; check ctrl buf first */
	DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));

	dhd_prot_process_ctrlbuf(bus->dhd);
	/* Unlock to give chance for resp to be handled */
	DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));

	DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
	/* update the flow ring cpls */
	dhd_update_txflowrings(bus->dhd);

	/* With heavy TX traffic, we could get a lot of TxStatus
	 * so add bound
	 */
	more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound);

	/* With heavy RX traffic, this routine potentially could spend some time
	 * processing RX frames without RX bound
	 */
	more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound);

	/* Process info ring completion messages */
	more |= dhd_prot_process_msgbuf_infocpl(bus->dhd, DHD_INFORING_BOUND);

#ifdef IDLE_TX_FLOW_MGMT
	if (bus->enable_idle_flowring_mgmt) {
		/* Look for idle flow rings */
		dhd_bus_check_idle_scan(bus);
	}
#endif /* IDLE_TX_FLOW_MGMT */

	/* don't talk to the dongle if fw is about to be reloaded */
	if (bus->dhd->hang_was_sent) {
		more = FALSE;
	}
	DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));

#ifdef SUPPORT_LINKDOWN_RECOVERY
	if (bus->read_shm_fail) {
		/* Read interrupt state once again to confirm linkdown */
		int intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
		if (intstatus != (uint32)-1) {
			DHD_ERROR(("%s: read SHM failed but intstatus is valid\n", __FUNCTION__));
#ifdef DHD_FW_COREDUMP
			if (bus->dhd->memdump_enabled) {
				DHD_OS_WAKE_LOCK(bus->dhd);
				bus->dhd->memdump_type = DUMP_TYPE_READ_SHM_FAIL;
				dhd_bus_mem_dump(bus->dhd);
				DHD_OS_WAKE_UNLOCK(bus->dhd);
			}
#endif /* DHD_FW_COREDUMP */
			bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
			dhd_os_send_hang_message(bus->dhd);
		} else {
			DHD_ERROR(("%s: Link is Down.\n", __FUNCTION__));
#ifdef CONFIG_ARCH_MSM
			bus->no_cfg_restore = 1;
#endif /* CONFIG_ARCH_MSM */
			bus->is_linkdown = 1;
			bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
			dhd_os_send_hang_message(bus->dhd);
		}
	}
#endif /* SUPPORT_LINKDOWN_RECOVERY */
	return more;
}

bool
dhdpcie_tcm_valid(dhd_bus_t *bus)
{
	uint32 addr = 0;
	int rv;
	uint32 shaddr = 0;
	pciedev_shared_t sh;

	shaddr = bus->dongle_ram_base + bus->ramsize - 4;

	/* Read last word in memory to determine address of pciedev_shared structure */
	addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));

	if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
		(addr > shaddr)) {
		DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid\n",
			__FUNCTION__, addr));
		return FALSE;
	}

	/* Read hndrte_shared structure */
	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&sh,
		sizeof(pciedev_shared_t))) < 0) {
		DHD_ERROR(("Failed to read PCIe shared struct with %d\n", rv));
		return FALSE;
	}

	/* Compare any field in pciedev_shared_t */
	if (sh.console_addr != bus->pcie_sh->console_addr) {
		DHD_ERROR(("Contents of pciedev_shared_t structure are not matching.\n"));
		return FALSE;
	}

	return TRUE;
}
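
/*
 * Sketch of the validity predicate used both above and in
 * dhdpcie_readshared(): the last word of dongle RAM must point inside RAM
 * and must differ from the NVRAM length token the host wrote there before
 * download. The helper name is hypothetical; the checks mirror the source.
 */
#if 0 /* illustrative only */
static bool
dhdpcie_shared_addr_valid(dhd_bus_t *bus, uint32 addr, uint32 shaddr)
{
	return (addr != 0) && (addr != bus->nvram_csm) &&
		(addr >= bus->dongle_ram_base) && (addr <= shaddr);
}
#endif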

static void
dhdpcie_update_bus_api_revisions(uint32 firmware_api_version, uint32 host_api_version)
{
	snprintf(bus_api_revision, BUS_API_REV_STR_LEN, "\nBus API revisions:(FW rev%d)(DHD rev%d)",
		firmware_api_version, host_api_version);
	return;
}

static bool
dhdpcie_check_firmware_compatible(uint32 firmware_api_version, uint32 host_api_version)
{
	bool retcode = FALSE;

	DHD_INFO(("firmware api revision %d, host api revision %d\n",
		firmware_api_version, host_api_version));

	switch (firmware_api_version) {
	case PCIE_SHARED_VERSION_7:
	case PCIE_SHARED_VERSION_6:
	case PCIE_SHARED_VERSION_5:
		retcode = TRUE;
		break;
	default:
		if (firmware_api_version <= host_api_version)
			retcode = TRUE;
	}
	return retcode;
}

static int
dhdpcie_readshared(dhd_bus_t *bus)
{
	uint32 addr = 0;
	int rv, dma_indx_wr_buf, dma_indx_rd_buf;
	uint32 shaddr = 0;
	pciedev_shared_t *sh = bus->pcie_sh;
	dhd_timeout_t tmo;

	shaddr = bus->dongle_ram_base + bus->ramsize - 4;
	/* start a timer for 5 seconds */
	dhd_timeout_start(&tmo, MAX_READ_TIMEOUT);

	while (((addr == 0) || (addr == bus->nvram_csm)) && !dhd_timeout_expired(&tmo)) {
		/* Read last word in memory to determine address of pciedev_shared structure */
		addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
	}

	if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
		(addr > shaddr)) {
		DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid\n",
			__FUNCTION__, addr));
		DHD_ERROR(("%s: Waited %u usec, dongle is not ready\n", __FUNCTION__, tmo.elapsed));
		return BCME_ERROR;
	} else {
		bus->shared_addr = (ulong)addr;
		DHD_ERROR(("%s: PCIe shared addr (0x%08x) read took %u usec "
			"before dongle is ready\n", __FUNCTION__, addr, tmo.elapsed));
	}

	/* Read hndrte_shared structure */
	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)sh,
		sizeof(pciedev_shared_t))) < 0) {
		DHD_ERROR(("%s: Failed to read PCIe shared struct with %d\n", __FUNCTION__, rv));
		return rv;
	}

	/* Endianness */
	sh->flags = ltoh32(sh->flags);
	sh->trap_addr = ltoh32(sh->trap_addr);
	sh->assert_exp_addr = ltoh32(sh->assert_exp_addr);
	sh->assert_file_addr = ltoh32(sh->assert_file_addr);
	sh->assert_line = ltoh32(sh->assert_line);
	sh->console_addr = ltoh32(sh->console_addr);
	sh->msgtrace_addr = ltoh32(sh->msgtrace_addr);
	sh->dma_rxoffset = ltoh32(sh->dma_rxoffset);
	sh->rings_info_ptr = ltoh32(sh->rings_info_ptr);
	sh->flags2 = ltoh32(sh->flags2);

	/* load bus console address */
	bus->console_addr = sh->console_addr;

	/* Read the dma rx offset */
	bus->dma_rxoffset = bus->pcie_sh->dma_rxoffset;
	dhd_prot_rx_dataoffset(bus->dhd, bus->dma_rxoffset);

	DHD_INFO(("%s: DMA RX offset from shared Area %d\n", __FUNCTION__, bus->dma_rxoffset));

	bus->api.fw_rev = sh->flags & PCIE_SHARED_VERSION_MASK;
	if (!(dhdpcie_check_firmware_compatible(bus->api.fw_rev, PCIE_SHARED_VERSION)))
	{
		DHD_ERROR(("%s: pcie_shared version %d in dhd "
			"is older than pciedev_shared version %d in dongle\n",
			__FUNCTION__, PCIE_SHARED_VERSION,
			bus->api.fw_rev));
		return BCME_ERROR;
	}
	dhdpcie_update_bus_api_revisions(bus->api.fw_rev, PCIE_SHARED_VERSION);

	bus->rw_index_sz = (sh->flags & PCIE_SHARED_2BYTE_INDICES) ?
		sizeof(uint16) : sizeof(uint32);
	DHD_INFO(("%s: Dongle advertises %d size indices\n",
		__FUNCTION__, bus->rw_index_sz));

#ifdef IDLE_TX_FLOW_MGMT
	if (sh->flags & PCIE_SHARED_IDLE_FLOW_RING) {
		DHD_ERROR(("%s: FW supports idle flow ring management!\n",
			__FUNCTION__));
		bus->enable_idle_flowring_mgmt = TRUE;
	}
#endif /* IDLE_TX_FLOW_MGMT */

	bus->dhd->idma_enable = (sh->flags & PCIE_SHARED_IDMA) ? TRUE : FALSE;
	bus->dhd->ifrm_enable = (sh->flags & PCIE_SHARED_IFRM) ? TRUE : FALSE;

	bus->dhd->idma_retention_ds = (sh->flags & PCIE_SHARED_IDMA_RETENTION_DS) ? TRUE : FALSE;

	bus->dhd->d2h_sync_mode = sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK;

	/* Does the FW support DMA'ing r/w indices */
	if (sh->flags & PCIE_SHARED_DMA_INDEX) {
		if (!bus->dhd->dma_ring_upd_overwrite) {
			if (!IFRM_ENAB(bus->dhd)) {
				bus->dhd->dma_h2d_ring_upd_support = TRUE;
			}
			bus->dhd->dma_d2h_ring_upd_support = TRUE;
		}

		if (bus->dhd->dma_d2h_ring_upd_support)
			bus->dhd->d2h_sync_mode = 0;

		DHD_INFO(("%s: Host support DMAing indices: H2D:%d - D2H:%d. FW supports it\n",
			__FUNCTION__,
			(bus->dhd->dma_h2d_ring_upd_support ? 1 : 0),
			(bus->dhd->dma_d2h_ring_upd_support ? 1 : 0)));
	} else if (!(sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK)) {
		DHD_ERROR(("%s FW has to support either dma indices or d2h sync\n",
			__FUNCTION__));
		return BCME_UNSUPPORTED;
	} else {
		bus->dhd->dma_h2d_ring_upd_support = FALSE;
		bus->dhd->dma_d2h_ring_upd_support = FALSE;
	}

	/* get ring_info, ring_state and mb data ptrs and store the addresses in bus structure */
	{
		ring_info_t ring_info;

		if ((rv = dhdpcie_bus_membytes(bus, FALSE, sh->rings_info_ptr,
			(uint8 *)&ring_info, sizeof(ring_info_t))) < 0)
			return rv;

		bus->h2d_mb_data_ptr_addr = ltoh32(sh->h2d_mb_data_ptr);
		bus->d2h_mb_data_ptr_addr = ltoh32(sh->d2h_mb_data_ptr);


		if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
			bus->max_tx_flowrings = ltoh16(ring_info.max_tx_flowrings);
			bus->max_submission_rings = ltoh16(ring_info.max_submission_queues);
			bus->max_completion_rings = ltoh16(ring_info.max_completion_rings);
			bus->max_cmn_rings = bus->max_submission_rings - bus->max_tx_flowrings;
			bus->api.handle_mb_data = dhdpcie_read_handle_mb_data;
			bus->use_mailbox = sh->flags & PCIE_SHARED_USE_MAILBOX;
		}
		else {
			bus->max_tx_flowrings = ltoh16(ring_info.max_tx_flowrings);
			bus->max_submission_rings = bus->max_tx_flowrings;
			bus->max_completion_rings = BCMPCIE_D2H_COMMON_MSGRINGS;
			bus->max_cmn_rings = BCMPCIE_H2D_COMMON_MSGRINGS;
			bus->api.handle_mb_data = dhdpcie_handle_mb_data;
		}
		if (bus->max_completion_rings == 0) {
			DHD_ERROR(("dongle completion rings are invalid %d\n",
				bus->max_completion_rings));
			return BCME_ERROR;
		}
		if (bus->max_submission_rings == 0) {
			DHD_ERROR(("dongle submission rings are invalid %d\n",
				bus->max_submission_rings));
			return BCME_ERROR;
		}
		if (bus->max_tx_flowrings == 0) {
			DHD_ERROR(("dongle txflow rings are invalid %d\n", bus->max_tx_flowrings));
			return BCME_ERROR;
		}

		/* If both FW and Host support DMA'ing indices, allocate memory and notify FW
		 * The max_sub_queues is read from FW initialized ring_info
		 */
		if (bus->dhd->dma_h2d_ring_upd_support || IDMA_ENAB(bus->dhd)) {
			dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
				H2D_DMA_INDX_WR_BUF, bus->max_submission_rings);
			dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
				D2H_DMA_INDX_RD_BUF, bus->max_completion_rings);

			if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) {
				DHD_ERROR(("%s: Failed to allocate memory for dma'ing h2d indices; "
					"Host will use w/r indices in TCM\n",
					__FUNCTION__));
				bus->dhd->dma_h2d_ring_upd_support = FALSE;
				bus->dhd->idma_enable = FALSE;
			}
		}

		if (bus->dhd->dma_d2h_ring_upd_support) {
			dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
				D2H_DMA_INDX_WR_BUF, bus->max_completion_rings);
			dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
				H2D_DMA_INDX_RD_BUF, bus->max_submission_rings);

			if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) {
				DHD_ERROR(("%s: Failed to allocate memory for dma'ing d2h indices; "
					"Host will use w/r indices in TCM\n",
					__FUNCTION__));
				bus->dhd->dma_d2h_ring_upd_support = FALSE;
			}
		}

		if (IFRM_ENAB(bus->dhd)) {
			dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
				H2D_IFRM_INDX_WR_BUF, bus->max_tx_flowrings);

			if (dma_indx_wr_buf != BCME_OK) {
				DHD_ERROR(("%s: Failed to alloc memory for Implicit DMA\n",
					__FUNCTION__));
				bus->dhd->ifrm_enable = FALSE;
			}
		}

		/* read ringmem and ringstate ptrs from shared area and store in host variables */
		dhd_fillup_ring_sharedptr_info(bus, &ring_info);
		if (dhd_msg_level & DHD_INFO_VAL) {
			bcm_print_bytes("ring_info_raw", (uchar *)&ring_info, sizeof(ring_info_t));
		}
		DHD_INFO(("%s: ring_info\n", __FUNCTION__));

		DHD_ERROR(("%s: max H2D queues %d\n",
			__FUNCTION__, ltoh16(ring_info.max_tx_flowrings)));

		DHD_INFO(("mail box address\n"));
		DHD_INFO(("%s: h2d_mb_data_ptr_addr 0x%04x\n",
			__FUNCTION__, bus->h2d_mb_data_ptr_addr));
		DHD_INFO(("%s: d2h_mb_data_ptr_addr 0x%04x\n",
			__FUNCTION__, bus->d2h_mb_data_ptr_addr));
	}

	DHD_INFO(("%s: d2h_sync_mode 0x%08x\n",
		__FUNCTION__, bus->dhd->d2h_sync_mode));

	bus->dhd->d2h_hostrdy_supported =
		((sh->flags & PCIE_SHARED_HOSTRDY_SUPPORT) == PCIE_SHARED_HOSTRDY_SUPPORT);

#ifdef PCIE_OOB
	bus->dhd->d2h_no_oob_dw = (sh->flags & PCIE_SHARED_NO_OOB_DW) ? TRUE : FALSE;
#endif /* PCIE_OOB */

#ifdef PCIE_INB_DW
	bus->dhd->d2h_inband_dw = (sh->flags & PCIE_SHARED_INBAND_DS) ? TRUE : FALSE;
#endif /* PCIE_INB_DW */

#if defined(PCIE_OOB) && defined(PCIE_INB_DW)
	DHD_ERROR(("FW supports Inband dw ? %s oob dw ? %s\n",
		bus->dhd->d2h_inband_dw ? "Y":"N",
		bus->dhd->d2h_no_oob_dw ? "N":"Y"));
#endif /* defined(PCIE_OOB) && defined(PCIE_INB_DW) */

	bus->dhd->ext_trap_data_supported =
		((sh->flags2 & PCIE_SHARED2_EXTENDED_TRAP_DATA) == PCIE_SHARED2_EXTENDED_TRAP_DATA);

	return BCME_OK;
} /* dhdpcie_readshared */

/** Read ring mem and ring state ptr info from shared memory area in device memory */
static void
dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info)
{
	uint16 i = 0;
	uint16 j = 0;
	uint32 tcm_memloc;
	uint32 d2h_w_idx_ptr, d2h_r_idx_ptr, h2d_w_idx_ptr, h2d_r_idx_ptr;
	uint16 max_tx_flowrings = bus->max_tx_flowrings;

	/* Ring mem ptr info */
	/* Allocated in the order
		H2D_MSGRING_CONTROL_SUBMIT		0
		H2D_MSGRING_RXPOST_SUBMIT		1
		D2H_MSGRING_CONTROL_COMPLETE		2
		D2H_MSGRING_TX_COMPLETE			3
		D2H_MSGRING_RX_COMPLETE			4
	*/

	{
		/* ringmemptr holds start of the mem block address space */
		tcm_memloc = ltoh32(ring_info->ringmem_ptr);

		/* Find out the ringmem ptr for each common ring */
		for (i = 0; i <= BCMPCIE_COMMON_MSGRING_MAX_ID; i++) {
			bus->ring_sh[i].ring_mem_addr = tcm_memloc;
			/* Update mem block */
			tcm_memloc = tcm_memloc + sizeof(ring_mem_t);
			DHD_INFO(("%s: ring id %d ring mem addr 0x%04x \n", __FUNCTION__,
				i, bus->ring_sh[i].ring_mem_addr));
		}
	}

	/* Ring state mem ptr info */
	{
		d2h_w_idx_ptr = ltoh32(ring_info->d2h_w_idx_ptr);
		d2h_r_idx_ptr = ltoh32(ring_info->d2h_r_idx_ptr);
		h2d_w_idx_ptr = ltoh32(ring_info->h2d_w_idx_ptr);
		h2d_r_idx_ptr = ltoh32(ring_info->h2d_r_idx_ptr);

		/* Store h2d common ring write/read pointers */
		for (i = 0; i < BCMPCIE_H2D_COMMON_MSGRINGS; i++) {
			bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
			bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;

			/* update mem block */
			h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz;
			h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz;

			DHD_INFO(("%s: h2d w/r : idx %d write %x read %x \n", __FUNCTION__, i,
				bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
		}

		/* Store d2h common ring write/read pointers */
		for (j = 0; j < BCMPCIE_D2H_COMMON_MSGRINGS; j++, i++) {
			bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
			bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;

			/* update mem block */
			d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz;
			d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz;

			DHD_INFO(("%s: d2h w/r : idx %d write %x read %x \n", __FUNCTION__, i,
				bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
		}

		/* Store txflow ring write/read pointers */
		if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) {
			max_tx_flowrings -= BCMPCIE_H2D_COMMON_MSGRINGS;
		} else {
			/* Account for the debug info h2d ring located after the last tx flow ring */
			max_tx_flowrings = max_tx_flowrings + 1;
		}
		for (j = 0; j < max_tx_flowrings; i++, j++)
		{
			bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
			bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;

			/* update mem block */
			h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz;
			h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz;

			DHD_INFO(("%s: FLOW Rings h2d w/r : idx %d write %x read %x \n",
				__FUNCTION__, i,
				bus->ring_sh[i].ring_state_w,
				bus->ring_sh[i].ring_state_r));
		}
		/* store wr/rd pointers for debug info completion ring */
		bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
		bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;
		d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz;
		d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz;
		DHD_INFO(("d2h w/r : idx %d write %x read %x \n", i,
			bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
	}
} /* dhd_fillup_ring_sharedptr_info */
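
/*
 * Layout sketch for the pointer walk above: each read/write index occupies a
 * consecutive rw_index_sz-wide slot; in the h2d index arrays the common h2d
 * rings come first, immediately followed by the tx flow rings. Under that
 * assumption a given h2d ring's write-index address can be computed directly,
 * as the hypothetical helper below shows (h2d_pos counts from the first h2d
 * ring in allocation order).
 */
#if 0 /* illustrative only */
static uint32
dhd_h2d_w_idx_addr(dhd_bus_t *bus, ring_info_t *ring_info, uint16 h2d_pos)
{
	return ltoh32(ring_info->h2d_w_idx_ptr) + (uint32)h2d_pos * bus->rw_index_sz;
}
#endif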

/**
 * Initialize bus module: prepare for communication with the dongle. Called after downloading
 * firmware into the dongle.
 */
int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
{
	dhd_bus_t *bus = dhdp->bus;
	int ret = 0;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	ASSERT(bus->dhd);
	if (!bus->dhd)
		return 0;

	/* Make sure we're talking to the core. */
	bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
	ASSERT(bus->reg != NULL);

	/* before opening up bus for data transfer, check if shared area is intact */
	ret = dhdpcie_readshared(bus);
	if (ret < 0) {
		DHD_ERROR(("%s: Shared area read failed\n", __FUNCTION__));
		return ret;
	}

	/* Make sure we're talking to the core. */
	bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
	ASSERT(bus->reg != NULL);

	/* Set bus state according to enable result */
	dhdp->busstate = DHD_BUS_DATA;
	bus->d3_suspend_pending = FALSE;

#if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING)
	if (bus->pcie_sh->flags2 & PCIE_SHARED_D2H_D11_TX_STATUS) {
		uint32 flags2 = bus->pcie_sh->flags2;
		uint32 addr;

		addr = bus->shared_addr + OFFSETOF(pciedev_shared_t, flags2);
		flags2 |= PCIE_SHARED_H2D_D11_TX_STATUS;
		ret = dhdpcie_bus_membytes(bus, TRUE, addr,
			(uint8 *)&flags2, sizeof(flags2));
		if (ret < 0) {
			DHD_ERROR(("%s: update flag bit (H2D_D11_TX_STATUS) failed\n",
				__FUNCTION__));
			return ret;
		}
		bus->pcie_sh->flags2 = flags2;
		bus->dhd->d11_tx_status = TRUE;
	}
#endif /* DBG_PKT_MON || DHD_PKT_LOGGING */

	if (!dhd_download_fw_on_driverload)
		dhd_dpc_enable(bus->dhd);
	/* Enable the interrupt after device is up */
	dhdpcie_bus_intr_enable(bus);

#ifdef DHD_PCIE_RUNTIMEPM
	bus->idlecount = 0;
	bus->idletime = (int32)MAX_IDLE_COUNT;
	init_waitqueue_head(&bus->rpm_queue);
	mutex_init(&bus->pm_lock);
#else
	bus->idletime = 0;
#endif /* DHD_PCIE_RUNTIMEPM */

#ifdef PCIE_INB_DW
	/* Initialize the lock to serialize Device Wake Inband activities */
	if (!bus->inb_lock) {
		bus->inb_lock = dhd_os_spin_lock_init(bus->dhd->osh);
	}
#endif


	/* Make use_d0_inform TRUE for Rev 5 for backward compatibility */
	if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) {
		bus->use_d0_inform = TRUE;
	} else {
		bus->use_d0_inform = FALSE;
	}

	return ret;
}

static void
dhdpcie_init_shared_addr(dhd_bus_t *bus)
{
	uint32 addr = 0;
	uint32 val = 0;
	addr = bus->dongle_ram_base + bus->ramsize - 4;
#ifdef DHD_PCIE_RUNTIMEPM
	dhdpcie_runtime_bus_wake(bus->dhd, TRUE, __builtin_return_address(0));
#endif /* DHD_PCIE_RUNTIMEPM */
	dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val));
}


int
dhdpcie_chipmatch(uint16 vendor, uint16 device)
{
	if (vendor != PCI_VENDOR_ID_BROADCOM) {
#ifndef DHD_EFI
		DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__,
			vendor, device));
#endif /* DHD_EFI */
		return (-ENODEV);
	}

	if ((device == BCM4350_D11AC_ID) || (device == BCM4350_D11AC2G_ID) ||
		(device == BCM4350_D11AC5G_ID) || (device == BCM4350_CHIP_ID) ||
		(device == BCM43569_CHIP_ID))
		return 0;

	if ((device == BCM4354_D11AC_ID) || (device == BCM4354_D11AC2G_ID) ||
		(device == BCM4354_D11AC5G_ID) || (device == BCM4354_CHIP_ID))
		return 0;

	if ((device == BCM4356_D11AC_ID) || (device == BCM4356_D11AC2G_ID) ||
		(device == BCM4356_D11AC5G_ID) || (device == BCM4356_CHIP_ID))
		return 0;

	if ((device == BCM4371_D11AC_ID) || (device == BCM4371_D11AC2G_ID) ||
		(device == BCM4371_D11AC5G_ID) || (device == BCM4371_CHIP_ID))
		return 0;

	if ((device == BCM4345_D11AC_ID) || (device == BCM4345_D11AC2G_ID) ||
		(device == BCM4345_D11AC5G_ID) || BCM4345_CHIP(device))
		return 0;

	if ((device == BCM43452_D11AC_ID) || (device == BCM43452_D11AC2G_ID) ||
		(device == BCM43452_D11AC5G_ID))
		return 0;

	if ((device == BCM4335_D11AC_ID) || (device == BCM4335_D11AC2G_ID) ||
		(device == BCM4335_D11AC5G_ID) || (device == BCM4335_CHIP_ID))
		return 0;

	if ((device == BCM43602_D11AC_ID) || (device == BCM43602_D11AC2G_ID) ||
		(device == BCM43602_D11AC5G_ID) || (device == BCM43602_CHIP_ID))
		return 0;

	if ((device == BCM43569_D11AC_ID) || (device == BCM43569_D11AC2G_ID) ||
		(device == BCM43569_D11AC5G_ID) || (device == BCM43569_CHIP_ID))
		return 0;

	if ((device == BCM4358_D11AC_ID) || (device == BCM4358_D11AC2G_ID) ||
		(device == BCM4358_D11AC5G_ID))
		return 0;

	if ((device == BCM4349_D11AC_ID) || (device == BCM4349_D11AC2G_ID) ||
		(device == BCM4349_D11AC5G_ID) || (device == BCM4349_CHIP_ID))
		return 0;

	if ((device == BCM4355_D11AC_ID) || (device == BCM4355_D11AC2G_ID) ||
		(device == BCM4355_D11AC5G_ID) || (device == BCM4355_CHIP_ID))
		return 0;

	if ((device == BCM4359_D11AC_ID) || (device == BCM4359_D11AC2G_ID) ||
		(device == BCM4359_D11AC5G_ID))
		return 0;

	if ((device == BCM43596_D11AC_ID) || (device == BCM43596_D11AC2G_ID) ||
		(device == BCM43596_D11AC5G_ID))
		return 0;

	if ((device == BCM43597_D11AC_ID) || (device == BCM43597_D11AC2G_ID) ||
		(device == BCM43597_D11AC5G_ID))
		return 0;

	if ((device == BCM4364_D11AC_ID) || (device == BCM4364_D11AC2G_ID) ||
		(device == BCM4364_D11AC5G_ID) || (device == BCM4364_CHIP_ID))
		return 0;

	if ((device == BCM4347_D11AC_ID) || (device == BCM4347_D11AC2G_ID) ||
		(device == BCM4347_D11AC5G_ID) || (device == BCM4347_CHIP_ID))
		return 0;

	if ((device == BCM4361_D11AC_ID) || (device == BCM4361_D11AC2G_ID) ||
		(device == BCM4361_D11AC5G_ID) || (device == BCM4361_CHIP_ID))
		return 0;

	if ((device == BCM4362_D11AX_ID) || (device == BCM4362_D11AX2G_ID) ||
		(device == BCM4362_D11AX5G_ID) || (device == BCM4362_CHIP_ID)) {
		return 0;
	}

	if ((device == BCM4365_D11AC_ID) || (device == BCM4365_D11AC2G_ID) ||
		(device == BCM4365_D11AC5G_ID) || (device == BCM4365_CHIP_ID))
		return 0;

	if ((device == BCM4366_D11AC_ID) || (device == BCM4366_D11AC2G_ID) ||
		(device == BCM4366_D11AC5G_ID) || (device == BCM4366_CHIP_ID))
		return 0;
#ifndef DHD_EFI
	DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__, vendor, device));
#endif
	return (-ENODEV);
} /* dhdpcie_chipmatch */
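
/*
 * Usage sketch (editorial, hypothetical helper): dhdpcie_chipmatch()
 * returns errno-style values - 0 for a supported vendor/device pair and
 * -ENODEV otherwise - so callers gate on a zero return.
 */
static INLINE bool
dhdpcie_example_is_supported(uint16 vendor, uint16 device)
{
	return (dhdpcie_chipmatch(vendor, device) == 0);
}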

/**
 * Name: dhdpcie_cc_nvmshadow
 *
 * Description:
 * A shadow of the OTP/SPROM exists in the ChipCommon region
 * between 0x800 and 0xBFF (backplane addresses 0x1800_0800 to 0x1800_0BFF).
 * The strapping option (SPROM vs. OTP), the presence of OTP/SPROM, and its
 * size can also be read from the ChipCommon registers.
 */
static int
dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b)
{
	uint16 dump_offset = 0;
	uint32 dump_size = 0, otp_size = 0, sprom_size = 0;

	/* Table for 65nm OTP size (in bits) */
	int otp_size_65nm[8] = {0, 2048, 4096, 8192, 4096, 6144, 512, 1024};

	volatile uint16 *nvm_shadow;

	uint cur_coreid;
	uint chipc_corerev;
	chipcregs_t *chipcregs;

	/* Save the current core */
	cur_coreid = si_coreid(bus->sih);
	/* Switch to ChipCommon */
	chipcregs = (chipcregs_t *)si_setcore(bus->sih, CC_CORE_ID, 0);
	ASSERT(chipcregs != NULL);

	chipc_corerev = si_corerev(bus->sih);

	/* Check the ChipCommon core rev */
	if (chipc_corerev < 44) {
		DHD_ERROR(("%s: ChipcommonCore Rev %d < 44\n", __FUNCTION__, chipc_corerev));
		return BCME_UNSUPPORTED;
	}

	/* Check the chip ID */
	if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) && !BCM4345_CHIP((uint16)bus->sih->chip) &&
		((uint16)bus->sih->chip != BCM4355_CHIP_ID) &&
		((uint16)bus->sih->chip != BCM4364_CHIP_ID)) {
		DHD_ERROR(("%s: cc_nvmdump cmd. supported for Olympic chips "
			"4350/4345/4355/4364 only\n", __FUNCTION__));
		return BCME_UNSUPPORTED;
	}

	/* Check if SRC_PRESENT in SpromCtrl (0x190 in the ChipCommon regs) is set */
	if (chipcregs->sromcontrol & SRC_PRESENT) {
		/* SPROM size: 1 Kbit (0x0), 4 Kbit (0x1), 16 Kbit (0x2) */
		sprom_size = (1 << (2 * ((chipcregs->sromcontrol & SRC_SIZE_MASK)
					>> SRC_SIZE_SHIFT))) * 1024;
		bcm_bprintf(b, "\nSPROM Present (Size %d bits)\n", sprom_size);
	}

	if (chipcregs->sromcontrol & SRC_OTPPRESENT) {
		bcm_bprintf(b, "\nOTP Present");

		if (((chipcregs->otplayout & OTPL_WRAP_TYPE_MASK) >> OTPL_WRAP_TYPE_SHIFT)
			== OTPL_WRAP_TYPE_40NM) {
			/* 40nm OTP: Size = (OtpSize + 1) * 1024 bits */
			/* Chipcommon rev 51 is a variation on rev 45 and does not support
			 * the latest OTP configuration.
			 */
			if (chipc_corerev != 51 && chipc_corerev >= 49) {
				otp_size = (((chipcregs->otplayout & OTPL_ROW_SIZE_MASK)
					>> OTPL_ROW_SIZE_SHIFT) + 1) * 1024;
				bcm_bprintf(b, "(Size %d bits)\n", otp_size);
			} else {
				otp_size = (((chipcregs->capabilities & CC_CAP_OTPSIZE)
					>> CC_CAP_OTPSIZE_SHIFT) + 1) * 1024;
				bcm_bprintf(b, "(Size %d bits)\n", otp_size);
			}
		} else {
			/* This path is untested since newer chips have 40nm OTP */
			/* Chipcommon rev 51 is a variation on rev 45 and does not support
			 * the latest OTP configuration.
			 */
			if (chipc_corerev != 51 && chipc_corerev >= 49) {
				otp_size = otp_size_65nm[(chipcregs->otplayout & OTPL_ROW_SIZE_MASK)
					>> OTPL_ROW_SIZE_SHIFT];
				bcm_bprintf(b, "(Size %d bits)\n", otp_size);
			} else {
				otp_size = otp_size_65nm[(chipcregs->capabilities & CC_CAP_OTPSIZE)
					>> CC_CAP_OTPSIZE_SHIFT];
				bcm_bprintf(b, "(Size %d bits)\n", otp_size);
				DHD_INFO(("%s: 65nm/130nm OTP size not tested.\n",
					__FUNCTION__));
			}
		}
	}

	/* Chipcommon rev 51 is a variation on rev 45 and does not support
	 * the latest OTP configuration.
	 */
	if (chipc_corerev != 51 && chipc_corerev >= 49) {
		if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
			((chipcregs->otplayout & OTPL_ROW_SIZE_MASK) == 0)) {
			DHD_ERROR(("%s: SPROM and OTP could not be found: "
				"sromcontrol = %x, otplayout = %x\n",
				__FUNCTION__, chipcregs->sromcontrol, chipcregs->otplayout));
			return BCME_NOTFOUND;
		}
	} else {
		if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
			((chipcregs->capabilities & CC_CAP_OTPSIZE) == 0)) {
			DHD_ERROR(("%s: SPROM and OTP could not be found: "
				"sromcontrol = %x, capabilities = %x\n",
				__FUNCTION__, chipcregs->sromcontrol, chipcregs->capabilities));
			return BCME_NOTFOUND;
		}
	}

	/* Check the strapping option in SpromCtrl: set = OTP, otherwise SPROM */
	if ((!(chipcregs->sromcontrol & SRC_PRESENT) || (chipcregs->sromcontrol & SRC_OTPSEL)) &&
		(chipcregs->sromcontrol & SRC_OTPPRESENT)) {
		bcm_bprintf(b, "OTP Strap selected.\n"
			"\nOTP Shadow in ChipCommon:\n");

		dump_size = otp_size / 16; /* 16-bit words */
	} else if (((chipcregs->sromcontrol & SRC_OTPSEL) == 0) &&
		(chipcregs->sromcontrol & SRC_PRESENT)) {
		bcm_bprintf(b, "SPROM Strap selected\n"
			"\nSPROM Shadow in ChipCommon:\n");

		/* If SPROM > 8 Kbits, only 8 Kbits are mapped to ChipCommon (0x800 - 0xBFF) */
		/* dump_size in 16-bit words; sprom_size is in bits */
		dump_size = sprom_size > (8 * 1024) ? (8 * 1024) / 16 : sprom_size / 16;
	} else {
		DHD_ERROR(("%s: NVM Shadow does not exist in ChipCommon\n",
			__FUNCTION__));
		return BCME_NOTFOUND;
	}

	if (bus->regs == NULL) {
		DHD_ERROR(("ChipCommon registers not initialized\n"));
		return BCME_NOTREADY;
	} else {
		bcm_bprintf(b, "\n OffSet:");

		/* Chipcommon rev 51 is a variation on rev 45 and does not support
		 * the latest OTP configuration.
		 */
		if (chipc_corerev != 51 && chipc_corerev >= 49) {
			/* ChipCommon can map only 8 Kbits; for ccrev >= 49 the OTP size
			 * is around 12 Kbits, so use the GCI core instead.
			 */
			nvm_shadow = (volatile uint16 *)si_setcore(bus->sih, GCI_CORE_ID, 0);
		} else {
			/* Point to the SPROM/OTP shadow in ChipCommon */
			nvm_shadow = chipcregs->sromotp;
		}

		if (nvm_shadow == NULL) {
			DHD_ERROR(("%s: NVM shadow is not initialized\n", __FUNCTION__));
			return BCME_NOTFOUND;
		}

		/*
		 * Read 16 bits per iteration.
		 * dump_size and dump_offset are in 16-bit words.
		 */
		while (dump_offset < dump_size) {
			if (dump_offset % 2 == 0)
				/* Print the offset in the shadow space in bytes */
				bcm_bprintf(b, "\n 0x%04x", dump_offset * 2);

			bcm_bprintf(b, "\t0x%04x", *(nvm_shadow + dump_offset));
			dump_offset += 0x1;
		}
	}

	/* Switch back to the original core */
	si_setcore(bus->sih, cur_coreid, 0);

	return BCME_OK;
} /* dhdpcie_cc_nvmshadow */
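
/*
 * Illustrative sketch (hypothetical helper, not used by the driver):
 * mapping a 16-bit word offset within the ChipCommon NVM shadow to its
 * backplane byte address. The 0x800..0xBFF window holds 512 words, which
 * is the 8 Kbit ceiling applied to dump_size above.
 */
static INLINE uint32
dhdpcie_example_nvm_shadow_bp_addr(uint16 word_offset)
{
	/* shadow starts at ChipCommon base + 0x800; two bytes per 16-bit word */
	return SI_ENUM_BASE + 0x800 + ((uint32)word_offset * 2);
}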

/** Flow rings are dynamically created and destroyed */
void dhd_bus_clean_flow_ring(dhd_bus_t *bus, void *node)
{
	void *pkt;
	flow_queue_t *queue;
	flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)node;
	unsigned long flags;

	queue = &flow_ring_node->queue;

#ifdef DHDTCPACK_SUPPRESS
	/* Clean tcp_ack_info_tbl to prevent access to a flushed packet
	 * when a new packet arrives from the network stack.
	 */
	dhd_tcpack_info_tbl_clean(bus->dhd);
#endif /* DHDTCPACK_SUPPRESS */

	/* clean up bus-level info */
	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);

	/* Flush all pending packets in the queue, if any */
	while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
		PKTFREE(bus->dhd->osh, pkt, TRUE);
	}
	ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));

	/* Reinitialise the flowring's queue */
	dhd_flow_queue_reinit(bus->dhd, queue, FLOW_RING_QUEUE_THRESHOLD);
	flow_ring_node->status = FLOW_RING_STATUS_CLOSED;
	flow_ring_node->active = FALSE;

	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);

	/* Hold flowring_list_lock to ensure no race condition while accessing the list */
	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
	dll_delete(&flow_ring_node->list);
	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);

	/* Release the flowring object back into the pool */
	dhd_prot_flowrings_pool_release(bus->dhd,
		flow_ring_node->flowid, flow_ring_node->prot_info);

	/* Free the flowid back to the flowid allocator */
	dhd_flowid_free(bus->dhd, flow_ring_node->flow_info.ifindex,
		flow_ring_node->flowid);
}

/**
 * Allocate a flow ring buffer, init the ring buffer, and send a message
 * to the device about flow ring creation
 */
int
dhd_bus_flow_ring_create_request(dhd_bus_t *bus, void *arg)
{
	flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg;

	DHD_INFO(("%s :Flow create\n", __FUNCTION__));

	/* Send a message to the device about flow ring creation */
	if (dhd_prot_flow_ring_create(bus->dhd, flow_ring_node) != BCME_OK)
		return BCME_NOMEM;

	return BCME_OK;
}

/** Handle response from dongle on a 'flow ring create' request */
void
dhd_bus_flow_ring_create_response(dhd_bus_t *bus, uint16 flowid, int32 status)
{
	flow_ring_node_t *flow_ring_node;
	unsigned long flags;

	DHD_INFO(("%s :Flow Response %d \n", __FUNCTION__, flowid));

	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
	ASSERT(flow_ring_node->flowid == flowid);

	if (status != BCME_OK) {
		DHD_ERROR(("%s Flow create Response failure error status = %d \n",
			__FUNCTION__, status));
		/* Call Flow clean up */
		dhd_bus_clean_flow_ring(bus, flow_ring_node);
		return;
	}

	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
	flow_ring_node->status = FLOW_RING_STATUS_OPEN;
	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);

	/* Now add the flow ring node to the active list.
	 * This code used to live in dhd_flowid_lookup, which added the node to
	 * the active list before dhd_prot_flow_ring_create had populated its
	 * contents. If a D2H interrupt arrived in that window, the bottom half
	 * would call dhd_update_txflowrings, walk the active flow ring list,
	 * and operate on a node whose contents could still be NULL, leading to
	 * crashes. Hence the flow_ring_node is added to the active list only
	 * after it is truly created, i.e. once the create response message has
	 * been received.
	 */
	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
	dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);

	dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */

	return;
}
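
/*
 * Editorial timeline of the race described above (illustrative only):
 *
 *	creator thread                 bottom half (DPC)
 *	--------------                 -----------------
 *	dll_prepend(node)  <- too early
 *	                               D2H interrupt fires
 *	                               dhd_update_txflowrings() walks the
 *	                               active list and touches the still
 *	                               unpopulated node -> crash
 *	dhd_prot_flow_ring_create(node)
 *
 * Deferring dll_prepend() until the create response arrives, as done
 * above, closes this window.
 */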

int
dhd_bus_flow_ring_delete_request(dhd_bus_t *bus, void *arg)
{
	void *pkt;
	flow_queue_t *queue;
	flow_ring_node_t *flow_ring_node;
	unsigned long flags;

	DHD_INFO(("%s :Flow Delete\n", __FUNCTION__));

	flow_ring_node = (flow_ring_node_t *)arg;

	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
	if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) {
		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
		DHD_ERROR(("%s :Delete Pending flowid %u\n", __FUNCTION__, flow_ring_node->flowid));
		return BCME_ERROR;
	}
	flow_ring_node->status = FLOW_RING_STATUS_DELETE_PENDING;

	queue = &flow_ring_node->queue; /* queue associated with flow ring */

#ifdef DHDTCPACK_SUPPRESS
	/* Clean tcp_ack_info_tbl to prevent access to a flushed packet
	 * when a new packet arrives from the network stack.
	 */
	dhd_tcpack_info_tbl_clean(bus->dhd);
#endif /* DHDTCPACK_SUPPRESS */
	/* Flush all pending packets in the queue, if any */
	while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
		PKTFREE(bus->dhd->osh, pkt, TRUE);
	}
	ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));

	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);

	/* Send a message to the device about flow ring deletion */
	dhd_prot_flow_ring_delete(bus->dhd, flow_ring_node);

	return BCME_OK;
}

void
dhd_bus_flow_ring_delete_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
{
	flow_ring_node_t *flow_ring_node;

	DHD_INFO(("%s :Flow Delete Response %d \n", __FUNCTION__, flowid));

	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
	ASSERT(flow_ring_node->flowid == flowid);

	if (status != BCME_OK) {
		DHD_ERROR(("%s Flow Delete Response failure error status = %d \n",
			__FUNCTION__, status));
		return;
	}
	/* Call Flow clean up */
	dhd_bus_clean_flow_ring(bus, flow_ring_node);

	return;
}

int dhd_bus_flow_ring_flush_request(dhd_bus_t *bus, void *arg)
{
	void *pkt;
	flow_queue_t *queue;
	flow_ring_node_t *flow_ring_node;
	unsigned long flags;

	DHD_INFO(("%s :Flow Flush\n", __FUNCTION__));

	flow_ring_node = (flow_ring_node_t *)arg;

	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
	queue = &flow_ring_node->queue; /* queue associated with flow ring */
	/* Flow ring status will be set back to FLOW_RING_STATUS_OPEN
	 * once the flow ring flush response is received for this flowring node.
	 */
	flow_ring_node->status = FLOW_RING_STATUS_FLUSH_PENDING;

#ifdef DHDTCPACK_SUPPRESS
	/* Clean tcp_ack_info_tbl to prevent access to a flushed packet
	 * when a new packet arrives from the network stack.
	 */
	dhd_tcpack_info_tbl_clean(bus->dhd);
#endif /* DHDTCPACK_SUPPRESS */

	/* Flush all pending packets in the queue, if any */
	while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
		PKTFREE(bus->dhd->osh, pkt, TRUE);
	}
	ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));

	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);

	/* Send a message to the device about the flow ring flush */
	dhd_prot_flow_ring_flush(bus->dhd, flow_ring_node);

	return BCME_OK;
}

void
dhd_bus_flow_ring_flush_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
{
	flow_ring_node_t *flow_ring_node;

	if (status != BCME_OK) {
		DHD_ERROR(("%s Flow flush Response failure error status = %d \n",
			__FUNCTION__, status));
		return;
	}

	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
	ASSERT(flow_ring_node->flowid == flowid);

	flow_ring_node->status = FLOW_RING_STATUS_OPEN;
	return;
}

uint32
dhd_bus_max_h2d_queues(struct dhd_bus *bus)
{
	return bus->max_submission_rings;
}

/* To be symmetric with SDIO */
void
dhd_bus_pktq_flush(dhd_pub_t *dhdp)
{
	return;
}

void
dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val)
{
	dhdp->bus->is_linkdown = val;
}

#ifdef IDLE_TX_FLOW_MGMT
/* resume request */
int
dhd_bus_flow_ring_resume_request(dhd_bus_t *bus, void *arg)
{
	flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg;

	DHD_ERROR(("%s :Flow Resume Request flow id %u\n", __FUNCTION__, flow_ring_node->flowid));

	flow_ring_node->status = FLOW_RING_STATUS_RESUME_PENDING;

	/* Send a message to the device about the flow ring resume */
	dhd_prot_flow_ring_resume(bus->dhd, flow_ring_node);

	return BCME_OK;
}

/* add the node back to the active flowring list */
void
dhd_bus_flow_ring_resume_response(dhd_bus_t *bus, uint16 flowid, int32 status)
{
	flow_ring_node_t *flow_ring_node;

	DHD_TRACE(("%s :flowid %d \n", __FUNCTION__, flowid));

	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
	ASSERT(flow_ring_node->flowid == flowid);

	if (status != BCME_OK) {
		DHD_ERROR(("%s Error Status = %d \n",
			__FUNCTION__, status));
		return;
	}

	DHD_TRACE(("%s :Number of pkts queued in FlowId:%d is -> %u!!\n",
		__FUNCTION__, flow_ring_node->flowid, flow_ring_node->queue.len));

	flow_ring_node->status = FLOW_RING_STATUS_OPEN;

	dhd_bus_schedule_queue(bus, flowid, FALSE);
	return;
}

/* scan the flow rings in the active list for idle timeout */
void
dhd_bus_check_idle_scan(dhd_bus_t *bus)
{
	uint64 time_stamp; /* in millisec */
	uint64 diff;

	time_stamp = OSL_SYSUPTIME();
	diff = time_stamp - bus->active_list_last_process_ts;

	if (diff > IDLE_FLOW_LIST_TIMEOUT) {
		dhd_bus_idle_scan(bus);
		bus->active_list_last_process_ts = OSL_SYSUPTIME();
	}

	return;
}


/* scan the nodes in the active list until a non-idle node is found */
void
dhd_bus_idle_scan(dhd_bus_t *bus)
{
	dll_t *item, *prev;
	flow_ring_node_t *flow_ring_node;
	uint64 time_stamp, diff;
	unsigned long flags;
	uint16 ringid[MAX_SUSPEND_REQ];
	uint16 count = 0;

	time_stamp = OSL_SYSUPTIME();
	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);

	for (item = dll_tail_p(&bus->flowring_active_list);
		!dll_end(&bus->flowring_active_list, item); item = prev) {
		prev = dll_prev_p(item);

		flow_ring_node = dhd_constlist_to_flowring(item);

		if (flow_ring_node->flowid == (bus->max_submission_rings - 1))
			continue;

		if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) {
			/* Takes care of deleting zombie rings */
			/* delete from the active list */
			DHD_INFO(("deleting flow id %u from active list\n",
				flow_ring_node->flowid));
			__dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
			continue;
		}

		diff = time_stamp - flow_ring_node->last_active_ts;

		if ((diff > IDLE_FLOW_RING_TIMEOUT) && !(flow_ring_node->queue.len)) {
			DHD_ERROR(("\nSuspending flowid %d\n", flow_ring_node->flowid));
			/* delete from the active list */
			__dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
			flow_ring_node->status = FLOW_RING_STATUS_SUSPENDED;
			ringid[count] = flow_ring_node->flowid;
			count++;
			if (count == MAX_SUSPEND_REQ) {
				/* create a batch message now!! */
				dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count);
				count = 0;
			}
		} else {
			/* No more scanning, break from here! */
			break;
		}
	}

	if (count) {
		dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count);
	}

	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);

	return;
}
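
/*
 * Editorial note on the batching above: suspend requests accumulate in
 * ringid[] and are flushed whenever the batch fills (count reaches
 * MAX_SUSPEND_REQ), plus once at the end for any remainder, so N idle
 * rings produce ceil(N / MAX_SUSPEND_REQ) batch messages. A minimal
 * sketch of the flush step ("dhd_example_batch_flush" is a hypothetical
 * name, not a driver API):
 */
static INLINE void
dhd_example_batch_flush(dhd_pub_t *dhdp, uint16 *ringid, uint16 *count)
{
	if (*count) {
		dhd_prot_flow_ring_batch_suspend_request(dhdp, ringid, *count);
		*count = 0;
	}
}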

void dhd_flow_ring_move_to_active_list_head(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
{
	unsigned long flags;
	dll_t *list;

	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
	/* check if the node is already at the head; otherwise delete it and prepend */
	list = dll_head_p(&bus->flowring_active_list);
	if (&flow_ring_node->list != list) {
		dll_delete(&flow_ring_node->list);
		dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
	}

	/* update flow ring timestamp */
	flow_ring_node->last_active_ts = OSL_SYSUPTIME();

	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);

	return;
}

void dhd_flow_ring_add_to_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
{
	unsigned long flags;

	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);

	dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
	/* update flow ring timestamp */
	flow_ring_node->last_active_ts = OSL_SYSUPTIME();

	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);

	return;
}

void __dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
{
	dll_delete(&flow_ring_node->list);
}

void dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
{
	unsigned long flags;

	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);

	__dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);

	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);

	return;
}
#endif /* IDLE_TX_FLOW_MGMT */

int
dhdpcie_bus_clock_start(struct dhd_bus *bus)
{
	return dhdpcie_start_host_pcieclock(bus);
}

int
dhdpcie_bus_clock_stop(struct dhd_bus *bus)
{
	return dhdpcie_stop_host_pcieclock(bus);
}

int
dhdpcie_bus_disable_device(struct dhd_bus *bus)
{
	return dhdpcie_disable_device(bus);
}

int
dhdpcie_bus_enable_device(struct dhd_bus *bus)
{
	return dhdpcie_enable_device(bus);
}

int
dhdpcie_bus_alloc_resource(struct dhd_bus *bus)
{
	return dhdpcie_alloc_resource(bus);
}

void
dhdpcie_bus_free_resource(struct dhd_bus *bus)
{
	dhdpcie_free_resource(bus);
}

int
dhd_bus_request_irq(struct dhd_bus *bus)
{
	return dhdpcie_bus_request_irq(bus);
}

bool
dhdpcie_bus_dongle_attach(struct dhd_bus *bus)
{
	return dhdpcie_dongle_attach(bus);
}

int
dhd_bus_release_dongle(struct dhd_bus *bus)
{
	bool dongle_isolation;
	osl_t *osh;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (bus) {
		osh = bus->osh;
		ASSERT(osh);

		if (bus->dhd) {
			dongle_isolation = bus->dhd->dongle_isolation;
			dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
		}
	}

	return 0;
}

void
dhdpcie_cto_init(struct dhd_bus *bus, bool enable)
{
	if (enable) {
		dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4,
			PCI_CTO_INT_MASK | PCI_SBIM_MASK_SERR);
		dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, SPROM_BACKPLANE_EN);

		if (bus->dhd->cto_threshold == 0) {
			bus->dhd->cto_threshold = PCIE_CTO_TO_THRESH_DEFAULT;
		}

		si_corereg(bus->sih, bus->sih->buscoreidx,
			OFFSETOF(sbpcieregs_t, ctoctrl), ~0,
			((bus->dhd->cto_threshold << PCIE_CTO_TO_THRESHOLD_SHIFT) &
			PCIE_CTO_TO_THRESHHOLD_MASK) |
			((PCIE_CTO_CLKCHKCNT_VAL << PCIE_CTO_CLKCHKCNT_SHIFT) &
			PCIE_CTO_CLKCHKCNT_MASK) |
			PCIE_CTO_ENAB_MASK);
	} else {
		dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, 0);
		dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, 0);

		si_corereg(bus->sih, bus->sih->buscoreidx,
			OFFSETOF(sbpcieregs_t, ctoctrl), ~0, 0);
	}
}
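
/*
 * Illustrative sketch (hypothetical helper name): composing the ctoctrl
 * value written above from its packed fields - a bounded timeout
 * threshold, a clock-check count, and the enable bit. All masks and
 * shifts are the ones already used by dhdpcie_cto_init().
 */
static INLINE uint32
dhdpcie_example_cto_ctrl(uint32 to_threshold, uint32 clkchk_cnt)
{
	return ((to_threshold << PCIE_CTO_TO_THRESHOLD_SHIFT) &
			PCIE_CTO_TO_THRESHHOLD_MASK) |
		((clkchk_cnt << PCIE_CTO_CLKCHKCNT_SHIFT) &
			PCIE_CTO_CLKCHKCNT_MASK) |
		PCIE_CTO_ENAB_MASK;
}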

static void
dhdpcie_cto_error_recovery(struct dhd_bus *bus)
{
	uint32 pci_intmask, err_status;
	uint8 i = 0;

	pci_intmask = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_MASK, 4);
	dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, pci_intmask & ~PCI_CTO_INT_MASK);

	DHD_OS_WAKE_LOCK(bus->dhd);

	/* reset the backplane */
	dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, SPROM_CFG_TO_SB_RST);

	/* clear the timeout error */
	while (1) {
		err_status = si_corereg(bus->sih, bus->sih->buscoreidx,
			OFFSETOF(sbpcieregs_t, dm_errlog),
			0, 0);
		if (err_status & PCIE_CTO_ERR_MASK) {
			si_corereg(bus->sih, bus->sih->buscoreidx,
				OFFSETOF(sbpcieregs_t, dm_errlog),
				~0, PCIE_CTO_ERR_MASK);
		} else {
			break;
		}
		OSL_DELAY(CTO_TO_CLEAR_WAIT_MS * 1000);
		i++;
		if (i > CTO_TO_CLEAR_WAIT_MAX_CNT) {
			DHD_ERROR(("cto recovery fail\n"));

			DHD_OS_WAKE_UNLOCK(bus->dhd);
			return;
		}
	}

	/* clear the interrupt status */
	dhdpcie_bus_cfg_write_dword(bus, PCI_INT_STATUS, 4, PCI_CTO_INT_MASK);

	/* Halt ARM & remove reset */
	/* TBD: an ARM halt could be added here if needed */

	DHD_ERROR(("cto recovery success\n"));

	DHD_OS_WAKE_UNLOCK(bus->dhd);
}

#ifdef BCMPCIE_OOB_HOST_WAKE
int
dhd_bus_oob_intr_register(dhd_pub_t *dhdp)
{
	return dhdpcie_oob_intr_register(dhdp->bus);
}

void
dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp)
{
	dhdpcie_oob_intr_unregister(dhdp->bus);
}

void
dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable)
{
	dhdpcie_oob_intr_set(dhdp->bus, enable);
}
#endif /* BCMPCIE_OOB_HOST_WAKE */


bool
dhdpcie_bus_get_pcie_hostready_supported(dhd_bus_t *bus)
{
	return bus->dhd->d2h_hostrdy_supported;
}

void
dhd_pcie_dump_core_regs(dhd_pub_t *pub, uint32 index, uint32 first_addr, uint32 last_addr)
{
	dhd_bus_t *bus = pub->bus;
	uint32 coreoffset = index << 12;
	uint32 core_addr = SI_ENUM_BASE + coreoffset;
	uint32 value;

	while (first_addr <= last_addr) {
		core_addr = SI_ENUM_BASE + coreoffset + first_addr;
		if (si_backplane_access(bus->sih, core_addr, 4, &value, TRUE) != BCME_OK) {
			DHD_ERROR(("Invalid size/addr combination\n"));
		}
		DHD_ERROR(("[0x%08x]: 0x%08x\n", core_addr, value));
		first_addr = first_addr + 4;
	}
}
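
/*
 * Worked example (editorial): each backplane core exposes a 4 KB register
 * window, hence coreoffset = index << 12 above. For index 2 and
 * first_addr 0x0, the first address read is SI_ENUM_BASE + 0x2000.
 * "dhdpcie_example_core_reg_addr" is a hypothetical name, not a driver API.
 */
static INLINE uint32
dhdpcie_example_core_reg_addr(uint32 core_index, uint32 reg_offset)
{
	return SI_ENUM_BASE + (core_index << 12) + reg_offset;
}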

#ifdef PCIE_OOB
bool
dhdpcie_bus_get_pcie_oob_dw_supported(dhd_bus_t *bus)
{
	if (!bus->dhd)
		return FALSE;
	if (bus->oob_enabled) {
		return !bus->dhd->d2h_no_oob_dw;
	} else {
		return FALSE;
	}
}
#endif /* PCIE_OOB */

void
dhdpcie_bus_enab_pcie_dw(dhd_bus_t *bus, uint8 dw_option)
{
	DHD_ERROR(("ENABLING DW:%d\n", dw_option));
	bus->dw_option = dw_option;
}

#ifdef PCIE_INB_DW
bool
dhdpcie_bus_get_pcie_inband_dw_supported(dhd_bus_t *bus)
{
	if (!bus->dhd)
		return FALSE;
	if (bus->inb_enabled) {
		return bus->dhd->d2h_inband_dw;
	} else {
		return FALSE;
	}
}

void
dhdpcie_bus_set_pcie_inband_dw_state(dhd_bus_t *bus, enum dhd_bus_ds_state state)
{
	if (!INBAND_DW_ENAB(bus))
		return;

	DHD_INFO(("%s:%d\n", __FUNCTION__, state));
	bus->dhd->ds_state = state;
	if (state == DW_DEVICE_DS_DISABLED_WAIT || state == DW_DEVICE_DS_D3_INFORM_WAIT) {
		bus->ds_exit_timeout = 100;
	}
	if (state == DW_DEVICE_HOST_WAKE_WAIT) {
		bus->host_sleep_exit_timeout = 100;
	}
	if (state == DW_DEVICE_DS_DEV_WAKE) {
		bus->ds_exit_timeout = 0;
	}
	if (state == DW_DEVICE_DS_ACTIVE) {
		bus->host_sleep_exit_timeout = 0;
	}
}

enum dhd_bus_ds_state
dhdpcie_bus_get_pcie_inband_dw_state(dhd_bus_t *bus)
{
	if (!INBAND_DW_ENAB(bus))
		return DW_DEVICE_DS_INVALID;
	return bus->dhd->ds_state;
}
#endif /* PCIE_INB_DW */

bool
dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t *bus)
{
	if (!bus->dhd)
		return FALSE;
	else if (bus->idma_enabled) {
		return bus->dhd->idma_enable;
	} else {
		return FALSE;
	}
}

bool
dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t *bus)
{
	if (!bus->dhd)
		return FALSE;
	else if (bus->ifrm_enabled) {
		return bus->dhd->ifrm_enable;
	} else {
		return FALSE;
	}
}


void
dhd_bus_dump_trap_info(dhd_bus_t *bus, struct bcmstrbuf *strbuf)
{
	trap_t *tr = &bus->dhd->last_trap_info;
	bcm_bprintf(strbuf,
		"\nTRAP type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
		" lp 0x%x, rpc 0x%x"
		"\nTrap offset 0x%x, r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, "
		"r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n\n",
		ltoh32(tr->type), ltoh32(tr->epc), ltoh32(tr->cpsr), ltoh32(tr->spsr),
		ltoh32(tr->r13), ltoh32(tr->r14), ltoh32(tr->pc),
		ltoh32(bus->pcie_sh->trap_addr),
		ltoh32(tr->r0), ltoh32(tr->r1), ltoh32(tr->r2), ltoh32(tr->r3),
		ltoh32(tr->r4), ltoh32(tr->r5), ltoh32(tr->r6), ltoh32(tr->r7));
}

int
dhd_bus_readwrite_bp_addr(dhd_pub_t *dhdp, uint addr, uint size, uint *data, bool read)
{
	int bcmerror = 0;
	struct dhd_bus *bus = dhdp->bus;

	if (si_backplane_access(bus->sih, addr, size, data, read) != BCME_OK) {
		DHD_ERROR(("Invalid size/addr combination\n"));
		bcmerror = BCME_ERROR;
	}

	return bcmerror;
}

int
dhd_get_idletime(dhd_pub_t *dhd)
{
	return dhd->bus->idletime;
}

#ifdef DHD_SSSR_DUMP

static INLINE void
dhd_sbreg_op(dhd_pub_t *dhd, uint addr, uint *val, bool read)
{
	OSL_DELAY(1);
	si_backplane_access(dhd->bus->sih, addr, sizeof(uint), val, read);
	DHD_ERROR(("%s: addr:0x%x val:0x%x read:%d\n", __FUNCTION__, addr, *val, read));
	return;
}

static int
dhdpcie_get_sssr_fifo_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size,
	uint addr_reg, uint data_reg)
{
	uint addr;
	uint val = 0;
	int i;

	DHD_ERROR(("%s\n", __FUNCTION__));

	if (!buf) {
		DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__));
		return BCME_ERROR;
	}

	if (!fifo_size) {
		DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__));
		return BCME_ERROR;
	}

	/* Set the base address offset to 0 */
	addr = addr_reg;
	val = 0;
	dhd_sbreg_op(dhd, addr, &val, FALSE);

	addr = data_reg;
	/* Read 4 bytes at a time, looping fifo_size / 4 times */
	for (i = 0; i < fifo_size / 4; i++) {
		si_backplane_access(dhd->bus->sih, addr, sizeof(uint), &val, TRUE);
		buf[i] = val;
		OSL_DELAY(1);
	}
	return BCME_OK;
}

static int
dhdpcie_get_sssr_vasip_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size,
	uint addr_reg)
{
	uint addr;
	uint val = 0;
	int i;

	DHD_ERROR(("%s\n", __FUNCTION__));

	if (!buf) {
		DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__));
		return BCME_ERROR;
	}

	if (!fifo_size) {
		DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__));
		return BCME_ERROR;
	}

	/* Check if the VASIP clock is disabled; if so, enable it */
	addr = dhd->sssr_reg_info.vasip_regs.wrapper_regs.ioctrl;
	dhd_sbreg_op(dhd, addr, &val, TRUE);
	if (!val) {
		val = 1;
		dhd_sbreg_op(dhd, addr, &val, FALSE);
	}

	addr = addr_reg;
	/* Read 4 bytes at a time, looping fifo_size / 4 times */
	for (i = 0; i < fifo_size / 4; i++, addr += 4) {
		si_backplane_access(dhd->bus->sih, addr, sizeof(uint), &val, TRUE);
		buf[i] = val;
		OSL_DELAY(1);
	}
	return BCME_OK;
}
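
/*
 * Design note (editorial): the two dump helpers above access memory
 * differently. dhdpcie_get_sssr_fifo_dump() writes 0 to addr_reg once to
 * reset the FIFO pointer and then reads a fixed data_reg that advances on
 * every read, while dhdpcie_get_sssr_vasip_dump() walks plain memory by
 * incrementing the backplane address itself (addr += 4 per 32-bit word).
 */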

static int
dhdpcie_resume_chipcommon_powerctrl(dhd_pub_t *dhd)
{
	uint addr;
	uint val;

	DHD_ERROR(("%s\n", __FUNCTION__));

	/* conditionally set bits [11:8] of PowerCtrl */
	addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
	dhd_sbreg_op(dhd, addr, &val, TRUE);
	if (!(val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask)) {
		addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
		val = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask;
		dhd_sbreg_op(dhd, addr, &val, FALSE);
	}
	return BCME_OK;
}

static int
dhdpcie_suspend_chipcommon_powerctrl(dhd_pub_t *dhd)
{
	uint addr;
	uint val;

	DHD_ERROR(("%s\n", __FUNCTION__));

	/* conditionally clear bits [11:8] of PowerCtrl */
	addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
	dhd_sbreg_op(dhd, addr, &val, TRUE);
	if (val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask) {
		addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
		val = 0;
		dhd_sbreg_op(dhd, addr, &val, FALSE);
	}
	return BCME_OK;
}

static int
dhdpcie_clear_intmask_and_timer(dhd_pub_t *dhd)
{
	uint addr;
	uint val;

	DHD_ERROR(("%s\n", __FUNCTION__));

	/* clear chipcommon intmask */
	addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.intmask;
	val = 0x0;
	dhd_sbreg_op(dhd, addr, &val, FALSE);

	/* clear PMUIntMask0 */
	addr = dhd->sssr_reg_info.pmu_regs.base_regs.pmuintmask0;
	val = 0x0;
	dhd_sbreg_op(dhd, addr, &val, FALSE);

	/* clear PMUIntMask1 */
	addr = dhd->sssr_reg_info.pmu_regs.base_regs.pmuintmask1;
	val = 0x0;
	dhd_sbreg_op(dhd, addr, &val, FALSE);

	/* clear res_req_timer */
	addr = dhd->sssr_reg_info.pmu_regs.base_regs.resreqtimer;
	val = 0x0;
	dhd_sbreg_op(dhd, addr, &val, FALSE);

	/* clear macresreqtimer */
	addr = dhd->sssr_reg_info.pmu_regs.base_regs.macresreqtimer;
	val = 0x0;
	dhd_sbreg_op(dhd, addr, &val, FALSE);

	/* clear macresreqtimer1 */
	addr = dhd->sssr_reg_info.pmu_regs.base_regs.macresreqtimer1;
	val = 0x0;
	dhd_sbreg_op(dhd, addr, &val, FALSE);

	/* clear VasipClkEn */
	if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
		addr = dhd->sssr_reg_info.vasip_regs.wrapper_regs.ioctrl;
		val = 0x0;
		dhd_sbreg_op(dhd, addr, &val, FALSE);
	}

	return BCME_OK;
}

static int
dhdpcie_d11_check_outofreset(dhd_pub_t *dhd)
{
	int i;
	uint addr;
	uint val = 0;

	DHD_ERROR(("%s\n", __FUNCTION__));

	for (i = 0; i < MAX_NUM_D11CORES; i++) {
		/* Check if bit 0 of resetctrl is cleared */
		addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl;
		dhd_sbreg_op(dhd, addr, &val, TRUE);
		if (!(val & 1)) {
			dhd->sssr_d11_outofreset[i] = TRUE;
		} else {
			dhd->sssr_d11_outofreset[i] = FALSE;
		}
		DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d\n",
			__FUNCTION__, i, dhd->sssr_d11_outofreset[i]));
	}
	return BCME_OK;
}

static int
dhdpcie_d11_clear_clk_req(dhd_pub_t *dhd)
{
	int i;
	uint addr;
	uint val = 0;

	DHD_ERROR(("%s\n", __FUNCTION__));

	for (i = 0; i < MAX_NUM_D11CORES; i++) {
		if (dhd->sssr_d11_outofreset[i]) {
			/* clear the clock request only if itopoobb is non-zero */
			addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.itopoobb;
			dhd_sbreg_op(dhd, addr, &val, TRUE);
			if (val != 0) {
				/* clear clockcontrolstatus */
				addr = dhd->sssr_reg_info.mac_regs[i].base_regs.clockcontrolstatus;
				val =
				dhd->sssr_reg_info.mac_regs[i].base_regs.clockcontrolstatus_val;
				dhd_sbreg_op(dhd, addr, &val, FALSE);
			}
		}
	}
	return BCME_OK;
}

static int
dhdpcie_arm_clear_clk_req(dhd_pub_t *dhd)
{
	uint addr;
	uint val = 0;

	DHD_ERROR(("%s\n", __FUNCTION__));

	/* Check if bit 0 of resetctrl is cleared */
	addr = dhd->sssr_reg_info.arm_regs.wrapper_regs.resetctrl;
	dhd_sbreg_op(dhd, addr, &val, TRUE);
	if (!(val & 1)) {
		/* clear the clock request only if itopoobb is non-zero */
		addr = dhd->sssr_reg_info.arm_regs.wrapper_regs.itopoobb;
		dhd_sbreg_op(dhd, addr, &val, TRUE);
		if (val != 0) {
			/* clear clockcontrolstatus */
			addr = dhd->sssr_reg_info.arm_regs.base_regs.clockcontrolstatus;
			val = dhd->sssr_reg_info.arm_regs.base_regs.clockcontrolstatus_val;
			dhd_sbreg_op(dhd, addr, &val, FALSE);
		}
	}
	return BCME_OK;
}

static int
dhdpcie_pcie_clear_clk_req(dhd_pub_t *dhd)
{
	uint addr;
	uint val = 0;

	DHD_ERROR(("%s\n", __FUNCTION__));

	/* clear the clock request only if itopoobb is non-zero */
	addr = dhd->sssr_reg_info.pcie_regs.wrapper_regs.itopoobb;
	dhd_sbreg_op(dhd, addr, &val, TRUE);
	if (val) {
		/* clear clockcontrolstatus */
		addr = dhd->sssr_reg_info.pcie_regs.base_regs.clockcontrolstatus;
		val = dhd->sssr_reg_info.pcie_regs.base_regs.clockcontrolstatus_val;
		dhd_sbreg_op(dhd, addr, &val, FALSE);
	}
	return BCME_OK;
}

static int
dhdpcie_pcie_send_ltrsleep(dhd_pub_t *dhd)
{
	uint addr;
	uint val = 0;

	DHD_ERROR(("%s\n", __FUNCTION__));

	addr = dhd->sssr_reg_info.pcie_regs.base_regs.ltrstate;
	val = LTR_ACTIVE;
	dhd_sbreg_op(dhd, addr, &val, FALSE);

	val = LTR_SLEEP;
	dhd_sbreg_op(dhd, addr, &val, FALSE);

	return BCME_OK;
}

static int
dhdpcie_clear_clk_req(dhd_pub_t *dhd)
{
	DHD_ERROR(("%s\n", __FUNCTION__));

	dhdpcie_arm_clear_clk_req(dhd);

	dhdpcie_d11_clear_clk_req(dhd);

	dhdpcie_pcie_clear_clk_req(dhd);

	return BCME_OK;
}

static int
dhdpcie_bring_d11_outofreset(dhd_pub_t *dhd)
{
	int i;
	uint addr;
	uint val = 0;

	DHD_ERROR(("%s\n", __FUNCTION__));

	for (i = 0; i < MAX_NUM_D11CORES; i++) {
		if (dhd->sssr_d11_outofreset[i]) {
			/* disable the core by setting bit 0 */
			addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl;
			val = 1;
			dhd_sbreg_op(dhd, addr, &val, FALSE);
			OSL_DELAY(6000);

			addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.ioctrl;
			val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[0];
			dhd_sbreg_op(dhd, addr, &val, FALSE);

			val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[1];
			dhd_sbreg_op(dhd, addr, &val, FALSE);

			/* enable the core by clearing bit 0 */
			addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl;
			val = 0;
			dhd_sbreg_op(dhd, addr, &val, FALSE);

			addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.ioctrl;
			val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[2];
			dhd_sbreg_op(dhd, addr, &val, FALSE);

			val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[3];
			dhd_sbreg_op(dhd, addr, &val, FALSE);

			val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[4];
			dhd_sbreg_op(dhd, addr, &val, FALSE);
		}
	}
	return BCME_OK;
}
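
/*
 * Editorial note on the sequence above: each out-of-reset D11 core goes
 * through disable (resetctrl bit 0 set), two pre-reset ioctrl values,
 * enable (bit 0 cleared), then three post-reset ioctrl values - the usual
 * backplane core reset dance, with the values supplied by sssr_reg_info.
 */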

static int
dhdpcie_sssr_dump_get_before_sr(dhd_pub_t *dhd)
{
	int i;

	DHD_ERROR(("%s\n", __FUNCTION__));

	for (i = 0; i < MAX_NUM_D11CORES; i++) {
		if (dhd->sssr_d11_outofreset[i]) {
			dhdpcie_get_sssr_fifo_dump(dhd, dhd->sssr_d11_before[i],
				dhd->sssr_reg_info.mac_regs[i].sr_size,
				dhd->sssr_reg_info.mac_regs[i].base_regs.xmtaddress,
				dhd->sssr_reg_info.mac_regs[i].base_regs.xmtdata);
		}
	}

	if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
		dhdpcie_get_sssr_vasip_dump(dhd, dhd->sssr_vasip_buf_before,
			dhd->sssr_reg_info.vasip_regs.vasip_sr_size,
			dhd->sssr_reg_info.vasip_regs.vasip_sr_addr);
	}

	return BCME_OK;
}

static int
dhdpcie_sssr_dump_get_after_sr(dhd_pub_t *dhd)
{
	int i;

	DHD_ERROR(("%s\n", __FUNCTION__));

	for (i = 0; i < MAX_NUM_D11CORES; i++) {
		if (dhd->sssr_d11_outofreset[i]) {
			dhdpcie_get_sssr_fifo_dump(dhd, dhd->sssr_d11_after[i],
				dhd->sssr_reg_info.mac_regs[i].sr_size,
				dhd->sssr_reg_info.mac_regs[i].base_regs.xmtaddress,
				dhd->sssr_reg_info.mac_regs[i].base_regs.xmtdata);
		}
	}

	if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
		dhdpcie_get_sssr_vasip_dump(dhd, dhd->sssr_vasip_buf_after,
			dhd->sssr_reg_info.vasip_regs.vasip_sr_size,
			dhd->sssr_reg_info.vasip_regs.vasip_sr_addr);
	}

	return BCME_OK;
}

int
dhdpcie_sssr_dump(dhd_pub_t *dhd)
{
	if (!dhd->sssr_inited) {
		DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
		return BCME_ERROR;
	}

	if (dhd->bus->is_linkdown) {
		DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
		return BCME_ERROR;
	}

	dhdpcie_d11_check_outofreset(dhd);

	DHD_ERROR(("%s: Collecting Dump before SR\n", __FUNCTION__));
	if (dhdpcie_sssr_dump_get_before_sr(dhd) != BCME_OK) {
		DHD_ERROR(("%s: dhdpcie_sssr_dump_get_before_sr failed\n", __FUNCTION__));
		return BCME_ERROR;
	}

	dhdpcie_clear_intmask_and_timer(dhd);
	dhdpcie_suspend_chipcommon_powerctrl(dhd);
	dhdpcie_clear_clk_req(dhd);
	dhdpcie_pcie_send_ltrsleep(dhd);

	/* Wait for some time before restore */
	OSL_DELAY(6000);

	dhdpcie_resume_chipcommon_powerctrl(dhd);
	dhdpcie_bring_d11_outofreset(dhd);

	DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__));
	if (dhdpcie_sssr_dump_get_after_sr(dhd) != BCME_OK) {
		DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__));
		return BCME_ERROR;
	}

	dhd_schedule_sssr_dump(dhd);

	return BCME_OK;
}
#endif /* DHD_SSSR_DUMP */

#ifdef DHD_WAKE_STATUS
wake_counts_t*
dhd_bus_get_wakecount(dhd_pub_t *dhd)
{
	if (!dhd->bus) {
		return NULL;
	}
	return &dhd->bus->wake_counts;
}

int
dhd_bus_get_bus_wake(dhd_pub_t *dhd)
{
	return bcmpcie_set_get_wake(dhd->bus, 0);
}
#endif /* DHD_WAKE_STATUS */

#ifdef BCM_ASLR_HEAP
/* Writes random number(s) to the TCM. On initialization the firmware reads
 * the random-number metadata and then, based on it, reads the random
 * number(s) from the TCM.
 */
static void
dhdpcie_wrt_rnd(struct dhd_bus *bus)
{
	bcm_rand_metadata_t rnd_data;
	uint32 rand_no;
	uint32 count = 1; /* start with 1 random number */

	uint32 addr = bus->dongle_ram_base + (bus->ramsize - BCM_NVRAM_OFFSET_TCM) -
		((bus->nvram_csm & 0xffff) * BCM_NVRAM_IMG_COMPRS_FACTOR + sizeof(rnd_data));
	rnd_data.signature = htol32(BCM_RNG_SIGNATURE);
	rnd_data.count = htol32(count);
	/* write the metadata about the random number */
	dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&rnd_data, sizeof(rnd_data));
	/* scale back by the number of random numbers */
	addr -= sizeof(count) * count;
	/* Now write the random number(s) */
	rand_no = htol32(dhd_get_random_number());
	dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&rand_no, sizeof(rand_no));
}
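
/*
 * Illustrative TCM layout for dhdpcie_wrt_rnd() above (top of dongle RAM,
 * growing downward; offsets symbolic, not verified values):
 *
 *	ram_base + ramsize
 *	  - BCM_NVRAM_OFFSET_TCM
 *	  - (nvram_csm & 0xffff) * BCM_NVRAM_IMG_COMPRS_FACTOR  (NVRAM image)
 *	  - sizeof(bcm_rand_metadata_t)   <- metadata (signature, count)
 *	  - sizeof(uint32) * count        <- random number(s)
 */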
#endif /* BCM_ASLR_HEAP */
