1 /*
2  * Misc utility routines for accessing chip-specific features
3  * of the SiliconBackplane-based Broadcom chips.
4  *
5  * Copyright (C) 2020, Broadcom.
6  *
7  *      Unless you and Broadcom execute a separate written software license
8  * agreement governing use of this software, this software is licensed to you
9  * under the terms of the GNU General Public License version 2 (the "GPL"),
10  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11  * following added to such license:
12  *
13  *      As a special exception, the copyright holders of this software give you
14  * permission to link this software with independent modules, and to copy and
15  * distribute the resulting executable under terms of your choice, provided that
16  * you also meet, for each linked independent module, the terms and conditions of
17  * the license of that module.  An independent module is a module which is not
18  * derived from this software.  The special exception does not apply to any
19  * modifications of the software.
20  *
21  *
22  * <<Broadcom-WL-IPTag/Dual:>>
23  */
24 
25 #include <typedefs.h>
26 #include <bcmdefs.h>
27 #include <osl.h>
28 #include <bcmutils.h>
29 #include <siutils.h>
30 #include <bcmdevs.h>
31 #include <hndsoc.h>
32 #include <sbchipc.h>
33 #include <sbgci.h>
34 #ifndef BCMSDIO
35 #include <pcie_core.h>
36 #endif
37 #if !defined(BCMDONGLEHOST)
38 #include <pci_core.h>
39 #include <nicpci.h>
40 #include <bcmnvram.h>
41 #include <bcmsrom.h>
42 #include <hndtcam.h>
43 #endif /* !defined(BCMDONGLEHOST) */
44 #ifdef BCMPCIEDEV
45 #include <pcieregsoffs.h>
46 #include <pciedev.h>
47 #endif /* BCMPCIEDEV */
48 #include <pcicfg.h>
49 #include <sbpcmcia.h>
50 #include <sbsysmem.h>
51 #include <sbsocram.h>
52 #if defined(BCMECICOEX) || !defined(BCMDONGLEHOST)
53 #include <bcmotp.h>
54 #endif /* BCMECICOEX || !BCMDONGLEHOST */
55 #ifdef BCMSDIO
56 #include <bcmsdh.h>
57 #include <sdio.h>
58 #include <sbsdio.h>
59 #include <sbhnddma.h>
60 #include <sbsdpcmdev.h>
61 #include <bcmsdpcm.h>
62 #endif /* BCMSDIO */
63 #include <hndpmu.h>
64 #ifdef BCMSPI
65 #include <spid.h>
66 #endif /* BCMSPI */
67 #if !defined(BCMDONGLEHOST) && !defined(BCM_BOOTLOADER) && defined(SR_ESSENTIALS)
68 #include <saverestore.h>
69 #endif
70 #include <dhd_config.h>
71 
72 #ifdef BCM_SDRBL
73 #include <hndcpu.h>
74 #endif /* BCM_SDRBL */
75 #ifdef HNDGCI
76 #include <hndgci.h>
77 #endif /* HNDGCI */
78 #ifdef DONGLEBUILD
79 #include <hnd_gci.h>
80 #endif /* DONGLEBUILD */
81 #include <hndlhl.h>
82 #include <hndoobr.h>
83 #include <lpflags.h>
84 #ifdef BCM_SFLASH
85 #include <sflash.h>
86 #endif
87 #ifdef BCM_SH_SFLASH
88 #include <sh_sflash.h>
89 #endif
90 #ifdef BCMGCISHM
91 #include <hnd_gcishm.h>
92 #endif
93 #include "siutils_priv.h"
94 #include "sbhndarm.h"
95 #include <hndchipc.h>
96 #ifdef SOCI_NCI_BUS
97 #include <nci.h>
98 #endif /* SOCI_NCI_BUS */
99 
100 #ifdef SECI_UART
101 /* Defines the set of GPIOs to be used for SECI UART if not specified in NVRAM */
102 /* For further details on each pin's functionality please refer to the PINMUX table in
103  * Top level architecture of BCMXXXX Chip
104  */
105 #define DEFAULT_SECI_UART_PINMUX	0x08090a0b
106 static bool force_seci_clk = 0;
107 #endif /* SECI_UART */
108 
109 #define XTAL_FREQ_26000KHZ		26000
110 #define XTAL_FREQ_59970KHZ		59970
111 #define WCI2_UART_RX_BUF_SIZE	64
112 
113 /**
114  * A set of PMU registers is clocked in the ILP domain, which has an implication on register write
115  * behavior: if such a register is written, it takes multiple ILP clocks for the PMU block to absorb
116  * the write. During that time the 'SlowWritePending' bit in the PMUStatus register is set.
117  */
118 #define PMUREGS_ILP_SENSITIVE(regoff) \
119 	((regoff) == OFFSETOF(pmuregs_t, pmutimer) || \
120 	 (regoff) == OFFSETOF(pmuregs_t, pmuwatchdog) || \
121 	 (regoff) == OFFSETOF(pmuregs_t, res_req_timer))
122 
123 #define CHIPCREGS_ILP_SENSITIVE(regoff) \
124 	((regoff) == OFFSETOF(chipcregs_t, pmutimer) || \
125 	 (regoff) == OFFSETOF(chipcregs_t, pmuwatchdog) || \
126 	 (regoff) == OFFSETOF(chipcregs_t, res_req_timer))
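
/*
 * Illustrative sketch of why the macros above matter (not part of the driver
 * flow): before issuing a second write to an ILP-sensitive register, a caller
 * would typically wait for the previous slow write to be absorbed. The
 * 'pmustatus' field name and the PST_SLOW_WR_PENDING bit below are
 * assumptions used only for illustration:
 *
 *	if (PMUREGS_ILP_SENSITIVE(regoff)) {
 *		SPINWAIT(R_REG(osh, &pmu->pmustatus) & PST_SLOW_WR_PENDING,
 *			PMU_MAX_TRANSITION_DLY);
 *	}
 *	W_REG(osh, (volatile uint32 *)((volatile uint8 *)pmu + regoff), val);
 */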
127 
128 #define GCI_FEM_CTRL_WAR 0x11111111
129 
130 #ifndef AXI_TO_VAL
131 #define AXI_TO_VAL 19
132 #endif	/* AXI_TO_VAL */
133 
134 #ifndef AXI_TO_VAL_25
135 /*
136  * Increase BP timeout for fast clock and short PCIe timeouts
137  * New timeout: 2 ** 25 cycles
138  */
139 #define AXI_TO_VAL_25	25
140 #endif /* AXI_TO_VAL_25 */
141 
142 #define si_srpwr_domain_mask(rval, mask) \
143 	(((rval) >> SRPWR_STATUS_SHIFT) & (mask))
144 
145 /* local prototypes */
146 #if !defined(BCMDONGLEHOST)
147 static void si_43012_lp_enable(si_t *sih);
148 #endif /* !defined(BCMDONGLEHOST) */
149 static int32 BCMATTACHFN(si_alloc_wrapper)(si_info_t *sii);
150 static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh, volatile void *regs,
151                               uint bustype, void *sdh, char **vars, uint *varsz);
152 static bool si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh);
153 static bool si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin,
154 	uint *origidx, volatile const void *regs);
155 
156 #if !defined(BCMDONGLEHOST)
157 static void si_nvram_process(si_info_t *sii, char *pvars);
158 
159 /* dev path concatenation util */
160 static char *si_devpathvar(const si_t *sih, char *var, int len, const char *name);
161 static char *si_pcie_devpathvar(const si_t *sih, char *var, int len, const char *name);
162 static bool _si_clkctl_cc(si_info_t *sii, uint mode);
163 static bool si_ispcie(const si_info_t *sii);
164 static uint sysmem_banksize(const si_info_t *sii, sysmemregs_t *r, uint8 idx);
165 static uint socram_banksize(const si_info_t *sii, sbsocramregs_t *r, uint8 idx, uint8 mtype);
166 static void si_gci_get_chipctrlreg_ringidx_base4(uint32 pin, uint32 *regidx, uint32 *pos);
167 static uint8 si_gci_get_chipctrlreg_ringidx_base8(uint32 pin, uint32 *regidx, uint32 *pos);
168 static void si_gci_gpio_chipcontrol(si_t *sih, uint8 gci_gpio, uint8 opt);
169 static void si_gci_enable_gpioint(si_t *sih, bool enable);
170 #if defined(BCMECICOEX) || defined(SECI_UART)
171 static chipcregs_t * seci_set_core(si_t *sih, uint32 *origidx, bool *fast);
172 #endif
173 #endif /* !defined(BCMDONGLEHOST) */
174 
175 static bool si_pmu_is_ilp_sensitive(uint32 idx, uint regoff);
176 
177 static void si_oob_war_BT_F1(si_t *sih);
178 
179 #if defined(DONGLEBUILD)
180 #if	!defined(NVSRCX)
181 static char * BCMATTACHFN(si_getkvars)(void);
182 static int BCMATTACHFN(si_getkvarsz)(void);
183 #endif
184 #endif /* DONGLEBUILD */
185 
186 #if defined(BCMLTECOEX) && !defined(WLTEST)
187 static void si_wci2_rxfifo_intr_handler_process(si_t *sih, uint32 intstatus);
188 #endif /* BCMLTECOEX && !WLTEST */
189 
190 /* global variable to indicate reservation/release of gpio's */
191 static uint32 si_gpioreservation = 0;
192 #if !defined(BCMDONGLEHOST)
193 /* global variable to indicate GCI reset is done */
194 static bool gci_reset_done = FALSE;
195 #endif
196 /* global flag to prevent shared resources from being initialized multiple times in si_attach() */
197 static bool si_onetimeinit = FALSE;
198 
199 #ifdef SR_DEBUG
200 static const uint32 si_power_island_test_array[] = {
201 	0x0000, 0x0001, 0x0010, 0x0011,
202 	0x0100, 0x0101, 0x0110, 0x0111,
203 	0x1000, 0x1001, 0x1010, 0x1011,
204 	0x1100, 0x1101, 0x1110, 0x1111
205 };
206 #endif /* SR_DEBUG */
207 
208 /* 4360 pcie2 WAR */
209 int do_4360_pcie2_war = 0;
210 
211 /* global kernel resource */
212 static si_info_t ksii;
213 static si_cores_info_t ksii_cores_info;
214 
215 #ifndef BCMDONGLEHOST
216 static const char BCMATTACHDATA(rstr_rmin)[] = "rmin";
217 static const char BCMATTACHDATA(rstr_rmax)[] = "rmax";
218 
219 static const char BCMATTACHDATA(rstr_lhl_ps_mode)[] = "lhl_ps_mode";
220 static const char BCMATTACHDATA(rstr_ext_wakeup_dis)[] = "ext_wakeup_dis";
221 #if defined(BCMSRTOPOFF) && !defined(BCMSRTOPOFF_DISABLED)
222 static const char BCMATTACHDATA(rstr_srtopoff_enab)[] = "srtopoff_enab";
223 #endif
224 #endif /* BCMDONGLEHOST */
225 
226 static uint32	wd_msticks;		/**< watchdog timer ticks normalized to ms */
227 
228 #ifdef DONGLEBUILD
229 /**
230  * Since si_kattach goes through full SROM initialisation, the same can be
231  * used for all subsequent calls.
232  */
233 #if	!defined(NVSRCX)
234 static char *
235 BCMATTACHFN(si_getkvars)(void)
236 {
237 	if (FWSIGN_ENAB()) {
238 		return NULL;
239 	}
240 	return (ksii.vars);
241 }
242 
243 static int
244 BCMATTACHFN(si_getkvarsz)(void)
245 {
246 	if (FWSIGN_ENAB()) {
247 		return 0;
248 	}
249 	return (ksii.varsz);
250 }
251 #endif /* !defined(NVSRCX) */
252 #endif /* DONGLEBUILD */
253 
254 /** Returns the backplane address of the chipcommon core for a particular chip */
255 uint32
256 BCMATTACHFN(si_enum_base)(uint devid)
257 {
258 	return SI_ENUM_BASE_DEFAULT;
259 }
260 
261 /**
262  * Allocate an si handle. This function may be called multiple times.
263  *
264  * devid - pci device id (used to determine chip#)
265  * osh - opaque OS handle
266  * regs - virtual address of initial core registers
267  * bustype - pci/sb/sdio/etc
268  * vars - pointer to a to-be created pointer area for "environment" variables. Some callers of this
269  *        function set 'vars' to NULL, making dereferencing of this parameter undesired.
270  * varsz - pointer to int to return the size of the vars
271  */
272 si_t *
273 BCMATTACHFN(si_attach)(uint devid, osl_t *osh, volatile void *regs,
274                        uint bustype, void *sdh, char **vars, uint *varsz)
275 {
276 	si_info_t *sii;
277 
278 	/* alloc si_info_t */
279 	/* freed after ucode download for firmware builds */
280 	if ((sii = MALLOCZ_NOPERSIST(osh, sizeof(si_info_t))) == NULL) {
281 		SI_ERROR(("si_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh)));
282 		return (NULL);
283 	}
284 
285 #ifdef BCMDVFS
286 	if (BCMDVFS_ENAB() && si_dvfs_info_init((si_t *)sii, osh) == NULL) {
287 		SI_ERROR(("si_dvfs_info_init failed\n"));
288 		return (NULL);
289 	}
290 #endif /* BCMDVFS */
291 
292 	if (si_doattach(sii, devid, osh, regs, bustype, sdh, vars, varsz) == NULL) {
293 		MFREE(osh, sii, sizeof(si_info_t));
294 		return (NULL);
295 	}
296 	sii->vars = vars ? *vars : NULL;
297 	sii->varsz = varsz ? *varsz : 0;
298 
299 #if defined(BCM_SH_SFLASH) && !defined(BCM_SH_SFLASH_DISABLED)
300 	sh_sflash_attach(osh, (si_t *)sii);
301 #endif
302 	return (si_t *)sii;
303 }
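
/*
 * Illustrative usage sketch (caller-side variable names are assumptions): a
 * bus driver attaches once per device, passing &vars/&varsz to receive the
 * "environment" variables, and treats a NULL return as a fatal attach error:
 *
 *	char *vars = NULL;
 *	uint varsz = 0;
 *	si_t *sih = si_attach(devid, osh, regsva, PCI_BUS, sdh, &vars, &varsz);
 *	if (sih == NULL)
 *		return NULL;
 */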
304 
305 /** generic kernel variant of si_attach(). Is not called for Linux WLAN NIC builds. */
306 si_t *
307 BCMATTACHFN(si_kattach)(osl_t *osh)
308 {
309 	static bool ksii_attached = FALSE;
310 	si_cores_info_t *cores_info;
311 
312 	if (!ksii_attached) {
313 		void *regs = NULL;
314 		const uint device_id = BCM4710_DEVICE_ID; // pick an arbitrary default device_id
315 
316 		regs = REG_MAP(si_enum_base(device_id), SI_CORE_SIZE); // map physical to virtual
317 		cores_info = (si_cores_info_t *)&ksii_cores_info;
318 		ksii.cores_info = cores_info;
319 
320 		/* Use osh as the deciding factor if the memory management
321 		 * system has been initialized. Pass non-NULL vars & varsz only
322 		 * if memory management has been initialized. Otherwise MALLOC()
323 		 * will fail/crash.
324 		 */
325 #if defined(BCMDONGLEHOST)
326 		ASSERT(osh);
327 #endif
328 		if (si_doattach(&ksii, device_id, osh, regs,
329 		                SI_BUS, NULL,
330 		                osh != SI_OSH ? &(ksii.vars) : NULL,
331 		                osh != SI_OSH ? &(ksii.varsz) : NULL) == NULL) {
332 			SI_ERROR(("si_kattach: si_doattach failed\n"));
333 			REG_UNMAP(regs);
334 			return NULL;
335 		}
336 		REG_UNMAP(regs);
337 
338 		/* save ticks normalized to ms for si_watchdog_ms() */
339 		if (PMUCTL_ENAB(&ksii.pub)) {
340 			/* based on 32KHz ILP clock */
341 			wd_msticks = 32;
342 		} else {
343 #if !defined(BCMDONGLEHOST)
344 			if (CCREV(ksii.pub.ccrev) < 18)
345 				wd_msticks = si_clock(&ksii.pub) / 1000;
346 			else
347 				wd_msticks = si_alp_clock(&ksii.pub) / 1000;
348 #else
349 			wd_msticks = ALP_CLOCK / 1000;
350 #endif /* !defined(BCMDONGLEHOST) */
351 		}
352 
353 		ksii_attached = TRUE;
354 		SI_MSG(("si_kattach done. ccrev = %d, wd_msticks = %d\n",
355 		        CCREV(ksii.pub.ccrev), wd_msticks));
356 	}
357 
358 	return &ksii.pub;
359 }
360 
361 static bool
362 BCMATTACHFN(si_buscore_prep)(si_info_t *sii, uint bustype, uint devid, void *sdh)
363 {
364 	BCM_REFERENCE(sdh);
365 	BCM_REFERENCE(devid);
366 
367 #if !defined(BCMDONGLEHOST)
368 	/* kludge to enable the clock on the 4306 which lacks a slowclock */
369 	if (BUSTYPE(bustype) == PCI_BUS && !si_ispcie(sii))
370 		si_clkctl_xtal(&sii->pub, XTAL|PLL, ON);
371 #endif /* !defined(BCMDONGLEHOST) */
372 
373 #if defined(BCMSDIO) && defined(BCMDONGLEHOST) && !defined(BCMSDIOLITE)
374 	/* PR 39902, 43618, 44891, 41539 -- avoid backplane accesses that may
375 	 * cause SDIO clock requests before a stable ALP clock.  Originally had
376 	 * this later (just before srom_var_init() below) to guarantee ALP for
377 	 * CIS read, but due to these PRs moving it here before backplane use.
378 	 */
379 	/* As it precedes any backplane access, can't check chipid; but may
380 	 * be able to qualify with devid if underlying SDIO allows.  But should
381 	 * be ok for all our SDIO (4318 doesn't support clock and pullup regs,
382  * but the access attempts don't seem to hurt.)  Might eliminate the need
383  * for ALP for CIS at all if the underlying SDIO uses CMD53...
384 	 */
385 	if (BUSTYPE(bustype) == SDIO_BUS) {
386 		int err;
387 		uint8 clkset;
388 
389 		/* Try forcing SDIO core to do ALPAvail request only */
390 		clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
391 		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
392 		if (!err) {
393 			uint8 clkval;
394 
395 			/* If register supported, wait for ALPAvail and then force ALP */
396 			clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, NULL);
397 			if ((clkval & ~SBSDIO_AVBITS) == clkset) {
398 				SPINWAIT(((clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
399 					SBSDIO_FUNC1_CHIPCLKCSR, NULL)), !SBSDIO_ALPAV(clkval)),
400 					PMU_MAX_TRANSITION_DLY);
401 				if (!SBSDIO_ALPAV(clkval)) {
402 					SI_ERROR(("timeout on ALPAV wait, clkval 0x%02x\n",
403 						clkval));
404 					return FALSE;
405 				}
406 				clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
407 				bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
408 					clkset, &err);
409 				/* PR 40613: account for possible ALP delay */
410 				OSL_DELAY(65);
411 			}
412 		}
413 
414 		/* Also, disable the extra SDIO pull-ups */
415 		bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
416 	}
417 
418 #ifdef BCMSPI
419 	/* Avoid backplane accesses before wake-wlan (i.e. htavail) for spi.
420 	 * F1 read accesses may return correct data but with data-not-available dstatus bit set.
421 	 */
422 	if (BUSTYPE(bustype) == SPI_BUS) {
423 
424 		int err;
425 		uint32 regdata;
426 		/* wake up wlan function :WAKE_UP goes as HT_AVAIL request in hardware */
427 		regdata = bcmsdh_cfg_read_word(sdh, SDIO_FUNC_0, SPID_CONFIG, NULL);
428 		SI_MSG(("F0 REG0 rd = 0x%x\n", regdata));
429 		regdata |= WAKE_UP;
430 
431 		bcmsdh_cfg_write_word(sdh, SDIO_FUNC_0, SPID_CONFIG, regdata, &err);
432 
433 		/* It takes time for wakeup to take effect. */
434 		OSL_DELAY(100000);
435 	}
436 #endif /* BCMSPI */
437 #endif /* BCMSDIO && BCMDONGLEHOST && !BCMSDIOLITE */
438 
439 	return TRUE;
440 }
441 
442 /* note: this function is used by dhd */
443 uint32
444 si_get_pmu_reg_addr(si_t *sih, uint32 offset)
445 {
446 	si_info_t *sii = SI_INFO(sih);
447 	uint32 pmuaddr = INVALID_ADDR;
448 	uint origidx = 0;
449 
450 	SI_MSG(("si_get_pmu_reg_addr: pmu access, offset: %x\n", offset));
451 	if (!(sii->pub.cccaps & CC_CAP_PMU)) {
452 		goto done;
453 	}
454 	if (AOB_ENAB(&sii->pub)) {
455 		uint pmucoreidx;
456 		pmuregs_t *pmu;
457 		SI_MSG(("si_get_pmu_reg_addr: AOBENAB: %x\n", offset));
458 		origidx = sii->curidx;
459 		pmucoreidx = si_findcoreidx(&sii->pub, PMU_CORE_ID, 0);
460 		pmu = si_setcoreidx(&sii->pub, pmucoreidx);
461 		/* note: this function is used by dhd and possible 64 bit compilation needs
462 		 * a cast to (unsigned long) for avoiding a compilation error.
463 		 */
464 		pmuaddr = (uint32)(uintptr)((volatile uint8*)pmu + offset);
465 		si_setcoreidx(sih, origidx);
466 	} else
467 		pmuaddr = SI_ENUM_BASE(sih) + offset;
468 
469 done:
470 	SI_MSG(("%s: addrRET: %x\n", __FUNCTION__, pmuaddr));
471 	return pmuaddr;
472 }
473 
474 static bool
475 BCMATTACHFN(si_buscore_setup)(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin,
476 	uint *origidx, volatile const void *regs)
477 {
478 	const si_cores_info_t *cores_info = sii->cores_info;
479 	bool pci, pcie, pcie_gen2 = FALSE;
480 	uint i;
481 	uint pciidx, pcieidx, pcirev, pcierev;
482 
483 #if defined(AXI_TIMEOUTS_NIC) || defined(AXI_TIMEOUTS)
484 	/* first, enable backplane timeouts */
485 	si_slave_wrapper_add(&sii->pub);
486 #endif
487 	sii->curidx = 0;
488 
489 	cc = si_setcoreidx(&sii->pub, SI_CC_IDX);
490 	ASSERT((uintptr)cc);
491 
492 	/* get chipcommon rev */
493 	sii->pub.ccrev = (int)si_corerev(&sii->pub);
494 
495 	/* get chipcommon chipstatus */
496 	if (CCREV(sii->pub.ccrev) >= 11)
497 		sii->pub.chipst = R_REG(sii->osh, &cc->chipstatus);
498 
499 	/* get chipcommon capabilities */
500 	sii->pub.cccaps = R_REG(sii->osh, &cc->capabilities);
501 	/* get chipcommon extended capabilities */
502 
503 	if (CCREV(sii->pub.ccrev) >= 35) /* PR77565 */
504 		sii->pub.cccaps_ext = R_REG(sii->osh, &cc->capabilities_ext);
505 
506 	/* get pmu rev and caps */
507 	if (sii->pub.cccaps & CC_CAP_PMU) {
508 		if (AOB_ENAB(&sii->pub)) {
509 			uint pmucoreidx;
510 			pmuregs_t *pmu;
511 
512 			pmucoreidx = si_findcoreidx(&sii->pub, PMU_CORE_ID, 0);
513 			if (!GOODIDX(pmucoreidx, sii->numcores)) {
514 				SI_ERROR(("si_buscore_setup: si_findcoreidx failed\n"));
515 				return FALSE;
516 			}
517 
518 			pmu = si_setcoreidx(&sii->pub, pmucoreidx);
519 			sii->pub.pmucaps = R_REG(sii->osh, &pmu->pmucapabilities);
520 			si_setcoreidx(&sii->pub, SI_CC_IDX);
521 
522 			sii->pub.gcirev = si_corereg(&sii->pub, GCI_CORE_IDX(&sii->pub),
523 				GCI_OFFSETOF(&sii->pub, gci_corecaps0), 0, 0) & GCI_CAP0_REV_MASK;
524 
525 			if (GCIREV(sii->pub.gcirev) >= 9) {
526 				sii->pub.lhlrev = si_corereg(&sii->pub, GCI_CORE_IDX(&sii->pub),
527 					OFFSETOF(gciregs_t, lhl_core_capab_adr), 0, 0) &
528 					LHL_CAP_REV_MASK;
529 			} else {
530 				sii->pub.lhlrev = NOREV;
531 			}
532 
533 		} else
534 			sii->pub.pmucaps = R_REG(sii->osh, &cc->pmucapabilities);
535 
536 		sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK;
537 	}
538 
539 	SI_MSG(("Chipc: rev %d, caps 0x%x, chipst 0x%x pmurev %d, pmucaps 0x%x\n",
540 		CCREV(sii->pub.ccrev), sii->pub.cccaps, sii->pub.chipst, sii->pub.pmurev,
541 		sii->pub.pmucaps));
542 
543 	/* figure out bus/original core idx */
544 	/* note for PCI_BUS the buscoretype variable is setup in ai_scan() */
545 	if (BUSTYPE(sii->pub.bustype) != PCI_BUS) {
546 		sii->pub.buscoretype = NODEV_CORE_ID;
547 	}
548 	sii->pub.buscorerev = NOREV;
549 	sii->pub.buscoreidx = BADIDX;
550 
551 	pci = pcie = FALSE;
552 	pcirev = pcierev = NOREV;
553 	pciidx = pcieidx = BADIDX;
554 
555 	/* This loop can be optimized */
556 	for (i = 0; i < sii->numcores; i++) {
557 		uint cid, crev;
558 
559 		si_setcoreidx(&sii->pub, i);
560 		cid = si_coreid(&sii->pub);
561 		crev = si_corerev(&sii->pub);
562 
563 		/* Display cores found */
564 		if (CHIPTYPE(sii->pub.socitype) != SOCI_NCI) {
565 			SI_VMSG(("CORE[%d]: id 0x%x rev %d base 0x%x size:%x regs 0x%p\n",
566 				i, cid, crev, cores_info->coresba[i], cores_info->coresba_size[i],
567 				OSL_OBFUSCATE_BUF(cores_info->regs[i])));
568 		}
569 
570 		if (BUSTYPE(bustype) == SI_BUS) {
571 			/* now look at the chipstatus register to figure out the package */
572 			/* this should be a general change to cover all the chips */
573 			/* this also should validate the build where the dongle is built */
574 			/* for SDIO but downloaded on PCIE dev */
575 #ifdef BCMPCIEDEV_ENABLED
576 			if (cid == PCIE2_CORE_ID) {
577 				pcieidx = i;
578 				pcierev = crev;
579 				pcie = TRUE;
580 				pcie_gen2 = TRUE;
581 			}
582 #endif
583 			/* rest fill it up here */
584 
585 		} else if (BUSTYPE(bustype) == PCI_BUS) {
586 			if (cid == PCI_CORE_ID) {
587 				pciidx = i;
588 				pcirev = crev;
589 				pci = TRUE;
590 			} else if ((cid == PCIE_CORE_ID) || (cid == PCIE2_CORE_ID)) {
591 				pcieidx = i;
592 				pcierev = crev;
593 				pcie = TRUE;
594 				if (cid == PCIE2_CORE_ID)
595 					pcie_gen2 = TRUE;
596 			}
597 		}
598 #ifdef BCMSDIO
599 		else if (((BUSTYPE(bustype) == SDIO_BUS) ||
600 		          (BUSTYPE(bustype) == SPI_BUS)) &&
601 		         (cid == SDIOD_CORE_ID)) {
602 			sii->pub.buscorerev = (int16)crev;
603 			sii->pub.buscoretype = (uint16)cid;
604 			sii->pub.buscoreidx = (uint16)i;
605 		}
606 #endif /* BCMSDIO */
607 
608 		/* find the core idx before entering this func. */
609 		if (CHIPTYPE(sii->pub.socitype) == SOCI_NCI) {
610 			if (regs == sii->curmap) {
611 				*origidx = i;
612 			}
613 		} else {
614 			/* find the core idx before entering this func. */
615 			if ((savewin && (savewin == cores_info->coresba[i])) ||
616 			(regs == cores_info->regs[i])) {
617 				*origidx = i;
618 			}
619 		}
620 	}
621 
622 #if !defined(BCMDONGLEHOST)
623 	if (pci && pcie) {
624 		if (si_ispcie(sii))
625 			pci = FALSE;
626 		else
627 			pcie = FALSE;
628 	}
629 #endif /* !defined(BCMDONGLEHOST) */
630 
631 #if defined(PCIE_FULL_DONGLE)
632 	if (pcie) {
633 		if (pcie_gen2)
634 			sii->pub.buscoretype = PCIE2_CORE_ID;
635 		else
636 			sii->pub.buscoretype = PCIE_CORE_ID;
637 		sii->pub.buscorerev = (int16)pcierev;
638 		sii->pub.buscoreidx = (uint16)pcieidx;
639 	}
640 	BCM_REFERENCE(pci);
641 	BCM_REFERENCE(pcirev);
642 	BCM_REFERENCE(pciidx);
643 #else
644 	if (pci) {
645 		sii->pub.buscoretype = PCI_CORE_ID;
646 		sii->pub.buscorerev = (int16)pcirev;
647 		sii->pub.buscoreidx = (uint16)pciidx;
648 	} else if (pcie) {
649 		if (pcie_gen2)
650 			sii->pub.buscoretype = PCIE2_CORE_ID;
651 		else
652 			sii->pub.buscoretype = PCIE_CORE_ID;
653 		sii->pub.buscorerev = (int16)pcierev;
654 		sii->pub.buscoreidx = (uint16)pcieidx;
655 	}
656 #endif /* defined(PCIE_FULL_DONGLE) */
657 
658 	SI_VMSG(("Buscore id/type/rev %d/0x%x/%d\n", sii->pub.buscoreidx, sii->pub.buscoretype,
659 	         sii->pub.buscorerev));
660 
661 #if !defined(BCMDONGLEHOST)
662 	/* fixup necessary chip/core configurations */
663 	if (!FWSIGN_ENAB() && BUSTYPE(sii->pub.bustype) == PCI_BUS) {
664 		if (SI_FAST(sii)) {
665 			if (!sii->pch &&
666 			    ((sii->pch = (void *)(uintptr)pcicore_init(&sii->pub, sii->osh,
667 				(volatile void *)PCIEREGS(sii))) == NULL))
668 				return FALSE;
669 		}
670 		if (si_pci_fixcfg(&sii->pub)) {
671 			SI_ERROR(("si_buscore_setup: si_pci_fixcfg failed\n"));
672 			return FALSE;
673 		}
674 	}
675 #endif /* !defined(BCMDONGLEHOST) */
676 
677 #if defined(BCMSDIO) && defined(BCMDONGLEHOST)
678 	/* Make sure any on-chip ARM is off (in case strapping is wrong), or downloaded code was
679 	 * already running.
680 	 */
681 	if ((BUSTYPE(bustype) == SDIO_BUS) || (BUSTYPE(bustype) == SPI_BUS)) {
682 		if (si_setcore(&sii->pub, ARM7S_CORE_ID, 0) ||
683 		    si_setcore(&sii->pub, ARMCM3_CORE_ID, 0))
684 			si_core_disable(&sii->pub, 0);
685 	}
686 #endif /* BCMSDIO && BCMDONGLEHOST */
687 
688 	/* return to the original core */
689 	si_setcoreidx(&sii->pub, *origidx);
690 
691 	return TRUE;
692 }
693 
694 #if !defined(BCMDONGLEHOST) /* if not a DHD build */
695 
696 static const char BCMATTACHDATA(rstr_boardvendor)[] = "boardvendor";
697 static const char BCMATTACHDATA(rstr_boardtype)[] = "boardtype";
698 #if defined(BCMPCIEDEV_SROM_FORMAT)
699 static const char BCMATTACHDATA(rstr_subvid)[] = "subvid";
700 #endif /* defined(BCMPCIEDEV_SROM_FORMAT) */
701 #ifdef BCMSDIO
702 static const char BCMATTACHDATA(rstr_manfid)[] = "manfid";
703 #endif
704 static const char BCMATTACHDATA(rstr_prodid)[] = "prodid";
705 static const char BCMATTACHDATA(rstr_boardrev)[] = "boardrev";
706 static const char BCMATTACHDATA(rstr_boardflags)[] = "boardflags";
707 static const char BCMATTACHDATA(rstr_boardflags4)[] = "boardflags4";
708 static const char BCMATTACHDATA(rstr_xtalfreq)[] = "xtalfreq";
709 static const char BCMATTACHDATA(rstr_muxenab)[] = "muxenab";
710 static const char BCMATTACHDATA(rstr_gpiopulldown)[] = "gpdn";
711 static const char BCMATTACHDATA(rstr_devid)[] = "devid";
712 static const char BCMATTACHDATA(rstr_wl0id)[] = "wl0id";
713 static const char BCMATTACHDATA(rstr_devpathD)[] = "devpath%d";
714 static const char BCMATTACHDATA(rstr_D_S)[] = "%d:%s";
715 static const char BCMATTACHDATA(rstr_swdenab)[] = "swdenable";
716 static const char BCMATTACHDATA(rstr_spurconfig)[] = "spurconfig";
717 static const char BCMATTACHDATA(rstr_lpflags)[] = "lpflags";
718 static const char BCMATTACHDATA(rstr_armclk)[] = "armclk";
719 static const char BCMATTACHDATA(rstr_rfldo3p3_cap_war)[] = "rfldo3p3_cap_war";
720 #if defined(SECI_UART)
721 static const char BCMATTACHDATA(rstr_fuart_pup_rx_cts)[] = "fuart_pup_rx_cts";
722 #endif /* defined(SECI_UART) */
723 
724 static uint32
725 BCMATTACHFN(si_fixup_vid_overrides)(si_info_t *sii, char *pvars, uint32 conf_vid)
726 {
727 	BCM_REFERENCE(pvars);
728 
729 	if ((sii->pub.boardvendor != VENDOR_APPLE)) {
730 		return conf_vid;
731 	}
732 
733 	switch (sii->pub.boardtype)
734 	{
735 		/* Check for the SROM value */
736 		case BCM94360X51P2:
737 		case BCM94360X29C:
738 		case BCM94360X29CP2:
739 		case BCM94360X51:
740 		case BCM943602X87:
741 		case BCM943602X238D:
742 			/* Take the PCIe configuration space subsystem ID */
743 			sii->pub.boardtype = (conf_vid >> 16) & 0xffff;
744 			break;
745 
746 		default:
747 			/* Do nothing */
748 			break;
749 	}
750 
751 	return conf_vid;
752 }
753 
754 static void
755 BCMATTACHFN(si_nvram_process)(si_info_t *sii, char *pvars)
756 {
757 	uint w = 0;
758 
759 	if (FWSIGN_ENAB()) {
760 		return;
761 	}
762 
763 	/* get boardtype and boardrev */
764 	switch (BUSTYPE(sii->pub.bustype)) {
765 	case PCI_BUS:
766 		/* do a pci config read to get subsystem id and subvendor id */
767 		w = OSL_PCI_READ_CONFIG(sii->osh, PCI_CFG_SVID, sizeof(uint32));
768 
769 		/* Let nvram variables override subsystem Vend/ID */
770 		if ((sii->pub.boardvendor = (uint16)si_getdevpathintvar(&sii->pub,
771 			rstr_boardvendor)) == 0) {
772 #ifdef BCMHOSTVARS
773 			if ((w & 0xffff) == 0)
774 				sii->pub.boardvendor = VENDOR_BROADCOM;
775 			else
776 #endif /* BCMHOSTVARS */
777 				sii->pub.boardvendor = w & 0xffff;
778 		} else {
779 			SI_ERROR(("Overriding boardvendor: 0x%x instead of 0x%x\n",
780 				sii->pub.boardvendor, w & 0xffff));
781 		}
782 
783 		if ((sii->pub.boardtype = (uint16)si_getdevpathintvar(&sii->pub, rstr_boardtype))
784 			== 0) {
785 			if ((sii->pub.boardtype = getintvar(pvars, rstr_boardtype)) == 0)
786 				sii->pub.boardtype = (w >> 16) & 0xffff;
787 		} else {
788 			SI_ERROR(("Overriding boardtype: 0x%x instead of 0x%x\n",
789 				sii->pub.boardtype, (w >> 16) & 0xffff));
790 		}
791 
792 		/* Override high priority fixups */
793 		if (!FWSIGN_ENAB()) {
794 			si_fixup_vid_overrides(sii, pvars, w);
795 		}
796 		break;
797 
798 #ifdef BCMSDIO
799 	case SDIO_BUS:
800 		sii->pub.boardvendor = getintvar(pvars, rstr_manfid);
801 		sii->pub.boardtype = getintvar(pvars, rstr_prodid);
802 		break;
803 
804 	case SPI_BUS:
805 		sii->pub.boardvendor = VENDOR_BROADCOM;
806 		sii->pub.boardtype = QT4710_BOARD;
807 		break;
808 #endif
809 
810 	case SI_BUS:
811 #ifdef BCMPCIEDEV_SROM_FORMAT
812 		if (BCMPCIEDEV_ENAB() && si_is_sprom_available(&sii->pub) && pvars &&
813 			getvar(pvars, rstr_subvid)) {
814 			sii->pub.boardvendor = getintvar(pvars, rstr_subvid);
815 		} else
816 #endif
817 		sii->pub.boardvendor = VENDOR_BROADCOM;
818 		if (pvars == NULL || ((sii->pub.boardtype = getintvar(pvars, rstr_prodid)) == 0))
819 			if ((sii->pub.boardtype = getintvar(pvars, rstr_boardtype)) == 0)
820 				sii->pub.boardtype = 0xffff;
821 
822 		if (CHIPTYPE(sii->pub.socitype) == SOCI_UBUS) {
823 			/* do a pci config read to get subsystem id and subvendor id */
824 			w = OSL_PCI_READ_CONFIG(sii->osh, PCI_CFG_SVID, sizeof(uint32));
825 			sii->pub.boardvendor = w & 0xffff;
826 			sii->pub.boardtype = (w >> 16) & 0xffff;
827 		}
828 		break;
829 
830 	default:
831 		break;
832 	}
833 
834 	if (sii->pub.boardtype == 0) {
835 		SI_ERROR(("si_doattach: unknown board type\n"));
836 		ASSERT(sii->pub.boardtype);
837 	}
838 
839 	sii->pub.lpflags = getintvar(pvars, rstr_lpflags);
840 	sii->pub.boardrev = getintvar(pvars, rstr_boardrev);
841 	sii->pub.boardflags = getintvar(pvars, rstr_boardflags);
842 
843 #ifdef BCM_SDRBL
844 	sii->pub.boardflags2 |= ((!CHIP_HOSTIF_USB(&(sii->pub))) ? ((si_arm_sflags(&(sii->pub))
845 				 & SISF_SDRENABLE) ?  BFL2_SDR_EN:0):
846 				 (((uint)getintvar(pvars, "boardflags2")) & BFL2_SDR_EN));
847 #endif /* BCM_SDRBL */
848 	sii->pub.boardflags4 = getintvar(pvars, rstr_boardflags4);
849 
850 }
851 
852 #endif /* !defined(BCMDONGLEHOST) */
853 
854 #if defined(CONFIG_XIP) && defined(BCMTCAM)
855 extern uint8 patch_pair;
856 #endif /* CONFIG_XIP && BCMTCAM */
857 
858 #if !defined(BCMDONGLEHOST)
859 typedef struct {
860 	uint8 uart_tx;
861 	uint32 uart_rx;
862 } si_mux_uartopt_t;
863 
864 /* note: each index corresponds to MUXENAB43012_HOSTWAKE_MASK >> shift - 1 */
865 static const uint8 BCMATTACHDATA(mux43012_hostwakeopt)[] = {
866 		CC_PIN_GPIO_00
867 };
868 
869 static const si_mux_uartopt_t BCMATTACHDATA(mux_uartopt)[] = {
870 		{CC_PIN_GPIO_00, CC_PIN_GPIO_01},
871 		{CC_PIN_GPIO_05, CC_PIN_GPIO_04},
872 		{CC_PIN_GPIO_15, CC_PIN_GPIO_14},
873 };
874 
875 /* note: each index corresponds to MUXENAB_DEF_HOSTWAKE mask >> shift - 1 */
876 static const uint8 BCMATTACHDATA(mux_hostwakeopt)[] = {
877 		CC_PIN_GPIO_00,
878 };
879 
880 #ifdef SECI_UART
881 #define NUM_SECI_UART_GPIOS	4
882 static bool fuart_pullup_rx_cts_enab = FALSE;
883 static bool fast_uart_init = FALSE;
884 static uint32 fast_uart_tx;
885 static uint32 fast_uart_functionsel;
886 static uint32 fast_uart_pup;
887 static uint32 fast_uart_rx;
888 static uint32 fast_uart_cts_in;
889 #endif /* SECI_UART */
890 
891 void
892 BCMATTACHFN(si_swdenable)(si_t *sih, uint32 swdflag)
893 {
894 	/* FIXME Need a more generic test for SWD instead of check on specific chipid */
895 	switch (CHIPID(sih->chip)) {
896 	case BCM4369_CHIP_GRPID:
897 	case BCM4362_CHIP_GRPID:
898 		if (swdflag) {
899 			/* Enable ARM debug clk, which is required for the ARM debug
900 			 * unit to operate
901 			 */
902 			si_pmu_chipcontrol(sih, PMU_CHIPCTL5, (1 << ARMCR4_DBG_CLK_BIT),
903 				(1 << ARMCR4_DBG_CLK_BIT));
904 			/* Force HT clock in Chipcommon. The HT clock is required for backplane
905 			 * access via SWD
906 			 */
907 			si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), CCS_FORCEHT,
908 				CCS_FORCEHT);
909 			/* Set TAP_SEL so that ARM is the first and the only TAP on the TAP chain.
910 			 * Must do a chip reset to clear this bit
911 			 */
912 			si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, jtagctrl),
913 				JCTRL_TAPSEL_BIT, JCTRL_TAPSEL_BIT);
914 			SI_MSG(("si_swdenable: set arm_dbgclk, ForceHTClock and tap_sel bit\n"));
915 		}
916 		break;
917 	default:
918 		/* swdenable specified for an unsupported chip */
919 		ASSERT(0);
920 		break;
921 	}
922 }
923 
924 /** want to have this available all the time to switch mux for debugging */
925 void
926 BCMATTACHFN(si_muxenab)(si_t *sih, uint32 w)
927 {
928 	uint32 chipcontrol, pmu_chipcontrol;
929 
930 	pmu_chipcontrol = si_pmu_chipcontrol(sih, 1, 0, 0);
931 	chipcontrol = si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, chipcontrol),
932 	                         0, 0);
933 
934 	switch (CHIPID(sih->chip)) {
935 	case BCM4360_CHIP_ID:
936 	case BCM43460_CHIP_ID:
937 	case BCM4352_CHIP_ID:
938 	case BCM43526_CHIP_ID:
939 	CASE_BCM43602_CHIP:
940 		if (w & MUXENAB_UART)
941 			chipcontrol |= CCTRL4360_UART_MODE;
942 		break;
943 
944 	case BCM43012_CHIP_ID:
945 	case BCM43013_CHIP_ID:
946 	case BCM43014_CHIP_ID:
947 		/*
948 		 * 0x10 : use GPIO0 as host wake up pin
949 		 * 0x20 ~ 0xf0: Reserved
950 		 */
951 		if (w & MUXENAB43012_HOSTWAKE_MASK) {
952 			uint8 hostwake = 0;
953 			uint8 hostwake_ix = MUXENAB43012_GETIX(w, HOSTWAKE);
954 
955 			if (hostwake_ix >
956 				sizeof(mux43012_hostwakeopt)/sizeof(mux43012_hostwakeopt[0]) - 1) {
957 				SI_ERROR(("si_muxenab: wrong index %d for hostwake\n",
958 					hostwake_ix));
959 				break;
960 			}
961 
962 			hostwake = mux43012_hostwakeopt[hostwake_ix];
963 			si_gci_set_functionsel(sih, hostwake, CC_FNSEL_MISC1);
964 		}
965 		break;
966 
967 	case BCM4385_CHIP_GRPID:
968 	case BCM4387_CHIP_GRPID:
969 		if (w & MUXENAB_DEF_UART_MASK) {
970 			uint32 uart_rx = 0, uart_tx = 0;
971 			uint8 uartopt_idx = (w & MUXENAB_DEF_UART_MASK) - 1;
972 			uint8 uartopt_size = sizeof(mux_uartopt)/sizeof(mux_uartopt[0]);
973 
974 			if (uartopt_idx < uartopt_size) {
975 				uart_rx = mux_uartopt[uartopt_idx].uart_rx;
976 				uart_tx = mux_uartopt[uartopt_idx].uart_tx;
977 #ifdef BOOTLOADER_CONSOLE_OUTPUT
978 				uart_rx = 0;
979 				uart_tx = 1;
980 #endif
981 				if (CHIPREV(sih->chiprev) >= 3) {
982 					si_gci_set_functionsel(sih, uart_rx, CC_FNSEL_GPIO1);
983 					si_gci_set_functionsel(sih, uart_tx, CC_FNSEL_GPIO1);
984 				} else {
985 					si_gci_set_functionsel(sih, uart_rx, CC_FNSEL_GPIO0);
986 					si_gci_set_functionsel(sih, uart_tx, CC_FNSEL_GPIO0);
987 				}
988 			} else {
989 				SI_MSG(("si_muxenab: Invalid uart OTP setting\n"));
990 			}
991 		}
992 		if (w & MUXENAB_DEF_HOSTWAKE_MASK) {
993 			uint8 hostwake = 0;
994 			/*
995 			* SDIO
996 			* 0x10 : use GPIO0 as host wake up pin
997 			*/
998 			uint8 hostwake_ix = MUXENAB_DEF_GETIX(w, HOSTWAKE);
999 
1000 			if (hostwake_ix > (sizeof(mux_hostwakeopt) /
1001 				sizeof(mux_hostwakeopt[0]) - 1)) {
1002 				SI_ERROR(("si_muxenab: wrong index %d for hostwake\n",
1003 					hostwake_ix));
1004 				break;
1005 			}
1006 
1007 			hostwake = mux_hostwakeopt[hostwake_ix];
1008 			si_gci_set_functionsel(sih, hostwake, CC_FNSEL_GPIO0);
1009 		}
1010 
1011 		break;
1012 
1013 	case BCM4369_CHIP_GRPID:
1014 	case BCM4362_CHIP_GRPID:
1015 		/* TBD fill */
1016 		if (w & MUXENAB_HOST_WAKE) {
1017 			si_gci_set_functionsel(sih, CC_PIN_GPIO_00, CC_FNSEL_MISC1);
1018 		}
1019 		break;
1020 	case BCM4376_CHIP_GRPID:
1021 	case BCM4378_CHIP_GRPID:
1022 		/* TBD fill */
1023 		break;
1024 	case BCM4388_CHIP_GRPID:
1025 	case BCM4389_CHIP_GRPID:
1026 	case BCM4397_CHIP_GRPID:
1027 		/* TBD fill */
1028 		break;
1029 	default:
1030 		/* muxenab specified for an unsupported chip */
1031 		ASSERT(0);
1032 		break;
1033 	}
1034 
1035 	/* write both updated values to hw */
1036 	si_pmu_chipcontrol(sih, 1, ~0, pmu_chipcontrol);
1037 	si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, chipcontrol),
1038 	           ~0, chipcontrol);
1039 }
1040 
1041 /** ltecx GCI reg access */
1042 uint32
1043 BCMPOSTTRAPFN(si_gci_direct)(si_t *sih, uint offset, uint32 mask, uint32 val)
1044 {
1045 	/* gci direct reg access */
1046 	return si_corereg(sih, GCI_CORE_IDX(sih), offset, mask, val);
1047 }
1048 
1049 uint32
1050 si_gci_indirect(si_t *sih, uint regidx, uint offset, uint32 mask, uint32 val)
1051 {
1052 	/* gci indirect reg access */
1053 	si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_indirect_addr), ~0, regidx);
1054 	return si_corereg(sih, GCI_CORE_IDX(sih), offset, mask, val);
1055 }
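
/*
 * Note on the mask/val convention shared by these accessors (same as
 * si_corereg): mask 0 performs a plain read-back, mask ~0 overwrites the
 * whole register, and any other mask does a read-modify-write of just those
 * bits. For example (illustrative only):
 *
 *	cur = si_gci_direct(sih, GCI_OFFSETOF(sih, gci_corectrl), 0, 0);
 *	si_gci_direct(sih, GCI_OFFSETOF(sih, gci_corectrl), ALLONES_32, newval);
 */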
1056 
1057 uint32
1058 si_gci_input(si_t *sih, uint reg)
1059 {
1060 	/* gci_input[] */
1061 	return si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_input[reg]), 0, 0);
1062 }
1063 
1064 uint32
1065 si_gci_output(si_t *sih, uint reg, uint32 mask, uint32 val)
1066 {
1067 	/* gci_output[] */
1068 	return si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_output[reg]), mask, val);
1069 }
1070 
1071 uint32
1072 si_gci_int_enable(si_t *sih, bool enable)
1073 {
1074 	uint offs;
1075 
1076 	/* enable GCI interrupt */
1077 	offs = OFFSETOF(chipcregs_t, intmask);
1078 	return (si_corereg(sih, SI_CC_IDX, offs, CI_ECI, (enable ? CI_ECI : 0)));
1079 }
1080 
1081 void
1082 si_gci_reset(si_t *sih)
1083 {
1084 	int i;
1085 
1086 	if (gci_reset_done == FALSE) {
1087 		gci_reset_done = TRUE;
1088 
1089 		/* Set ForceRegClk and ForceSeciClk */
1090 		si_gci_direct(sih, GCI_OFFSETOF(sih, gci_corectrl),
1091 			((1 << GCI_CCTL_FREGCLK_OFFSET)
1092 			|(1 << GCI_CCTL_FSECICLK_OFFSET)),
1093 			((1 << GCI_CCTL_FREGCLK_OFFSET)
1094 			|(1 << GCI_CCTL_FSECICLK_OFFSET)));
1095 
1096 		/* Some Delay */
1097 		for (i = 0; i < 2; i++) {
1098 			si_gci_direct(sih, GCI_OFFSETOF(sih, gci_corectrl), 0, 0);
1099 		}
1100 		/* Reset SECI block */
1101 		si_gci_direct(sih, GCI_OFFSETOF(sih, gci_corectrl),
1102 			((1 << GCI_CCTL_SECIRST_OFFSET)
1103 			|(1 << GCI_CCTL_RSTSL_OFFSET)
1104 			|(1 << GCI_CCTL_RSTOCC_OFFSET)),
1105 			((1 << GCI_CCTL_SECIRST_OFFSET)
1106 			|(1 << GCI_CCTL_RSTSL_OFFSET)
1107 			|(1 << GCI_CCTL_RSTOCC_OFFSET)));
1108 		/* Some Delay */
1109 		for (i = 0; i < 10; i++) {
1110 			si_gci_direct(sih, GCI_OFFSETOF(sih, gci_corectrl), 0, 0);
1111 		}
1112 		/* Remove SECI Reset */
1113 		si_gci_direct(sih, GCI_OFFSETOF(sih, gci_corectrl),
1114 			((1 << GCI_CCTL_SECIRST_OFFSET)
1115 			|(1 << GCI_CCTL_RSTSL_OFFSET)
1116 			|(1 << GCI_CCTL_RSTOCC_OFFSET)),
1117 			((0 << GCI_CCTL_SECIRST_OFFSET)
1118 			|(0 << GCI_CCTL_RSTSL_OFFSET)
1119 			|(0 << GCI_CCTL_RSTOCC_OFFSET)));
1120 
1121 		/* Some Delay */
1122 		for (i = 0; i < 2; i++) {
1123 			si_gci_direct(sih, GCI_OFFSETOF(sih, gci_corectrl), 0, 0);
1124 		}
1125 
1126 		/* Clear ForceRegClk and ForceSeciClk */
1127 		si_gci_direct(sih, GCI_OFFSETOF(sih, gci_corectrl),
1128 			((1 << GCI_CCTL_FREGCLK_OFFSET)
1129 			|(1 << GCI_CCTL_FSECICLK_OFFSET)),
1130 			((0 << GCI_CCTL_FREGCLK_OFFSET)
1131 			|(0 << GCI_CCTL_FSECICLK_OFFSET)));
1132 	}
1133 	/* clear events */
1134 	for (i = 0; i < 32; i++) {
1135 		si_gci_direct(sih, GCI_OFFSETOF(sih, gci_event[i]), ALLONES_32, 0x00);
1136 	}
1137 }
1138 
1139 void
1140 si_gci_gpio_chipcontrol_ex(si_t *sih, uint8 gci_gpio, uint8 opt)
1141 {
1142 	si_gci_gpio_chipcontrol(sih, gci_gpio, opt);
1143 }
1144 
1145 static void
1146 BCMPOSTTRAPFN(si_gci_gpio_chipcontrol)(si_t *sih, uint8 gci_gpio, uint8 opt)
1147 {
1148 	uint32 ring_idx = 0, pos = 0;
1149 
1150 	si_gci_get_chipctrlreg_ringidx_base8(gci_gpio, &ring_idx, &pos);
1151 	SI_MSG(("si_gci_gpio_chipcontrol:rngidx is %d, pos is %d, opt is %d, mask is 0x%04x,"
1152 		" value is 0x%04x\n",
1153 		ring_idx, pos, opt, GCIMASK_8B(pos), GCIPOSVAL_8B(opt, pos)));
1154 
1155 	si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_indirect_addr), ~0, ring_idx);
1156 	si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_gpioctl),
1157 		GCIMASK_8B(pos), GCIPOSVAL_8B(opt, pos));
1158 }
1159 
1160 static uint8
1161 BCMPOSTTRAPFN(si_gci_gpio_reg)(si_t *sih, uint8 gci_gpio, uint8 mask, uint8 value,
1162 	uint32 reg_offset)
1163 {
1164 	uint32 ring_idx = 0, pos = 0; /**< FunctionSel register idx and bits to use */
1165 	uint32 val_32;
1166 
1167 	si_gci_get_chipctrlreg_ringidx_base4(gci_gpio, &ring_idx, &pos);
1168 	SI_MSG(("si_gci_gpio_reg:rngidx is %d, pos is %d, val is %d, mask is 0x%04x,"
1169 		" value is 0x%04x\n",
1170 		ring_idx, pos, value, GCIMASK_4B(pos), GCIPOSVAL_4B(value, pos)));
1171 	si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_indirect_addr), ~0, ring_idx);
1172 
1173 	if (mask || value) {
1174 		/* set operation */
1175 		si_corereg(sih, GCI_CORE_IDX(sih),
1176 			reg_offset, GCIMASK_4B(pos), GCIPOSVAL_4B(value, pos));
1177 	}
1178 	val_32 = si_corereg(sih, GCI_CORE_IDX(sih), reg_offset, 0, 0);
1179 
1180 	value  = (uint8)((val_32 >> pos) & 0xFF);
1181 
1182 	return value;
1183 }
1184 
1185 /**
1186  * In order to route a ChipCommon originated GPIO towards a package pin, both CC and GCI cores have
1187  * to be written to.
1188  * @param[in] sih
1189  * @param[in] gpio   chip specific package pin number. See Toplevel Arch page, GCI chipcontrol reg
1190  *                   section.
1191  * @param[in] mask   chip common gpio mask
1192  * @param[in] val    chip common gpio value
1193  */
1194 void
1195 BCMPOSTTRAPFN(si_gci_enable_gpio)(si_t *sih, uint8 gpio, uint32 mask, uint32 value)
1196 {
1197 	uint32 ring_idx = 0, pos = 0;
1198 
1199 	si_gci_get_chipctrlreg_ringidx_base4(gpio, &ring_idx, &pos);
1200 	SI_MSG(("si_gci_enable_gpio:rngidx is %d, pos is %d, val is %d, mask is 0x%04x,"
1201 		" value is 0x%04x\n",
1202 		ring_idx, pos, value, GCIMASK_4B(pos), GCIPOSVAL_4B(value, pos)));
1203 	si_gci_set_functionsel(sih, gpio, CC_FNSEL_SAMEASPIN);
1204 	si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_indirect_addr), ~0, ring_idx);
1205 
1206 	si_gpiocontrol(sih, mask, 0, GPIO_HI_PRIORITY);
1207 	si_gpioouten(sih, mask, mask, GPIO_HI_PRIORITY);
1208 	si_gpioout(sih, mask, value, GPIO_HI_PRIORITY);
1209 }
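
/*
 * Illustrative usage (the gpio number is an assumption): to drive package
 * pin 7 high as a ChipCommon GPIO, a single call programs both the GCI
 * function select and the CC control/output-enable/output registers:
 *
 *	si_gci_enable_gpio(sih, 7, (1 << 7), (1 << 7));
 */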
1210 
1211 /*
1212  * The above seems to be for gpio output only (forces gpioouten).
1213  * This function is to configure GPIO as input, and accepts a mask of bits.
1214  * Also: doesn't force the gpiocontrol (chipc) functionality, assumes it
1215  * is the default, and rejects the request (BUSY => gpio in use) if it's
1216  * already configured for a different function... but it will override the
1217  * output enable.
1218  */
1219 int
1220 si_gpio_enable(si_t *sih, uint32 mask)
1221 {
1222 	uint bit;
1223 	int fnsel = -1; /* Valid fnsel is a small positive number */
1224 
1225 	BCM_REFERENCE(bit);
1226 	BCM_REFERENCE(fnsel);
1227 
1228 	/* Bail if any bit is explicitly set for some other function */
1229 	if (si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, gpiocontrol), 0, 0) & mask) {
1230 		return BCME_BUSY;
1231 	}
1232 
1233 #if !defined(BCMDONGLEHOST)
1234 	/* Some chips need to be explicitly set */
1235 	switch (CHIPID(sih->chip))
1236 	{
1237 	case BCM4362_CHIP_GRPID:
1238 	case BCM4369_CHIP_GRPID:
1239 	case BCM4376_CHIP_GRPID:
1240 	case BCM4378_CHIP_GRPID:
1241 	case BCM4387_CHIP_GRPID:
1242 		fnsel = CC_FNSEL_SAMEASPIN;
1243 	default:
1244 		;
1245 	}
1246 
1247 	if (fnsel != -1) {
1248 		for (bit = 0; mask; bit++) {
1249 			if (mask & (1 << bit)) {
1250 				si_gci_set_functionsel(sih, bit, (uint8)fnsel);
1251 				mask ^= (1 << bit);
1252 			}
1253 		}
1254 	}
1255 #endif /* !BCMDONGLEHOST */
1256 	si_gpioouten(sih, mask, 0, GPIO_HI_PRIORITY);
1257 
1258 	return BCME_OK;
1259 }
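
/*
 * Illustrative usage (gpio numbers are assumptions): configure GPIO 3 and
 * GPIO 5 as inputs, backing off if either pin is already claimed for a
 * different function:
 *
 *	if (si_gpio_enable(sih, (1 << 3) | (1 << 5)) == BCME_BUSY)
 *		return BCME_BUSY;
 */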
1260 
1261 static const char BCMATTACHDATA(rstr_host_wake_opt)[] = "host_wake_opt";
1262 uint8
1263 BCMATTACHFN(si_gci_host_wake_gpio_init)(si_t *sih)
1264 {
1265 	uint8  host_wake_gpio = CC_GCI_GPIO_INVALID;
1266 	uint32 host_wake_opt;
1267 
1268 	/* parse the device wake opt from nvram */
1269 	/* decode what that means for specific chip */
1270 	if (getvar(NULL, rstr_host_wake_opt) == NULL)
1271 		return host_wake_gpio;
1272 
1273 	host_wake_opt = getintvar(NULL, rstr_host_wake_opt);
1274 	host_wake_gpio = host_wake_opt & 0xff;
1275 	si_gci_host_wake_gpio_enable(sih, host_wake_gpio, FALSE);
1276 
1277 	return host_wake_gpio;
1278 }
1279 
1280 void
1281 BCMPOSTTRAPFN(si_gci_host_wake_gpio_enable)(si_t *sih, uint8 gpio, bool state)
1282 {
1283 	switch (CHIPID(sih->chip)) {
1284 	case BCM4369_CHIP_GRPID:
1285 	case BCM4376_CHIP_GRPID:
1286 	case BCM4378_CHIP_GRPID:
1287 	case BCM4385_CHIP_GRPID:
1288 	case BCM4387_CHIP_GRPID:
1289 	case BCM4388_CHIP_GRPID:
1290 	case BCM4389_CHIP_GRPID:
1291 	case BCM4397_CHIP_GRPID:
1292 	case BCM4362_CHIP_GRPID:
1293 		si_gci_enable_gpio(sih, gpio, 1 << gpio,
1294 			state ? 1 << gpio : 0x00);
1295 		break;
1296 	default:
1297 		SI_ERROR(("host wake not supported for 0x%04x yet\n", CHIPID(sih->chip)));
1298 		break;
1299 	}
1300 }
1301 
1302 void
1303 si_gci_time_sync_gpio_enable(si_t *sih, uint8 gpio, bool state)
1304 {
1305 	switch (CHIPID(sih->chip)) {
1306 	case BCM4369_CHIP_GRPID:
1307 	case BCM4362_CHIP_GRPID:
1308 	case BCM4376_CHIP_GRPID:
1309 	case BCM4378_CHIP_GRPID:
1310 	case BCM4385_CHIP_GRPID:
1311 	case BCM4387_CHIP_GRPID:
1312 		si_gci_enable_gpio(sih, gpio, 1 << gpio,
1313 			state ? 1 << gpio : 0x00);
1314 		break;
1315 	default:
1316 		SI_ERROR(("Time sync not supported for 0x%04x yet\n", CHIPID(sih->chip)));
1317 		break;
1318 	}
1319 }
1320 
1321 #define	TIMESYNC_GPIO_NUM	12 /* Hardcoded for now. Will be removed later */
1322 static const char BCMATTACHDATA(rstr_time_sync_opt)[] = "time_sync_opt";
1323 uint8
1324 BCMATTACHFN(si_gci_time_sync_gpio_init)(si_t *sih)
1325 {
1326 	uint8  time_sync_gpio = TIMESYNC_GPIO_NUM;
1327 	uint32 time_sync_opt;
1328 
1329 	/* parse the device wake opt from nvram */
1330 	/* decode what that means for specific chip */
1331 	if (getvar(NULL, rstr_time_sync_opt) == NULL) {
1332 		time_sync_opt = TIMESYNC_GPIO_NUM;
1333 	} else {
1334 		time_sync_opt = getintvar(NULL, rstr_time_sync_opt);
1335 	}
1336 	switch (CHIPID(sih->chip)) {
1337 	case BCM4369_CHIP_GRPID:
1338 	case BCM4362_CHIP_GRPID:
1339 	case BCM4376_CHIP_GRPID:
1340 	case BCM4378_CHIP_GRPID:
1341 	case BCM4385_CHIP_GRPID:
1342 	case BCM4387_CHIP_GRPID:
1343 		time_sync_gpio = time_sync_opt & 0xff;
1344 		si_gci_enable_gpio(sih, time_sync_gpio,
1345 			1 << time_sync_gpio, 0x00);
1346 		break;
1347 	default:
1348 		SI_ERROR(("time sync not supported for 0x%04x yet\n", CHIPID(sih->chip)));
1349 		break;
1350 	}
1351 
1352 	return time_sync_gpio;
1353 }
1354 
1355 uint8
1356 BCMPOSTTRAPFN(si_gci_gpio_wakemask)(si_t *sih, uint8 gpio, uint8 mask, uint8 value)
1357 {
1358 	si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_wakemask),
1359 		GCI_WAKEMASK_GPIOWAKE, GCI_WAKEMASK_GPIOWAKE);
1360 	return (si_gci_gpio_reg(sih, gpio, mask, value, GCI_OFFSETOF(sih, gci_gpiowakemask)));
1361 }
1362 
1363 uint8
1364 BCMPOSTTRAPFN(si_gci_gpio_intmask)(si_t *sih, uint8 gpio, uint8 mask, uint8 value)
1365 {
1366 	return (si_gci_gpio_reg(sih, gpio, mask, value, GCI_OFFSETOF(sih, gci_gpiointmask)));
1367 }
1368 
1369 uint8
1370 BCMPOSTTRAPFN(si_gci_gpio_status)(si_t *sih, uint8 gpio, uint8 mask, uint8 value)
1371 {
1372 	return (si_gci_gpio_reg(sih, gpio, mask, value, GCI_OFFSETOF(sih, gci_gpiostatus)));
1373 }
1374 
1375 static void
1376 si_gci_enable_gpioint(si_t *sih, bool enable)
1377 {
1378 	if (enable)
1379 		si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_intmask),
1380 			GCI_INTSTATUS_GPIOINT, GCI_INTSTATUS_GPIOINT);
1381 	else
1382 		si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_intmask),
1383 			GCI_INTSTATUS_GPIOINT, 0);
1384 }
1385 
1386 /* assumes function select is performed separately */
1387 void
1388 BCMINITFN(si_enable_gpio_wake)(si_t *sih, uint8 *wake_mask, uint8 *cur_status, uint8 gci_gpio,
1389 	uint32 pmu_cc2_mask, uint32 pmu_cc2_value)
1390 {
1391 	si_gci_gpio_chipcontrol(sih, gci_gpio,
1392 	                        (1 << GCI_GPIO_CHIPCTRL_ENAB_IN_BIT));
1393 
1394 	si_gci_gpio_intmask(sih, gci_gpio, *wake_mask, *wake_mask);
1395 	si_gci_gpio_wakemask(sih, gci_gpio, *wake_mask, *wake_mask);
1396 
1397 	/* clear the existing status bits */
1398 	*cur_status = si_gci_gpio_status(sih, gci_gpio,
1399 	                                 GCI_GPIO_STS_CLEAR, GCI_GPIO_STS_CLEAR);
1400 
1401 	/* top level gci int enable */
1402 	si_gci_enable_gpioint(sih, TRUE);
1403 
1404 	/* enable the pmu chip control bit to enable wake */
1405 	si_pmu_chipcontrol(sih, PMU_CHIPCTL2, pmu_cc2_mask, pmu_cc2_value);
1406 }
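
/*
 * Illustrative call sequence (mirrors what si_enable_device_wake() does
 * further below; the exact PMU chipcontrol2 bits are chip specific): since
 * this routine assumes the function select was already programmed, a caller
 * first muxes the pad to GCI and then arms the wake:
 *
 *	si_gci_set_functionsel(sih, CC_GCI_GPIO_1, CC_FNSEL_GCI0);
 *	si_enable_gpio_wake(sih, &wake_mask, &cur_status, CC_GCI_GPIO_1,
 *		PMU_CC2_GCI2_WAKE | PMU_CC2_MASK_WL_DEV_WAKE,
 *		PMU_CC2_GCI2_WAKE | PMU_CC2_MASK_WL_DEV_WAKE);
 */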
1407 
1408 void
1409 BCMPOSTTRAPFN(si_gci_config_wake_pin)(si_t *sih, uint8 gpio_n, uint8 wake_events, bool gci_gpio)
1410 {
1411 	uint8 chipcontrol = 0;
1412 	uint32 pmu_chipcontrol2 = 0;
1413 
1414 	if (!gci_gpio)
1415 		chipcontrol = (1 << GCI_GPIO_CHIPCTRL_ENAB_EXT_GPIO_BIT);
1416 
1417 	chipcontrol |= (1 << GCI_GPIO_CHIPCTRL_PULLUP_BIT);
1418 	si_gci_gpio_chipcontrol(sih, gpio_n,
1419 		(chipcontrol | (1 << GCI_GPIO_CHIPCTRL_ENAB_IN_BIT)));
1420 
1421 	/* enable gci gpio int/wake events */
1422 	si_gci_gpio_intmask(sih, gpio_n, wake_events, wake_events);
1423 	si_gci_gpio_wakemask(sih, gpio_n, wake_events, wake_events);
1424 
1425 	/* clear the existing status bits */
1426 	si_gci_gpio_status(sih, gpio_n,
1427 		GCI_GPIO_STS_CLEAR, GCI_GPIO_STS_CLEAR);
1428 
1429 	/* Enable gci2wl_wake */
1430 	pmu_chipcontrol2 = si_pmu_chipcontrol(sih, PMU_CHIPCTL2, 0, 0);
1431 	pmu_chipcontrol2 |= si_pmu_wake_bit_offset(sih);
1432 	si_pmu_chipcontrol(sih, PMU_CHIPCTL2, ~0, pmu_chipcontrol2);
1433 
1434 	/* enable gci int/wake events */
1435 	si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_intmask),
1436 		GCI_INTSTATUS_GPIOINT, GCI_INTSTATUS_GPIOINT);
1437 	si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_wakemask),
1438 		GCI_INTSTATUS_GPIOWAKE, GCI_INTSTATUS_GPIOWAKE);
1439 }
1440 
1441 void
1442 si_gci_free_wake_pin(si_t *sih, uint8 gpio_n)
1443 {
1444 	uint8 chipcontrol = 0;
1445 	uint8 wake_events;
1446 
1447 	si_gci_gpio_chipcontrol(sih, gpio_n, chipcontrol);
1448 
1449 	/* enable gci gpio int/wake events */
1450 	wake_events = si_gci_gpio_intmask(sih, gpio_n, 0, 0);
1451 	si_gci_gpio_intmask(sih, gpio_n, wake_events, 0);
1452 	wake_events = si_gci_gpio_wakemask(sih, gpio_n, 0, 0);
1453 	si_gci_gpio_wakemask(sih, gpio_n, wake_events, 0);
1454 
1455 	/* clear the existing status bits */
1456 	si_gci_gpio_status(sih, gpio_n,
1457 		GCI_GPIO_STS_CLEAR, GCI_GPIO_STS_CLEAR);
1458 }
1459 
1460 #if defined(BCMPCIEDEV)
1461 static const char BCMINITDATA(rstr_device_wake_opt)[] = "device_wake_opt";
1462 #else
1463 static const char BCMINITDATA(rstr_device_wake_opt)[] = "sd_devwake";
1464 #endif
1465 #define DEVICE_WAKE_GPIO3	3
1466 
1467 uint8
1468 BCMATTACHFN(si_enable_perst_wake)(si_t *sih, uint8 *perst_wake_mask, uint8 *perst_cur_status)
1469 {
1470 	uint8  gci_perst = CC_GCI_GPIO_15;
1471 	switch (CHIPID(sih->chip)) {
1472 	default:
1473 		SI_ERROR(("device wake not supported for 0x%04x yet\n", CHIPID(sih->chip)));
1474 		break;
1475 	}
1476 	return gci_perst;
1477 
1478 }
1479 
1480 uint8
1481 BCMINITFN(si_get_device_wake_opt)(si_t *sih)
1482 {
1483 	si_info_t *sii = SI_INFO(sih);
1484 
1485 	if (getvar(NULL, rstr_device_wake_opt) == NULL)
1486 		return CC_GCI_GPIO_INVALID;
1487 
1488 	sii->device_wake_opt = (uint8)getintvar(NULL, rstr_device_wake_opt);
1489 	return sii->device_wake_opt;
1490 }
1491 
1492 uint8
1493 si_enable_device_wake(si_t *sih, uint8 *wake_mask, uint8 *cur_status)
1494 {
1495 	uint8  gci_gpio = CC_GCI_GPIO_INVALID;		/* DEVICE_WAKE GCI GPIO */
1496 	uint32 device_wake_opt;
1497 	const si_info_t *sii = SI_INFO(sih);
1498 
1499 	device_wake_opt = sii->device_wake_opt;
1500 
1501 	if (device_wake_opt == CC_GCI_GPIO_INVALID) {
1502 		/* parse the device wake opt from nvram */
1503 		/* decode what that means for specific chip */
1504 		/* apply the right gci config */
1505 		/* enable the internal interrupts */
1506 		/* assume: caller already registered handler for that GCI int */
1507 		if (getvar(NULL, rstr_device_wake_opt) == NULL)
1508 			return gci_gpio;
1509 
1510 		device_wake_opt = getintvar(NULL, rstr_device_wake_opt);
1511 	}
1512 	switch (CHIPID(sih->chip)) {
1513 	case BCM4369_CHIP_GRPID:
1514 	case BCM4376_CHIP_GRPID:
1515 	case BCM4378_CHIP_GRPID:
1516 	case BCM4385_CHIP_GRPID:
1517 	case BCM4387_CHIP_GRPID:
1518 	case BCM4362_CHIP_GRPID:
1519 		/* device_wake op 1:
1520 		 * gpio 1, func sel 4,
1521 		 * gcigpioctrl: input pin, extra gpio
1522 		 * since GCI_GPIO_CHIPCTRL_ENAB_EXT_GPIO_BIT is used, gci gpio is same as GPIO num
1523 		 * GCI GPIO 1,wakemask/intmask: any edge, both positive negative
1524 		 * enable the wake mask, intmask in GCI top level
1525 		 * enable the chip common to get the G/ECI interrupt
1526 		 * enable the PMU ctrl to wake the chip on wakemask set
1527 		 */
1528 		if (device_wake_opt == 1) {
1529 			gci_gpio = CC_GCI_GPIO_1;
1530 			*wake_mask = (1 << GCI_GPIO_STS_VALUE_BIT) |
1531 				(1 << GCI_GPIO_STS_POS_EDGE_BIT) |
1532 				(1 << GCI_GPIO_STS_NEG_EDGE_BIT);
1533 			si_gci_set_functionsel(sih, gci_gpio, CC_FNSEL_GCI0);
1534 			si_enable_gpio_wake(sih, wake_mask, cur_status, gci_gpio,
1535 				PMU_CC2_GCI2_WAKE | PMU_CC2_MASK_WL_DEV_WAKE,
1536 				PMU_CC2_GCI2_WAKE | PMU_CC2_MASK_WL_DEV_WAKE);
1537 			/* hack: add a pulldown to HOST_WAKE */
1538 			si_gci_gpio_chipcontrol(sih, 0,
1539 					(1 << GCI_GPIO_CHIPCTRL_PULLDN_BIT));
1540 
1541 			/* Enable wake on GciWake */
1542 			si_gci_indirect(sih, 0,
1543 				GCI_OFFSETOF(sih, gci_wakemask),
1544 				(GCI_INTSTATUS_GPIOWAKE | GCI_INTSTATUS_GPIOINT),
1545 				(GCI_INTSTATUS_GPIOWAKE | GCI_INTSTATUS_GPIOINT));
1546 
1547 		} else {
1548 			SI_ERROR(("0x%04x: don't know about device_wake_opt %d\n",
1549 				CHIPID(sih->chip), device_wake_opt));
1550 		}
1551 		break;
1552 	default:
1553 		SI_ERROR(("device wake not supported for 0x%04x yet\n", CHIPID(sih->chip)));
1554 		break;
1555 	}
1556 	return gci_gpio;
1557 }
1558 
1559 void
1560 si_gci_gpioint_handler_unregister(si_t *sih, void *gci_i)
1561 {
1562 	si_info_t *sii;
1563 	gci_gpio_item_t *p, *n;
1564 
1565 	sii = SI_INFO(sih);
1566 
1567 	ASSERT(gci_i != NULL);
1568 
1569 	sii = SI_INFO(sih);
1570 
1571 	if (!(sih->cccaps_ext & CC_CAP_EXT_GCI_PRESENT)) {
1572 		SI_ERROR(("si_gci_gpioint_handler_unregister: not GCI capable\n"));
1573 		return;
1574 	}
1575 	ASSERT(sii->gci_gpio_head != NULL);
1576 
1577 	if ((void*)sii->gci_gpio_head == gci_i) {
1578 		sii->gci_gpio_head = sii->gci_gpio_head->next;
1579 		MFREE(sii->osh, gci_i, sizeof(gci_gpio_item_t));
1580 		return;
1581 	} else {
1582 		p = sii->gci_gpio_head;
1583 		n = p->next;
1584 		while (n) {
1585 			if ((void*)n == gci_i) {
1586 				p->next = n->next;
1587 				MFREE(sii->osh, gci_i, sizeof(gci_gpio_item_t));
1588 				return;
1589 			}
1590 			p = n;
1591 			n = n->next;
1592 		}
1593 	}
1594 }
1595 
1596 void*
1597 si_gci_gpioint_handler_register(si_t *sih, uint8 gci_gpio, uint8 gpio_status,
1598 	gci_gpio_handler_t cb, void *arg)
1599 {
1600 	si_info_t *sii;
1601 	gci_gpio_item_t *gci_i;
1602 
1603 	sii = SI_INFO(sih);
1604 
1605 	ASSERT(cb != NULL);
1606 
1607 	sii = SI_INFO(sih);
1608 
1609 	if (!(sih->cccaps_ext & CC_CAP_EXT_GCI_PRESENT)) {
1610 		SI_ERROR(("si_gci_gpioint_handler_register: not GCI capable\n"));
1611 		return NULL;
1612 	}
1613 
1614 	SI_MSG(("si_gci_gpioint_handler_register: gci_gpio  is %d\n", gci_gpio));
1615 	if (gci_gpio >= SI_GPIO_MAX) {
1616 		SI_ERROR(("isi_gci_gpioint_handler_register: Invalid GCI GPIO NUM %d\n", gci_gpio));
1617 		return NULL;
1618 	}
1619 
1620 	gci_i = MALLOC(sii->osh, (sizeof(gci_gpio_item_t)));
1621 
1622 	ASSERT(gci_i);
1623 	if (gci_i == NULL) {
1624 		SI_ERROR(("si_gci_gpioint_handler_register: GCI Item MALLOC failure\n"));
1625 		return NULL;
1626 	}
1627 
1628 	if (sii->gci_gpio_head)
1629 		gci_i->next = sii->gci_gpio_head;
1630 	else
1631 		gci_i->next = NULL;
1632 
1633 	sii->gci_gpio_head = gci_i;
1634 
1635 	gci_i->handler = cb;
1636 	gci_i->arg = arg;
1637 	gci_i->gci_gpio = gci_gpio;
1638 	gci_i->status = gpio_status;
1639 
1640 	return (void *)(gci_i);
1641 }
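
/* Illustrative usage sketch (guarded out, never compiled): shows how a caller
 * might pair the register/unregister entry points above.  The GPIO number and
 * edge mask are placeholders; the callback prototype is assumed to match the
 * way handlers are invoked in si_gci_gpioint_handler_process() below,
 * i.e. (uint32 status, void *arg).
 */
#if 0
static void
example_gci_gpio_cb(uint32 status, void *arg)
{
	/* react to the edge/level bits reported in 'status' */
	BCM_REFERENCE(arg);
}

static void
example_gci_gpio_usage(si_t *sih)
{
	void *h;

	h = si_gci_gpioint_handler_register(sih, /* gci_gpio */ 1,
		(1 << GCI_GPIO_STS_POS_EDGE_BIT), example_gci_gpio_cb, NULL);
	if (h != NULL) {
		/* ... later, when the handler is no longer needed ... */
		si_gci_gpioint_handler_unregister(sih, h);
	}
}
#endif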
1642 
1643 static void
1644 si_gci_gpioint_handler_process(si_t *sih)
1645 {
1646 	si_info_t *sii;
1647 	uint32 gpio_status[2], status;
1648 	gci_gpio_item_t *gci_i;
1649 
1650 	sii = SI_INFO(sih);
1651 
1652 	/* most likely only 1 or 2 GPIOs are used this way, so handle each registered GPIO in turn */
1653 
1654 	/* go through the GPIO handlers and call them back if their intstatus is set */
1655 	si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_indirect_addr), ~0, 0);
1656 	gpio_status[0] = si_corereg(sih, GCI_CORE_IDX(sih),
1657 		GCI_OFFSETOF(sih, gci_gpiostatus), 0, 0);
1658 	/* Only clear the status bits that have been read. Other bits (if present) should not
1659 	* get cleared, so that they can be handled later.
1660 	*/
1661 	si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_gpiostatus), ~0, gpio_status[0]);
1662 
1663 	si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_indirect_addr), ~0, 1);
1664 	gpio_status[1] = si_corereg(sih, GCI_CORE_IDX(sih),
1665 		GCI_OFFSETOF(sih, gci_gpiostatus), 0, 0);
1666 	/* Only clear the status bits that have been read. Other bits (if present) should not
1667 	* get cleared, so that they can be handled later.
1668 	*/
1669 	si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_gpiostatus), ~0, gpio_status[1]);
1670 
1671 	gci_i = sii->gci_gpio_head;
1672 
1673 	SI_MSG(("si_gci_gpioint_handler_process: status 0x%04x, 0x%04x\n",
1674 		gpio_status[0], gpio_status[1]));
1675 
1676 	while (gci_i) {
1677 		if (gci_i->gci_gpio < 8)
1678 			status = ((gpio_status[0] >> (gci_i->gci_gpio * 4)) & 0x0F);
1679 		else
1680 			status = ((gpio_status[1] >> ((gci_i->gci_gpio - 8) * 4)) & 0x0F);
1681 		/* should we mask these */
1682 		/* call back */
1683 		ASSERT(gci_i->handler);
1684 		if (gci_i->status & status)
1685 			gci_i->handler(status, gci_i->arg);
1686 		gci_i = gci_i->next;
1687 	}
1688 }
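
/* Worked example of the status-nibble lookup above (informational only):
 * each GCI GPIO owns a 4-bit field in gci_gpiostatus, eight GPIOs per 32-bit
 * word.  For gci_gpio 10 the loop uses word 1 (read via indirect_addr 1) and
 * extracts bits [11:8]:  status = (gpio_status[1] >> ((10 - 8) * 4)) & 0xF.
 */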
1689 
1690 void
1691 si_gci_handler_process(si_t *sih)
1692 {
1693 	uint32 gci_intstatus;
1694 
1695 	/* check the intmask, wakemask in the interrupt routine and call the right ones */
1696 	/* for now call the gpio interrupt */
1697 	gci_intstatus = si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_intstat), 0, 0);
1698 
1699 	if (gci_intstatus & GCI_INTMASK_GPIOINT) {
1700 		SI_MSG(("si_gci_handler_process: gci_intstatus is 0x%04x\n", gci_intstatus));
1701 		si_gci_gpioint_handler_process(sih);
1702 	}
1703 	if ((gci_intstatus & ~(GCI_INTMASK_GPIOINT))) {
1704 #ifdef	HNDGCI
1705 		hndgci_handler_process(gci_intstatus, sih);
1706 #endif /* HNDGCI */
1707 	}
1708 #ifdef WLGCIMBHLR
1709 	if (gci_intstatus & GCI_INTSTATUS_EVENT) {
1710 		hnd_gci_mb_handler_process(gci_intstatus, sih);
1711 	}
1712 #endif /* WLGCIMBHLR */
1713 
1714 #if defined(BCMLTECOEX) && !defined(WLTEST)
1715 	if (gci_intstatus & GCI_INTMASK_SRFNE) {
1716 		si_wci2_rxfifo_intr_handler_process(sih, gci_intstatus);
1717 	}
1718 #endif /* BCMLTECOEX && !WLTEST */
1719 
1720 #ifdef BCMGCISHM
1721 	if (gci_intstatus & (GCI_INTSTATUS_EVENT | GCI_INTSTATUS_EVENTWAKE)) {
1722 		hnd_gcishm_handler_process(sih, gci_intstatus);
1723 	}
1724 #endif /* BCMGCISHM */
1725 }
1726 
1727 void
1728 si_gci_seci_init(si_t *sih)
1729 {
1730 	si_gci_direct(sih, GCI_OFFSETOF(sih, gci_corectrl), ALLONES_32,
1731 	              (GCI_CCTL_SCS << GCI_CCTL_SCS_OFFSET) |
1732 	              (GCI_MODE_SECI << GCI_CCTL_SMODE_OFFSET) |
1733 	              (1 << GCI_CCTL_SECIEN_OFFSET));
1734 
1735 	si_gci_indirect(sih, 0, GCI_OFFSETOF(sih, gci_chipctrl), ALLONES_32, 0x0080000); //0x200
1736 
1737 	si_gci_indirect(sih, 1, GCI_OFFSETOF(sih, gci_gpioctl), ALLONES_32, 0x00010280); //0x044
1738 
1739 	/* baudrate:4Mbps at 40MHz xtal, escseq:0xdb, high baudrate, enable seci_tx/rx */
1740 	si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secibauddiv), //0x1e0
1741 	              ALLONES_32, 0xF6);
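	/* Informational cross-check of the divisor above: using the relation
	 * noted later in this file (bauddiv = 256 - integer part of
	 * GCI clock / baudrate), 40 MHz / 4 Mbps = 10 and 256 - 10 = 246 = 0xF6,
	 * which matches the value programmed here.
	 */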
1742 	si_gci_direct(sih, GCI_OFFSETOF(sih, gci_baudadj), ALLONES_32, 0xFF); //0x1f8
1743 	si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secifcr), ALLONES_32, 0x00); //0x1e4
1744 	si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secimcr), ALLONES_32, 0x08); //0x1ec
1745 	si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secilcr), ALLONES_32, 0xA8); //0x1e8
1746 	si_gci_direct(sih, GCI_OFFSETOF(sih, gci_seciuartescval), //0x1d0
1747 	              ALLONES_32, 0xDB);
1748 
1749 	/* Atlas/GMAC3 configuration for SECI */
1750 	si_gci_direct(sih, GCI_OFFSETOF(sih, gci_miscctl), ALLONES_32, 0xFFFF); //0xc54
1751 
1752 	/* config GPIO pins 5/6 as SECI_IN/SECI_OUT */
1753 	si_gci_indirect(sih, 0,
1754 		GCI_OFFSETOF(sih, gci_seciin_ctrl), ALLONES_32, 0x161); //0x218
1755 	si_gci_indirect(sih, 0,
1756 		GCI_OFFSETOF(sih, gci_seciout_ctrl), ALLONES_32, 0x10051); //0x21c
1757 
1758 	si_gci_direct(sih, GCI_OFFSETOF(sih, gci_seciout_txen_txbr), ALLONES_32, 0x01); //0x224
1759 
1760 	/* WLAN rx offset assignment */
1761 	/* WLCX: RX offset assignment from WLAN core to WLAN core (faked as BT TX) */
1762 	si_gci_indirect(sih, 0,
1763 		GCI_OFFSETOF(sih, gci_secif0rx_offset), ALLONES_32, 0x13121110); //0x1bc
1764 	si_gci_indirect(sih, 1,
1765 		GCI_OFFSETOF(sih, gci_secif0rx_offset), ALLONES_32, 0x17161514);
1766 	si_gci_indirect(sih, 2,
1767 		GCI_OFFSETOF(sih, gci_secif0rx_offset), ALLONES_32, 0x1b1a1918);
1768 
1769 	/* first 12 nibbles configured for format-0 */
1770 	/* note: we can only select 1st 12 nibbles of each IP for format_0 */
1771 	si_gci_indirect(sih, 0,  GCI_OFFSETOF(sih, gci_seciusef0tx_reg), //0x1b4
1772 	                ALLONES_32, 0xFFF); // first 12 nibbles
1773 
1774 	si_gci_indirect(sih, 0,  GCI_OFFSETOF(sih, gci_secitx_datatag),
1775 			ALLONES_32, 0x0F0); // gci_secitx_datatag(nibbles 4 to 7 tagged)
1776 	si_gci_indirect(sih, 0,  GCI_OFFSETOF(sih, gci_secirx_datatag),
1777 	                ALLONES_32, 0x0F0); // gci_secirx_datatag(nibbles 4 to 7 tagged)
1778 
1779 	/* TX offset assignment (wlan to bt) */
1780 	si_gci_indirect(sih, 0,
1781 		GCI_OFFSETOF(sih, gci_secif0tx_offset), 0xFFFFFFFF, 0x76543210); //0x1b8
1782 	si_gci_indirect(sih, 1,
1783 		GCI_OFFSETOF(sih, gci_secif0tx_offset), 0xFFFFFFFF, 0x0000ba98);
1784 	if (CHIPID(sih->chip) == BCM43602_CHIP_ID) {
1785 		/* Request BT side to update SECI information */
1786 		si_gci_direct(sih, OFFSETOF(chipcregs_t, gci_seciauxtx),
1787 			(SECI_AUX_TX_START | SECI_REFRESH_REQ),
1788 			(SECI_AUX_TX_START | SECI_REFRESH_REQ));
1789 		/* WLAN to update SECI information */
1790 		si_gci_direct(sih, OFFSETOF(chipcregs_t, gci_corectrl),
1791 			SECI_UPD_SECI, SECI_UPD_SECI);
1792 	}
1793 
1794 	// HW ECI bus directly driven from IP
1795 	si_gci_direct(sih, GCI_OFFSETOF(sih, gci_control_0), ALLONES_32, 0x00000000);
1796 	si_gci_direct(sih, GCI_OFFSETOF(sih, gci_control_1), ALLONES_32, 0x00000000);
1797 }
1798 
1799 #if defined(BCMLTECOEX) && !defined(WLTEST)
1800 int
1801 si_wci2_rxfifo_handler_register(si_t *sih, wci2_handler_t rx_cb, void *ctx)
1802 {
1803 	si_info_t *sii;
1804 	wci2_rxfifo_info_t *wci2_info;
1805 
1806 	sii = SI_INFO(sih);
1807 
1808 	ASSERT(rx_cb != NULL);
1809 
1810 	if (!(sih->cccaps_ext & CC_CAP_EXT_GCI_PRESENT)) {
1811 		SI_ERROR(("si_wci2_rxfifo_handler_register: not GCI capable\n"));
1812 		return BCME_ERROR;
1813 	}
1814 
1815 	if ((wci2_info = (wci2_rxfifo_info_t *)MALLOCZ(sii->osh,
1816 			sizeof(wci2_rxfifo_info_t))) == NULL) {
1817 		SI_ERROR(("si_wci2_rxfifo_handler_register: WCI2 RXFIFO INFO MALLOC failure\n"));
1818 		return BCME_NOMEM;
1819 	}
1820 
1821 	if ((wci2_info->rx_buf = (char *)MALLOCZ(sii->osh, WCI2_UART_RX_BUF_SIZE)) == NULL) {
1822 		MFREE(sii->osh, wci2_info, sizeof(wci2_rxfifo_info_t));
1823 
1824 		SI_ERROR(("si_wci2_rxfifo_handler_register: WCI2 RXFIFO INFO MALLOC failure\n"));
1825 		return BCME_NOMEM;
1826 	}
1827 
1828 	if ((wci2_info->cbs = (wci2_cbs_t *)MALLOCZ(sii->osh, sizeof(wci2_cbs_t))) == NULL) {
1829 		MFREE(sii->osh, wci2_info->rx_buf, WCI2_UART_RX_BUF_SIZE);
1830 		MFREE(sii->osh, wci2_info, sizeof(wci2_rxfifo_info_t));
1831 
1832 		SI_ERROR(("si_wci2_rxfifo_handler_register: WCI2 RXFIFO INFO MALLOC failure\n"));
1833 		return BCME_NOMEM;
1834 	}
1835 
1836 	sii->wci2_info = wci2_info;
1837 
1838 	/* init callback */
1839 	wci2_info->cbs->handler = rx_cb;
1840 	wci2_info->cbs->context = ctx;
1841 
1842 	return BCME_OK;
1843 }
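
/* Informational: a WCI2 consumer registers its callback with
 * si_wci2_rxfifo_handler_register(sih, cb, ctx); the callback is later invoked
 * from si_wci2_rxfifo_intr_handler_process() below as cb(ctx, rx_buf, nbytes),
 * and si_wci2_rxfifo_handler_unregister(sih) frees the buffer and callback
 * state again.
 */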
1844 
1845 void
1846 si_wci2_rxfifo_handler_unregister(si_t *sih)
1847 {
1848 
1849 	si_info_t *sii;
1850 	wci2_rxfifo_info_t *wci2_info;
1851 
1852 	sii = SI_INFO(sih);
1853 	ASSERT(sii);
1854 
1855 	if (!(sih->cccaps_ext & CC_CAP_EXT_GCI_PRESENT)) {
1856 		SI_ERROR(("si_wci2_rxfifo_handler_unregister: not GCI capable\n"));
1857 		return;
1858 	}
1859 
1860 	wci2_info = sii->wci2_info;
1861 
1862 	if (wci2_info == NULL) {
1863 		return;
1864 	}
1865 
1866 	if (wci2_info->rx_buf != NULL) {
1867 		MFREE(sii->osh, wci2_info->rx_buf, WCI2_UART_RX_BUF_SIZE);
1868 	}
1869 
1870 	if (wci2_info->cbs != NULL) {
1871 		MFREE(sii->osh, wci2_info->cbs, sizeof(wci2_cbs_t));
1872 	}
1873 
1874 	MFREE(sii->osh, wci2_info, sizeof(wci2_rxfifo_info_t));
1875 
1876 }
1877 
1878 /* GCI WCI2 UART RXFIFO interrupt handler */
1879 static void
1880 si_wci2_rxfifo_intr_handler_process(si_t *sih, uint32 intstatus)
1881 {
1882 	const si_info_t *sii = SI_INFO(sih);
1883 	uint32 udata;
1884 	char ubyte;
1885 	wci2_rxfifo_info_t *wci2_info;
1886 	bool call_cb = FALSE;
1887 
1888 	wci2_info = sii->wci2_info;
1889 
1890 	if (wci2_info == NULL) {
1891 		return;
1892 	}
1893 
1894 	if (intstatus & GCI_INTSTATUS_SRFOF) {
1895 		SI_ERROR(("*** rx fifo overflow *** \n"));
1896 		si_gci_direct(sih, GCI_OFFSETOF(sih, gci_intstat),
1897 			GCI_INTSTATUS_SRFOF, GCI_INTSTATUS_SRFOF);
1898 	}
1899 
1900 	/* Check if RF FIFO has any data */
1901 	if (intstatus & GCI_INTMASK_SRFNE) {
1902 
1903 		/* Read seci uart data */
1904 		udata = si_gci_direct(sih, GCI_OFFSETOF(sih, gci_seciuartdata), 0, 0);
1905 
1906 		while (udata & SECI_UART_DATA_RF_NOT_EMPTY_BIT) {
1907 
1908 			ubyte = (char) udata;
1909 			if (wci2_info) {
1910 				wci2_info->rx_buf[wci2_info->rx_idx] = ubyte;
1911 				wci2_info->rx_idx++;
1912 				call_cb = TRUE;
1913 				/* if the buffer is full, break out;
1914 				 * the remaining bytes are processed on the next interrupt
1915 				 */
1916 				if (wci2_info->rx_idx == WCI2_UART_RX_BUF_SIZE) {
1917 					break;
1918 				}
1919 			}
1920 
1921 			udata = si_gci_direct(sih, GCI_OFFSETOF(sih, gci_seciuartdata), 0, 0);
1922 		}
1923 
1924 		/* if callback registered; call it */
1925 		if (call_cb && wci2_info && wci2_info->cbs) {
1926 			wci2_info->cbs->handler(wci2_info->cbs->context, wci2_info->rx_buf,
1927 				wci2_info->rx_idx);
1928 			bzero(wci2_info->rx_buf, WCI2_UART_RX_BUF_SIZE);
1929 			wci2_info->rx_idx = 0;
1930 		}
1931 	}
1932 }
1933 #endif /* BCMLTECOEX && !WLTEST */
1934 
1935 #ifdef BCMLTECOEX
1936 /* Program GCI GpioMask and GCI GpioControl Registers */
1937 static void
1938 si_config_gcigpio(si_t *sih, uint32 gci_pos, uint8 gcigpio,
1939 	uint8 gpioctl_mask, uint8 gpioctl_val)
1940 {
1941 	uint32 indirect_idx =
1942 		GCI_REGIDX(gci_pos) | (gcigpio << GCI_GPIOIDX_OFFSET);
1943 	si_gci_indirect(sih, indirect_idx, GCI_OFFSETOF(sih, gci_gpiomask),
1944 		(1 << GCI_BITOFFSET(gci_pos)),
1945 		(1 << GCI_BITOFFSET(gci_pos)));
1946 	/* Write GPIO Configuration to GCI Registers */
1947 	si_gci_indirect(sih, gcigpio/4, GCI_OFFSETOF(sih, gci_gpioctl),
1948 		(gpioctl_mask << (gcigpio%4)*8), (gpioctl_val << (gcigpio%4)*8));
1949 }
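
/* Worked example of the byte-lane math above (informational only): each
 * gci_gpioctl word carries the 8-bit control for four GCI GPIOs, so for
 * gcigpio = 5 the write targets word 5/4 = 1 and byte lane 5%4 = 1, i.e.
 * gpioctl_mask/gpioctl_val are shifted left by 8 bits.
 */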
1950 
1951 void
1952 si_ercx_init(si_t *sih, uint32 ltecx_mux, uint32 ltecx_padnum,
1953 	uint32 ltecx_fnsel, uint32 ltecx_gcigpio)
1954 {
1955 	uint8 fsync_padnum, lterx_padnum, ltetx_padnum, wlprio_padnum;
1956 	uint8 fsync_fnsel, lterx_fnsel, ltetx_fnsel, wlprio_fnsel;
1957 	uint8 fsync_gcigpio, lterx_gcigpio, ltetx_gcigpio, wlprio_gcigpio;
1958 
1959 	/* reset GCI block */
1960 	si_gci_reset(sih);
1961 
1962 	/* enable ERCX (pure GPIO) mode; keep SECI in reset mode only */
1963 	/* keeping SECI in reset mode should draw less current */
1964 	si_gci_direct(sih, GCI_OFFSETOF(sih, gci_corectrl),
1965 		((GCI_MODE_MASK << GCI_CCTL_SMODE_OFFSET)
1966 		|(1 << GCI_CCTL_SECIEN_OFFSET)
1967 		|(1 << GCI_CCTL_RSTSL_OFFSET)
1968 		|(1 << GCI_CCTL_SECIRST_OFFSET)),
1969 		((GCI_MODE_GPIO << GCI_CCTL_SMODE_OFFSET)
1970 		|(0 << GCI_CCTL_SECIEN_OFFSET)
1971 		|(1 << GCI_CCTL_RSTSL_OFFSET)
1972 		|(1 << GCI_CCTL_SECIRST_OFFSET)));
1973 
1974 	/* Extract Interface Configuration */
1975 	fsync_padnum	= LTECX_EXTRACT_PADNUM(ltecx_padnum, LTECX_NVRAM_FSYNC_IDX);
1976 	lterx_padnum	= LTECX_EXTRACT_PADNUM(ltecx_padnum, LTECX_NVRAM_LTERX_IDX);
1977 	ltetx_padnum	= LTECX_EXTRACT_PADNUM(ltecx_padnum, LTECX_NVRAM_LTETX_IDX);
1978 	wlprio_padnum	= LTECX_EXTRACT_PADNUM(ltecx_padnum, LTECX_NVRAM_WLPRIO_IDX);
1979 
1980 	fsync_fnsel	= LTECX_EXTRACT_FNSEL(ltecx_fnsel, LTECX_NVRAM_FSYNC_IDX);
1981 	lterx_fnsel	= LTECX_EXTRACT_FNSEL(ltecx_fnsel, LTECX_NVRAM_LTERX_IDX);
1982 	ltetx_fnsel	= LTECX_EXTRACT_FNSEL(ltecx_fnsel, LTECX_NVRAM_LTETX_IDX);
1983 	wlprio_fnsel	= LTECX_EXTRACT_FNSEL(ltecx_fnsel, LTECX_NVRAM_WLPRIO_IDX);
1984 
1985 	fsync_gcigpio	= LTECX_EXTRACT_GCIGPIO(ltecx_gcigpio, LTECX_NVRAM_FSYNC_IDX);
1986 	lterx_gcigpio	= LTECX_EXTRACT_GCIGPIO(ltecx_gcigpio, LTECX_NVRAM_LTERX_IDX);
1987 	ltetx_gcigpio	= LTECX_EXTRACT_GCIGPIO(ltecx_gcigpio, LTECX_NVRAM_LTETX_IDX);
1988 	wlprio_gcigpio	= LTECX_EXTRACT_GCIGPIO(ltecx_gcigpio, LTECX_NVRAM_WLPRIO_IDX);
1989 
1990 	/* Clear this Function Select for all GPIOs if programmed by default */
1991 	si_gci_clear_functionsel(sih, fsync_fnsel);
1992 	si_gci_clear_functionsel(sih, lterx_fnsel);
1993 	si_gci_clear_functionsel(sih, ltetx_fnsel);
1994 	si_gci_clear_functionsel(sih, wlprio_fnsel);
1995 
1996 	/* Program Function select for selected GPIOs */
1997 	si_gci_set_functionsel(sih, fsync_padnum, fsync_fnsel);
1998 	si_gci_set_functionsel(sih, lterx_padnum, lterx_fnsel);
1999 	si_gci_set_functionsel(sih, ltetx_padnum, ltetx_fnsel);
2000 	si_gci_set_functionsel(sih, wlprio_padnum, wlprio_fnsel);
2001 
2002 	/* NOTE: We are keeping Input PADs in Pull Down Mode to take care of the case
2003 	 * when LTE Modem doesn't drive these lines for any reason.
2004 	 * We should consider alternate ways to identify this situation and dynamically
2005 	 * enable Pull Down PAD only when LTE Modem doesn't drive these lines.
2006 	 */
2007 
2008 	/* Configure Frame Sync as input */
2009 	si_config_gcigpio(sih, GCI_LTE_FRAMESYNC_POS, fsync_gcigpio, 0xFF,
2010 		((1 << GCI_GPIOCTL_INEN_OFFSET)|(1 << GCI_GPIOCTL_PDN_OFFSET)));
2011 
2012 	/* Configure LTE Rx as input */
2013 	si_config_gcigpio(sih, GCI_LTE_RX_POS, lterx_gcigpio, 0xFF,
2014 		((1 << GCI_GPIOCTL_INEN_OFFSET)|(1 << GCI_GPIOCTL_PDN_OFFSET)));
2015 
2016 	/* Configure LTE Tx as input */
2017 	si_config_gcigpio(sih, GCI_LTE_TX_POS, ltetx_gcigpio, 0xFF,
2018 		((1 << GCI_GPIOCTL_INEN_OFFSET)|(1 << GCI_GPIOCTL_PDN_OFFSET)));
2019 
2020 	/* Configure WLAN Prio as output. BT Need to configure its ISM Prio separately
2021 	 * NOTE: LTE chip has to enable its internal pull-down whenever WL goes down
2022 	 */
2023 	si_config_gcigpio(sih, GCI_WLAN_PRIO_POS, wlprio_gcigpio, 0xFF,
2024 		(1 << GCI_GPIOCTL_OUTEN_OFFSET));
2025 
2026 	/* Enable inbandIntMask for FrmSync only, disable LTE_Rx and LTE_Tx
2027 	  * Note: FrameSync, LTE Rx & LTE Tx happen to share the same REGIDX
2028 	  * Hence a single Access is sufficient
2029 	  */
2030 	si_gci_indirect(sih, GCI_REGIDX(GCI_LTE_FRAMESYNC_POS),
2031 		GCI_OFFSETOF(sih, gci_inbandeventintmask),
2032 		((1 << GCI_BITOFFSET(GCI_LTE_FRAMESYNC_POS))
2033 		|(1 << GCI_BITOFFSET(GCI_LTE_RX_POS))
2034 		|(1 << GCI_BITOFFSET(GCI_LTE_TX_POS))),
2035 		((1 << GCI_BITOFFSET(GCI_LTE_FRAMESYNC_POS))
2036 		|(0 << GCI_BITOFFSET(GCI_LTE_RX_POS))
2037 		|(0 << GCI_BITOFFSET(GCI_LTE_TX_POS))));
2038 
2039 	/* Enable Inband interrupt polarity for LTE_FRMSYNC */
2040 	si_gci_indirect(sih, GCI_REGIDX(GCI_LTE_FRAMESYNC_POS),
2041 		GCI_OFFSETOF(sih, gci_intpolreg),
2042 		(1 << GCI_BITOFFSET(GCI_LTE_FRAMESYNC_POS)),
2043 		(1 << GCI_BITOFFSET(GCI_LTE_FRAMESYNC_POS)));
2044 }
2045 
2046 void
2047 si_wci2_init(si_t *sih, uint8 baudrate, uint32 ltecx_mux, uint32 ltecx_padnum,
2048 	uint32 ltecx_fnsel, uint32 ltecx_gcigpio, uint32 xtalfreq)
2049 {
2050 	/* BCMLTECOEXGCI_ENAB should be checked before calling si_wci2_init() */
2051 	uint8 baud = baudrate;
2052 	uint8 seciin, seciout, fnselin, fnselout, gcigpioin, gcigpioout;
2053 
2054 	/* Extract PAD GPIO number (1-byte) from "ltecx_padnum" for each LTECX pin */
2055 	seciin =	LTECX_EXTRACT_PADNUM(ltecx_padnum, LTECX_NVRAM_WCI2IN_IDX);
2056 	seciout =	LTECX_EXTRACT_PADNUM(ltecx_padnum, LTECX_NVRAM_WCI2OUT_IDX);
2057 	/* Extract FunctionSel (1-nibble) from "ltecx_fnsel" for each LTECX pin */
2058 	fnselin =	LTECX_EXTRACT_FNSEL(ltecx_fnsel, LTECX_NVRAM_WCI2IN_IDX);
2059 	fnselout =	LTECX_EXTRACT_FNSEL(ltecx_fnsel, LTECX_NVRAM_WCI2OUT_IDX);
2060 	/* Extract GCI-GPIO number (1-nibble) from "ltecx_gcigpio" for each LTECX pin */
2061 	gcigpioin =	LTECX_EXTRACT_GCIGPIO(ltecx_gcigpio, LTECX_NVRAM_WCI2IN_IDX);
2062 	gcigpioout =	LTECX_EXTRACT_GCIGPIO(ltecx_gcigpio, LTECX_NVRAM_WCI2OUT_IDX);
2063 
2064 	/* reset GCI block */
2065 	si_gci_reset(sih);
2066 
2067 	/* NOTE: Writing Reserved bits of older GCI Revs is OK */
2068 	si_gci_direct(sih, GCI_OFFSETOF(sih, gci_corectrl),
2069 		((GCI_CCTL_SCS_MASK << GCI_CCTL_SCS_OFFSET)
2070 		|(GCI_CCTL_LOWTOUT_MASK << GCI_CCTL_SILOWTOUT_OFFSET)
2071 		|(1 << GCI_CCTL_BRKONSLP_OFFSET)
2072 		|(1 << GCI_CCTL_US_OFFSET)
2073 		|(GCI_MODE_MASK << GCI_CCTL_SMODE_OFFSET)
2074 		|(1 << GCI_CCTL_FSL_OFFSET)
2075 		|(1 << GCI_CCTL_SECIEN_OFFSET)),
2076 		((GCI_CCTL_SCS_DEF << GCI_CCTL_SCS_OFFSET)
2077 		|(GCI_CCTL_LOWTOUT_30BIT << GCI_CCTL_SILOWTOUT_OFFSET)
2078 		|(0 << GCI_CCTL_BRKONSLP_OFFSET)
2079 		|(0 << GCI_CCTL_US_OFFSET)
2080 		|(GCI_MODE_BTSIG << GCI_CCTL_SMODE_OFFSET)
2081 		|(0 << GCI_CCTL_FSL_OFFSET)
2082 		|(1 << GCI_CCTL_SECIEN_OFFSET))); /* 19000024 */
2083 
2084 	/* Program Function select for selected GPIOs */
2085 	si_gci_set_functionsel(sih, seciin, fnselin);
2086 	si_gci_set_functionsel(sih, seciout, fnselout);
2087 
2088 	/* Enable inbandIntMask for FrmSync only; disable LTE_Rx and LTE_Tx
2089 	  * Note: FrameSync, LTE Rx & LTE Tx happen to share the same REGIDX
2090 	  * Hence a single Access is sufficient
2091 	  */
2092 	si_gci_indirect(sih,
2093 		GCI_REGIDX(GCI_LTE_FRAMESYNC_POS),
2094 		GCI_OFFSETOF(sih, gci_inbandeventintmask),
2095 		((1 << GCI_BITOFFSET(GCI_LTE_FRAMESYNC_POS))
2096 		|(1 << GCI_BITOFFSET(GCI_LTE_RX_POS))
2097 		|(1 << GCI_BITOFFSET(GCI_LTE_TX_POS))),
2098 		((1 << GCI_BITOFFSET(GCI_LTE_FRAMESYNC_POS))
2099 		|(0 << GCI_BITOFFSET(GCI_LTE_RX_POS))
2100 		|(0 << GCI_BITOFFSET(GCI_LTE_TX_POS))));
2101 
2102 	if (GCIREV(sih->gcirev) >= 1) {
2103 		/* Program inband interrupt polarity as posedge for FrameSync */
2104 		si_gci_indirect(sih, GCI_REGIDX(GCI_LTE_FRAMESYNC_POS),
2105 			GCI_OFFSETOF(sih, gci_intpolreg),
2106 			(1 << GCI_BITOFFSET(GCI_LTE_FRAMESYNC_POS)),
2107 			(1 << GCI_BITOFFSET(GCI_LTE_FRAMESYNC_POS)));
2108 	}
2109 	if (GCIREV(sih->gcirev) >= 4) {
2110 		/* Program SECI_IN Control Register */
2111 		si_gci_indirect(sih, GCI_LTECX_SECI_ID,
2112 			GCI_OFFSETOF(sih, gci_seciin_ctrl), ALLONES_32,
2113 			((GCI_MODE_BTSIG << GCI_SECIIN_MODE_OFFSET)
2114 			 |(gcigpioin << GCI_SECIIN_GCIGPIO_OFFSET)
2115 			 |(GCI_LTE_IP_ID << GCI_SECIIN_RXID2IP_OFFSET)));
2116 
2117 		/* Program GPIO Control Register for SECI_IN GCI GPIO */
2118 		si_gci_indirect(sih, gcigpioin/4, GCI_OFFSETOF(sih, gci_gpioctl),
2119 			(0xFF << (gcigpioin%4)*8),
2120 			(((1 << GCI_GPIOCTL_INEN_OFFSET)
2121 			 |(1 << GCI_GPIOCTL_PDN_OFFSET)) << (gcigpioin%4)*8));
2122 
2123 		/* Program SECI_OUT Control Register */
2124 		si_gci_indirect(sih, GCI_LTECX_SECI_ID,
2125 			GCI_OFFSETOF(sih, gci_seciout_ctrl), ALLONES_32,
2126 			((GCI_MODE_BTSIG << GCI_SECIOUT_MODE_OFFSET)
2127 			 |(gcigpioout << GCI_SECIOUT_GCIGPIO_OFFSET)
2128 			 |((1 << GCI_LTECX_SECI_ID) << GCI_SECIOUT_SECIINRELATED_OFFSET)));
2129 
2130 		/* Program GPIO Control Register for SECI_OUT GCI GPIO */
2131 		si_gci_indirect(sih, gcigpioout/4, GCI_OFFSETOF(sih, gci_gpioctl),
2132 			(0xFF << (gcigpioout%4)*8),
2133 			(((1 << GCI_GPIOCTL_OUTEN_OFFSET)) << (gcigpioout%4)*8));
2134 
2135 		/* Program SECI_IN Aux FIFO enable for LTECX SECI_IN Port */
2136 		if (GCIREV(sih->gcirev) >= 16) {
2137 			si_gci_indirect(sih, GCI_LTECX_SECI_ID,
2138 				GCI_OFFSETOF(sih, gci_seciin_auxfifo_en),
2139 				(((1 << GCI_LTECX_SECI_ID) << GCI_SECIAUX_RXENABLE_OFFSET)
2140 				|((1 << GCI_LTECX_SECI_ID) << GCI_SECIFIFO_RXENABLE_OFFSET)),
2141 				(((1 << GCI_LTECX_SECI_ID) << GCI_SECIAUX_RXENABLE_OFFSET)
2142 				|((1 << GCI_LTECX_SECI_ID) << GCI_SECIFIFO_RXENABLE_OFFSET)));
2143 		} else {
2144 			si_gci_direct(sih, GCI_OFFSETOF(sih, gci_seciin_auxfifo_en),
2145 				(((1 << GCI_LTECX_SECI_ID) << GCI_SECIAUX_RXENABLE_OFFSET)
2146 				|((1 << GCI_LTECX_SECI_ID) << GCI_SECIFIFO_RXENABLE_OFFSET)),
2147 				(((1 << GCI_LTECX_SECI_ID) << GCI_SECIAUX_RXENABLE_OFFSET)
2148 				|((1 << GCI_LTECX_SECI_ID) << GCI_SECIFIFO_RXENABLE_OFFSET)));
2149 		}
2150 		/* Program SECI_OUT Tx Enable for LTECX SECI_OUT Port */
2151 		si_gci_direct(sih, GCI_OFFSETOF(sih, gci_seciout_txen_txbr), ALLONES_32,
2152 			((1 << GCI_LTECX_SECI_ID) << GCI_SECITX_ENABLE_OFFSET));
2153 	}
2154 	if (GCIREV(sih->gcirev) >= 5) {
2155 		/* enable WlPrio/TxOn override from D11 */
2156 		si_gci_direct(sih, GCI_OFFSETOF(sih, gci_miscctl),
2157 			(1 << GCI_LTECX_TXCONF_EN_OFFSET | 1 << GCI_LTECX_PRISEL_EN_OFFSET),
2158 			(1 << GCI_LTECX_TXCONF_EN_OFFSET | 1 << GCI_LTECX_PRISEL_EN_OFFSET));
2159 	} else {
2160 		si_gci_direct(sih, GCI_OFFSETOF(sih, gci_miscctl),
2161 			(1 << GCI_LTECX_TXCONF_EN_OFFSET | 1 << GCI_LTECX_PRISEL_EN_OFFSET),
2162 			0x0000);
2163 	}
2164 	/* baudrate: 1/2/3/4mbps, escseq:0xdb, high baudrate, enable seci_tx/rx */
2165 	si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secifcr), ALLONES_32, 0x00);
2166 	if (GCIREV(sih->gcirev) >= 15) {
2167 		si_gci_indirect(sih, GCI_LTECX_SECI_ID, GCI_OFFSETOF(sih, gci_secilcr),
2168 			ALLONES_32, 0x00);
2169 	} else if (GCIREV(sih->gcirev) >= 4) {
2170 		si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secilcr), ALLONES_32, 0x00);
2171 	} else {
2172 		si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secilcr), ALLONES_32, 0x28);
2173 	}
2174 	si_gci_direct(sih, GCI_OFFSETOF(sih, gci_seciuartescval), ALLONES_32, 0xDB);
2175 
2176 	switch (baud) {
2177 	case 1:
2178 		/* baudrate:1mbps */
2179 		if (GCIREV(sih->gcirev) >= 15) {
2180 			si_gci_indirect(sih, GCI_LTECX_SECI_ID, GCI_OFFSETOF(sih, gci_secibauddiv),
2181 				ALLONES_32, 0xFE);
2182 		} else {
2183 			si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secibauddiv),
2184 				ALLONES_32, 0xFE);
2185 		}
2186 		if (GCIREV(sih->gcirev) >= 15) {
2187 			si_gci_indirect(sih, GCI_LTECX_SECI_ID, GCI_OFFSETOF(sih, gci_secimcr),
2188 				ALLONES_32, 0x80);
2189 		} else if (GCIREV(sih->gcirev) >= 4) {
2190 			si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secimcr),
2191 				ALLONES_32, 0x80);
2192 		} else {
2193 			si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secimcr),
2194 				ALLONES_32, 0x81);
2195 		}
2196 		si_gci_direct(sih, GCI_OFFSETOF(sih, gci_baudadj),
2197 			ALLONES_32, 0x23);
2198 		break;
2199 
2200 	case 2:
2201 		/* baudrate:2mbps */
2202 		if (xtalfreq == XTAL_FREQ_26000KHZ) {
2203 			/* 43430 A0 uses 26 MHz crystal.
2204 			 * Baudrate settings for crystal freq 26 MHz
2205 			 */
2206 			if (GCIREV(sih->gcirev) >= 15) {
2207 				si_gci_indirect(sih, GCI_LTECX_SECI_ID,
2208 					GCI_OFFSETOF(sih, gci_secibauddiv), ALLONES_32, 0xFF);
2209 			} else {
2210 				si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secibauddiv),
2211 					ALLONES_32, 0xFF);
2212 			}
2213 			if (GCIREV(sih->gcirev) >= 15) {
2214 				si_gci_indirect(sih, GCI_LTECX_SECI_ID,
2215 					GCI_OFFSETOF(sih, gci_secimcr), ALLONES_32, 0x80);
2216 			} else {
2217 				si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secimcr),
2218 					ALLONES_32, 0x80);
2219 			}
2220 			si_gci_direct(sih, GCI_OFFSETOF(sih, gci_baudadj),
2221 					ALLONES_32, 0x0);
2222 		}
2223 		else {
2224 			if (GCIREV(sih->gcirev) >= 15) {
2225 				si_gci_indirect(sih, GCI_LTECX_SECI_ID,
2226 					GCI_OFFSETOF(sih, gci_secibauddiv), ALLONES_32, 0xFF);
2227 			} else {
2228 				si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secibauddiv),
2229 					ALLONES_32, 0xFF);
2230 			}
2231 			if (GCIREV(sih->gcirev) >= 15) {
2232 				si_gci_indirect(sih, GCI_LTECX_SECI_ID,
2233 					GCI_OFFSETOF(sih, gci_secimcr), ALLONES_32, 0x80);
2234 			} else if (GCIREV(sih->gcirev) >= 4) {
2235 				si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secimcr),
2236 						ALLONES_32, 0x80);
2237 			} else {
2238 				si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secimcr),
2239 						ALLONES_32, 0x81);
2240 			}
2241 			si_gci_direct(sih, GCI_OFFSETOF(sih, gci_baudadj),
2242 					ALLONES_32, 0x11);
2243 		}
2244 		break;
2245 
2246 	case 4:
2247 		/* baudrate:4mbps */
2248 		if (GCIREV(sih->gcirev) >= 15) {
2249 			si_gci_indirect(sih, GCI_LTECX_SECI_ID, GCI_OFFSETOF(sih, gci_secibauddiv),
2250 				ALLONES_32, 0xF7);
2251 		} else {
2252 			si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secibauddiv),
2253 				ALLONES_32, 0xF7);
2254 		}
2255 		if (GCIREV(sih->gcirev) >= 15) {
2256 			si_gci_indirect(sih, GCI_LTECX_SECI_ID, GCI_OFFSETOF(sih, gci_secimcr),
2257 				ALLONES_32, 0x8);
2258 		} else if (GCIREV(sih->gcirev) >= 4) {
2259 			si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secimcr),
2260 				ALLONES_32, 0x8);
2261 		} else {
2262 			si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secimcr),
2263 				ALLONES_32, 0x9);
2264 		}
2265 		si_gci_direct(sih, GCI_OFFSETOF(sih, gci_baudadj),
2266 			ALLONES_32, 0x0);
2267 		break;
2268 
2269 	case 25:
2270 		/* baudrate:2.5mbps */
2271 		if (xtalfreq == XTAL_FREQ_26000KHZ) {
2272 			/* 43430 A0 uses 26 MHz crystal.
2273 			  * Baudrate settings for crystal freq 26 MHz
2274 			  */
2275 			if (GCIREV(sih->gcirev) >= 15) {
2276 				si_gci_indirect(sih, GCI_LTECX_SECI_ID,
2277 					GCI_OFFSETOF(sih, gci_secibauddiv), ALLONES_32, 0xF6);
2278 			} else {
2279 				si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secibauddiv),
2280 					ALLONES_32, 0xF6);
2281 			}
2282 		} else if (xtalfreq == XTAL_FREQ_59970KHZ) {
2283 			/* 4387 uses a 60 MHz crystal.
2284 			  * Baudrate settings for crystal freq/2 = 29.9 MHz
2285 			  * set bauddiv to 0xF4 to achieve 2.5M for Xtal/2 @ 29.9MHz
2286 			  * bauddiv = 256-Integer Part of (GCI clk freq/baudrate)
2287 			  */
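			/* Informational: applying that relation to the 26 MHz
			 * branch above gives 26 MHz / 2.5 Mbps = 10.4, integer
			 * part 10, and 256 - 10 = 246 = 0xF6, matching the value
			 * programmed there.
			 */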
2288 			if (GCIREV(sih->gcirev) >= 15) {
2289 				si_gci_indirect(sih, GCI_LTECX_SECI_ID,
2290 					GCI_OFFSETOF(sih, gci_secibauddiv), ALLONES_32, 0xF4);
2291 			} else {
2292 				si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secibauddiv),
2293 					ALLONES_32, 0xF4);
2294 			}
2295 		} else {
2296 			if (GCIREV(sih->gcirev) >= 15) {
2297 				si_gci_indirect(sih, GCI_LTECX_SECI_ID,
2298 					GCI_OFFSETOF(sih, gci_secibauddiv), ALLONES_32, 0xF1);
2299 			} else {
2300 				si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secibauddiv),
2301 					ALLONES_32, 0xF1);
2302 			}
2303 		}
2304 		if (GCIREV(sih->gcirev) >= 15) {
2305 			si_gci_indirect(sih, GCI_LTECX_SECI_ID, GCI_OFFSETOF(sih, gci_secimcr),
2306 				ALLONES_32, 0x8);
2307 		} else if (GCIREV(sih->gcirev) >= 4) {
2308 			si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secimcr),
2309 				ALLONES_32, 0x8);
2310 		} else {
2311 			si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secimcr),
2312 				ALLONES_32, 0x9);
2313 		}
2314 		si_gci_direct(sih, GCI_OFFSETOF(sih, gci_baudadj),
2315 			ALLONES_32, 0x0);
2316 		break;
2317 
2318 	case 3:
2319 	default:
2320 		/* baudrate:3mbps */
2321 		if (xtalfreq == XTAL_FREQ_26000KHZ) {
2322 			/* 43430 A0 uses 26 MHz crystal.
2323 			  * Baudrate settings for crystal freq 26 MHz
2324 			  */
2325 			if (GCIREV(sih->gcirev) >= 15) {
2326 				si_gci_indirect(sih, GCI_LTECX_SECI_ID,
2327 					GCI_OFFSETOF(sih, gci_secibauddiv), ALLONES_32, 0xF7);
2328 			} else {
2329 				si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secibauddiv),
2330 					ALLONES_32, 0xF7);
2331 			}
2332 		} else {
2333 			if (GCIREV(sih->gcirev) >= 15) {
2334 				si_gci_indirect(sih, GCI_LTECX_SECI_ID,
2335 					GCI_OFFSETOF(sih, gci_secibauddiv), ALLONES_32, 0xF4);
2336 			} else {
2337 				si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secibauddiv),
2338 					ALLONES_32, 0xF4);
2339 			}
2340 		}
2341 		if (GCIREV(sih->gcirev) >= 15) {
2342 			si_gci_indirect(sih, GCI_LTECX_SECI_ID, GCI_OFFSETOF(sih, gci_secimcr),
2343 				ALLONES_32, 0x8);
2344 		} else if (GCIREV(sih->gcirev) >= 4) {
2345 			si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secimcr),
2346 				ALLONES_32, 0x8);
2347 		} else {
2348 			si_gci_direct(sih, GCI_OFFSETOF(sih, gci_secimcr),
2349 				ALLONES_32, 0x9);
2350 		}
2351 		si_gci_direct(sih, GCI_OFFSETOF(sih, gci_baudadj),
2352 			ALLONES_32, 0x0);
2353 		break;
2354 	}
2355 	/* GCI Rev >= 1 */
2356 	if (GCIREV(sih->gcirev) >= 1) {
2357 		/* Route Rx-data through AUX register */
2358 		si_gci_direct(sih, GCI_OFFSETOF(sih, gci_rxfifo_common_ctrl),
2359 			GCI_RXFIFO_CTRL_AUX_EN, GCI_RXFIFO_CTRL_AUX_EN);
2360 #if !defined(WLTEST)
2361 		/* Route RX Type 2 data through RX FIFO */
2362 		si_gci_direct(sih, GCI_OFFSETOF(sih, gci_rxfifo_common_ctrl),
2363 			GCI_RXFIFO_CTRL_FIFO_TYPE2_EN, GCI_RXFIFO_CTRL_FIFO_TYPE2_EN);
2364 		/* Enable Inband interrupt for RX FIFO status */
2365 		si_gci_direct(sih, GCI_OFFSETOF(sih, gci_intmask),
2366 			(GCI_INTSTATUS_SRFNE | GCI_INTSTATUS_SRFOF),
2367 			(GCI_INTSTATUS_SRFNE | GCI_INTSTATUS_SRFOF));
2368 #endif /* !WLTEST */
2369 	} else {
2370 		/* GPIO 3-7 as BT_SIG compliant */
2371 		/* config GPIO pins 3-7 as input */
2372 		si_gci_indirect(sih, 0,
2373 			GCI_OFFSETOF(sih, gci_gpioctl), 0x20000000, 0x20000010);
2374 		si_gci_indirect(sih, 1,
2375 			GCI_OFFSETOF(sih, gci_gpioctl), 0x20202020, 0x20202020);
2376 		/* gpio mapping: frmsync-gpio7, mws_rx-gpio6, mws_tx-gpio5,
2377 		 * pat[0]-gpio4, pat[1]-gpio3
2378 		 */
2379 		si_gci_indirect(sih, 0x70010,
2380 			GCI_OFFSETOF(sih, gci_gpiomask), 0x00000001, 0x00000001);
2381 		si_gci_indirect(sih, 0x60010,
2382 			GCI_OFFSETOF(sih, gci_gpiomask), 0x00000002, 0x00000002);
2383 		si_gci_indirect(sih, 0x50010,
2384 			GCI_OFFSETOF(sih, gci_gpiomask), 0x00000004, 0x00000004);
2385 		si_gci_indirect(sih, 0x40010,
2386 			GCI_OFFSETOF(sih, gci_gpiomask), 0x02000000, 0x00000008);
2387 		si_gci_indirect(sih, 0x30010,
2388 			GCI_OFFSETOF(sih, gci_gpiomask), 0x04000000, 0x04000010);
2389 		/* gpio mapping: wlan_rx_prio-gpio5, wlan_tx_on-gpio4 */
2390 		si_gci_indirect(sih, 0x50000,
2391 			GCI_OFFSETOF(sih, gci_gpiomask), 0x00000010, 0x00000010);
2392 		si_gci_indirect(sih, 0x40000,
2393 			GCI_OFFSETOF(sih, gci_gpiomask), 0x00000020, 0x00000020);
2394 		/* enable gpio out on gpio4(wlanrxprio), gpio5(wlantxon) */
2395 		si_gci_direct(sih,
2396 			GCI_OFFSETOF(sih, gci_control_0), 0x00000030, 0x00000000);
2397 	}
2398 }
2399 #endif /* BCMLTECOEX */
2400 
2401 /* This function is used in AIBSS mode by BTCX to enable strobing to BT */
2402 bool
2403 si_btcx_wci2_init(si_t *sih)
2404 {
2405 	/* reset GCI block */
2406 	si_gci_reset(sih);
2407 
2408 	if (GCIREV(sih->gcirev) >= 1) {
2409 		si_gci_direct(sih, GCI_OFFSETOF(sih, gci_corectrl),
2410 			((GCI_CCTL_SCS_MASK << GCI_CCTL_SCS_OFFSET)
2411 			|(GCI_CCTL_LOWTOUT_MASK << GCI_CCTL_SILOWTOUT_OFFSET)
2412 			|(1 << GCI_CCTL_BRKONSLP_OFFSET)
2413 			|(1 << GCI_CCTL_US_OFFSET)
2414 			|(GCI_MODE_MASK << GCI_CCTL_SMODE_OFFSET)
2415 			|(1 << GCI_CCTL_FSL_OFFSET)
2416 			|(1 << GCI_CCTL_SECIEN_OFFSET)),
2417 			((GCI_CCTL_SCS_DEF << GCI_CCTL_SCS_OFFSET)
2418 			|(GCI_CCTL_LOWTOUT_30BIT << GCI_CCTL_SILOWTOUT_OFFSET)
2419 			|(0 << GCI_CCTL_BRKONSLP_OFFSET)
2420 			|(0 << GCI_CCTL_US_OFFSET)
2421 			|(GCI_MODE_BTSIG << GCI_CCTL_SMODE_OFFSET)
2422 			|(0 << GCI_CCTL_FSL_OFFSET)
2423 			|(1 << GCI_CCTL_SECIEN_OFFSET))); /* 19000024 */
2424 		return TRUE;
2425 	}
2426 	return FALSE;
2427 }
2428 
2429 void
2430 si_gci_uart_init(si_t *sih, osl_t *osh, uint8 seci_mode)
2431 {
2432 #ifdef	HNDGCI
2433 	hndgci_init(sih, osh, HND_GCI_PLAIN_UART_MODE,
2434 		GCI_UART_BR_115200);
2435 
2436 	/* specify rx callback */
2437 	hndgci_uart_config_rx_complete(-1, -1, 0, NULL, NULL);
2438 #else
2439 	BCM_REFERENCE(sih);
2440 	BCM_REFERENCE(osh);
2441 	BCM_REFERENCE(seci_mode);
2442 #endif	/* HNDGCI */
2443 }
2444 
2445 /**
2446  * A given GCI pin needs to be converted to a GCI FunctionSel register offset and the bit position
2447  * in this register.
2448  * @param[in]  pin     pin number, see the respective chip's Toplevel Arch page, GCI chipstatus regs
2449  * @param[out] regidx  chipcontrol register index (ring_index base)
2450  * @param[out] pos     bit position of the pin's first register bit
2451  *
2452  * eg: gpio9 will give regidx: 1 and pos 4
2453  */
2454 static void
2455 BCMPOSTTRAPFN(si_gci_get_chipctrlreg_ringidx_base4)(uint32 pin, uint32 *regidx, uint32 *pos)
2456 {
2457 	*regidx = (pin / 8);
2458 	*pos = (pin % 8) * 4; // each pin occupies 4 FunctionSel register bits
2459 
2460 	SI_MSG(("si_gci_get_chipctrlreg_ringidx_base4:%d:%d:%d\n", pin, *regidx, *pos));
2461 }
2462 
2463 /* input: pin number
2464 * output: chipcontrol register index (ring_index base) and
2465 * the bit position of the pin's first register bit.
2466 * eg: gpio9 will give regidx: 2 and pos 8
2467 */
2468 static uint8
2469 BCMPOSTTRAPFN(si_gci_get_chipctrlreg_ringidx_base8)(uint32 pin, uint32 *regidx, uint32 *pos)
2470 {
2471 	*regidx = (pin / 4);
2472 	*pos = (pin % 4)*8;
2473 
2474 	SI_MSG(("si_gci_get_chipctrlreg_ringidx_base8:%d:%d:%d\n", pin, *regidx, *pos));
2475 
2476 	return 0;
2477 }
2478 
2479 /** setup a given pin for fnsel function */
2480 void
2481 BCMPOSTTRAPFN(si_gci_set_functionsel)(si_t *sih, uint32 pin, uint8 fnsel)
2482 {
2483 	uint32 reg = 0, pos = 0;
2484 
2485 	SI_MSG(("si_gci_set_functionsel:%d\n", pin));
2486 
2487 	si_gci_get_chipctrlreg_ringidx_base4(pin, &reg, &pos);
2488 	si_gci_chipcontrol(sih, reg, GCIMASK_4B(pos), GCIPOSVAL_4B(fnsel, pos));
2489 }
2490 
2491 /* Returns a given pin's fnsel value */
2492 uint32
2493 si_gci_get_functionsel(si_t *sih, uint32 pin)
2494 {
2495 	uint32 reg = 0, pos = 0, temp;
2496 
2497 	SI_MSG(("si_gci_get_functionsel: %d\n", pin));
2498 
2499 	si_gci_get_chipctrlreg_ringidx_base4(pin, &reg, &pos);
2500 	temp = si_gci_chipstatus(sih, reg);
2501 	return GCIGETNBL(temp, pos);
2502 }
2503 
2504 /* Sets fnsel value to IND for all the GPIO pads that have fnsel set to given argument */
2505 void
2506 si_gci_clear_functionsel(si_t *sih, uint8 fnsel)
2507 {
2508 	uint32 i;
2509 	SI_MSG(("si_gci_clear_functionsel: %d\n", fnsel));
2510 	for (i = 0; i <= CC_PIN_GPIO_LAST; i++)	{
2511 		if (si_gci_get_functionsel(sih, i) == fnsel)
2512 			si_gci_set_functionsel(sih, i, CC_FNSEL_IND);
2513 	}
2514 }
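
/* Illustrative helper (guarded out, never compiled): mirrors the pad re-mux
 * pattern used by si_ercx_init()/si_wci2_init() above, i.e. first free any pad
 * already muxed to the desired function, then route the wanted pad to it.  The
 * helper name is hypothetical.
 */
#if 0
static void
example_remux_pad(si_t *sih, uint32 pad, uint8 fnsel)
{
	si_gci_clear_functionsel(sih, fnsel);		/* free pads already on fnsel */
	si_gci_set_functionsel(sih, pad, fnsel);	/* route the desired pad */
	ASSERT(si_gci_get_functionsel(sih, pad) == fnsel);
}
#endif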
2515 
2516 /** write 'val' to the gci chip control register indexed by 'reg' */
2517 uint32
2518 BCMPOSTTRAPFN(si_gci_chipcontrol)(si_t *sih, uint reg, uint32 mask, uint32 val)
2519 {
2520 	/* because NFLASH and GCI clash at 0xC00 */
2521 	if ((CCREV(sih->ccrev) == 38) && ((sih->chipst & (1 << 4)) != 0)) {
2522 		/* CC NFLASH exists; do not touch GCI registers */
2523 		ASSERT(0);
2524 		return ALLONES_32;
2525 	}
2526 
2527 	si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_indirect_addr), ~0, reg);
2528 	return si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_chipctrl), mask, val);
2529 }
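
/* Informational note on the indirect access pattern above: writing the
 * register index to gci_indirect_addr selects which instance of the
 * ring-indexed gci_chipctrl/gci_chipsts registers the following access hits.
 * For example, si_gci_chipcontrol(sih, 7, 0xFF, 0x12) would update only the
 * low byte of GCI chip control register 7 (register index and values here are
 * placeholders), since si_corereg() performs a masked read-modify-write and
 * leaves bits outside 'mask' untouched.
 */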
2530 
2531 /* Read the gci chip status register indexed by 'reg' */
2532 uint32
2533 BCMPOSTTRAPFN(si_gci_chipstatus)(si_t *sih, uint reg)
2534 {
2535 	/* because NFLASH and GCI clash at 0xC00 */
2536 	if ((CCREV(sih->ccrev) == 38) && ((sih->chipst & (1 << 4)) != 0)) {
2537 		/* CC NFLASH exists; do not touch GCI registers */
2538 		ASSERT(0);
2539 		return ALLONES_32;
2540 	}
2541 
2542 	si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_indirect_addr), ~0, reg);
2543 	/* setting mask and value to '0' to use si_corereg for read only purpose */
2544 	return si_corereg(sih, GCI_CORE_IDX(sih), GCI_OFFSETOF(sih, gci_chipsts), 0, 0);
2545 }
2546 #endif /* !defined(BCMDONGLEHOST) */
2547 
2548 uint16
2549 BCMINITFN(si_chipid)(const si_t *sih)
2550 {
2551 	const si_info_t *sii = SI_INFO(sih);
2552 
2553 	return (sii->chipnew) ? sii->chipnew : sih->chip;
2554 }
2555 
2556 /* CHIP_ID's being mapped here should not be used anywhere else in the code */
2557 static void
2558 BCMATTACHFN(si_chipid_fixup)(si_t *sih)
2559 {
2560 	si_info_t *sii = SI_INFO(sih);
2561 
2562 	ASSERT(sii->chipnew == 0);
2563 	switch (sih->chip) {
2564 		case BCM4377_CHIP_ID:
2565 			sii->chipnew = sih->chip; /* save it */
2566 			sii->pub.chip = BCM4369_CHIP_ID; /* chip class */
2567 		break;
2568 		case BCM4375_CHIP_ID:
2569 			sii->chipnew = sih->chip; /* save it */
2570 			sii->pub.chip = BCM4375_CHIP_ID; /* chip class */
2571 		break;
2572 		case BCM4362_CHIP_ID:
2573 			sii->chipnew = sih->chip; /* save it */
2574 			sii->pub.chip = BCM4362_CHIP_ID; /* chip class */
2575 		break;
2576 		case BCM4356_CHIP_ID:
2577 		case BCM4371_CHIP_ID:
2578 			sii->chipnew = sih->chip; /* save it */
2579 			sii->pub.chip = BCM4354_CHIP_ID; /* chip class */
2580 			break;
2581 		default:
2582 		break;
2583 	}
2584 }
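
/* Informational: after this fixup, sih->chip holds the chip "class" used for
 * feature checks, while si_chipid() above still reports the original device id
 * that was saved in chipnew.
 */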
2585 
2586 #ifdef AXI_TIMEOUTS_NIC
2587 uint32
2588 BCMPOSTTRAPFN(si_clear_backplane_to_fast)(void *sih, void *addr)
2589 {
2590 	si_t *_sih = DISCARD_QUAL(sih, si_t);
2591 
2592 	if (CHIPTYPE(_sih->socitype) == SOCI_AI) {
2593 		return ai_clear_backplane_to_fast(_sih, addr);
2594 	}
2595 
2596 	return 0;
2597 }
2598 
2599 const si_axi_error_info_t *
2600 si_get_axi_errlog_info(const si_t *sih)
2601 {
2602 	if (CHIPTYPE(sih->socitype) == SOCI_AI) {
2603 		return (const si_axi_error_info_t *)sih->err_info;
2604 	}
2605 
2606 	return NULL;
2607 }
2608 
2609 void
2610 si_reset_axi_errlog_info(const si_t *sih)
2611 {
2612 	if (sih->err_info) {
2613 		sih->err_info->count = 0;
2614 	}
2615 }
2616 #endif /* AXI_TIMEOUTS_NIC */
2617 
2618 /* TODO: Can we allocate only one instance? */
2619 static int32
2620 BCMATTACHFN(si_alloc_wrapper)(si_info_t *sii)
2621 {
2622 	if (sii->osh) {
2623 		sii->axi_wrapper = (axi_wrapper_t *)MALLOCZ(sii->osh,
2624 			(sizeof(axi_wrapper_t) * SI_MAX_AXI_WRAPPERS));
2625 
2626 		if (sii->axi_wrapper == NULL) {
2627 			return BCME_NOMEM;
2628 		}
2629 	} else {
2630 		sii->axi_wrapper = NULL;
2631 		return BCME_ERROR;
2632 	}
2633 	return BCME_OK;
2634 }
2635 
2636 static void
2637 BCMATTACHFN(si_free_wrapper)(si_info_t *sii)
2638 {
2639 	if (sii->axi_wrapper) {
2640 
2641 		MFREE(sii->osh, sii->axi_wrapper, (sizeof(axi_wrapper_t) * SI_MAX_AXI_WRAPPERS));
2642 	}
2643 }
2644 
2645 static void *
2646 BCMATTACHFN(si_alloc_coresinfo)(si_info_t *sii, osl_t *osh, chipcregs_t *cc)
2647 {
2648 	if (CHIPTYPE(sii->pub.socitype) == SOCI_NCI) {
2649 		sii->nci_info = nci_init(&sii->pub, (void*)(uintptr)cc, sii->pub.bustype);
2650 
2651 		return sii->nci_info;
2652 
2653 	} else {
2654 
2655 #ifdef _RTE_
2656 		sii->cores_info = (si_cores_info_t *)&ksii_cores_info;
2657 #else
2658 		if (sii->cores_info == NULL) {
2659 			/* alloc si_cores_info_t */
2660 			if ((sii->cores_info = (si_cores_info_t *)MALLOCZ(osh,
2661 				sizeof(si_cores_info_t))) == NULL) {
2662 				SI_ERROR(("si_attach: malloc failed for cores_info! malloced"
2663 					" %d bytes\n", MALLOCED(osh)));
2664 				return (NULL);
2665 			}
2666 		} else {
2667 			ASSERT(sii->cores_info == &ksii_cores_info);
2668 
2669 		}
2670 #endif /* _RTE_ */
2671 		return sii->cores_info;
2672 	}
2673 
2674 }
2675 
2676 static void
2677 BCMATTACHFN(si_free_coresinfo)(si_info_t *sii, osl_t *osh)
2678 {
2679 
2680 	if (CHIPTYPE(sii->pub.socitype) == SOCI_NCI) {
2681 		if (sii->nci_info) {
2682 			nci_uninit(sii->nci_info);
2683 			sii->nci_info = NULL;
2684 		}
2685 	} else {
2686 		if (sii->cores_info && (sii->cores_info != &ksii_cores_info)) {
2687 			MFREE(osh, sii->cores_info, sizeof(si_cores_info_t));
2688 		}
2689 	}
2690 }
2691 
2692 /**
2693  * Allocate an si handle. This function may be called multiple times. This function is called by
2694  * both si_attach() and si_kattach().
2695  *
2696  * vars - pointer to a to-be created pointer area for "environment" variables. Some callers of this
2697  *        function set 'vars' to NULL.
2698  */
2699 static si_info_t *
2700 BCMATTACHFN(si_doattach)(si_info_t *sii, uint devid, osl_t *osh, volatile void *regs,
2701                        uint bustype, void *sdh, char **vars, uint *varsz)
2702 {
2703 	struct si_pub *sih = &sii->pub;
2704 	uint32 w, savewin;
2705 	chipcregs_t *cc;
2706 	char *pvars = NULL;
2707 	uint origidx;
2708 #if defined(NVSRCX)
2709 	char *sromvars;
2710 #endif
2711 	uint err_at = 0;
2712 
2713 	ASSERT(GOODREGS(regs));
2714 
2715 	savewin = 0;
2716 
2717 	sih->buscoreidx = BADIDX;
2718 	sii->device_removed = FALSE;
2719 
2720 	sii->curmap = regs;
2721 	sii->sdh = sdh;
2722 	sii->osh = osh;
2723 	sii->second_bar0win = ~0x0;
2724 	sih->enum_base = si_enum_base(devid);
2725 
2726 #if defined(AXI_TIMEOUTS_NIC)
2727 	sih->err_info = MALLOCZ(osh, sizeof(si_axi_error_info_t));
2728 	if (sih->err_info == NULL) {
2729 		SI_ERROR(("si_doattach: %zu bytes MALLOC FAILED",
2730 			sizeof(si_axi_error_info_t)));
2731 	}
2732 #endif /* AXI_TIMEOUTS_NIC */
2733 
2734 #if defined(AXI_TIMEOUTS_NIC) && defined(__linux__)
2735 	osl_set_bpt_cb(osh, (void *)si_clear_backplane_to_fast, (void *)sih);
2736 #endif	/* AXI_TIMEOUTS_NIC && linux */
2737 
2738 	/* check to see if we are an SI core mimicking a PCI core */
2739 	if ((bustype == PCI_BUS) &&
2740 	    (OSL_PCI_READ_CONFIG(sii->osh, PCI_SPROM_CONTROL, sizeof(uint32)) == 0xffffffff)) {
2741 		SI_ERROR(("si_doattach: incoming bus is PCI but it's a lie, switching to SI "
2742 		          "devid:0x%x\n", devid));
2743 		bustype = SI_BUS;
2744 	}
2745 
2746 	/* find Chipcommon address */
2747 	if (bustype == PCI_BUS) {
2748 		savewin = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
2749 		/* PR 29857: init to core0 if bar0window is not programmed properly */
2750 		if (!GOODCOREADDR(savewin, SI_ENUM_BASE(sih)))
2751 			savewin = SI_ENUM_BASE(sih);
2752 		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, SI_ENUM_BASE(sih));
2753 		if (!regs) {
2754 			err_at = 1;
2755 			goto exit;
2756 		}
2757 		cc = (chipcregs_t *)regs;
2758 #ifdef BCMSDIO
2759 	} else if ((bustype == SDIO_BUS) || (bustype == SPI_BUS)) {
2760 		cc = (chipcregs_t *)sii->curmap;
2761 #endif
2762 	} else {
2763 		cc = (chipcregs_t *)REG_MAP(SI_ENUM_BASE(sih), SI_CORE_SIZE);
2764 	}
2765 
2766 	sih->bustype = (uint16)bustype;
2767 #ifdef BCMBUSTYPE
2768 	if (bustype != BUSTYPE(bustype)) {
2769 		SI_ERROR(("si_doattach: bus type %d does not match configured bus type %d\n",
2770 			bustype, BUSTYPE(bustype)));
2771 		err_at = 2;
2772 		goto exit;
2773 	}
2774 #endif
2775 
2776 	/* bus/core/clk setup for register access */
2777 	if (!si_buscore_prep(sii, bustype, devid, sdh)) {
2778 		SI_ERROR(("si_doattach: si_buscore_prep failed %d\n", bustype));
2779 		err_at = 3;
2780 		goto exit;
2781 	}
2782 
2783 	/* ChipID recognition.
2784 	*   We assume we can read chipid at offset 0 from the regs arg.
2785 	*   If we add other chiptypes (or if we need to support old sdio hosts w/o chipcommon),
2786 	*   some way of recognizing them needs to be added here.
2787 	*/
2788 	if (!cc) {
2789 		err_at = 3;
2790 		goto exit;
2791 	}
2792 	w = R_REG(osh, &cc->chipid);
2793 #if defined(BCMDONGLEHOST)
2794 	/* please refer to RB:13157 */
2795 	if ((w & 0xfffff) == 148277) w -= 65532;
2796 #endif /* defined(BCMDONGLEHOST) */
2797 	sih->socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
2798 	/* Might as well fill in chip id, rev & pkg */
2799 	sih->chip = w & CID_ID_MASK;
2800 	sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
2801 	sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;
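	/* The socitype extracted here selects the interconnect-specific scan path
	 * below (SB/AI/NAI/DVTBUS/UBUS/NCI); chip, chiprev and chippkg are the raw
	 * ID fields used by the chip-specific handling that follows.
	 */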
2802 
2803 #if defined(BCMSDIO) && (defined(HW_OOB) || defined(FORCE_WOWLAN))
2804 	dhd_conf_set_hw_oob_intr(sdh, sih);
2805 #endif
2806 
2807 	si_chipid_fixup(sih);
2808 
2809 	sih->issim = IS_SIM(sih->chippkg);
2810 
2811 	if (MULTIBP_CAP(sih)) {
2812 		sih->_multibp_enable = TRUE;
2813 	}
2814 
2815 	/* scan for cores */
2816 	 if (CHIPTYPE(sii->pub.socitype) == SOCI_NCI) {
2817 
2818 		if (si_alloc_coresinfo(sii, osh, cc) == NULL) {
2819 			err_at = 4;
2820 			goto exit;
2821 		}
2822 		ASSERT(sii->nci_info);
2823 
2824 		if (!FWSIGN_ENAB()) {
2825 			if ((si_alloc_wrapper(sii)) != BCME_OK) {
2826 				err_at = 5;
2827 				goto exit;
2828 			}
2829 		}
2830 
2831 		if ((sii->numcores = nci_scan(sih)) == 0u) {
2832 			err_at = 6;
2833 			goto exit;
2834 		} else {
2835 			if (!FWSIGN_ENAB()) {
2836 				nci_dump_erom(sii->nci_info);
2837 			}
2838 		}
2839 	} else {
2840 
2841 		if (si_alloc_coresinfo(sii, osh, cc) == NULL) {
2842 			err_at = 7;
2843 			goto exit;
2844 		}
2845 
2846 		if (CHIPTYPE(sii->pub.socitype) == SOCI_SB) {
2847 			SI_MSG(("Found chip type SB (0x%08x)\n", w));
2848 			sb_scan(&sii->pub, regs, devid);
2849 		} else if ((CHIPTYPE(sii->pub.socitype) == SOCI_AI) ||
2850 			(CHIPTYPE(sii->pub.socitype) == SOCI_NAI) ||
2851 			(CHIPTYPE(sii->pub.socitype) == SOCI_DVTBUS)) {
2852 
2853 			if (CHIPTYPE(sii->pub.socitype) == SOCI_AI)
2854 				SI_MSG(("Found chip type AI (0x%08x)\n", w));
2855 			else if (CHIPTYPE(sii->pub.socitype) == SOCI_NAI)
2856 				SI_MSG(("Found chip type NAI (0x%08x)\n", w));
2857 			else
2858 				SI_MSG(("Found chip type DVT (0x%08x)\n", w));
2859 			/* pass chipc address instead of original core base */
2860 			if ((si_alloc_wrapper(sii)) != BCME_OK) {
2861 				err_at = 8;
2862 				goto exit;
2863 			}
2864 			ai_scan(&sii->pub, (void *)(uintptr)cc, devid);
2865 			/* make sure the wrappers are properly accounted for */
2866 			if (sii->axi_num_wrappers == 0) {
2867 				SI_ERROR(("FATAL: Wrapper count 0\n"));
2868 				err_at = 16;
2869 				goto exit;
2870 			}
2871 		}
2872 		 else if (CHIPTYPE(sii->pub.socitype) == SOCI_UBUS) {
2873 			SI_MSG(("Found chip type UBUS (0x%08x), chip id = 0x%4x\n", w, sih->chip));
2874 			/* pass chipc address instead of original core base */
2875 			ub_scan(&sii->pub, (void *)(uintptr)cc, devid);
2876 		} else {
2877 			SI_ERROR(("Found chip of unknown type (0x%08x)\n", w));
2878 			err_at = 9;
2879 			goto exit;
2880 		}
2881 	}
2882 	/* no cores found, bail out */
2883 	if (sii->numcores == 0) {
2884 		err_at = 10;
2885 		goto exit;
2886 	}
2887 	/* bus/core/clk setup */
2888 	origidx = SI_CC_IDX;
2889 	if (!si_buscore_setup(sii, cc, bustype, savewin, &origidx, regs)) {
2890 		err_at = 11;
2891 		goto exit;
2892 	}
2893 
2894 	/* JIRA: SWWLAN-98321: SPROM read showing wrong values */
2895 	/* Set the clkdiv2 divisor bits (2:0) to 0x4 if srom is present */
2896 	if (bustype == SI_BUS) {
2897 		uint32 clkdiv2, sromprsnt, capabilities, srom_supported;
2898 		capabilities =	R_REG(osh, &cc->capabilities);
2899 		srom_supported = capabilities & SROM_SUPPORTED;
2900 		if (srom_supported) {
2901 			sromprsnt = R_REG(osh, &cc->sromcontrol);
2902 			sromprsnt = sromprsnt & SROM_PRSNT_MASK;
2903 			if (sromprsnt) {
2904 				/* SROM clock comes from the backplane clock via clkdiv2; must be <= 1 MHz */
2905 				clkdiv2 = (R_REG(osh, &cc->clkdiv2) & ~CLKD2_SROM);
2906 				clkdiv2 |= CLKD2_SROMDIV_192;
2907 				W_REG(osh, &cc->clkdiv2, clkdiv2);
2908 			}
2909 		}
2910 	}
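	/* Informational: assuming CLKD2_SROMDIV_192 selects a divide-by-192 ratio
	 * (as the name suggests), the SROM clock stays at or below 1 MHz for
	 * backplane clocks up to 192 MHz, e.g. 96 MHz / 192 = 0.5 MHz.
	 */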
2911 
2912 	if (bustype == PCI_BUS) {
2913 #if !defined(BCMDONGLEHOST)
2914 		/* JIRA:SWWLAN-18243: SPROM access taking too long */
2915 		/* not required for 43602 */
2916 		if (((CHIPID(sih->chip) == BCM4360_CHIP_ID) ||
2917 		     (CHIPID(sih->chip) == BCM43460_CHIP_ID) ||
2918 		     (CHIPID(sih->chip) == BCM4352_CHIP_ID)) &&
2919 		    (CHIPREV(sih->chiprev) <= 2)) {
2920 			pcie_disable_TL_clk_gating(sii->pch);
2921 			pcie_set_L1_entry_time(sii->pch, 0x40);
2922 		}
2923 #endif /* BCMDONGLEHOST */
2924 
2925 	}
2926 #ifdef BCM_SDRBL
2927 	/* 4360 ROM bootloader in the PCIe case: if SDR is enabled but protection is
2928 	 * not turned on, we want to hold the ARM in reset.
2929 	 * Bottom line: in the sdrenable case, we allow the ARM to boot only when
2930 	 * protection is turned on.
2931 	 */
2932 	if (CHIP_HOSTIF_PCIE(&(sii->pub))) {
2933 		uint32 sflags = si_arm_sflags(&(sii->pub));
2934 
2935 		/* If SDR is enabled but protection is not turned on
2936 		* then we want to force arm to WFI.
2937 		*/
2938 		if ((sflags & (SISF_SDRENABLE | SISF_TCMPROT)) == SISF_SDRENABLE) {
2939 			disable_arm_irq();
2940 			while (1) {
2941 				hnd_cpu_wait(sih);
2942 			}
2943 		}
2944 	}
2945 #endif /* BCM_SDRBL */
2946 #ifdef SI_SPROM_PROBE
2947 	si_sprom_init(sih);
2948 #endif /* SI_SPROM_PROBE */
2949 
2950 #if !defined(BCMDONGLEHOST)
2951 	if (!FWSIGN_ENAB()) {
2952 		/* Init nvram from flash if it exists */
2953 		if (nvram_init(&(sii->pub)) != BCME_OK) {
2954 			SI_ERROR(("si_doattach: nvram_init failed \n"));
2955 			goto exit;
2956 		}
2957 	}
2958 
2959 	/* Init nvram from sprom/otp if they exist */
2960 	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
2961 
2962 #ifdef DONGLEBUILD
2963 #if	!defined(NVSRCX)
2964 	/* Init nvram from sprom/otp if they exist and not inited */
2965 	if (!FWSIGN_ENAB() && si_getkvars()) {
2966 		*vars = si_getkvars();
2967 		*varsz = si_getkvarsz();
2968 	}
2969 	else
2970 #endif
2971 #endif /* DONGLEBUILD */
2972 	{
2973 #if defined(NVSRCX)
2974 	sromvars = srom_get_sromvars();
2975 	if (sromvars == NULL) {
2976 		if (srom_var_init(&sii->pub, BUSTYPE(bustype), (void *)regs,
2977 				sii->osh, &sromvars, varsz)) {
2978 			err_at = 12;
2979 			goto exit;
2980 		}
2981 	}
2982 #else
2983 	if (!FWSIGN_ENAB()) {
2984 		if (srom_var_init(&sii->pub, BUSTYPE(bustype), (void *)regs,
2985 				sii->osh, vars, varsz)) {
2986 			err_at = 13;
2987 			goto exit;
2988 		}
2989 	}
2990 #endif /* NVSRCX */
2991 	}
2992 	GCC_DIAGNOSTIC_POP();
2993 
2994 	pvars = vars ? *vars : NULL;
2995 
2996 	si_nvram_process(sii, pvars);
2997 
2998 	/* xtalfreq is required for programming open loop calibration support changes */
2999 	sii->xtalfreq = getintvar(NULL, rstr_xtalfreq);
3000 	/* === NVRAM, clock is ready === */
3001 #else
3002 	pvars = NULL;
3003 	BCM_REFERENCE(pvars);
3004 #endif /* !BCMDONGLEHOST */
3005 
3006 #if !defined(BCMDONGLEHOST)
3007 #if defined(BCMSRTOPOFF) && !defined(BCMSRTOPOFF_DISABLED)
3008 	_srtopoff_enab = (bool)getintvar(NULL, rstr_srtopoff_enab);
3009 #endif
3010 
3011 	if (!FWSIGN_ENAB()) {
3012 		if (HIB_EXT_WAKEUP_CAP(sih)) {
3013 			sii->lhl_ps_mode = (uint8)getintvar(NULL, rstr_lhl_ps_mode);
3014 
3015 			if (getintvar(NULL, rstr_ext_wakeup_dis)) {
3016 				sii->hib_ext_wakeup_enab = FALSE;
3017 			} else if (BCMSRTOPOFF_ENAB()) {
3018 				/* 4387 has a GPIO false-wakeup issue that still needs to be resolved */
3019 				sii->hib_ext_wakeup_enab = TRUE;
3020 			} else if (LHL_IS_PSMODE_1(sih)) {
3021 				sii->hib_ext_wakeup_enab = TRUE;
3022 			} else {
3023 				sii->hib_ext_wakeup_enab = FALSE;
3024 			}
3025 		}
3026 
3027 		sii->rfldo3p3_war = (bool)getintvar(NULL, rstr_rfldo3p3_cap_war);
3028 	}
3029 #endif /* !defined(BCMDONGLEHOST) */
3030 
3031 	if (!si_onetimeinit) {
3032 #if !defined(BCMDONGLEHOST)
3033 		char *val;
3034 
3035 		(void) val;
3036 		if (!FWSIGN_ENAB()) {
3037 			/* Cache nvram override to min mask */
3038 			if ((val = getvar(NULL, rstr_rmin)) != NULL) {
3039 				sii->min_mask_valid = TRUE;
3040 				sii->nvram_min_mask = (uint32)bcm_strtoul(val, NULL, 0);
3041 			} else {
3042 				sii->min_mask_valid = FALSE;
3043 			}
3044 			/* Cache nvram override to max mask */
3045 			if ((val = getvar(NULL, rstr_rmax)) != NULL) {
3046 				sii->max_mask_valid = TRUE;
3047 				sii->nvram_max_mask = (uint32)bcm_strtoul(val, NULL, 0);
3048 			} else {
3049 				sii->max_mask_valid = FALSE;
3050 			}
3051 
3052 #ifdef DONGLEBUILD
3053 			/* Handle armclk frequency setting from NVRAM file */
3054 			if (BCM4369_CHIP(sih->chip) || BCM4362_CHIP(sih->chip) ||
3055 				BCM4389_CHIP(sih->chip) ||
3056 				BCM4388_CHIP(sih->chip) || BCM4397_CHIP(sih->chip) || FALSE) {
3057 				if ((val = getvar(NULL, rstr_armclk)) != NULL) {
3058 					sii->armpllclkfreq = (uint32)bcm_strtoul(val, NULL, 0);
3059 					ASSERT(sii->armpllclkfreq > 0);
3060 				} else {
3061 					sii->armpllclkfreq = 0;
3062 				}
3063 			}
3064 
3065 #endif /* DONGLEBUILD */
3066 		}
3067 
3068 #endif /* !BCMDONGLEHOST */
3069 
3070 #if defined(CONFIG_XIP) && defined(BCMTCAM)
3071 		/* patch the ROM if there are any patch pairs from OTP/SPROM */
3072 		if (patch_pair) {
3073 
3074 #if defined(__ARM_ARCH_7R__)
3075 			hnd_tcam_bootloader_load(si_setcore(sih, ARMCR4_CORE_ID, 0), pvars);
3076 #elif defined(__ARM_ARCH_7A__)
3077 			hnd_tcam_bootloader_load(si_setcore(sih, SYSMEM_CORE_ID, 0), pvars);
3078 #else
3079 			hnd_tcam_bootloader_load(si_setcore(sih, SOCRAM_CORE_ID, 0), pvars);
3080 #endif
3081 			si_setcoreidx(sih, origidx);
3082 		}
3083 #endif /* CONFIG_XIP && BCMTCAM */
3084 
3085 		if (CCREV(sii->pub.ccrev) >= 20) {
3086 			uint32 gpiopullup = 0, gpiopulldown = 0;
3087 			cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
3088 			ASSERT(cc != NULL);
3089 
3090 #if !defined(BCMDONGLEHOST) /* if not a DHD build */
3091 			if (getvar(pvars, rstr_gpiopulldown) != NULL) {
3092 				uint32 value;
3093 				value = getintvar(pvars, rstr_gpiopulldown);
3094 				if (value != 0xFFFFFFFF) { /* non populated SROM fields are ffff */
3095 					gpiopulldown |= value;
3096 				}
3097 			}
3098 #endif /* !BCMDONGLEHOST */
3099 
3100 			W_REG(osh, &cc->gpiopullup, gpiopullup);
3101 			W_REG(osh, &cc->gpiopulldown, gpiopulldown);
3102 			si_setcoreidx(sih, origidx);
3103 		}
3104 
3105 #ifdef DONGLEBUILD
3106 		/* Ensure gci is initialized before PMU as PLL init needs to acquire the gci semaphore */
3107 		hnd_gci_init(sih);
3108 #endif /* DONGLEBUILD */
3109 
3110 #if defined(BT_WLAN_REG_ON_WAR)
3111 	/*
3112 	 * 4389B0/C0 - WLAN and BT turn on WAR - synchronize WLAN and BT firmware using GCI
3113 	 * semaphore - THREAD_0_GCI_SEM_3_ID to ensure that simultaneous register accesses
3114 	 * do not occur. The WLAN firmware will acquire the semaphore just to ensure that
3115 	 * if BT firmware is already executing the WAR, then wait until it finishes.
3116 	 * In BT firmware checking for WL_REG_ON status is sufficient to decide whether
3117 	 * to apply the WAR or not (i.e, WLAN is turned ON/OFF).
3118 	 */
3119 	if ((hnd_gcisem_acquire(GCI_BT_WLAN_REG_ON_WAR_SEM, TRUE,
3120 			GCI_BT_WLAN_REG_ON_WAR_SEM_TIMEOUT) != BCME_OK)) {
3121 		err_at = 14;
3122 		hnd_gcisem_set_err(GCI_BT_WLAN_REG_ON_WAR_SEM);
3123 		goto exit;
3124 	}
3125 
3126 	/* WLAN/BT turn On WAR - Remove wlsc_btsc_prisel override after semaphore acquire
3127 	 * BT sets the override at power up when WL_REG_ON is low - wlsc_btsc_prisel is in
3128 	 * undefined state when wlan_reg_on is low
3129 	 */
3130 	si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_23,
3131 		(CC_GCI_CHIPCTRL_23_WLSC_BTSC_PRISEL_FORCE_MASK |
3132 		CC_GCI_CHIPCTRL_23_WLSC_BTSC_PRISEL_VAL_MASK), 0u);
3133 
3134 	if ((hnd_gcisem_release(GCI_BT_WLAN_REG_ON_WAR_SEM) != BCME_OK)) {
3135 		hnd_gcisem_set_err(GCI_BT_WLAN_REG_ON_WAR_SEM);
3136 		err_at = 15;
3137 		goto exit;
3138 	}
3139 #endif /* BT_WLAN_REG_ON_WAR */
3140 
3141 		/* Skip PMU initialization from the Dongle Host.
3142 		 * Firmware will take care of it when it comes up.
3143 		 */
3144 #if !defined(BCMDONGLEHOST)
3145 		/* PMU specific initializations */
3146 		if (PMUCTL_ENAB(sih)) {
3147 			uint32 xtalfreq;
3148 			si_pmu_init(sih, sii->osh);
3149 			si_pmu_chip_init(sih, sii->osh);
3150 			xtalfreq = getintvar(pvars, rstr_xtalfreq);
3151 #if defined(WL_FWSIGN)
3152 			if (FWSIGN_ENAB()) {
3153 				xtalfreq = XTALFREQ_KHZ;
3154 			}
3155 #endif /* WL_FWSIGN */
3156 
3157 			/*
3158 			 * workaround for chips that don't support external LPO, thus ALP clock
3159 			 * cannot be measured accurately:
3160 			 */
3161 			switch (CHIPID(sih->chip)) {
3162 			CASE_BCM43602_CHIP:
3163 				xtalfreq = 40000;
3164 				break;
3165 			case BCM4369_CHIP_GRPID:
3166 				if (xtalfreq == 0)
3167 					xtalfreq = 37400;
3168 				break;
3169 			default:
3170 				break;
3171 			}
3172 
3173 			/* If xtalfreq var not available, try to measure it */
3174 			if (xtalfreq == 0)
3175 				xtalfreq = si_pmu_measure_alpclk(sih, sii->osh);
3176 
3177 			sii->xtalfreq = xtalfreq;
3178 			si_pmu_pll_init(sih, sii->osh, xtalfreq);
3179 
3180 			if (!FWSIGN_ENAB()) {
3181 				/* configure default spurmode  */
3182 				sii->spurmode = getintvar(pvars, rstr_spurconfig) & 0xf;
3183 
3184 #if defined(SAVERESTORE)
3185 				/* Only needs to be done once.
3186 				 * Must be done before si_pmu_res_init() so that sr_isenab() can be used.
3187 				 */
3188 				if (SR_ENAB()) {
3189 					sr_save_restore_init(sih);
3190 				}
3191 #endif
3192 
3193 				/* TODO: should move the per core srpwr out of
3194 				 * si_doattach() to a function where it knows
3195 				 * which core it should enable the power domain
3196 				 * request for...
3197 				 */
3198 				if (SRPWR_CAP(sih) && !SRPWR_ENAB()) {
3199 					uint32 domain = SRPWR_DMN3_MACMAIN_MASK;
3200 
3201 #if defined(WLRSDB) && !defined(WLRSDB_DISABLED)
3202 					domain |= SRPWR_DMN2_MACAUX_MASK;
3203 #endif /* WLRSDB && !WLRSDB_DISABLED */
3204 
3205 					if (si_scan_core_present(sih)) {
3206 						domain |= SRPWR_DMN4_MACSCAN_MASK;
3207 					}
3208 
3209 					si_srpwr_request(sih, domain, domain);
3210 				}
3211 			}
3212 
3213 			si_pmu_res_init(sih, sii->osh);
3214 			si_pmu_swreg_init(sih, sii->osh);
3215 #ifdef BCMGCISHM
3216 			hnd_gcishm_init(sih);
3217 #endif
3218 		}
3219 #endif /* !defined(BCMDONGLEHOST) */
3220 #ifdef _RTE_
3221 		si_onetimeinit = TRUE;
3222 #endif
3223 	}
3224 
3225 #if !defined(BCMDONGLEHOST)
3226 
3227 	si_lowpwr_opt(sih);
3228 
3229 	if (!FWSIGN_ENAB()) {
3230 		if (PCIE(sii)) {
3231 			ASSERT(sii->pch != NULL);
3232 			pcicore_attach(sii->pch, pvars, SI_DOATTACH);
3233 		}
3234 	}
3235 
3236 	if ((CHIPID(sih->chip) == BCM43012_CHIP_ID) ||
3237 		(CHIPID(sih->chip) == BCM43013_CHIP_ID) ||
3238 		(CHIPID(sih->chip) == BCM43014_CHIP_ID) ||
3239 		(CCREV(sih->ccrev) >= 62)) {
3240 		/* Clear SFlash clock request */
3241 		CHIPC_REG(sih, clk_ctl_st, CCS_SFLASH_CLKREQ, 0);
3242 	}
3243 
3244 #ifdef SECI_UART
3245 	/* Enable pull up on fast_uart_rx and fast_uart_cts_in
3246 	* when fast uart is disabled.
3247 	*/
3248 	if (getvar(pvars, rstr_fuart_pup_rx_cts) != NULL) {
3249 		w = getintvar(pvars, rstr_fuart_pup_rx_cts);
3250 		if (w)
3251 			fuart_pullup_rx_cts_enab = TRUE;
3252 	}
3253 #endif
3254 
3255 	/* configure default pinmux enables for the chip */
3256 	if (getvar(pvars, rstr_muxenab) != NULL) {
3257 		w = getintvar(pvars, rstr_muxenab);
3258 		si_muxenab((si_t *)sii, w);
3259 	}
3260 
3261 	/* configure default swd enables for the chip */
3262 	if (getvar(pvars, rstr_swdenab) != NULL) {
3263 		w = getintvar(pvars, rstr_swdenab);
3264 		si_swdenable((si_t *)sii, w);
3265 	}
3266 
3267 	sii->device_wake_opt = CC_GCI_GPIO_INVALID;
3268 #endif /* !BCMDONGLEHOST */
3269 	/* clear any previous epidiag-induced target abort */
3270 	ASSERT(!si_taclear(sih, FALSE));
3271 
3272 #if defined(BCMPMU_STATS) && !defined(BCMPMU_STATS_DISABLED)
3273 	si_pmustatstimer_init(sih);
3274 #endif /* BCMPMU_STATS */
3275 
3276 #ifdef BOOTLOADER_CONSOLE_OUTPUT
3277 	/* Enable console prints */
3278 	si_muxenab(sii, 3);
3279 #endif
3280 
3281 	if (((PCIECOREREV(sih->buscorerev) == 66) || (PCIECOREREV(sih->buscorerev) == 68)) &&
3282 		CST4378_CHIPMODE_BTOP(sih->chipst)) {
3283 		/*
3284 		 * HW4378-413 :
3285 		 * BT oob connections for pcie function 1 seen at oob_ain[5] instead of oob_ain[1]
3286 		 */
3287 		si_oob_war_BT_F1(sih);
3288 	}
3289 
3290 	return (sii);
3291 
3292 exit:
3293 #if !defined(BCMDONGLEHOST)
3294 	if (BUSTYPE(sih->bustype) == PCI_BUS) {
3295 		if (sii->pch)
3296 			pcicore_deinit(sii->pch);
3297 		sii->pch = NULL;
3298 	}
3299 #endif /* !defined(BCMDONGLEHOST) */
3300 
3301 	if (err_at) {
3302 		SI_ERROR(("si_doattach Failed. Error at %d\n", err_at));
3303 		si_free_coresinfo(sii, osh);
3304 		si_free_wrapper(sii);
3305 	}
3306 	return NULL;
3307 }
3308 
3309 /** may be called with core in reset */
3310 void
3311 BCMATTACHFN(si_detach)(si_t *sih)
3312 {
3313 	si_info_t *sii = SI_INFO(sih);
3314 	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
3315 	uint idx;
3316 
3317 #if !defined(BCMDONGLEHOST)
3318 	struct si_pub *si_local = NULL;
3319 	bcopy(&sih, &si_local, sizeof(si_t*));
3320 #endif /* !BCMDONGLEHOST */
3321 
3322 #ifdef BCM_SH_SFLASH
3323 	if (BCM_SH_SFLASH_ENAB()) {
3324 		sh_sflash_detach(sii->osh, sih);
3325 	}
3326 #endif
3327 	if (BUSTYPE(sih->bustype) == SI_BUS) {
3328 		if (CHIPTYPE(sii->pub.socitype) == SOCI_NCI) {
3329 			if (sii->nci_info) {
3330 				nci_uninit(sii->nci_info);
3331 				sii->nci_info = NULL;
3332 
3333 				/* TODO: REG_UNMAP */
3334 			}
3335 		} else {
3336 			for (idx = 0; idx < SI_MAXCORES; idx++) {
3337 				if (cores_info->regs[idx]) {
3338 					REG_UNMAP(cores_info->regs[idx]);
3339 					cores_info->regs[idx] = NULL;
3340 				}
3341 			}
3342 		}
3343 	}
3344 
3345 #if !defined(BCMDONGLEHOST)
3346 	srom_var_deinit(si_local);
3347 	nvram_exit(si_local); /* free up nvram buffers */
3348 #endif /* !BCMDONGLEHOST */
3349 
3350 #if !defined(BCMDONGLEHOST)
3351 	if (BUSTYPE(sih->bustype) == PCI_BUS) {
3352 		if (sii->pch)
3353 			pcicore_deinit(sii->pch);
3354 		sii->pch = NULL;
3355 	}
3356 #endif /* !defined(BCMDONGLEHOST) */
3357 
3358 	si_free_coresinfo(sii, sii->osh);
3359 
3360 #if defined(AXI_TIMEOUTS_NIC)
3361 	if (sih->err_info) {
3362 		MFREE(sii->osh, sih->err_info, sizeof(si_axi_error_info_t));
3363 		sii->pub.err_info = NULL;
3364 	}
3365 #endif /* AXI_TIMEOUTS_NIC */
3366 
3367 	si_free_wrapper(sii);
3368 
3369 #ifdef BCMDVFS
3370 	if (BCMDVFS_ENAB()) {
3371 		si_dvfs_info_deinit(sih, sii->osh);
3372 	}
3373 #endif /* BCMDVFS */
3374 
3375 	if (sii != &ksii) {
3376 		MFREE(sii->osh, sii, sizeof(si_info_t));
3377 	}
3378 }
3379 
3380 void *
3381 BCMPOSTTRAPFN(si_osh)(si_t *sih)
3382 {
3383 	const si_info_t *sii;
3384 
3385 	sii = SI_INFO(sih);
3386 	return sii->osh;
3387 }
3388 
3389 void
3390 si_setosh(si_t *sih, osl_t *osh)
3391 {
3392 	si_info_t *sii;
3393 
3394 	sii = SI_INFO(sih);
3395 	if (sii->osh != NULL) {
3396 		SI_ERROR(("osh is already set....\n"));
3397 		ASSERT(!sii->osh);
3398 	}
3399 	sii->osh = osh;
3400 }
3401 
3402 /** register driver interrupt disabling and restoring callback functions */
3403 void
3404 BCMATTACHFN(si_register_intr_callback)(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn,
3405                           void *intrsenabled_fn, void *intr_arg)
3406 {
3407 	si_info_t *sii = SI_INFO(sih);
3408 	sii->intr_arg = intr_arg;
3409 	sii->intrsoff_fn = (si_intrsoff_t)intrsoff_fn;
3410 	sii->intrsrestore_fn = (si_intrsrestore_t)intrsrestore_fn;
3411 	sii->intrsenabled_fn = (si_intrsenabled_t)intrsenabled_fn;
3412 	/* Save the current core id. When this function is called, the current core
3413 	 * must be the core which provides the driver functions (il, et, wl, etc.).
3414 	 */
3415 	sii->dev_coreid = si_coreid(sih);
3416 }
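
/*
 * Usage sketch (editorial illustration, not part of the original source): a bus/driver
 * module would typically register its own interrupt-off/restore handlers right after
 * attach, while its core is still the current core.  The callback names wl_intrsoff,
 * wl_intrsrestore, wl_intrsenabled and the context pointer 'wl' below are hypothetical.
 *
 *	si_register_intr_callback(sih, (void *)wl_intrsoff, (void *)wl_intrsrestore,
 *	                          (void *)wl_intrsenabled, wl);
 *	...
 *	si_deregister_intr_callback(sih);
 */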
3417 
3418 void
3419 BCMPOSTTRAPFN(si_deregister_intr_callback)(si_t *sih)
3420 {
3421 	si_info_t *sii;
3422 
3423 	sii = SI_INFO(sih);
3424 	sii->intrsoff_fn = NULL;
3425 	sii->intrsrestore_fn = NULL;
3426 	sii->intrsenabled_fn = NULL;
3427 }
3428 
3429 uint
3430 BCMPOSTTRAPFN(si_intflag)(si_t *sih)
3431 {
3432 	const si_info_t *sii = SI_INFO(sih);
3433 
3434 	if (CHIPTYPE(sih->socitype) == SOCI_SB)
3435 		return sb_intflag(sih);
3436 	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
3437 		(CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
3438 		(CHIPTYPE(sih->socitype) == SOCI_NAI))
3439 		return R_REG(sii->osh, ((uint32 *)(uintptr)
3440 			    (sii->oob_router + OOB_STATUSA)));
3441 	else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
3442 		return nci_intflag(sih);
3443 	else {
3444 		ASSERT(0);
3445 		return 0;
3446 	}
3447 }
3448 
3449 uint
3450 si_flag(si_t *sih)
3451 {
3452 	if (CHIPTYPE(sih->socitype) == SOCI_SB)
3453 		return sb_flag(sih);
3454 	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
3455 		(CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
3456 		(CHIPTYPE(sih->socitype) == SOCI_NAI))
3457 		return ai_flag(sih);
3458 	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
3459 		return ub_flag(sih);
3460 	else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
3461 		return nci_flag(sih);
3462 	else {
3463 		ASSERT(0);
3464 		return 0;
3465 	}
3466 }
3467 
3468 uint
3469 si_flag_alt(const si_t *sih)
3470 {
3471 	if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
3472 	(CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
3473 	(CHIPTYPE(sih->socitype) == SOCI_NAI))
3474 		return ai_flag_alt(sih);
3475 	else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
3476 		return nci_flag_alt(sih);
3477 	else {
3478 		ASSERT(0);
3479 		return 0;
3480 	}
3481 }
3482 
3483 void
3484 BCMATTACHFN(si_setint)(const si_t *sih, int siflag)
3485 {
3486 	if (CHIPTYPE(sih->socitype) == SOCI_SB)
3487 		sb_setint(sih, siflag);
3488 	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
3489 		(CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
3490 		(CHIPTYPE(sih->socitype) == SOCI_NAI))
3491 		ai_setint(sih, siflag);
3492 	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
3493 		ub_setint(sih, siflag);
3494 	else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
3495 		nci_setint(sih, siflag);
3496 	else
3497 		ASSERT(0);
3498 }
3499 
3500 uint32
3501 si_oobr_baseaddr(const si_t *sih, bool second)
3502 {
3503 	const si_info_t *sii = SI_INFO(sih);
3504 
3505 	if (CHIPTYPE(sih->socitype) == SOCI_SB)
3506 		return 0;
3507 	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
3508 		(CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
3509 		(CHIPTYPE(sih->socitype) == SOCI_NAI))
3510 		return (second ? sii->oob_router1 : sii->oob_router);
3511 	else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
3512 		return nci_oobr_baseaddr(sih, second);
3513 	else {
3514 		ASSERT(0);
3515 		return 0;
3516 	}
3517 }
3518 
3519 uint
3520 BCMPOSTTRAPFN(si_coreid)(const si_t *sih)
3521 {
3522 	const si_info_t *sii = SI_INFO(sih);
3523 	const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
3524 	if (CHIPTYPE(sii->pub.socitype) == SOCI_NCI) {
3525 		return nci_coreid(sih, sii->curidx);
3526 	} else
3527 	{
3528 		return cores_info->coreid[sii->curidx];
3529 	}
3530 }
3531 
3532 uint
3533 BCMPOSTTRAPFN(si_coreidx)(const si_t *sih)
3534 {
3535 	const si_info_t *sii;
3536 
3537 	sii = SI_INFO(sih);
3538 	return sii->curidx;
3539 }
3540 
3541 uint
3542 si_get_num_cores(const si_t *sih)
3543 {
3544 	const si_info_t *sii = SI_INFO(sih);
3545 	return sii->numcores;
3546 }
3547 
3548 volatile void *
3549 si_d11_switch_addrbase(si_t *sih, uint coreunit)
3550 {
3551 	return si_setcore(sih,  D11_CORE_ID, coreunit);
3552 }
3553 
3554 /** return the core-type instantiation # of the current core */
3555 uint
3556 si_coreunit(const si_t *sih)
3557 {
3558 	const si_info_t *sii = SI_INFO(sih);
3559 	const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
3560 	uint idx;
3561 	uint coreid;
3562 	uint coreunit;
3563 	uint i;
3564 
3565 	if (CHIPTYPE(sii->pub.socitype) == SOCI_NCI) {
3566 		return nci_coreunit(sih);
3567 	}
3568 
3569 	coreunit = 0;
3570 
3571 	idx = sii->curidx;
3572 
3573 	ASSERT(GOODREGS(sii->curmap));
3574 	coreid = si_coreid(sih);
3575 
3576 	/* count the cores of our type */
3577 	for (i = 0; i < idx; i++)
3578 		if (cores_info->coreid[i] == coreid)
3579 			coreunit++;
3580 
3581 	return (coreunit);
3582 }
3583 
3584 uint
3585 BCMATTACHFN(si_corevendor)(const si_t *sih)
3586 {
3587 	if (CHIPTYPE(sih->socitype) == SOCI_SB)
3588 		return sb_corevendor(sih);
3589 		else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
3590 		(CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
3591 		(CHIPTYPE(sih->socitype) == SOCI_NAI))
3592 		return ai_corevendor(sih);
3593 	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
3594 		return ub_corevendor(sih);
3595 	else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
3596 		return nci_corevendor(sih);
3597 	else {
3598 		ASSERT(0);
3599 		return 0;
3600 	}
3601 }
3602 
3603 bool
3604 BCMINITFN(si_backplane64)(const si_t *sih)
3605 {
3606 	return ((sih->cccaps & CC_CAP_BKPLN64) != 0);
3607 }
3608 
3609 uint
3610 BCMPOSTTRAPFN(si_corerev)(const si_t *sih)
3611 {
3612 	if (CHIPTYPE(sih->socitype) == SOCI_SB)
3613 		return sb_corerev(sih);
3614 		else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
3615 		(CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
3616 		(CHIPTYPE(sih->socitype) == SOCI_NAI))
3617 		return ai_corerev(sih);
3618 	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
3619 		return ub_corerev(sih);
3620 	else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
3621 		return nci_corerev(sih);
3622 	else {
3623 		ASSERT(0);
3624 		return 0;
3625 	}
3626 }
3627 
3628 uint
3629 si_corerev_minor(const si_t *sih)
3630 {
3631 	if (CHIPTYPE(sih->socitype) == SOCI_AI) {
3632 		return ai_corerev_minor(sih);
3633 	}
3634 	else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
3635 		return nci_corerev_minor(sih);
3636 	else {
3637 		return 0;
3638 	}
3639 }
3640 
3641 /* return index of coreid or BADIDX if not found */
3642 uint
3643 BCMPOSTTRAPFN(si_findcoreidx)(const si_t *sih, uint coreid, uint coreunit)
3644 {
3645 	const si_info_t *sii = SI_INFO(sih);
3646 	const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
3647 	uint found;
3648 	uint i;
3649 
3650 	if (CHIPTYPE(sih->socitype) == SOCI_NCI) {
3651 		return nci_findcoreidx(sih, coreid, coreunit);
3652 	}
3653 
3654 	found = 0;
3655 
3656 	for (i = 0; i < sii->numcores; i++) {
3657 		if (cores_info->coreid[i] == coreid) {
3658 			if (found == coreunit)
3659 				return (i);
3660 			found++;
3661 		}
3662 	}
3663 
3664 	return (BADIDX);
3665 }
3666 
3667 bool
3668 BCMPOSTTRAPFN(si_hwa_present)(const si_t *sih)
3669 {
3670 	if (si_findcoreidx(sih, HWA_CORE_ID, 0) != BADIDX) {
3671 		return TRUE;
3672 	}
3673 	return FALSE;
3674 }
3675 
3676 bool
3677 BCMPOSTTRAPFN(si_sysmem_present)(const si_t *sih)
3678 {
3679 	if (si_findcoreidx(sih, SYSMEM_CORE_ID, 0) != BADIDX) {
3680 		return TRUE;
3681 	}
3682 	return FALSE;
3683 }
3684 
3685 /* return the coreid of the core at index */
3686 uint
3687 si_findcoreid(const si_t *sih, uint coreidx)
3688 {
3689 	const si_info_t *sii = SI_INFO(sih);
3690 	const si_cores_info_t *cores_info = sii->cores_info;
3691 
3692 	if (coreidx >= sii->numcores) {
3693 		return NODEV_CORE_ID;
3694 	}
3695 	if (CHIPTYPE(sih->socitype) == SOCI_NCI) {
3696 		return nci_coreid(sih, coreidx);
3697 	}
3698 	return cores_info->coreid[coreidx];
3699 }
3700 
3701 /** return the total number of coreunits of the given coreid, or zero if not found */
3702 uint
3703 BCMPOSTTRAPFN(si_numcoreunits)(const si_t *sih, uint coreid)
3704 {
3705 	const si_info_t *sii = SI_INFO(sih);
3706 	const si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
3707 	uint found = 0;
3708 	uint i;
3709 
3710 	if (CHIPTYPE(sih->socitype) == SOCI_NCI) {
3711 		return nci_numcoreunits(sih, coreid);
3712 	}
3713 	for (i = 0; i < sii->numcores; i++) {
3714 		if (cores_info->coreid[i] == coreid) {
3715 			found++;
3716 		}
3717 	}
3718 
3719 	return found;
3720 }
3721 
3722 /** return total D11 coreunits */
3723 uint
3724 BCMPOSTTRAPRAMFN(si_numd11coreunits)(const si_t *sih)
3725 {
3726 	if (CHIPTYPE(sih->socitype) == SOCI_NCI) {
3727 		return nci_numcoreunits(sih, D11_CORE_ID);
3728 	}
3729 	return si_numcoreunits(sih, D11_CORE_ID);
3730 }
3731 
3732 /** return list of found cores */
3733 uint
3734 si_corelist(const si_t *sih, uint coreid[])
3735 {
3736 	const si_info_t *sii = SI_INFO(sih);
3737 	const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
3738 
3739 	if (CHIPTYPE(sih->socitype) == SOCI_NCI) {
3740 		return nci_corelist(sih, coreid);
3741 	}
3742 	(void)memcpy_s(coreid, (sii->numcores * sizeof(uint)), cores_info->coreid,
3743 		(sii->numcores * sizeof(uint)));
3744 	return (sii->numcores);
3745 }
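
/*
 * Usage sketch (editorial illustration): the caller supplies an array large enough for
 * every discovered core, e.g. sized by SI_MAXCORES, and gets back the number of cores:
 *
 *	uint list[SI_MAXCORES];
 *	uint ncores = si_corelist(sih, list);
 */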
3746 
3747 /** return current wrapper mapping */
3748 void *
3749 BCMPOSTTRAPFN(si_wrapperregs)(const si_t *sih)
3750 {
3751 	const si_info_t *sii = SI_INFO(sih);
3752 
3753 	ASSERT(GOODREGS(sii->curwrap));
3754 
3755 	return (sii->curwrap);
3756 }
3757 
3758 /** return current register mapping */
3759 volatile void *
3760 BCMPOSTTRAPFN(si_coreregs)(const si_t *sih)
3761 {
3762 	const si_info_t *sii = SI_INFO(sih);
3763 
3764 	ASSERT(GOODREGS(sii->curmap));
3765 
3766 	return (sii->curmap);
3767 }
3768 
3769 /**
3770  * This function changes logical "focus" to the indicated core;
3771  * must be called with interrupts off.
3772  * Moreover, callers should keep interrupts off during switching out of and back to d11 core
3773  */
3774 volatile void *
3775 BCMPOSTTRAPFN(si_setcore)(si_t *sih, uint coreid, uint coreunit)
3776 {
3777 	si_info_t *sii = SI_INFO(sih);
3778 	uint idx;
3779 
3780 	idx = si_findcoreidx(sih, coreid, coreunit);
3781 	if (!GOODIDX(idx, sii->numcores)) {
3782 		return (NULL);
3783 	}
3784 
3785 	if (CHIPTYPE(sih->socitype) == SOCI_SB)
3786 		return sb_setcoreidx(sih, idx);
3787 	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
3788 		(CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
3789 		(CHIPTYPE(sih->socitype) == SOCI_NAI))
3790 		return ai_setcoreidx(sih, idx);
3791 	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
3792 		return ub_setcoreidx(sih, idx);
3793 	else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
3794 		return nci_setcoreidx(sih, idx);
3795 	else {
3796 		ASSERT(0);
3797 		return NULL;
3798 	}
3799 }
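
/*
 * Usage sketch (editorial illustration): the common pattern in this file is to remember
 * the current core index, switch focus, access the target core's registers and then
 * switch back (see, for example, si_corerev_ext() further below):
 *
 *	uint origidx = si_coreidx(sih);
 *	chipcregs_t *cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
 *	if (cc != NULL) {
 *		(void)R_REG(si_osh(sih), &cc->capabilities);
 *	}
 *	si_setcoreidx(sih, origidx);
 */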
3800 
3801 volatile void *
3802 BCMPOSTTRAPFN(si_setcoreidx)(si_t *sih, uint coreidx)
3803 {
3804 	if (CHIPTYPE(sih->socitype) == SOCI_SB)
3805 		return sb_setcoreidx(sih, coreidx);
3806 	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
3807 		(CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
3808 		(CHIPTYPE(sih->socitype) == SOCI_NAI))
3809 		return ai_setcoreidx(sih, coreidx);
3810 	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
3811 		return ub_setcoreidx(sih, coreidx);
3812 	else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
3813 		return nci_setcoreidx(sih, coreidx);
3814 	else {
3815 		ASSERT(0);
3816 		return NULL;
3817 	}
3818 }
3819 
3820 /** Turn off interrupts as required by sb_setcore, before switching cores */
3821 volatile void *
3822 BCMPOSTTRAPFN(si_switch_core)(si_t *sih, uint coreid, uint *origidx, bcm_int_bitmask_t *intr_val)
3823 {
3824 	volatile void *cc;
3825 	si_info_t *sii = SI_INFO(sih);
3826 
3827 	if (SI_FAST(sii)) {
3828 		/* Overloading the origidx variable to remember the coreid,
3829 		 * this works because the core ids cannot be confused with
3830 		 * core indices.
3831 		 */
3832 		*origidx = coreid;
3833 		if (coreid == CC_CORE_ID)
3834 			return (volatile void *)CCREGS_FAST(sii);
3835 		else if (coreid == BUSCORETYPE(sih->buscoretype))
3836 			return (volatile void *)PCIEREGS(sii);
3837 	}
3838 	INTR_OFF(sii, intr_val);
3839 	*origidx = sii->curidx;
3840 	cc = si_setcore(sih, coreid, 0);
3841 	ASSERT(cc != NULL);
3842 
3843 	return cc;
3844 }
3845 
3846 /* restore coreidx and restore interrupt */
3847 void
3848 BCMPOSTTRAPFN(si_restore_core)(si_t *sih, uint coreid, bcm_int_bitmask_t *intr_val)
3849 {
3850 	si_info_t *sii = SI_INFO(sih);
3851 
3852 	if (SI_FAST(sii) && ((coreid == CC_CORE_ID) || (coreid == BUSCORETYPE(sih->buscoretype))))
3853 		return;
3854 
3855 	si_setcoreidx(sih, coreid);
3856 	INTR_RESTORE(sii, intr_val);
3857 }
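
/*
 * Usage sketch (editorial illustration): si_switch_core()/si_restore_core() wrap a core
 * switch together with the interrupt-off/restore handling, mirroring what
 * si_get_corerev() further below does:
 *
 *	uint origidx;
 *	bcm_int_bitmask_t intr_val;
 *
 *	(void)si_switch_core(sih, CC_CORE_ID, &origidx, &intr_val);
 *	... access the now-current chipcommon core ...
 *	si_restore_core(sih, origidx, &intr_val);
 */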
3858 
3859 /* Switch to particular core and get corerev */
3860 #ifdef USE_NEW_COREREV_API
3861 uint
3862 BCMPOSTTRAPFN(si_corerev_ext)(si_t *sih, uint coreid, uint coreunit)
3863 {
3864 	uint coreidx;
3865 	uint corerev;
3866 
3867 	coreidx = si_coreidx(sih);
3868 	(void)si_setcore(sih, coreid, coreunit);
3869 
3870 	corerev = si_corerev(sih);
3871 
3872 	si_setcoreidx(sih, coreidx);
3873 	return corerev;
3874 }
3875 #else
3876 uint si_get_corerev(si_t *sih, uint core_id)
3877 {
3878 	uint corerev, orig_coreid;
3879 	bcm_int_bitmask_t intr_val;
3880 
3881 	si_switch_core(sih, core_id, &orig_coreid, &intr_val);
3882 	corerev = si_corerev(sih);
3883 	si_restore_core(sih, orig_coreid, &intr_val);
3884 	return corerev;
3885 }
3886 #endif /* !USE_NEW_COREREV_API */
3887 
3888 int
3889 BCMATTACHFN(si_numaddrspaces)(const si_t *sih)
3890 {
3891 	if (CHIPTYPE(sih->socitype) == SOCI_SB)
3892 		return sb_numaddrspaces(sih);
3893 	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
3894 		(CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
3895 		(CHIPTYPE(sih->socitype) == SOCI_NAI))
3896 		return ai_numaddrspaces(sih);
3897 	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
3898 		return ub_numaddrspaces(sih);
3899 	else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
3900 		return nci_numaddrspaces(sih);
3901 	else {
3902 		ASSERT(0);
3903 		return 0;
3904 	}
3905 }
3906 
3907 /* Return the address of the nth address space in the current core
3908  * Arguments:
3909  * sih : Pointer to struct si_t
3910  * spidx : slave port index
3911  * baidx : base address index
3912  */
3913 
3914 uint32
3915 si_addrspace(const si_t *sih, uint spidx, uint baidx)
3916 {
3917 	if (CHIPTYPE(sih->socitype) == SOCI_SB)
3918 		return sb_addrspace(sih, baidx);
3919 	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
3920 		(CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
3921 		(CHIPTYPE(sih->socitype) == SOCI_NAI))
3922 		return ai_addrspace(sih, spidx, baidx);
3923 	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
3924 		return ub_addrspace(sih, baidx);
3925 	else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
3926 		return nci_addrspace(sih, spidx, baidx);
3927 	else {
3928 		ASSERT(0);
3929 		return 0;
3930 	}
3931 }
3932 
3933 /* Return the size of the nth address space in the current core
3934  * Arguments:
3935  * sih : Pointer to struct si_t
3936  * spidx : slave port index
3937  * baidx : base address index
3938  */
3939 uint32
3940 BCMATTACHFN(si_addrspacesize)(const si_t *sih, uint spidx, uint baidx)
3941 {
3942 	if (CHIPTYPE(sih->socitype) == SOCI_SB)
3943 		return sb_addrspacesize(sih, baidx);
3944 	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
3945 		(CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
3946 		(CHIPTYPE(sih->socitype) == SOCI_NAI))
3947 		return ai_addrspacesize(sih, spidx, baidx);
3948 	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
3949 		return ub_addrspacesize(sih, baidx);
3950 	else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
3951 		return nci_addrspacesize(sih, spidx, baidx);
3952 	else {
3953 		ASSERT(0);
3954 		return 0;
3955 	}
3956 }
3957 
3958 void
3959 si_coreaddrspaceX(const si_t *sih, uint asidx, uint32 *addr, uint32 *size)
3960 {
3961 	/* Only supported for SOCI_AI */
3962 	if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
3963 		(CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
3964 		(CHIPTYPE(sih->socitype) == SOCI_NAI))
3965 		ai_coreaddrspaceX(sih, asidx, addr, size);
3966 	else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
3967 		nci_coreaddrspaceX(sih, asidx, addr, size);
3968 	else
3969 		*size = 0;
3970 }
3971 
3972 uint32
3973 BCMPOSTTRAPFN(si_core_cflags)(const si_t *sih, uint32 mask, uint32 val)
3974 {
3975 	if (CHIPTYPE(sih->socitype) == SOCI_SB)
3976 		return sb_core_cflags(sih, mask, val);
3977 	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
3978 		(CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
3979 		(CHIPTYPE(sih->socitype) == SOCI_NAI))
3980 		return ai_core_cflags(sih, mask, val);
3981 	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
3982 		return ub_core_cflags(sih, mask, val);
3983 	else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
3984 		return nci_core_cflags(sih, mask, val);
3985 	else {
3986 		ASSERT(0);
3987 		return 0;
3988 	}
3989 }
3990 
3991 void
3992 si_core_cflags_wo(const si_t *sih, uint32 mask, uint32 val)
3993 {
3994 	if (CHIPTYPE(sih->socitype) == SOCI_SB)
3995 		sb_core_cflags_wo(sih, mask, val);
3996 	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
3997 		(CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
3998 		(CHIPTYPE(sih->socitype) == SOCI_NAI))
3999 		ai_core_cflags_wo(sih, mask, val);
4000 	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
4001 		ub_core_cflags_wo(sih, mask, val);
4002 	else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
4003 		nci_core_cflags_wo(sih, mask, val);
4004 	else
4005 		ASSERT(0);
4006 }
4007 
4008 uint32
4009 si_core_sflags(const si_t *sih, uint32 mask, uint32 val)
4010 {
4011 	if (CHIPTYPE(sih->socitype) == SOCI_SB)
4012 		return sb_core_sflags(sih, mask, val);
4013 	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
4014 		(CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
4015 		(CHIPTYPE(sih->socitype) == SOCI_NAI))
4016 		return ai_core_sflags(sih, mask, val);
4017 	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
4018 		return ub_core_sflags(sih, mask, val);
4019 	else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
4020 		return nci_core_sflags(sih, mask, val);
4021 	else {
4022 		ASSERT(0);
4023 		return 0;
4024 	}
4025 }
4026 
4027 void
4028 si_commit(si_t *sih)
4029 {
4030 	if (CHIPTYPE(sih->socitype) == SOCI_SB)
4031 		sb_commit(sih);
4032 	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
4033 		(CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
4034 		(CHIPTYPE(sih->socitype) == SOCI_NAI))
4035 		;
4036 	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
4037 		;
4038 	else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
4039 		;
4040 	else {
4041 		ASSERT(0);
4042 	}
4043 }
4044 
4045 bool
4046 BCMPOSTTRAPFN(si_iscoreup)(const si_t *sih)
4047 {
4048 	if (CHIPTYPE(sih->socitype) == SOCI_SB)
4049 		return sb_iscoreup(sih);
4050 	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
4051 		(CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
4052 		(CHIPTYPE(sih->socitype) == SOCI_NAI))
4053 		return ai_iscoreup(sih);
4054 	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
4055 		return ub_iscoreup(sih);
4056 	else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
4057 		return nci_iscoreup(sih);
4058 	else {
4059 		ASSERT(0);
4060 		return FALSE;
4061 	}
4062 }
4063 
4064 /** Caller should make sure it is on the right core, before calling this routine */
4065 uint
4066 BCMPOSTTRAPFN(si_wrapperreg)(const si_t *sih, uint32 offset, uint32 mask, uint32 val)
4067 {
4068 	/* only for AI backplane chips */
4069 	if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
4070 		(CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
4071 		(CHIPTYPE(sih->socitype) == SOCI_NAI))
4072 		return (ai_wrap_reg(sih, offset, mask, val));
4073 	else if	(CHIPTYPE(sih->socitype) == SOCI_NCI)
4074 		return (nci_get_wrap_reg(sih, offset, mask, val));
4075 	return 0;
4076 }
4077 /* si_backplane_access is used to access a full 32-bit backplane address from the host for PCIE FD.
4078  * It uses the secondary bar-0 window, which lies at an offset of 16K from the primary bar-0 window.
4079  * Provides support for read/write of 1/2/4 bytes at a backplane address.
4080  * Can be used to read/write
4081  *	1. core regs
4082  *	2. Wrapper regs
4083  *	3. memory
4084  *	4. BT area
4085  * The caller passes the full 32-bit backplane address; internally, bits [31 : 12] select the
4086  * secondary bar-0 window ("region") and bits [11 : 0] are the register offset within that window.
4087  * For example, reading 4 bytes from reg 0x200 of the d11 core (backplane base 0x18001000):
4088  *	si_backplane_access(sih, 0x18001000 + 0x200, sizeof(uint32), &val, TRUE)
4089  */
4090 static int si_backplane_addr_sane(uint addr, uint size)
4091 {
4092 	int bcmerror = BCME_OK;
4093 
4094 	/* For 2 byte access, address has to be 2 byte aligned */
4095 	if (size == 2) {
4096 		if (addr & 0x1) {
4097 			bcmerror = BCME_ERROR;
4098 		}
4099 	}
4100 	/* For 4 byte access, address has to be 4 byte aligned */
4101 	if (size == 4) {
4102 		if (addr & 0x3) {
4103 			bcmerror = BCME_ERROR;
4104 		}
4105 	}
4106 
4107 	return bcmerror;
4108 }
4109 
4110 void
4111 si_invalidate_second_bar0win(si_t *sih)
4112 {
4113 	si_info_t *sii = SI_INFO(sih);
4114 	sii->second_bar0win = ~0x0;
4115 }
4116 
4117 int
4118 si_backplane_access(si_t *sih, uint addr, uint size, uint *val, bool read)
4119 {
4120 	volatile uint32 *r = NULL;
4121 	uint32 region = 0;
4122 	si_info_t *sii = SI_INFO(sih);
4123 
4124 	/* Valid only for pcie bus */
4125 	if (BUSTYPE(sih->bustype) != PCI_BUS) {
4126 		SI_ERROR(("Valid only for pcie bus \n"));
4127 		return BCME_ERROR;
4128 	}
4129 
4130 	/* Split addr into region and address offset */
4131 	region = (addr & (0xFFFFF << 12));
4132 	addr = addr & 0xFFF;
4133 
4134 	/* check for address and size sanity */
4135 	if (si_backplane_addr_sane(addr, size) != BCME_OK)
4136 		return BCME_ERROR;
4137 
4138 	/* Update window if required */
4139 	if (sii->second_bar0win != region) {
4140 		OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_CORE2_WIN, 4, region);
4141 		sii->second_bar0win = region;
4142 	}
4143 
4144 	/* Compute the effective address
4145 	 * sii->curmap   : bar-0 virtual address
4146 	 * PCI_SECOND_BAR0_OFFSET  : secondary bar-0 offset
4147 	 * addr : register offset within the window
4148 	 */
4149 	r = (volatile uint32 *)((volatile char *)sii->curmap + PCI_SECOND_BAR0_OFFSET + addr);
4150 
4151 	SI_VMSG(("si curmap %p  region %x regaddr %x effective addr %p READ %d\n",
4152 		(volatile char*)sii->curmap, region, addr, r, read));
4153 
4154 	switch (size) {
4155 		case sizeof(uint8) :
4156 			if (read)
4157 				*val = R_REG(sii->osh, (volatile uint8*)r);
4158 			else
4159 				W_REG(sii->osh, (volatile uint8*)r, *val);
4160 			break;
4161 		case sizeof(uint16) :
4162 			if (read)
4163 				*val = R_REG(sii->osh, (volatile uint16*)r);
4164 			else
4165 				W_REG(sii->osh, (volatile uint16*)r, *val);
4166 			break;
4167 		case sizeof(uint32) :
4168 			if (read)
4169 				*val = R_REG(sii->osh, (volatile uint32*)r);
4170 			else
4171 				W_REG(sii->osh, (volatile uint32*)r, *val);
4172 			break;
4173 		default :
4174 			SI_ERROR(("Invalid  size %d \n", size));
4175 			return (BCME_ERROR);
4176 			break;
4177 	}
4178 
4179 	return (BCME_OK);
4180 }
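
/*
 * Usage sketch (editorial illustration): reading a 32-bit register at offset 0x200 of a
 * core whose backplane base address is 0x18001000 (the example from the comment above):
 *
 *	uint val = 0;
 *
 *	if (si_backplane_access(sih, 0x18001000 + 0x200, sizeof(uint32), &val, TRUE) == BCME_OK) {
 *		... val now holds the register contents ...
 *	}
 */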
4181 
4182 /* precommit failed when this is removed */
4183 /* BLAZAR_BRANCH_101_10_DHD_002/build/dhd/linux-fc30/brix-brcm */
4184 /* TBD: Revisit later */
4185 #ifdef BCMINTERNAL
4186 int
4187 si_backplane_access_64(si_t *sih, uint addr, uint size, uint64 *val, bool read)
4188 {
4189 #if defined(NDIS) || defined(EFI)
4190 	SI_ERROR(("NDIS/EFI won't support 64 bit access\n"));
4191 	return (BCME_ERROR);
4192 #else
4193 	volatile uint64 *r = NULL;
4194 	uint32 region = 0;
4195 	si_info_t *sii = SI_INFO(sih);
4196 
4197 	/* Valid only for pcie bus */
4198 	if (BUSTYPE(sih->bustype) != PCI_BUS) {
4199 		SI_ERROR(("Valid only for pcie bus \n"));
4200 		return BCME_ERROR;
4201 	}
4202 
4203 	/* Split addr into region and address offset */
4204 	region = (addr & (0xFFFFF << 12));
4205 	addr = addr & 0xFFF;
4206 
4207 	/* check for address and size sanity */
4208 	if (si_backplane_addr_sane(addr, size) != BCME_OK) {
4209 		SI_ERROR(("Address is not aligned\n"));
4210 		return BCME_ERROR;
4211 	}
4212 
4213 	/* Update window if required */
4214 	if (sii->second_bar0win != region) {
4215 		OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_CORE2_WIN, 4, region);
4216 		sii->second_bar0win = region;
4217 	}
4218 
4219 	/* Compute the effective address
4220 	 * sii->curmap   : bar-0 virtual address
4221 	 * PCI_SECOND_BAR0_OFFSET  : secondary bar-0 offset
4222 	 * addr : register offset within the window
4223 	 */
4224 	r = (volatile uint64 *)((volatile char *)sii->curmap + PCI_SECOND_BAR0_OFFSET + addr);
4225 
4226 	switch (size) {
4227 		case sizeof(uint64) :
4228 			if (read) {
4229 				*val = R_REG(sii->osh, (volatile uint64*)r);
4230 			} else {
4231 				W_REG(sii->osh, (volatile uint64*)r, *val);
4232 			}
4233 			break;
4234 		default :
4235 			SI_ERROR(("Invalid  size %d \n", size));
4236 			return (BCME_ERROR);
4237 			break;
4238 	}
4239 
4240 	return (BCME_OK);
4241 #endif /* NDIS */
4242 }
4243 #endif /* BCMINTERNAL */
4244 
4245 uint
4246 BCMPOSTTRAPFN(si_corereg)(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
4247 {
4248 	if (CHIPTYPE(sih->socitype) == SOCI_SB)
4249 		return sb_corereg(sih, coreidx, regoff, mask, val);
4250 	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
4251 		(CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
4252 		(CHIPTYPE(sih->socitype) == SOCI_NAI))
4253 		return ai_corereg(sih, coreidx, regoff, mask, val);
4254 	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
4255 		return ub_corereg(sih, coreidx, regoff, mask, val);
4256 	else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
4257 		return nci_corereg(sih, coreidx, regoff, mask, val);
4258 	else {
4259 		ASSERT(0);
4260 		return 0;
4261 	}
4262 }
4263 
4264 uint
4265 BCMPOSTTRAPFN(si_corereg_writeonly)(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
4266 {
4267 	if (CHIPTYPE(sih->socitype) == SOCI_NCI) {
4268 		return nci_corereg_writeonly(sih, coreidx, regoff, mask, val);
4269 	} else
4270 	{
4271 		return ai_corereg_writeonly(sih, coreidx, regoff, mask, val);
4272 	}
4273 }
4274 
4275 /** ILP sensitive register access needs special treatment to avoid backplane stalls */
4276 bool
4277 BCMPOSTTRAPFN(si_pmu_is_ilp_sensitive)(uint32 idx, uint regoff)
4278 {
4279 	if (idx == SI_CC_IDX) {
4280 		if (CHIPCREGS_ILP_SENSITIVE(regoff))
4281 			return TRUE;
4282 	} else if (PMUREGS_ILP_SENSITIVE(regoff)) {
4283 		return TRUE;
4284 	}
4285 
4286 	return FALSE;
4287 }
4288 
4289 /** 'idx' should refer either to the chipcommon core or the PMU core */
4290 uint
4291 BCMPOSTTRAPFN(si_pmu_corereg)(si_t *sih, uint32 idx, uint regoff, uint mask, uint val)
4292 {
4293 	int pmustatus_offset;
4294 
4295 	/* prevent backplane stall on double write to 'ILP domain' registers in the PMU */
4296 	if (mask != 0 && PMUREV(sih->pmurev) >= 22 &&
4297 	    si_pmu_is_ilp_sensitive(idx, regoff)) {
4298 		pmustatus_offset = AOB_ENAB(sih) ? OFFSETOF(pmuregs_t, pmustatus) :
4299 			OFFSETOF(chipcregs_t, pmustatus);
4300 
4301 		while (si_corereg(sih, idx, pmustatus_offset, 0, 0) & PST_SLOW_WR_PENDING)
4302 			{};
4303 	}
4304 
4305 	return si_corereg(sih, idx, regoff, mask, val);
4306 }
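
/*
 * Usage sketch (editorial illustration): writing an ILP-domain PMU register through
 * si_pmu_corereg() so the PST_SLOW_WR_PENDING handshake above is honoured.  SI_CC_IDX is
 * assumed here to be appropriate for chips whose PMU registers live in the chipcommon
 * core (no AOB); the pmuwatchdog offset mirrors the pmu_corereg() usage elsewhere in
 * this file.
 *
 *	si_pmu_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, pmuwatchdog), ~0, ticks);
 */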
4307 
4308 /*
4309  * If there is no need for fiddling with interrupts or core switches (typically for silicon
4310  * backplane registers, pci registers and chipcommon registers), this function
4311  * translates the register offset on the given core into a mapped address. That address
4312  * can then be used directly with W_REG/R_REG.
4313  *
4314  * For accessing registers that would need a core switch, this function will return
4315  * NULL.
4316  */
4317 volatile uint32 *
4318 BCMPOSTTRAPFN(si_corereg_addr)(si_t *sih, uint coreidx, uint regoff)
4319 {
4320 	if (CHIPTYPE(sih->socitype) == SOCI_SB)
4321 		return sb_corereg_addr(sih, coreidx, regoff);
4322 	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
4323 		(CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
4324 		(CHIPTYPE(sih->socitype) == SOCI_NAI))
4325 		return ai_corereg_addr(sih, coreidx, regoff);
4326 	else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
4327 		return nci_corereg_addr(sih, coreidx, regoff);
4328 	else {
4329 		return 0;
4330 	}
4331 }
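
/*
 * Usage sketch (editorial illustration): for a register that needs no core switch, the
 * returned address can be used with R_REG/W_REG directly, e.g. the chipcommon watchdog:
 *
 *	volatile uint32 *wd = si_corereg_addr(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog));
 *	if (wd != NULL)
 *		W_REG(si_osh(sih), wd, 0);
 */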
4332 
4333 void
4334 si_core_disable(const si_t *sih, uint32 bits)
4335 {
4336 	if (CHIPTYPE(sih->socitype) == SOCI_SB)
4337 		sb_core_disable(sih, bits);
4338 	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
4339 		(CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
4340 		(CHIPTYPE(sih->socitype) == SOCI_NAI))
4341 		ai_core_disable(sih, bits);
4342 	else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
4343 		nci_core_disable(sih, bits);
4344 	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
4345 		ub_core_disable(sih, bits);
4346 }
4347 
4348 void
4349 BCMPOSTTRAPFN(si_core_reset)(si_t *sih, uint32 bits, uint32 resetbits)
4350 {
4351 	if (CHIPTYPE(sih->socitype) == SOCI_SB)
4352 		sb_core_reset(sih, bits, resetbits);
4353 	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
4354 		(CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
4355 		(CHIPTYPE(sih->socitype) == SOCI_NAI))
4356 		ai_core_reset(sih, bits, resetbits);
4357 	else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
4358 		nci_core_reset(sih, bits, resetbits);
4359 	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
4360 		ub_core_reset(sih, bits, resetbits);
4361 }
4362 
4363 /** Run bist on current core. Caller needs to take care of core-specific bist hazards */
4364 int
4365 si_corebist(const si_t *sih)
4366 {
4367 	uint32 cflags;
4368 	int result = 0;
4369 
4370 	/* Read core control flags */
4371 	cflags = si_core_cflags(sih, 0, 0);
4372 
4373 	/* Set bist & fgc */
4374 	si_core_cflags(sih, ~0, (SICF_BIST_EN | SICF_FGC));
4375 
4376 	/* Wait for bist done */
4377 	SPINWAIT(((si_core_sflags(sih, 0, 0) & SISF_BIST_DONE) == 0), 100000);
4378 
4379 	if (si_core_sflags(sih, 0, 0) & SISF_BIST_ERROR)
4380 		result = BCME_ERROR;
4381 
4382 	/* Reset core control flags */
4383 	si_core_cflags(sih, 0xffff, cflags);
4384 
4385 	return result;
4386 }
4387 
4388 uint
4389 si_num_slaveports(const si_t *sih, uint coreid)
4390 {
4391 	uint idx = si_findcoreidx(sih, coreid, 0);
4392 	uint num = 0;
4393 
4394 	if (idx != BADIDX) {
4395 		if (CHIPTYPE(sih->socitype) == SOCI_AI) {
4396 			num = ai_num_slaveports(sih, idx);
4397 		}
4398 		else if (CHIPTYPE(sih->socitype) == SOCI_NCI) {
4399 			num = nci_num_slaveports(sih, idx);
4400 		}
4401 	}
4402 	return num;
4403 }
4404 
4405 /* TODO: Check if NCI has a slave port address */
4406 uint32
4407 si_get_slaveport_addr(si_t *sih, uint spidx, uint baidx, uint core_id, uint coreunit)
4408 {
4409 	const si_info_t *sii = SI_INFO(sih);
4410 	uint origidx = sii->curidx;
4411 	uint32 addr = 0x0;
4412 
4413 	if (!((CHIPTYPE(sih->socitype) == SOCI_AI) ||
4414 		(CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
4415 		(CHIPTYPE(sih->socitype) == SOCI_NAI) ||
4416 		(CHIPTYPE(sih->socitype) == SOCI_NCI)))
4417 		goto done;
4418 
4419 	si_setcore(sih, core_id, coreunit);
4420 
4421 	addr = si_addrspace(sih, spidx, baidx);
4422 
4423 	si_setcoreidx(sih, origidx);
4424 
4425 done:
4426 	return addr;
4427 }
4428 
4429 /* TODO: Check if NCI has a d11 slave port address */
4430 uint32
4431 si_get_d11_slaveport_addr(si_t *sih, uint spidx, uint baidx, uint coreunit)
4432 {
4433 	const si_info_t *sii = SI_INFO(sih);
4434 	uint origidx = sii->curidx;
4435 	uint32 addr = 0x0;
4436 
4437 	if (!((CHIPTYPE(sih->socitype) == SOCI_AI) ||
4438 		(CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
4439 		(CHIPTYPE(sih->socitype) == SOCI_NAI) ||
4440 		(CHIPTYPE(sih->socitype) == SOCI_NCI)))
4441 		goto done;
4442 
4443 	si_setcore(sih, D11_CORE_ID, coreunit);
4444 
4445 	addr = si_addrspace(sih, spidx, baidx);
4446 
4447 	si_setcoreidx(sih, origidx);
4448 
4449 done:
4450 	return addr;
4451 }
4452 
4453 static uint32
4454 BCMINITFN(factor6)(uint32 x)
4455 {
4456 	switch (x) {
4457 	case CC_F6_2:	return 2;
4458 	case CC_F6_3:	return 3;
4459 	case CC_F6_4:	return 4;
4460 	case CC_F6_5:	return 5;
4461 	case CC_F6_6:	return 6;
4462 	case CC_F6_7:	return 7;
4463 	default:	return 0;
4464 	}
4465 }
4466 
4467 /*
4468  * Divide the clock by the divisor with protection for
4469  * a zero divisor.
4470  */
4471 static uint32
4472 divide_clock(uint32 clock, uint32 div)
4473 {
4474 	return div ? clock / div : 0;
4475 }
4476 
4477 /** calculate the speed the SI would run at given a set of clockcontrol values */
4478 uint32
4479 BCMINITFN(si_clock_rate)(uint32 pll_type, uint32 n, uint32 m)
4480 {
4481 	uint32 n1, n2, clock, m1, m2, m3, mc;
4482 
4483 	n1 = n & CN_N1_MASK;
4484 	n2 = (n & CN_N2_MASK) >> CN_N2_SHIFT;
4485 
4486 	if (pll_type == PLL_TYPE6) {
4487 		if (m & CC_T6_MMASK)
4488 			return CC_T6_M1;
4489 		else
4490 			return CC_T6_M0;
4491 	} else if ((pll_type == PLL_TYPE1) ||
4492 	           (pll_type == PLL_TYPE3) ||
4493 	           (pll_type == PLL_TYPE4) ||
4494 	           (pll_type == PLL_TYPE7)) {
4495 		n1 = factor6(n1);
4496 		n2 += CC_F5_BIAS;
4497 	} else if (pll_type == PLL_TYPE2) {
4498 		n1 += CC_T2_BIAS;
4499 		n2 += CC_T2_BIAS;
4500 		ASSERT((n1 >= 2) && (n1 <= 7));
4501 		ASSERT((n2 >= 5) && (n2 <= 23));
4502 	} else if (pll_type == PLL_TYPE5) {
4503 		/* 5365 */
4504 		return (100000000);
4505 	} else
4506 		ASSERT(0);
4507 	/* PLL types 3 and 7 use BASE2 (25Mhz) */
4508 	if ((pll_type == PLL_TYPE3) ||
4509 	    (pll_type == PLL_TYPE7)) {
4510 		clock = CC_CLOCK_BASE2 * n1 * n2;
4511 	} else
4512 		clock = CC_CLOCK_BASE1 * n1 * n2;
4513 
4514 	if (clock == 0)
4515 		return 0;
4516 
4517 	m1 = m & CC_M1_MASK;
4518 	m2 = (m & CC_M2_MASK) >> CC_M2_SHIFT;
4519 	m3 = (m & CC_M3_MASK) >> CC_M3_SHIFT;
4520 	mc = (m & CC_MC_MASK) >> CC_MC_SHIFT;
4521 
4522 	if ((pll_type == PLL_TYPE1) ||
4523 	    (pll_type == PLL_TYPE3) ||
4524 	    (pll_type == PLL_TYPE4) ||
4525 	    (pll_type == PLL_TYPE7)) {
4526 		m1 = factor6(m1);
4527 		if ((pll_type == PLL_TYPE1) || (pll_type == PLL_TYPE3))
4528 			m2 += CC_F5_BIAS;
4529 		else
4530 			m2 = factor6(m2);
4531 		m3 = factor6(m3);
4532 
4533 		switch (mc) {
4534 		case CC_MC_BYPASS:	return (clock);
4535 		case CC_MC_M1:		return divide_clock(clock, m1);
4536 		case CC_MC_M1M2:	return divide_clock(clock, m1 * m2);
4537 		case CC_MC_M1M2M3:	return divide_clock(clock, m1 * m2 * m3);
4538 		case CC_MC_M1M3:	return divide_clock(clock, m1 * m3);
4539 		default:		return (0);
4540 		}
4541 	} else {
4542 		ASSERT(pll_type == PLL_TYPE2);
4543 
4544 		m1 += CC_T2_BIAS;
4545 		m2 += CC_T2M2_BIAS;
4546 		m3 += CC_T2_BIAS;
4547 		ASSERT((m1 >= 2) && (m1 <= 7));
4548 		ASSERT((m2 >= 3) && (m2 <= 10));
4549 		ASSERT((m3 >= 2) && (m3 <= 7));
4550 
4551 		if ((mc & CC_T2MC_M1BYP) == 0)
4552 			clock /= m1;
4553 		if ((mc & CC_T2MC_M2BYP) == 0)
4554 			clock /= m2;
4555 		if ((mc & CC_T2MC_M3BYP) == 0)
4556 			clock /= m3;
4557 
4558 		return (clock);
4559 	}
4560 }
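
/*
 * Worked example (editorial note), following the code above for PLL_TYPE1: the PLL output is
 *	clock = CC_CLOCK_BASE1 * factor6(n1) * (n2 + CC_F5_BIAS)
 * and the SI clock is that value divided according to 'mc', e.g. mc == CC_MC_M1M2 yields
 *	clock / (factor6(m1) * (m2 + CC_F5_BIAS))
 * while mc == CC_MC_BYPASS returns the undivided PLL output.
 */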
4561 
4562 /**
4563  * Some chips could have multiple host interfaces, but only one will be active for a
4564  * given chip. Return the active host interface, based on the package option and cc_chipst.
4565  */
4566 uint
4567 si_chip_hostif(const si_t *sih)
4568 {
4569 	uint hosti = 0;
4570 
4571 	switch (CHIPID(sih->chip)) {
4572 	case BCM43012_CHIP_ID:
4573 	case BCM43013_CHIP_ID:
4574 	case BCM43014_CHIP_ID:
4575 		hosti = CHIP_HOSTIF_SDIOMODE;
4576 		break;
4577 	CASE_BCM43602_CHIP:
4578 		hosti = CHIP_HOSTIF_PCIEMODE;
4579 		break;
4580 
4581 	case BCM4360_CHIP_ID:
4582 		/* chippkg bit-0 == 0 is PCIE only pkgs
4583 		 * chippkg bit-0 == 1 has both PCIE and USB cores enabled
4584 		 */
4585 		if ((sih->chippkg & 0x1) && (sih->chipst & CST4360_MODE_USB))
4586 			hosti = CHIP_HOSTIF_USBMODE;
4587 		else
4588 			hosti = CHIP_HOSTIF_PCIEMODE;
4589 
4590 		break;
4591 
4592 	case BCM4369_CHIP_GRPID:
4593 		 if (CST4369_CHIPMODE_SDIOD(sih->chipst))
4594 			 hosti = CHIP_HOSTIF_SDIOMODE;
4595 		 else if (CST4369_CHIPMODE_PCIE(sih->chipst))
4596 			 hosti = CHIP_HOSTIF_PCIEMODE;
4597 		 break;
4598 	case BCM4376_CHIP_GRPID:
4599 	case BCM4378_CHIP_GRPID:
4600 	case BCM4385_CHIP_GRPID:
4601 	case BCM4387_CHIP_GRPID:
4602 	case BCM4388_CHIP_GRPID:
4603 	case BCM4389_CHIP_GRPID:
4604 		 hosti = CHIP_HOSTIF_PCIEMODE;
4605 		 break;
4606 	 case BCM4362_CHIP_GRPID:
4607 		if (CST4362_CHIPMODE_SDIOD(sih->chipst)) {
4608 			hosti = CHIP_HOSTIF_SDIOMODE;
4609 		} else if (CST4362_CHIPMODE_PCIE(sih->chipst)) {
4610 			hosti = CHIP_HOSTIF_PCIEMODE;
4611 		}
4612 		break;
4613 
4614 	default:
4615 		break;
4616 	}
4617 
4618 	return hosti;
4619 }
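
/*
 * Usage sketch (editorial illustration): callers typically just compare the result
 * against the CHIP_HOSTIF_* values, e.g.
 *
 *	if (si_chip_hostif(sih) == CHIP_HOSTIF_PCIEMODE) {
 *		... PCIe-specific setup ...
 *	}
 */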
4620 
4621 #if !defined(BCMDONGLEHOST)
4622 uint32
4623 BCMINITFN(si_clock)(si_t *sih)
4624 {
4625 	const si_info_t *sii = SI_INFO(sih);
4626 	chipcregs_t *cc;
4627 	uint32 n, m;
4628 	uint idx;
4629 	uint32 pll_type, rate;
4630 	bcm_int_bitmask_t intr_val;
4631 
4632 	INTR_OFF(sii, &intr_val);
4633 	if (PMUCTL_ENAB(sih)) {
4634 		rate = si_pmu_si_clock(sih, sii->osh);
4635 		goto exit;
4636 	}
4637 
4638 	idx = sii->curidx;
4639 	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
4640 	ASSERT(cc != NULL);
4641 
4642 	n = R_REG(sii->osh, &cc->clockcontrol_n);
4643 	pll_type = sih->cccaps & CC_CAP_PLL_MASK;
4644 	if (pll_type == PLL_TYPE6)
4645 		m = R_REG(sii->osh, &cc->clockcontrol_m3);
4646 	else if (pll_type == PLL_TYPE3)
4647 		m = R_REG(sii->osh, &cc->clockcontrol_m2);
4648 	else
4649 		m = R_REG(sii->osh, &cc->clockcontrol_sb);
4650 
4651 	/* calculate rate */
4652 	rate = si_clock_rate(pll_type, n, m);
4653 
4654 	if (pll_type == PLL_TYPE3)
4655 		rate = rate / 2;
4656 
4657 	/* switch back to previous core */
4658 	si_setcoreidx(sih, idx);
4659 exit:
4660 	INTR_RESTORE(sii, &intr_val);
4661 
4662 	return rate;
4663 }
4664 
4665 /** returns value in [Hz] units */
4666 uint32
4667 BCMINITFN(si_alp_clock)(si_t *sih)
4668 {
4669 	if (PMUCTL_ENAB(sih)) {
4670 		return si_pmu_alp_clock(sih, si_osh(sih));
4671 	}
4672 
4673 	return ALP_CLOCK;
4674 }
4675 
4676 /** returns value in [Hz] units */
4677 uint32
4678 BCMINITFN(si_ilp_clock)(si_t *sih)
4679 {
4680 	if (PMUCTL_ENAB(sih))
4681 		return si_pmu_ilp_clock(sih, si_osh(sih));
4682 
4683 	return ILP_CLOCK;
4684 }
4685 #endif /* !defined(BCMDONGLEHOST) */
4686 
4687 /** set chip watchdog reset timer to fire in 'ticks' */
4688 void
4689 si_watchdog(si_t *sih, uint ticks)
4690 {
4691 	uint nb, maxt;
4692 	uint pmu_wdt = 1;
4693 
4694 	if (PMUCTL_ENAB(sih) && pmu_wdt) {
4695 		nb = (CCREV(sih->ccrev) < 26) ? 16 : ((CCREV(sih->ccrev) >= 37) ? 32 : 24);
4696 		/* The mips compiler uses the sllv instruction,
4697 		 * so we specially handle the 32-bit case.
4698 		 */
4699 		if (nb == 32)
4700 			maxt = 0xffffffff;
4701 		else
4702 			maxt = ((1 << nb) - 1);
4703 
4704 		/* PR43821: PMU watchdog timer needs min. of 2 ticks */
4705 		if (ticks == 1)
4706 			ticks = 2;
4707 		else if (ticks > maxt)
4708 			ticks = maxt;
4709 #ifndef DONGLEBUILD
4710 		if ((CHIPID(sih->chip) == BCM43012_CHIP_ID) ||
4711 			(CHIPID(sih->chip) == BCM43013_CHIP_ID) ||
4712 			(CHIPID(sih->chip) == BCM43014_CHIP_ID)) {
4713 			PMU_REG_NEW(sih, min_res_mask, ~0, DEFAULT_43012_MIN_RES_MASK);
4714 			PMU_REG_NEW(sih, watchdog_res_mask, ~0, DEFAULT_43012_MIN_RES_MASK);
4715 			PMU_REG_NEW(sih, pmustatus, PST_WDRESET, PST_WDRESET);
4716 			PMU_REG_NEW(sih, pmucontrol_ext, PCTL_EXT_FASTLPO_SWENAB, 0);
4717 			SPINWAIT((PMU_REG(sih, pmustatus, 0, 0) & PST_ILPFASTLPO),
4718 				PMU_MAX_TRANSITION_DLY);
4719 		}
4720 #endif /* DONGLEBUILD */
4721 		pmu_corereg(sih, SI_CC_IDX, pmuwatchdog, ~0, ticks);
4722 	} else {
4723 #if !defined(BCMDONGLEHOST)
4724 		/* make sure we come up in fast clock mode; or if clearing, clear clock */
4725 		si_clkctl_cc(sih, ticks ? CLK_FAST : CLK_DYNAMIC);
4726 #endif /* !defined(BCMDONGLEHOST) */
4727 		maxt = (1 << 28) - 1;
4728 		if (ticks > maxt)
4729 			ticks = maxt;
4730 
4731 		if (CCREV(sih->ccrev) >= 65) {
4732 			si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0,
4733 				(ticks & WD_COUNTER_MASK) | WD_SSRESET_PCIE_F0_EN |
4734 					WD_SSRESET_PCIE_ALL_FN_EN);
4735 		} else {
4736 			si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, ticks);
4737 		}
4738 	}
4739 }
4740 
4741 /** trigger watchdog reset after ms milliseconds */
4742 void
4743 si_watchdog_ms(si_t *sih, uint32 ms)
4744 {
4745 	si_watchdog(sih, wd_msticks * ms);
4746 }
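
/*
 * Usage sketch (editorial illustration): arm the chip watchdog to reset the chip after
 * roughly two seconds unless it is re-armed; passing 0 is assumed here to clear/disable
 * the timer (see the ticks handling in si_watchdog() above).
 *
 *	si_watchdog_ms(sih, 2000);
 *	...
 *	si_watchdog_ms(sih, 0);
 */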
4747 
4748 uint32 si_watchdog_msticks(void)
4749 {
4750 	return wd_msticks;
4751 }
4752 
4753 bool
4754 si_taclear(si_t *sih, bool details)
4755 {
4756 #if defined(BCMDBG_ERR) || defined(BCMASSERT_SUPPORT) || \
4757 	defined(BCMDBG_DUMP)
4758 	if (CHIPTYPE(sih->socitype) == SOCI_SB)
4759 		return sb_taclear(sih, details);
4760 	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
4761 		(CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
4762 		(CHIPTYPE(sih->socitype) == SOCI_NCI) ||
4763 		(CHIPTYPE(sih->socitype) == SOCI_NAI))
4764 		return FALSE;
4765 	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
4766 		return FALSE;
4767 	else {
4768 		ASSERT(0);
4769 		return FALSE;
4770 	}
4771 #else
4772 	return FALSE;
4773 #endif /* BCMDBG_ERR || BCMASSERT_SUPPORT || BCMDBG_DUMP */
4774 }
4775 
4776 #if !defined(BCMDONGLEHOST)
4777 /**
4778  * Map sb core id to pci device id.
4779  */
4780 uint16
4781 BCMATTACHFN(si_d11_devid)(si_t *sih)
4782 {
4783 	const si_info_t *sii = SI_INFO(sih);
4784 	uint16 device;
4785 
4786 	(void) sii;
4787 	if (FWSIGN_ENAB()) {
4788 		return 0xffff;
4789 	}
4790 
4791 	/* normal case: nvram variable with devpath->devid->wl0id */
4792 	if ((device = (uint16)si_getdevpathintvar(sih, rstr_devid)) != 0)
4793 		;
4794 	/* Get devid from OTP/SPROM depending on where the SROM is read */
4795 	else if ((device = (uint16)getintvar(sii->vars, rstr_devid)) != 0)
4796 		;
4797 	/* no longer support wl0id, but keep the code here for backward compatibility. */
4798 	else if ((device = (uint16)getintvar(sii->vars, rstr_wl0id)) != 0)
4799 		;
4800 	else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
4801 		;
4802 	else {
4803 		/* ignore it */
4804 		device = 0xffff;
4805 	}
4806 	return device;
4807 }
4808 
4809 int
4810 BCMATTACHFN(si_corepciid)(si_t *sih, uint func, uint16 *pcivendor, uint16 *pcidevice,
4811                           uint8 *pciclass, uint8 *pcisubclass, uint8 *pciprogif,
4812                           uint8 *pciheader)
4813 {
4814 	uint16 vendor = 0xffff, device = 0xffff;
4815 	uint8 class, subclass, progif = 0;
4816 	uint8 header = PCI_HEADER_NORMAL;
4817 	uint32 core = si_coreid(sih);
4818 
4819 	/* Verify whether the function exists for the core */
4820 	if (func >= (uint)((core == USB20H_CORE_ID) || (core == NS_USB20_CORE_ID) ? 2 : 1))
4821 		return BCME_ERROR;
4822 
4823 	/* Known vendor translations */
4824 	switch (si_corevendor(sih)) {
4825 	case SB_VEND_BCM:
4826 	case MFGID_BRCM:
4827 		vendor = VENDOR_BROADCOM;
4828 		break;
4829 	default:
4830 		return BCME_ERROR;
4831 	}
4832 
4833 	/* Determine class based on known core codes */
4834 	switch (core) {
4835 	case ENET_CORE_ID:
4836 		class = PCI_CLASS_NET;
4837 		subclass = PCI_NET_ETHER;
4838 		device = BCM47XX_ENET_ID;
4839 		break;
4840 	case GIGETH_CORE_ID:
4841 		class = PCI_CLASS_NET;
4842 		subclass = PCI_NET_ETHER;
4843 		device = BCM47XX_GIGETH_ID;
4844 		break;
4845 	case GMAC_CORE_ID:
4846 		class = PCI_CLASS_NET;
4847 		subclass = PCI_NET_ETHER;
4848 		device = BCM47XX_GMAC_ID;
4849 		break;
4850 	case SDRAM_CORE_ID:
4851 	case MEMC_CORE_ID:
4852 	case DMEMC_CORE_ID:
4853 	case SOCRAM_CORE_ID:
4854 		class = PCI_CLASS_MEMORY;
4855 		subclass = PCI_MEMORY_RAM;
4856 		device = (uint16)core;
4857 		break;
4858 	case PCI_CORE_ID:
4859 	case PCIE_CORE_ID:
4860 	case PCIE2_CORE_ID:
4861 		class = PCI_CLASS_BRIDGE;
4862 		subclass = PCI_BRIDGE_PCI;
4863 		device = (uint16)core;
4864 		header = PCI_HEADER_BRIDGE;
4865 		break;
4866 	case CODEC_CORE_ID:
4867 		class = PCI_CLASS_COMM;
4868 		subclass = PCI_COMM_MODEM;
4869 		device = BCM47XX_V90_ID;
4870 		break;
4871 	case I2S_CORE_ID:
4872 		class = PCI_CLASS_MMEDIA;
4873 		subclass = PCI_MMEDIA_AUDIO;
4874 		device = BCM47XX_AUDIO_ID;
4875 		break;
4876 	case USB_CORE_ID:
4877 	case USB11H_CORE_ID:
4878 		class = PCI_CLASS_SERIAL;
4879 		subclass = PCI_SERIAL_USB;
4880 		progif = 0x10; /* OHCI */
4881 		device = BCM47XX_USBH_ID;
4882 		break;
4883 	case USB20H_CORE_ID:
4884 	case NS_USB20_CORE_ID:
4885 		class = PCI_CLASS_SERIAL;
4886 		subclass = PCI_SERIAL_USB;
4887 		progif = func == 0 ? 0x10 : 0x20; /* OHCI/EHCI value defined in spec */
4888 		device = BCM47XX_USB20H_ID;
4889 		header = PCI_HEADER_MULTI; /* multifunction */
4890 		break;
4891 	case IPSEC_CORE_ID:
4892 		class = PCI_CLASS_CRYPT;
4893 		subclass = PCI_CRYPT_NETWORK;
4894 		device = BCM47XX_IPSEC_ID;
4895 		break;
4896 	case NS_USB30_CORE_ID:
4897 		class = PCI_CLASS_SERIAL;
4898 		subclass = PCI_SERIAL_USB;
4899 		progif = 0x30; /* XHCI */
4900 		device = BCM47XX_USB30H_ID;
4901 		break;
4902 	case ROBO_CORE_ID:
4903 		/* Don't use class NETWORK, so wl/et won't attempt to recognize it */
4904 		class = PCI_CLASS_COMM;
4905 		subclass = PCI_COMM_OTHER;
4906 		device = BCM47XX_ROBO_ID;
4907 		break;
4908 	case CC_CORE_ID:
4909 		class = PCI_CLASS_MEMORY;
4910 		subclass = PCI_MEMORY_FLASH;
4911 		device = (uint16)core;
4912 		break;
4913 	case SATAXOR_CORE_ID:
4914 		class = PCI_CLASS_XOR;
4915 		subclass = PCI_XOR_QDMA;
4916 		device = BCM47XX_SATAXOR_ID;
4917 		break;
4918 	case ATA100_CORE_ID:
4919 		class = PCI_CLASS_DASDI;
4920 		subclass = PCI_DASDI_IDE;
4921 		device = BCM47XX_ATA100_ID;
4922 		break;
4923 	case USB11D_CORE_ID:
4924 		class = PCI_CLASS_SERIAL;
4925 		subclass = PCI_SERIAL_USB;
4926 		device = BCM47XX_USBD_ID;
4927 		break;
4928 	case USB20D_CORE_ID:
4929 		class = PCI_CLASS_SERIAL;
4930 		subclass = PCI_SERIAL_USB;
4931 		device = BCM47XX_USB20D_ID;
4932 		break;
4933 	case D11_CORE_ID:
4934 		class = PCI_CLASS_NET;
4935 		subclass = PCI_NET_OTHER;
4936 		device = si_d11_devid(sih);
4937 		break;
4938 
4939 	default:
4940 		class = subclass = progif = 0xff;
4941 		device = (uint16)core;
4942 		break;
4943 	}
4944 
4945 	*pcivendor = vendor;
4946 	*pcidevice = device;
4947 	*pciclass = class;
4948 	*pcisubclass = subclass;
4949 	*pciprogif = progif;
4950 	*pciheader = header;
4951 
4952 	return 0;
4953 }
4954 
4955 #if defined(BCMDBG) || defined(BCMDBG_DUMP) || defined(BCMDBG_PHYDUMP)
4956 /** print interesting sbconfig registers */
4957 void
4958 si_dumpregs(si_t *sih, struct bcmstrbuf *b)
4959 {
4960 	si_info_t *sii = SI_INFO(sih);
4961 	uint origidx;
4962 	bcm_int_bitmask_t intr_val;
4963 
4964 	origidx = sii->curidx;
4965 
4966 	INTR_OFF(sii, &intr_val);
4967 	if (CHIPTYPE(sih->socitype) == SOCI_SB)
4968 		sb_dumpregs(sih, b);
4969 	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
4970 		(CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
4971 		(CHIPTYPE(sih->socitype) == SOCI_NAI))
4972 		ai_dumpregs(sih, b);
4973 	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
4974 		ub_dumpregs(sih, b);
4975 	else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
4976 		nci_dumpregs(sih, b);
4977 	else
4978 		ASSERT(0);
4979 
4980 	si_setcoreidx(sih, origidx);
4981 	INTR_RESTORE(sii, &intr_val);
4982 }
4983 #endif	/* BCMDBG || BCMDBG_DUMP || BCMDBG_PHYDUMP */
4984 #endif /* !defined(BCMDONGLEHOST) */
4985 
4986 #ifdef BCMDBG
4987 void
4988 si_view(si_t *sih, bool verbose)
4989 {
4990 	if (CHIPTYPE(sih->socitype) == SOCI_SB)
4991 		sb_view(sih, verbose);
4992 	else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
4993 		(CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
4994 		(CHIPTYPE(sih->socitype) == SOCI_NAI))
4995 		ai_view(sih, verbose);
4996 	else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
4997 		ub_view(sih, verbose);
4998 	else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
4999 		nci_view(sih, verbose);
5000 	else
5001 		ASSERT(0);
5002 }
5003 
5004 void
5005 si_viewall(si_t *sih, bool verbose)
5006 {
5007 	si_info_t *sii = SI_INFO(sih);
5008 	uint curidx, i;
5009 	bcm_int_bitmask_t intr_val;
5010 
5011 	curidx = sii->curidx;
5012 
5013 	INTR_OFF(sii, &intr_val);
5014 	if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
5015 		(CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
5016 		(CHIPTYPE(sih->socitype) == SOCI_NAI))
5017 		ai_viewall(sih, verbose);
5018 	else if (CHIPTYPE(sih->socitype) == SOCI_NCI)
5019 		nci_viewall(sih, verbose);
5020 	else {
5021 		SI_ERROR(("si_viewall: num_cores %d\n", sii->numcores));
5022 		for (i = 0; i < sii->numcores; i++) {
5023 			si_setcoreidx(sih, i);
5024 			si_view(sih, verbose);
5025 		}
5026 	}
5027 	si_setcoreidx(sih, curidx);
5028 	INTR_RESTORE(sii, &intr_val);
5029 }
5030 #endif	/* BCMDBG */
5031 
5032 /** return the slow clock source - LPO, XTAL, or PCI */
5033 static uint
5034 si_slowclk_src(si_info_t *sii)
5035 {
5036 	chipcregs_t *cc;
5037 
5038 	ASSERT(SI_FAST(sii) || si_coreid(&sii->pub) == CC_CORE_ID);
5039 
5040 	if (CCREV(sii->pub.ccrev) < 6) {
5041 		if ((BUSTYPE(sii->pub.bustype) == PCI_BUS) &&
5042 		    (OSL_PCI_READ_CONFIG(sii->osh, PCI_GPIO_OUT, sizeof(uint32)) &
5043 		     PCI_CFG_GPIO_SCS))
5044 			return (SCC_SS_PCI);
5045 		else
5046 			return (SCC_SS_XTAL);
5047 	} else if (CCREV(sii->pub.ccrev) < 10) {
5048 		cc = (chipcregs_t *)si_setcoreidx(&sii->pub, sii->curidx);
5049 		ASSERT(cc);
5050 		return (R_REG(sii->osh, &cc->slow_clk_ctl) & SCC_SS_MASK);
5051 	} else	/* Insta-clock */
5052 		return (SCC_SS_XTAL);
5053 }
5054 
5055 /** return the ILP (slowclock) min or max frequency */
5056 static uint
5057 si_slowclk_freq(si_info_t *sii, bool max_freq, chipcregs_t *cc)
5058 {
5059 	uint32 slowclk;
5060 	uint div;
5061 
5062 	ASSERT(SI_FAST(sii) || si_coreid(&sii->pub) == CC_CORE_ID);
5063 
5064 	/* shouldn't be here unless we've established the chip has dynamic clk control */
5065 	ASSERT(R_REG(sii->osh, &cc->capabilities) & CC_CAP_PWR_CTL);
5066 
5067 	slowclk = si_slowclk_src(sii);
5068 	if (CCREV(sii->pub.ccrev) < 6) {
5069 		if (slowclk == SCC_SS_PCI)
5070 			return (max_freq ? (PCIMAXFREQ / 64) : (PCIMINFREQ / 64));
5071 		else
5072 			return (max_freq ? (XTALMAXFREQ / 32) : (XTALMINFREQ / 32));
5073 	} else if (CCREV(sii->pub.ccrev) < 10) {
5074 		div = 4 *
5075 		        (((R_REG(sii->osh, &cc->slow_clk_ctl) & SCC_CD_MASK) >> SCC_CD_SHIFT) + 1);
5076 		if (slowclk == SCC_SS_LPO)
5077 			return (max_freq ? LPOMAXFREQ : LPOMINFREQ);
5078 		else if (slowclk == SCC_SS_XTAL)
5079 			return (max_freq ? (XTALMAXFREQ / div) : (XTALMINFREQ / div));
5080 		else if (slowclk == SCC_SS_PCI)
5081 			return (max_freq ? (PCIMAXFREQ / div) : (PCIMINFREQ / div));
5082 		else
5083 			ASSERT(0);
5084 	} else {
5085 		/* Chipc rev 10 is InstaClock */
5086 		div = R_REG(sii->osh, &cc->system_clk_ctl) >> SYCC_CD_SHIFT;
5087 		div = 4 * (div + 1);
5088 		return (max_freq ? XTALMAXFREQ : (XTALMINFREQ / div));
5089 	}
5090 	return (0);
5091 }
5092 
5093 static void
5094 BCMINITFN(si_clkctl_setdelay)(si_info_t *sii, void *chipcregs)
5095 {
5096 	chipcregs_t *cc = (chipcregs_t *)chipcregs;
5097 	uint slowmaxfreq, pll_delay, slowclk;
5098 	uint pll_on_delay, fref_sel_delay;
5099 
5100 	pll_delay = PLL_DELAY;
5101 
5102 	/* If the slow clock is not sourced by the xtal then add the xtal_on_delay
5103 	 * since the xtal will also be powered down by dynamic clk control logic.
5104 	 */
5105 
5106 	slowclk = si_slowclk_src(sii);
5107 	if (slowclk != SCC_SS_XTAL)
5108 		pll_delay += XTAL_ON_DELAY;
5109 
5110 	/* Starting with 4318 it is ILP that is used for the delays */
5111 	slowmaxfreq = si_slowclk_freq(sii, (CCREV(sii->pub.ccrev) >= 10) ? FALSE : TRUE, cc);
5112 
5113 	pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000;
5114 	fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000;
5115 
5116 	W_REG(sii->osh, &cc->pll_on_delay, pll_on_delay);
5117 	W_REG(sii->osh, &cc->fref_sel_delay, fref_sel_delay);
5118 }
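/*
 * Illustrative arithmetic only (the actual PLL_DELAY/FREF_DELAY and slow clock
 * frequency constants are defined elsewhere): the pll_on_delay/fref_sel_delay
 * expressions in si_clkctl_setdelay() convert a delay in microseconds into
 * slow-clock ticks, rounding up. For a hypothetical ~43 kHz LPO slow clock and
 * a total pll_delay of 1150 us (PLL_DELAY plus XTAL_ON_DELAY), this gives
 * pll_on_delay = (43000 * 1150 + 999999) / 1000000 = 50 ticks.
 */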
5119 
5120 /** initialize power control delay registers */
5121 void
5122 BCMINITFN(si_clkctl_init)(si_t *sih)
5123 {
5124 	si_info_t *sii;
5125 	uint origidx = 0;
5126 	chipcregs_t *cc;
5127 	bool fast;
5128 
5129 	if (!CCCTL_ENAB(sih))
5130 		return;
5131 
5132 	sii = SI_INFO(sih);
5133 	fast = SI_FAST(sii);
5134 	if (!fast) {
5135 		origidx = sii->curidx;
5136 		if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL)
5137 			return;
5138 	} else if ((cc = (chipcregs_t *)CCREGS_FAST(sii)) == NULL)
5139 		return;
5140 	ASSERT(cc != NULL);
5141 
5142 	/* set all Instaclk chip ILP to 1 MHz */
5143 	if (CCREV(sih->ccrev) >= 10)
5144 		SET_REG(sii->osh, &cc->system_clk_ctl, SYCC_CD_MASK,
5145 		        (ILP_DIV_1MHZ << SYCC_CD_SHIFT));
5146 
5147 	si_clkctl_setdelay(sii, (void *)(uintptr)cc);
5148 
5149 	/* PR 110294 */
5150 	OSL_DELAY(20000);
5151 
5152 	if (!fast)
5153 		si_setcoreidx(sih, origidx);
5154 }
5155 
5156 #if !defined(BCMDONGLEHOST)
5157 /** return the value suitable for writing to the dot11 core FAST_PWRUP_DELAY register */
5158 uint16
5159 BCMINITFN(si_clkctl_fast_pwrup_delay)(si_t *sih)
5160 {
5161 	si_info_t *sii = SI_INFO(sih);
5162 	uint origidx = 0;
5163 	chipcregs_t *cc;
5164 	uint slowminfreq;
5165 	uint16 fpdelay;
5166 	bcm_int_bitmask_t intr_val;
5167 	bool fast;
5168 
5169 	if (PMUCTL_ENAB(sih)) {
5170 		INTR_OFF(sii, &intr_val);
5171 		fpdelay = si_pmu_fast_pwrup_delay(sih, sii->osh);
5172 		INTR_RESTORE(sii, &intr_val);
5173 		return fpdelay;
5174 	}
5175 
5176 	if (!CCCTL_ENAB(sih))
5177 		return 0;
5178 
5179 	fast = SI_FAST(sii);
5180 	fpdelay = 0;
5181 	if (!fast) {
5182 		origidx = sii->curidx;
5183 		INTR_OFF(sii, &intr_val);
5184 		if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL)
5185 			goto done;
5186 	} else if ((cc = (chipcregs_t *)CCREGS_FAST(sii)) == NULL) {
5187 		goto done;
5188 	}
5189 
5190 	ASSERT(cc != NULL);
5191 
5192 	slowminfreq = si_slowclk_freq(sii, FALSE, cc);
5193 	if (slowminfreq > 0)
5194 		fpdelay = (((R_REG(sii->osh, &cc->pll_on_delay) + 2) * 1000000) +
5195 		(slowminfreq - 1)) / slowminfreq;
5196 
5197 done:
5198 	if (!fast) {
5199 		si_setcoreidx(sih, origidx);
5200 		INTR_RESTORE(sii, &intr_val);
5201 	}
5202 	return fpdelay;
5203 }
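/*
 * Rough sanity check of the non-PMU path above, using made-up numbers: the
 * expression is a ceiling conversion from (pll_on_delay + 2) slow-clock ticks
 * back to microseconds. With pll_on_delay = 50 ticks and a 25 kHz minimum
 * slow clock, fpdelay = ((50 + 2) * 1000000 + 24999) / 25000 = 2080 us.
 */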
5204 
5205 /** turn primary xtal and/or pll off/on */
5206 int
5207 si_clkctl_xtal(si_t *sih, uint what, bool on)
5208 {
5209 	si_info_t *sii;
5210 	uint32 in, out, outen;
5211 
5212 	sii = SI_INFO(sih);
5213 
5214 	switch (BUSTYPE(sih->bustype)) {
5215 
5216 #ifdef BCMSDIO
5217 	case SDIO_BUS:
5218 		return (-1);
5219 #endif	/* BCMSDIO */
5220 
5221 	case PCI_BUS:
5222 		/* pcie core doesn't have any mapping to control the xtal pu */
5223 		if (PCIE(sii) || PCIE_GEN2(sii))
5224 			return -1;
5225 
5226 		in = OSL_PCI_READ_CONFIG(sii->osh, PCI_GPIO_IN, sizeof(uint32));
5227 		out = OSL_PCI_READ_CONFIG(sii->osh, PCI_GPIO_OUT, sizeof(uint32));
5228 		outen = OSL_PCI_READ_CONFIG(sii->osh, PCI_GPIO_OUTEN, sizeof(uint32));
5229 
5230 		/*
5231 		 * Avoid glitching the clock if GPRS is already using it.
5232 		 * We can't actually read the state of the PLLPD so we infer it
5233 		 * by the value of XTAL_PU which *is* readable via gpioin.
5234 		 */
5235 		if (on && (in & PCI_CFG_GPIO_XTAL))
5236 			return (0);
5237 
5238 		if (what & XTAL)
5239 			outen |= PCI_CFG_GPIO_XTAL;
5240 		if (what & PLL)
5241 			outen |= PCI_CFG_GPIO_PLL;
5242 
5243 		if (on) {
5244 			/* turn primary xtal on */
5245 			if (what & XTAL) {
5246 				out |= PCI_CFG_GPIO_XTAL;
5247 				if (what & PLL)
5248 					out |= PCI_CFG_GPIO_PLL;
5249 				OSL_PCI_WRITE_CONFIG(sii->osh, PCI_GPIO_OUT,
5250 				                     sizeof(uint32), out);
5251 				OSL_PCI_WRITE_CONFIG(sii->osh, PCI_GPIO_OUTEN,
5252 				                     sizeof(uint32), outen);
5253 				OSL_DELAY(XTAL_ON_DELAY);
5254 			}
5255 
5256 			/* turn pll on */
5257 			if (what & PLL) {
5258 				out &= ~PCI_CFG_GPIO_PLL;
5259 				OSL_PCI_WRITE_CONFIG(sii->osh, PCI_GPIO_OUT,
5260 				                     sizeof(uint32), out);
5261 				OSL_DELAY(2000);
5262 			}
5263 		} else {
5264 			if (what & XTAL)
5265 				out &= ~PCI_CFG_GPIO_XTAL;
5266 			if (what & PLL)
5267 				out |= PCI_CFG_GPIO_PLL;
5268 			OSL_PCI_WRITE_CONFIG(sii->osh, PCI_GPIO_OUT, sizeof(uint32), out);
5269 			OSL_PCI_WRITE_CONFIG(sii->osh, PCI_GPIO_OUTEN, sizeof(uint32),
5270 			                     outen);
5271 		}
5272 		return 0;
5273 
5274 	default:
5275 		return (-1);
5276 	}
5277 
5278 	return (0);
5279 }
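/*
 * Hypothetical caller sketch (not taken from this file): a PCI NIC driver that
 * wants the crystal and PLL powered before touching the backplane could do
 *
 *	si_clkctl_xtal(sih, XTAL | PLL, ON);
 *	... access chip registers ...
 *	si_clkctl_xtal(sih, XTAL | PLL, OFF);
 *
 * On PCIe (gen1/gen2) devices the call is a no-op and returns -1, as above.
 */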
5280 
5281 /**
5282  *  clock control policy function through chipcommon
5283  *
5284  *    set dynamic clk control mode (forceslow, forcefast, dynamic)
5285  *    returns true if we are forcing fast clock
5286  *    this is a wrapper over the next internal function
5287  *      to allow flexible policy settings for outside caller
5288  */
5289 bool
5290 si_clkctl_cc(si_t *sih, uint mode)
5291 {
5292 	si_info_t *sii;
5293 
5294 	sii = SI_INFO(sih);
5295 
5296 	/* chipcommon cores prior to rev6 don't support dynamic clock control */
5297 	if (CCREV(sih->ccrev) < 6)
5298 		return FALSE;
5299 
5300 	return _si_clkctl_cc(sii, mode);
5301 }
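/*
 * Hypothetical usage sketch, assuming the CLK_FAST/CLK_DYNAMIC modes handled in
 * _si_clkctl_cc() below: a caller that needs the HT (PLL) clock held during a
 * register sequence might bracket it with
 *
 *	si_clkctl_cc(sih, CLK_FAST);     force the fast clock
 *	... time-critical register accesses ...
 *	si_clkctl_cc(sih, CLK_DYNAMIC);  hand control back to dynamic clocking
 */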
5302 
5303 /* clk control mechanism through chipcommon, no policy checking */
5304 static bool
5305 _si_clkctl_cc(si_info_t *sii, uint mode)
5306 {
5307 	uint origidx = 0;
5308 	chipcregs_t *cc;
5309 	uint32 scc;
5310 	bcm_int_bitmask_t intr_val;
5311 	bool fast = SI_FAST(sii);
5312 
5313 	/* chipcommon cores prior to rev6 don't support dynamic clock control */
5314 	if (CCREV(sii->pub.ccrev) < 6)
5315 		return (FALSE);
5316 
5317 	/* Chips with ccrev 10 are EOL and they don't have SYCC_HR which we use below */
5318 	ASSERT(CCREV(sii->pub.ccrev) != 10);
5319 
5320 	if (!fast) {
5321 		INTR_OFF(sii, &intr_val);
5322 		origidx = sii->curidx;
5323 		cc = (chipcregs_t *) si_setcore(&sii->pub, CC_CORE_ID, 0);
5324 	} else if ((cc = (chipcregs_t *) CCREGS_FAST(sii)) == NULL)
5325 		goto done;
5326 	ASSERT(cc != NULL);
5327 
5328 	if (!CCCTL_ENAB(&sii->pub) && (CCREV(sii->pub.ccrev) < 20))
5329 		goto done;
5330 
5331 	switch (mode) {
5332 	case CLK_FAST:	/* FORCEHT, fast (pll) clock */
5333 		if (CCREV(sii->pub.ccrev) < 10) {
5334 			/* don't forget to force xtal back on before we clear SCC_DYN_XTAL.. */
5335 			si_clkctl_xtal(&sii->pub, XTAL, ON);
5336 			SET_REG(sii->osh, &cc->slow_clk_ctl, (SCC_XC | SCC_FS | SCC_IP), SCC_IP);
5337 		} else if (CCREV(sii->pub.ccrev) < 20) {
5338 			OR_REG(sii->osh, &cc->system_clk_ctl, SYCC_HR);
5339 		} else {
5340 			OR_REG(sii->osh, &cc->clk_ctl_st, CCS_FORCEHT);
5341 		}
5342 
5343 		/* wait for the PLL */
5344 		if (PMUCTL_ENAB(&sii->pub)) {
5345 			uint32 htavail = CCS_HTAVAIL;
5346 			SPINWAIT(((R_REG(sii->osh, &cc->clk_ctl_st) & htavail) == 0),
5347 			         PMU_MAX_TRANSITION_DLY);
5348 			ASSERT(R_REG(sii->osh, &cc->clk_ctl_st) & htavail);
5349 		} else {
5350 			OSL_DELAY(PLL_DELAY);
5351 		}
5352 		break;
5353 
5354 	case CLK_DYNAMIC:	/* enable dynamic clock control */
5355 		if (CCREV(sii->pub.ccrev) < 10) {
5356 			scc = R_REG(sii->osh, &cc->slow_clk_ctl);
5357 			scc &= ~(SCC_FS | SCC_IP | SCC_XC);
5358 			if ((scc & SCC_SS_MASK) != SCC_SS_XTAL)
5359 				scc |= SCC_XC;
5360 			W_REG(sii->osh, &cc->slow_clk_ctl, scc);
5361 
5362 			/* for dynamic control, we have to release our xtal_pu "force on" */
5363 			if (scc & SCC_XC)
5364 				si_clkctl_xtal(&sii->pub, XTAL, OFF);
5365 		} else if (CCREV(sii->pub.ccrev) < 20) {
5366 			/* Instaclock */
5367 			AND_REG(sii->osh, &cc->system_clk_ctl, ~SYCC_HR);
5368 		} else {
5369 			AND_REG(sii->osh, &cc->clk_ctl_st, ~CCS_FORCEHT);
5370 		}
5371 
5372 		/* wait for the PLL */
5373 		if (PMUCTL_ENAB(&sii->pub)) {
5374 			uint32 htavail = CCS_HTAVAIL;
5375 			SPINWAIT(((R_REG(sii->osh, &cc->clk_ctl_st) & htavail) != 0),
5376 			         PMU_MAX_TRANSITION_DLY);
5377 			ASSERT(!(R_REG(sii->osh, &cc->clk_ctl_st) & htavail));
5378 		} else {
5379 			OSL_DELAY(PLL_DELAY);
5380 		}
5381 
5382 		break;
5383 
5384 	default:
5385 		ASSERT(0);
5386 	}
5387 
5388 done:
5389 	if (!fast) {
5390 		si_setcoreidx(&sii->pub, origidx);
5391 		INTR_RESTORE(sii, &intr_val);
5392 	}
5393 	return (mode == CLK_FAST);
5394 }
5395 
5396 /** Build device path. Support SI, PCI for now. */
5397 int
5398 BCMNMIATTACHFN(si_devpath)(const si_t *sih, char *path, int size)
5399 {
5400 	int slen;
5401 
5402 	ASSERT(path != NULL);
5403 	ASSERT(size >= SI_DEVPATH_BUFSZ);
5404 
5405 	if (!path || size <= 0)
5406 		return -1;
5407 
5408 	switch (BUSTYPE(sih->bustype)) {
5409 	case SI_BUS:
5410 		slen = snprintf(path, (size_t)size, "sb/%u/", si_coreidx(sih));
5411 		break;
5412 	case PCI_BUS:
5413 		ASSERT((SI_INFO(sih))->osh != NULL);
5414 		slen = snprintf(path, (size_t)size, "pci/%u/%u/",
5415 		                OSL_PCI_BUS((SI_INFO(sih))->osh),
5416 		                OSL_PCI_SLOT((SI_INFO(sih))->osh));
5417 		break;
5418 #ifdef BCMSDIO
5419 	case SDIO_BUS:
5420 		SI_ERROR(("si_devpath: device 0 assumed\n"));
5421 		slen = snprintf(path, (size_t)size, "sd/%u/", si_coreidx(sih));
5422 		break;
5423 #endif
5424 	default:
5425 		slen = -1;
5426 		ASSERT(0);
5427 		break;
5428 	}
5429 
5430 	if (slen < 0 || slen >= size) {
5431 		path[0] = '\0';
5432 		return -1;
5433 	}
5434 
5435 	return 0;
5436 }
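/*
 * Example of the strings produced above (illustrative values): core index 3 on
 * an SI bus yields "sb/3/", while PCI bus 1 / slot 0 yields "pci/1/0/".
 */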
5437 
5438 int
5439 BCMNMIATTACHFN(si_devpath_pcie)(const si_t *sih, char *path, int size)
5440 {
5441 	ASSERT(path != NULL);
5442 	ASSERT(size >= SI_DEVPATH_BUFSZ);
5443 
5444 	if (!path || size <= 0)
5445 		return -1;
5446 
5447 	ASSERT((SI_INFO(sih))->osh != NULL);
5448 	snprintf(path, (size_t)size, "pcie/%u/%u/",
5449 		OSL_PCIE_DOMAIN((SI_INFO(sih))->osh),
5450 		OSL_PCIE_BUS((SI_INFO(sih))->osh));
5451 
5452 	return 0;
5453 }
5454 
5455 char *
5456 BCMATTACHFN(si_coded_devpathvar)(const si_t *sih, char *varname, int var_len, const char *name)
5457 {
5458 	char pathname[SI_DEVPATH_BUFSZ + 32];
5459 	char devpath[SI_DEVPATH_BUFSZ + 32];
5460 	char devpath_pcie[SI_DEVPATH_BUFSZ + 32];
5461 	char *p;
5462 	int idx;
5463 	int len1;
5464 	int len2;
5465 	int len3 = 0;
5466 
5467 	if (FWSIGN_ENAB()) {
5468 		return NULL;
5469 	}
5470 	if (BUSTYPE(sih->bustype) == PCI_BUS) {
5471 		snprintf(devpath_pcie, SI_DEVPATH_BUFSZ, "pcie/%u/%u",
5472 			OSL_PCIE_DOMAIN((SI_INFO(sih))->osh),
5473 			OSL_PCIE_BUS((SI_INFO(sih))->osh));
5474 		len3 = strlen(devpath_pcie);
5475 	}
5476 
5477 	/* try to get compact devpath if it exists */
5478 	if (si_devpath(sih, devpath, SI_DEVPATH_BUFSZ) == 0) {
5479 		/* devpath now is 'zzz/zz/', adjust length to */
5480 		/* eliminate ending '/' (if present) */
5481 		len1 = strlen(devpath);
5482 		if (devpath[len1 - 1] == '/')
5483 			len1--;
5484 
5485 		for (idx = 0; idx < SI_MAXCORES; idx++) {
5486 			snprintf(pathname, SI_DEVPATH_BUFSZ, rstr_devpathD, idx);
5487 			if ((p = getvar(NULL, pathname)) == NULL)
5488 				continue;
5489 
5490 			/* eliminate ending '/' (if present) */
5491 			len2 = strlen(p);
5492 			if (p[len2 - 1] == '/')
5493 				len2--;
5494 
5495 			/* check that both lengths match and if so compare */
5496 		 * the strings (minus trailing '/'s if present) */
5497 			if ((len1 == len2) && (memcmp(p, devpath, len1) == 0)) {
5498 				snprintf(varname, var_len, rstr_D_S, idx, name);
5499 				return varname;
5500 			}
5501 
5502 			/* try the new PCIe devpath format if it exists */
5503 			if (len3 && (len3 == len2) && (memcmp(p, devpath_pcie, len3) == 0)) {
5504 				snprintf(varname, var_len, rstr_D_S, idx, name);
5505 				return varname;
5506 			}
5507 		}
5508 	}
5509 
5510 	return NULL;
5511 }
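/*
 * Sketch of the compact-devpath scheme handled above, with hypothetical NVRAM
 * contents: if NVRAM holds "devpath0=pci/1/1" and this device's path is also
 * "pci/1/1/", then a request for "macaddr" resolves (assuming rstr_D_S is a
 * "%d:%s"-style format) to the short variable name "0:macaddr" instead of the
 * full "pci/1/1/macaddr".
 */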
5512 
5513 /** Get a variable, but only if it has a devpath prefix */
5514 char *
5515 BCMATTACHFN(si_getdevpathvar)(const si_t *sih, const char *name)
5516 {
5517 	char varname[SI_DEVPATH_BUFSZ + 32];
5518 	char *val;
5519 
5520 	si_devpathvar(sih, varname, sizeof(varname), name);
5521 
5522 	if ((val = getvar(NULL, varname)) != NULL)
5523 		return val;
5524 
5525 	if (BUSTYPE(sih->bustype) == PCI_BUS) {
5526 		si_pcie_devpathvar(sih, varname, sizeof(varname), name);
5527 		if ((val = getvar(NULL, varname)) != NULL)
5528 			return val;
5529 	}
5530 
5531 	/* try to get compact devpath if it exists */
5532 	if (si_coded_devpathvar(sih, varname, sizeof(varname), name) == NULL)
5533 		return NULL;
5534 
5535 	return (getvar(NULL, varname));
5536 }
5537 
5538 /** Get a variable, but only if it has a devpath prefix */
5539 int
5540 BCMATTACHFN(si_getdevpathintvar)(const si_t *sih, const char *name)
5541 {
5542 #if defined(BCMBUSTYPE) && (BCMBUSTYPE == SI_BUS)
5543 	BCM_REFERENCE(sih);
5544 	return (getintvar(NULL, name));
5545 #else
5546 	char varname[SI_DEVPATH_BUFSZ + 32];
5547 	int val;
5548 
5549 	si_devpathvar(sih, varname, sizeof(varname), name);
5550 
5551 	if ((val = getintvar(NULL, varname)) != 0)
5552 		return val;
5553 
5554 	if (BUSTYPE(sih->bustype) == PCI_BUS) {
5555 		si_pcie_devpathvar(sih, varname, sizeof(varname), name);
5556 		if ((val = getintvar(NULL, varname)) != 0)
5557 			return val;
5558 	}
5559 
5560 	/* try to get compact devpath if it exists */
5561 	if (si_coded_devpathvar(sih, varname, sizeof(varname), name) == NULL)
5562 		return 0;
5563 
5564 	return (getintvar(NULL, varname));
5565 #endif /* BCMBUSTYPE && BCMBUSTYPE == SI_BUS */
5566 }
5567 
5568 /**
5569  * Concatenate the dev path with a varname into the given 'var' buffer
5570  * and return the 'var' pointer.
5571  * Nothing is done to the arguments if len == 0 or var is NULL; var is still returned.
5572  * On overflow, the first char will be set to '\0'.
5573  */
5574 static char *
5575 BCMATTACHFN(si_devpathvar)(const si_t *sih, char *var, int len, const char *name)
5576 {
5577 	uint path_len;
5578 
5579 	if (!var || len <= 0)
5580 		return var;
5581 
5582 	if (si_devpath(sih, var, len) == 0) {
5583 		path_len = strlen(var);
5584 
5585 		if (strlen(name) + 1 > (uint)(len - path_len))
5586 			var[0] = '\0';
5587 		else
5588 			strlcpy(var + path_len, name, len - path_len);
5589 	}
5590 
5591 	return var;
5592 }
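/*
 * Illustrative result (hypothetical inputs): with a device path of "sb/3/" and
 * name "rssismf2g", the buffer ends up holding "sb/3/rssismf2g".
 */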
5593 
5594 static char *
5595 BCMATTACHFN(si_pcie_devpathvar)(const si_t *sih, char *var, int len, const char *name)
5596 {
5597 	uint path_len;
5598 
5599 	if (!var || len <= 0)
5600 		return var;
5601 
5602 	if (si_devpath_pcie(sih, var, len) == 0) {
5603 		path_len = strlen(var);
5604 
5605 		if (strlen(name) + 1 > (uint)(len - path_len))
5606 			var[0] = '\0';
5607 		else
5608 			strlcpy(var + path_len, name, len - path_len);
5609 	}
5610 
5611 	return var;
5612 }
5613 
5614 uint32
5615 BCMPOSTTRAPFN(si_ccreg)(si_t *sih, uint32 offset, uint32 mask, uint32 val)
5616 {
5617 	si_info_t *sii;
5618 	uint32 reg_val = 0;
5619 
5620 	sii = SI_INFO(sih);
5621 
5622 	/* abort for invalid offset */
5623 	if (offset > sizeof(chipcregs_t))
5624 		return 0;
5625 
5626 	reg_val = si_corereg(&sii->pub, SI_CC_IDX, offset, mask, val);
5627 
5628 	return reg_val;
5629 }
5630 
5631 void
5632 sih_write_sraon(si_t *sih, int offset, int len, const uint32* data)
5633 {
5634 	chipcregs_t *cc;
5635 	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
5636 	W_REG(si_osh(sih), &cc->sr_memrw_addr, offset);
5637 	while (len > 0) {
5638 		W_REG(si_osh(sih), &cc->sr_memrw_data, *data);
5639 		data++;
5640 		len -= sizeof(uint32);
5641 	}
5642 }
5643 
5644 #ifdef SR_DEBUG
5645 void
5646 si_dump_pmu(si_t *sih, void *arg)
5647 {
5648 	uint i;
5649 	uint32 pmu_chip_ctl_reg;
5650 	uint32 pmu_chip_reg_reg;
5651 	uint32 pmu_chip_pll_reg;
5652 	uint32 pmu_chip_res_reg;
5653 	pmu_reg_t *pmu_var = (pmu_reg_t*)arg;
5654 	pmu_var->pmu_control = si_ccreg(sih, PMU_CTL, 0, 0);
5655 	pmu_var->pmu_capabilities = si_ccreg(sih, PMU_CAP, 0, 0);
5656 	pmu_var->pmu_status = si_ccreg(sih, PMU_ST, 0, 0);
5657 	pmu_var->res_state = si_ccreg(sih, PMU_RES_STATE, 0, 0);
5658 	pmu_var->res_pending = si_ccreg(sih, PMU_RES_PENDING, 0, 0);
5659 	pmu_var->pmu_timer1 = si_ccreg(sih, PMU_TIMER, 0, 0);
5660 	pmu_var->min_res_mask = si_ccreg(sih, MINRESMASKREG, 0, 0);
5661 	pmu_var->max_res_mask = si_ccreg(sih, MAXRESMASKREG, 0, 0);
5662 	pmu_chip_ctl_reg = (pmu_var->pmu_capabilities & 0xf8000000);
5663 	pmu_chip_ctl_reg = pmu_chip_ctl_reg >> 27;
5664 	for (i = 0; i < pmu_chip_ctl_reg; i++) {
5665 		pmu_var->pmu_chipcontrol1[i] = si_pmu_chipcontrol(sih, i, 0, 0);
5666 	}
5667 	pmu_chip_reg_reg = (pmu_var->pmu_capabilities & 0x07c00000);
5668 	pmu_chip_reg_reg = pmu_chip_reg_reg >> 22;
5669 	for (i = 0; i < pmu_chip_reg_reg; i++) {
5670 		pmu_var->pmu_regcontrol[i] = si_pmu_vreg_control(sih, i, 0, 0);
5671 	}
5672 	pmu_chip_pll_reg = (pmu_var->pmu_capabilities & 0x003e0000);
5673 	pmu_chip_pll_reg = pmu_chip_pll_reg >> 17;
5674 	for (i = 0; i < pmu_chip_pll_reg; i++) {
5675 		pmu_var->pmu_pllcontrol[i] = si_pmu_pllcontrol(sih, i, 0, 0);
5676 	}
5677 	pmu_chip_res_reg = (pmu_var->pmu_capabilities & 0x00001f00);
5678 	pmu_chip_res_reg = pmu_chip_res_reg >> 8;
5679 	for (i = 0; i < pmu_chip_res_reg; i++) {
5680 		si_corereg(sih, SI_CC_IDX, RSRCTABLEADDR, ~0, i);
5681 		pmu_var->pmu_rsrc_up_down_timer[i] = si_corereg(sih, SI_CC_IDX,
5682 			RSRCUPDWNTIME, 0, 0);
5683 	}
5684 	pmu_chip_res_reg = (pmu_var->pmu_capabilities & 0x00001f00);
5685 	pmu_chip_res_reg = pmu_chip_res_reg >> 8;
5686 	for (i = 0; i < pmu_chip_res_reg; i++) {
5687 		si_corereg(sih, SI_CC_IDX, RSRCTABLEADDR, ~0, i);
5688 		pmu_var->rsrc_dep_mask[i] = si_corereg(sih, SI_CC_IDX, PMU_RES_DEP_MASK, 0, 0);
5689 	}
5690 }
5691 
5692 void
5693 si_pmu_keep_on(const si_t *sih, int32 int_val)
5694 {
5695 	const si_info_t *sii = SI_INFO(sih);
5696 	chipcregs_t *cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
5697 	uint32 res_dep_mask;
5698 	uint32 min_res_mask;
5699 	uint32 max_res_mask;
5700 
5701 	/* Corresponding Resource Dependency Mask */
5702 	W_REG(sii->osh, &cc->res_table_sel, int_val);
5703 	res_dep_mask = R_REG(sii->osh, &cc->res_dep_mask);
5704 	/* Local change of minimum resource mask */
5705 	min_res_mask = res_dep_mask | 1 << int_val;
5706 	/* Corresponding change of Maximum Resource Mask */
5707 	max_res_mask = R_REG(sii->osh, &cc->max_res_mask);
5708 	max_res_mask  = max_res_mask | min_res_mask;
5709 	W_REG(sii->osh, &cc->max_res_mask, max_res_mask);
5710 	/* Corresponding change of Minimum Resource Mask */
5711 	W_REG(sii->osh, &cc->min_res_mask, min_res_mask);
5712 }
5713 
5714 uint32
5715 si_pmu_keep_on_get(const si_t *sih)
5716 {
5717 	uint i;
5718 	const si_info_t *sii = SI_INFO(sih);
5719 	chipcregs_t *cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
5720 	uint32 res_dep_mask;
5721 	uint32 min_res_mask;
5722 
5723 	/* Read min res mask */
5724 	min_res_mask = R_REG(sii->osh, &cc->min_res_mask);
5725 	/* Get corresponding Resource Dependency Mask */
5726 	for (i = 0; i < PMU_RES; i++) {
5727 		W_REG(sii->osh, &cc->res_table_sel, i);
5728 		res_dep_mask = R_REG(sii->osh, &cc->res_dep_mask);
5729 		res_dep_mask = res_dep_mask | 1 << i;
5730 		/* Compare with the present min res mask */
5731 		if (res_dep_mask == min_res_mask) {
5732 			return i;
5733 		}
5734 	}
5735 	return 0;
5736 }
5737 
5738 uint32
5739 si_power_island_set(si_t *sih, uint32 int_val)
5740 {
5741 	uint32 i = 0x0;
5742 	uint32 j;
5743 	uint32 k;
5744 	int cnt = 0;
5745 	for (k = 0; k < ARRAYSIZE(si_power_island_test_array); k++) {
5746 		if (int_val == si_power_island_test_array[k]) {
5747 			cnt = cnt + 1;
5748 		}
5749 	}
5750 	if (cnt > 0) {
5751 		if (int_val & SUBCORE_POWER_ON) {
5752 			i = i | 0x1;
5753 		}
5754 		if (int_val & PHY_POWER_ON) {
5755 			i = i | 0x2;
5756 		}
5757 		if (int_val & VDDM_POWER_ON) {
5758 			i = i | 0x4;
5759 		}
5760 		if (int_val & MEMLPLDO_POWER_ON) {
5761 			i = i | 0x8;
5762 		}
5763 		j = (i << 18) & 0x003c0000;
5764 		si_pmu_chipcontrol(sih, CHIPCTRLREG2, 0x003c0000, j);
5765 	} else {
5766 		return 0;
5767 	}
5768 
5769 	return 1;
5770 }
5771 
5772 uint32
5773 si_power_island_get(si_t *sih)
5774 {
5775 	uint32 sc_on = 0x0;
5776 	uint32 phy_on = 0x0;
5777 	uint32 vddm_on = 0x0;
5778 	uint32 memlpldo_on = 0x0;
5779 	uint32 res;
5780 	uint32 reg_val;
5781 	reg_val = si_pmu_chipcontrol(sih, CHIPCTRLREG2, 0, 0);
5782 	if (reg_val & SUBCORE_POWER_ON_CHK) {
5783 		sc_on = SUBCORE_POWER_ON;
5784 	}
5785 	if (reg_val & PHY_POWER_ON_CHK) {
5786 		phy_on = PHY_POWER_ON;
5787 	}
5788 	if (reg_val & VDDM_POWER_ON_CHK) {
5789 		vddm_on = VDDM_POWER_ON;
5790 	}
5791 	if (reg_val & MEMLPLDO_POWER_ON_CHK) {
5792 		memlpldo_on = MEMLPLDO_POWER_ON;
5793 	}
5794 	res = (sc_on | phy_on | vddm_on | memlpldo_on);
5795 	return res;
5796 }
5797 #endif /* SR_DEBUG */
5798 
5799 uint32
5800 si_pciereg(const si_t *sih, uint32 offset, uint32 mask, uint32 val, uint type)
5801 {
5802 	const si_info_t *sii = SI_INFO(sih);
5803 
5804 	if (!PCIE(sii)) {
5805 		SI_ERROR(("si_pciereg: Not a PCIE device\n"));
5806 		return 0;
5807 	}
5808 
5809 	return pcicore_pciereg(sii->pch, offset, mask, val, type);
5810 }
5811 
5812 uint32
5813 si_pcieserdesreg(const si_t *sih, uint32 mdioslave, uint32 offset, uint32 mask, uint32 val)
5814 {
5815 	const si_info_t *sii = SI_INFO(sih);
5816 
5817 	if (!PCIE(sii)) {
5818 		SI_ERROR(("si_pcieserdesreg: Not a PCIE device\n"));
5819 		return 0;
5820 	}
5821 
5822 	return pcicore_pcieserdesreg(sii->pch, mdioslave, offset, mask, val);
5823 
5824 }
5825 
5826 /** return TRUE if PCIE capability exists in the pci config space */
5827 static bool
5828 BCMATTACHFN(si_ispcie)(const si_info_t *sii)
5829 {
5830 	uint8 cap_ptr;
5831 
5832 	if (BUSTYPE(sii->pub.bustype) != PCI_BUS)
5833 		return FALSE;
5834 
5835 	cap_ptr = pcicore_find_pci_capability(sii->osh, PCI_CAP_PCIECAP_ID, NULL, NULL);
5836 	if (!cap_ptr)
5837 		return FALSE;
5838 
5839 	return TRUE;
5840 }
5841 
5842 /* Wake-on-wireless-LAN (WOWL) support functions */
5843 /** Enable PME generation and disable clkreq */
5844 void
5845 si_pci_pmeen(const si_t *sih)
5846 {
5847 	pcicore_pmeen(SI_INFO(sih)->pch);
5848 }
5849 
5850 /** Return TRUE if PME status is set */
5851 bool
5852 si_pci_pmestat(const si_t *sih)
5853 {
5854 	return pcicore_pmestat(SI_INFO(sih)->pch);
5855 }
5856 
5857 /** Disable PME generation, clear the PME status bit if set */
5858 void
5859 si_pci_pmeclr(const si_t *sih)
5860 {
5861 	pcicore_pmeclr(SI_INFO(sih)->pch);
5862 }
5863 
5864 void
5865 si_pci_pmestatclr(const si_t *sih)
5866 {
5867 	pcicore_pmestatclr(SI_INFO(sih)->pch);
5868 }
5869 
5870 #ifdef BCMSDIO
5871 /** initialize the sdio core */
5872 void
5873 si_sdio_init(si_t *sih)
5874 {
5875 	const si_info_t *sii = SI_INFO(sih);
5876 
5877 	if (BUSCORETYPE(sih->buscoretype) == SDIOD_CORE_ID) {
5878 		uint idx;
5879 		sdpcmd_regs_t *sdpregs;
5880 
5881 		/* get the current core index */
5882 		/* could do stuff like tcpflag in pci, but why? */
5883 		idx = sii->curidx;
5884 		ASSERT(idx == si_findcoreidx(sih, D11_CORE_ID, 0));
5885 
5886 		/* switch to sdio core */
5887 		/* could use buscoreidx? */
5888 		sdpregs = (sdpcmd_regs_t *)si_setcore(sih, SDIOD_CORE_ID, 0);
5889 		ASSERT(sdpregs);
5890 
5891 		SI_MSG(("si_sdio_init: For SDIO Corerev %d, enable ints from core %d "
5892 		        "through SD core %d (%p)\n",
5893 		        sih->buscorerev, idx, sii->curidx, OSL_OBFUSCATE_BUF(sdpregs)));
5894 
5895 		/* enable backplane error and core interrupts */
5896 		W_REG(sii->osh, &sdpregs->hostintmask, I_SBINT);
5897 		W_REG(sii->osh, &sdpregs->sbintmask, (I_SB_SERR | I_SB_RESPERR | (1 << idx)));
5898 
5899 		/* switch back to previous core */
5900 		si_setcoreidx(sih, idx);
5901 	}
5902 
5903 	/* enable interrupts */
5904 	bcmsdh_intr_enable(sii->sdh);
5905 
5906 	/* What else */
5907 }
5908 #endif	/* BCMSDIO */
5909 
5910 /**
5911  * Disable pcie_war_ovr for some platforms (sigh!)
5912  * This is for boards that have BFL2_PCIEWAR_OVR set
5913  * but are in systems that still want the benefits of ASPM
5914  * Note that this should be done AFTER si_doattach
5915  */
5916 void
5917 si_pcie_war_ovr_update(const si_t *sih, uint8 aspm)
5918 {
5919 	const si_info_t *sii = SI_INFO(sih);
5920 
5921 	if (!PCIE_GEN1(sii))
5922 		return;
5923 
5924 	pcie_war_ovr_aspm_update(sii->pch, aspm);
5925 }
5926 
5927 void
5928 si_pcie_power_save_enable(const si_t *sih, bool enable)
5929 {
5930 	const si_info_t *sii = SI_INFO(sih);
5931 
5932 	if (!PCIE_GEN1(sii))
5933 		return;
5934 
5935 	pcie_power_save_enable(sii->pch, enable);
5936 }
5937 
5938 void
5939 si_pcie_set_maxpayload_size(const si_t *sih, uint16 size)
5940 {
5941 	const si_info_t *sii = SI_INFO(sih);
5942 
5943 	if (!PCIE(sii))
5944 		return;
5945 
5946 	pcie_set_maxpayload_size(sii->pch, size);
5947 }
5948 
5949 uint16
5950 si_pcie_get_maxpayload_size(const si_t *sih)
5951 {
5952 	const si_info_t *sii = SI_INFO(sih);
5953 
5954 	if (!PCIE(sii))
5955 		return (0);
5956 
5957 	return pcie_get_maxpayload_size(sii->pch);
5958 }
5959 
5960 void
5961 si_pcie_set_request_size(const si_t *sih, uint16 size)
5962 {
5963 	const si_info_t *sii = SI_INFO(sih);
5964 
5965 	if (!PCIE(sii))
5966 		return;
5967 
5968 	pcie_set_request_size(sii->pch, size);
5969 }
5970 
5971 uint16
5972 BCMATTACHFN(si_pcie_get_request_size)(const si_t *sih)
5973 {
5974 	const si_info_t *sii = SI_INFO(sih);
5975 
5976 	if (!PCIE_GEN1(sii))
5977 		return (0);
5978 
5979 	return pcie_get_request_size(sii->pch);
5980 }
5981 
5982 uint16
5983 si_pcie_get_ssid(const si_t *sih)
5984 {
5985 	const si_info_t *sii = SI_INFO(sih);
5986 
5987 	if (!PCIE_GEN1(sii))
5988 		return (0);
5989 
5990 	return pcie_get_ssid(sii->pch);
5991 }
5992 
5993 uint32
5994 si_pcie_get_bar0(const si_t *sih)
5995 {
5996 	const si_info_t *sii = SI_INFO(sih);
5997 
5998 	if (!PCIE(sii))
5999 		return (0);
6000 
6001 	return pcie_get_bar0(sii->pch);
6002 }
6003 
6004 int
6005 si_pcie_configspace_cache(const si_t *sih)
6006 {
6007 	const si_info_t *sii = SI_INFO(sih);
6008 
6009 	if (!PCIE(sii))
6010 		return BCME_UNSUPPORTED;
6011 
6012 	return pcie_configspace_cache(sii->pch);
6013 }
6014 
6015 int
6016 si_pcie_configspace_restore(const si_t *sih)
6017 {
6018 	const si_info_t *sii = SI_INFO(sih);
6019 
6020 	if (!PCIE(sii))
6021 		return BCME_UNSUPPORTED;
6022 
6023 	return pcie_configspace_restore(sii->pch);
6024 }
6025 
6026 int
6027 si_pcie_configspace_get(const si_t *sih, uint8 *buf, uint size)
6028 {
6029 	const si_info_t *sii = SI_INFO(sih);
6030 
6031 	if (!PCIE(sii) || size > PCI_CONFIG_SPACE_SIZE)
6032 		return -1;
6033 
6034 	return pcie_configspace_get(sii->pch, buf, size);
6035 }
6036 
6037 void
6038 si_pcie_hw_L1SS_war(const si_t *sih)
6039 {
6040 	const si_info_t *sii = SI_INFO(sih);
6041 
6042 	/* SWWLAN-41753: WAR intermittent issue with D3Cold and L1.2 exit,
6043 	 * need to update PMU rsrc dependency
6044 	 */
6045 	if (PCIE_GEN2(sii))
6046 		pcie_hw_L1SS_war(sii->pch);
6047 }
6048 
6049 void
6050 BCMINITFN(si_pci_up)(const si_t *sih)
6051 {
6052 	const si_info_t *sii;
6053 
6054 	/* if not pci bus, we're done */
6055 	if (BUSTYPE(sih->bustype) != PCI_BUS)
6056 		return;
6057 
6058 	sii = SI_INFO(sih);
6059 
6060 	if (PCIE(sii)) {
6061 		pcicore_up(sii->pch, SI_PCIUP);
6062 	}
6063 }
6064 
6065 /** Unconfigure and/or apply various WARs when system is going to sleep mode */
6066 void
6067 BCMUNINITFN(si_pci_sleep)(const si_t *sih)
6068 {
6069 	/* 4360 pcie2 WAR */
6070 	do_4360_pcie2_war = 0;
6071 
6072 	pcicore_sleep(SI_INFO(sih)->pch);
6073 }
6074 
6075 /** Unconfigure and/or apply various WARs when the wireless interface is going down */
6076 void
6077 BCMINITFN(si_pci_down)(const si_t *sih)
6078 {
6079 	const si_info_t *sii = SI_INFO(sih);
6080 	BCM_REFERENCE(sii);
6081 
6082 	/* if not pci bus, we're done */
6083 	if (BUSTYPE(sih->bustype) != PCI_BUS)
6084 		return;
6085 
6086 	pcicore_down(sii->pch, SI_PCIDOWN);
6087 }
6088 
6089 /**
6090  * Configure the pci core for pci client (NIC) action
6091  * coremask is the bitvec of cores by index to be enabled.
6092  */
6093 void
6094 BCMATTACHFN(si_pci_setup)(si_t *sih, uint coremask)
6095 {
6096 	const si_info_t *sii = SI_INFO(sih);
6097 	sbpciregs_t *pciregs = NULL;
6098 	uint32 siflag = 0, w;
6099 	uint idx = 0;
6100 
6101 	if (BUSTYPE(sii->pub.bustype) != PCI_BUS)
6102 		return;
6103 
6104 	ASSERT(PCI(sii) || PCIE(sii));
6105 	ASSERT(sii->pub.buscoreidx != BADIDX);
6106 
6107 	if (PCI(sii)) {
6108 		/* get current core index */
6109 		idx = sii->curidx;
6110 
6111 		/* we interrupt on this backplane flag number */
6112 		siflag = si_flag(sih);
6113 
6114 		/* switch over to pci core */
6115 		pciregs = (sbpciregs_t *)si_setcoreidx(sih, sii->pub.buscoreidx);
6116 	}
6117 
6118 	/*
6119 	 * Enable sb->pci interrupts.  Assume
6120 	 * PCI rev 2.3 support was added in pci core rev 6 and things changed..
6121 	 */
6122 	if (PCIE(sii) || (PCI(sii) && ((sii->pub.buscorerev) >= 6))) {
6123 		/* pci config write to set this core bit in PCIIntMask */
6124 		w = OSL_PCI_READ_CONFIG(sii->osh, PCI_INT_MASK, sizeof(uint32));
6125 		w |= (coremask << PCI_SBIM_SHIFT);
6126 #ifdef USER_MODE
6127 		/* User mode operates with interrupts disabled: clear the bits instead */
6128 		w &= ~(coremask << PCI_SBIM_SHIFT);
6129 #endif
6130 		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_INT_MASK, sizeof(uint32), w);
6131 	} else {
6132 		/* set sbintvec bit for our flag number */
6133 		si_setint(sih, siflag);
6134 	}
6135 
6136 	/*
6137 	 * enable prefetch and bursts for dma big window
6138 	 * enable read multiple for dma big window corerev >= 11
6139 	 * PR 9962/4708: Set initiator timeouts. corerev < 5
6140 	 */
6141 	if (PCI(sii)) {
6142 		OR_REG(sii->osh, &pciregs->sbtopci2, (SBTOPCI_PREF | SBTOPCI_BURST));
6143 		if (sii->pub.buscorerev >= 11) {
6144 			OR_REG(sii->osh, &pciregs->sbtopci2, SBTOPCI_RC_READMULTI);
6145 			/* PR50531: On some Laptops, the 4321 CB shows bad
6146 			 * UDP performance on one direction
6147 			 */
6148 			w = R_REG(sii->osh, &pciregs->clkrun);
6149 			W_REG(sii->osh, &pciregs->clkrun, (w | PCI_CLKRUN_DSBL));
6150 			w = R_REG(sii->osh, &pciregs->clkrun);
6151 		}
6152 
6153 		/* switch back to previous core */
6154 		si_setcoreidx(sih, idx);
6155 	}
6156 }
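/*
 * Hypothetical caller sketch: to route backplane interrupts from the D11 core
 * to the host when running as a PCI NIC, a driver could pass that core's index
 * as a bitmask, e.g.
 *
 *	si_pci_setup(sih, 1 << si_findcoreidx(sih, D11_CORE_ID, 0));
 *
 * which sets the corresponding bit in PCIIntMask (or sbintvec on older cores).
 */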
6157 
6158 /* In NIC mode, is there a better way to find out which ARM core is present? */
6159 static uint
6160 BCMATTACHFN(si_get_armcoreidx)(si_t *sih)
6161 {
6162 	uint saveidx = si_coreidx(sih);
6163 	uint coreidx = BADIDX;
6164 
6165 	if (si_setcore(sih, ARMCR4_CORE_ID, 0) != NULL ||
6166 	    si_setcore(sih, ARMCA7_CORE_ID, 0) != NULL) {
6167 		coreidx = si_coreidx(sih);
6168 	}
6169 
6170 	si_setcoreidx(sih, saveidx);
6171 
6172 	return coreidx;
6173 }
6174 
6175 /**
6176  * Configure the pcie core for pcie client (NIC) action
6177  * coreidx is the index of the core to be enabled.
6178  */
6179 int
6180 BCMATTACHFN(si_pcie_setup)(si_t *sih, uint coreidx)
6181 {
6182 	si_info_t *sii = SI_INFO(sih);
6183 	int main_intr, alt_intr;
6184 	uint pciepidx;
6185 	uint32 w;
6186 	osl_t *osh = si_osh(sih);
6187 	uint saveidx = si_coreidx(sih);
6188 	volatile void *oobrregs;
6189 	uint armcidx, armpidx;
6190 	int ret = BCME_OK;
6191 
6192 	/* try the new hnd oobr first */
6193 	if ((oobrregs = si_setcore(sih, HND_OOBR_CORE_ID, 0)) == NULL) {
6194 		goto exit;
6195 	}
6196 
6197 	ASSERT(BUSTYPE(sih->bustype) == PCI_BUS);
6198 	ASSERT(BUSTYPE(sih->buscoretype) == PCIE2_CORE_ID);
6199 
6200 	/* ==== Enable sb->pci interrupts ==== */
6201 
6202 	/* 1) query the pcie interrupt port index and
6203 	 *    re-route the main interrupt to pcie (from ARM) if necessary
6204 	 */
6205 	main_intr = hnd_oobr_get_intr_config(sih, coreidx,
6206 			HND_CORE_MAIN_INTR, sih->buscoreidx, &pciepidx);
6207 	if (main_intr < 0) {
6208 		/* query failure means the main interrupt is not routed
6209 		 * to the pcie core... re-route!
6210 		 */
6211 		armcidx = si_get_armcoreidx(sih);
6212 		if (!GOODIDX(armcidx, sii->numcores)) {
6213 			SI_MSG(("si_pcie_setup: arm core not found\n"));
6214 			ret = BCME_NOTFOUND;
6215 			goto exit;
6216 		}
6217 
6218 		/* query main and alt interrupt info */
6219 		main_intr = hnd_oobr_get_intr_config(sih, coreidx,
6220 				HND_CORE_MAIN_INTR, armcidx, &armpidx);
6221 		alt_intr = hnd_oobr_get_intr_config(sih, coreidx,
6222 				HND_CORE_ALT_INTR, sih->buscoreidx, &pciepidx);
6223 		if ((ret = main_intr) < 0 || (ret = alt_intr) < 0) {
6224 			SI_MSG(("si_pcie_setup: coreidx %u main (=%d) or "
6225 			        "alt (=%d) interrupt query failed\n",
6226 			        coreidx, main_intr, alt_intr));
6227 			goto exit;
6228 		}
6229 
6230 		/* swap main and alt interrupts at pcie input interrupts */
6231 		hnd_oobr_set_intr_src(sih, sih->buscoreidx, pciepidx, main_intr);
6232 		/* TODO: route the alternate interrupt to arm */
6233 		/* hnd_oobr_set_intr_src(sih, armcidx, armppidx, alt_intr); */
6234 		BCM_REFERENCE(armpidx);
6235 
6236 		/* query main interrupt info again.
6237 		 * is it really necessary?
6238 		 * it can't fail as we just set it up...
6239 		 */
6240 		main_intr = hnd_oobr_get_intr_config(sih, coreidx,
6241 				HND_CORE_MAIN_INTR, sih->buscoreidx, &pciepidx);
6242 		ASSERT(main_intr >= 0);
6243 	}
6244 	/* hnd_oobr_dump(sih); */
6245 
6246 	/* 2) pcie config write to set this core bit in PCIIntMask */
6247 	w = OSL_PCI_READ_CONFIG(osh, PCI_INT_MASK, sizeof(w));
6248 	w |= ((1 << pciepidx) << PCI_SBIM_SHIFT);
6249 	OSL_PCI_WRITE_CONFIG(osh, PCI_INT_MASK, sizeof(w), w);
6250 
6251 	/* ==== other setups ==== */
6252 
6253 	/* reset the return value */
6254 	ret = BCME_OK;
6255 exit:
6256 	/* return to the original core */
6257 	si_setcoreidx(sih, saveidx);
6258 	if (ret != BCME_OK) {
6259 		return ret;
6260 	}
6261 
6262 	/* fall back to the old way... */
6263 	if (oobrregs == NULL) {
6264 		uint coremask = (1 << coreidx);
6265 		si_pci_setup(sih, coremask);
6266 	}
6267 
6268 	return ret;
6269 }
6270 
6271 uint8
6272 si_pcieclkreq(const si_t *sih, uint32 mask, uint32 val)
6273 {
6274 	const si_info_t *sii = SI_INFO(sih);
6275 
6276 	if (!PCIE(sii))
6277 		return 0;
6278 
6279 	return pcie_clkreq(sii->pch, mask, val);
6280 }
6281 
6282 uint32
6283 si_pcielcreg(const si_t *sih, uint32 mask, uint32 val)
6284 {
6285 	const si_info_t *sii = SI_INFO(sih);
6286 
6287 	if (!PCIE(sii))
6288 		return 0;
6289 
6290 	return pcie_lcreg(sii->pch, mask, val);
6291 }
6292 
6293 uint8
6294 si_pcieltrenable(const si_t *sih, uint32 mask, uint32 val)
6295 {
6296 	const si_info_t *sii = SI_INFO(sih);
6297 
6298 	if (!(PCIE(sii)))
6299 		return 0;
6300 
6301 	return pcie_ltrenable(sii->pch, mask, val);
6302 }
6303 
6304 uint8
6305 BCMATTACHFN(si_pcieobffenable)(const si_t *sih, uint32 mask, uint32 val)
6306 {
6307 	const si_info_t *sii = SI_INFO(sih);
6308 
6309 	if (!(PCIE(sii)))
6310 		return 0;
6311 
6312 	return pcie_obffenable(sii->pch, mask, val);
6313 }
6314 
6315 uint32
6316 si_pcieltr_reg(const si_t *sih, uint32 reg, uint32 mask, uint32 val)
6317 {
6318 	const si_info_t *sii = SI_INFO(sih);
6319 
6320 	if (!(PCIE(sii)))
6321 		return 0;
6322 
6323 	return pcie_ltr_reg(sii->pch, reg, mask, val);
6324 }
6325 
6326 uint32
6327 si_pcieltrspacing_reg(const si_t *sih, uint32 mask, uint32 val)
6328 {
6329 	const si_info_t *sii = SI_INFO(sih);
6330 
6331 	if (!(PCIE(sii)))
6332 		return 0;
6333 
6334 	return pcieltrspacing_reg(sii->pch, mask, val);
6335 }
6336 
6337 uint32
6338 si_pcieltrhysteresiscnt_reg(const si_t *sih, uint32 mask, uint32 val)
6339 {
6340 	const si_info_t *sii = SI_INFO(sih);
6341 
6342 	if (!(PCIE(sii)))
6343 		return 0;
6344 
6345 	return pcieltrhysteresiscnt_reg(sii->pch, mask, val);
6346 }
6347 
6348 void
6349 si_pcie_set_error_injection(const si_t *sih, uint32 mode)
6350 {
6351 	const si_info_t *sii = SI_INFO(sih);
6352 
6353 	if (!PCIE(sii))
6354 		return;
6355 
6356 	pcie_set_error_injection(sii->pch, mode);
6357 }
6358 
6359 void
6360 si_pcie_set_L1substate(const si_t *sih, uint32 substate)
6361 {
6362 	const si_info_t *sii = SI_INFO(sih);
6363 
6364 	if (PCIE_GEN2(sii))
6365 		pcie_set_L1substate(sii->pch, substate);
6366 }
6367 #ifndef BCM_BOOTLOADER
6368 uint32
6369 si_pcie_get_L1substate(const si_t *sih)
6370 {
6371 	const si_info_t *sii = SI_INFO(sih);
6372 
6373 	if (PCIE_GEN2(sii))
6374 		return pcie_get_L1substate(sii->pch);
6375 
6376 	return 0;
6377 }
6378 #endif /* BCM_BOOTLOADER */
6379 /** indirect way to read pcie config regs */
6380 uint
6381 si_pcie_readreg(void *sih, uint addrtype, uint offset)
6382 {
6383 	return pcie_readreg(sih, (sbpcieregs_t *)PCIEREGS(((si_info_t *)sih)),
6384 	                    addrtype, offset);
6385 }
6386 
6387 /* indirect way to write pcie config regs */
6388 uint
6389 si_pcie_writereg(void *sih, uint addrtype, uint offset, uint val)
6390 {
6391 	return pcie_writereg(sih, (sbpcieregs_t *)PCIEREGS(((si_info_t *)sih)),
6392 	                    addrtype, offset, val);
6393 }
6394 
6395 /**
6396  * PCI(e) core requires additional software initialization in an SROMless system. In such a system,
6397  * the PCIe core will assume POR defaults, which are mostly ok, with the exception of the mapping of
6398  * two address subwindows within the BAR0 window.
6399  * Note: the current core may be changed upon return.
6400  */
6401 int
6402 si_pci_fixcfg(si_t *sih)
6403 {
6404 #ifndef DONGLEBUILD
6405 
6406 	uint origidx, pciidx;
6407 	sbpciregs_t *pciregs = NULL;
6408 	sbpcieregs_t *pcieregs = NULL;
6409 	uint16 val16;
6410 	volatile uint16 *reg16 = NULL;
6411 
6412 	si_info_t *sii = SI_INFO(sih);
6413 
6414 	ASSERT(BUSTYPE(sii->pub.bustype) == PCI_BUS);
6415 
6416 	/* Fixup PI in SROM shadow area to enable the correct PCI core access */
6417 	origidx = si_coreidx(&sii->pub);
6418 
6419 	/* check 'pi' is correct and fix it if not. */
6420 	if (BUSCORETYPE(sii->pub.buscoretype) == PCIE2_CORE_ID) {
6421 		pcieregs = (sbpcieregs_t *)si_setcore(&sii->pub, PCIE2_CORE_ID, 0);
6422 		ASSERT(pcieregs != NULL);
6423 		reg16 = &pcieregs->sprom[SRSH_PI_OFFSET];
6424 	} else if (BUSCORETYPE(sii->pub.buscoretype) == PCIE_CORE_ID) {
6425 		pcieregs = (sbpcieregs_t *)si_setcore(&sii->pub, PCIE_CORE_ID, 0);
6426 		ASSERT(pcieregs != NULL);
6427 		reg16 = &pcieregs->sprom[SRSH_PI_OFFSET];
6428 	} else if (BUSCORETYPE(sii->pub.buscoretype) == PCI_CORE_ID) {
6429 		pciregs = (sbpciregs_t *)si_setcore(&sii->pub, PCI_CORE_ID, 0);
6430 		ASSERT(pciregs != NULL);
6431 		reg16 = &pciregs->sprom[SRSH_PI_OFFSET];
6432 	}
6433 	pciidx = si_coreidx(&sii->pub);
6434 
6435 	if (!reg16) return -1;
6436 
6437 	val16 = R_REG(sii->osh, reg16);
6438 	if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) != (uint16)pciidx) {
6439 		/* write bitfield used to translate 3rd and 7th 4K chunk in the Bar0 space. */
6440 		val16 = (uint16)(pciidx << SRSH_PI_SHIFT) | (val16 & ~SRSH_PI_MASK);
6441 		W_REG(sii->osh, reg16, val16);
6442 	}
6443 
6444 	/* restore the original index */
6445 	si_setcoreidx(&sii->pub, origidx);
6446 
6447 	pcicore_hwup(sii->pch);
6448 #endif /* DONGLEBUILD */
6449 	return 0;
6450 } /* si_pci_fixcfg */
6451 
6452 #if defined(BCMDBG) || defined(BCMDBG_DUMP) || defined(WLTEST)
6453 int
6454 si_dump_pcieinfo(const si_t *sih, struct bcmstrbuf *b)
6455 {
6456 	const si_info_t *sii = SI_INFO(sih);
6457 
6458 	if (!PCIE_GEN1(sii) && !PCIE_GEN2(sii))
6459 		return BCME_ERROR;
6460 
6461 	return pcicore_dump_pcieinfo(sii->pch, b);
6462 }
6463 
6464 void
6465 si_dump_pmuregs(si_t *sih, struct bcmstrbuf *b)
6466 {
6467 	uint i;
6468 	uint32 pmu_cap;
6469 	uint32 pmu_chip_reg;
6470 
6471 	bcm_bprintf(b, "===pmu(rev %d)===\n", sih->pmurev);
6472 	if (!(sih->pmurev == 0x11 || (sih->pmurev >= 0x15 && sih->pmurev <= 0x19))) {
6473 		bcm_bprintf(b, "PMU dump not supported\n");
6474 		return;
6475 	}
6476 	pmu_cap = si_ccreg(sih, PMU_CAP, 0, 0);
6477 	bcm_bprintf(b, "pmu_control 0x%x\n", si_ccreg(sih, PMU_CTL, 0, 0));
6478 	bcm_bprintf(b, "pmu_capabilities 0x%x\n", pmu_cap);
6479 	bcm_bprintf(b, "pmu_status 0x%x\n", si_ccreg(sih, PMU_ST, 0, 0));
6480 	bcm_bprintf(b, "res_state 0x%x\n", si_ccreg(sih, PMU_RES_STATE, 0, 0));
6481 	bcm_bprintf(b, "res_pending 0x%x\n", si_ccreg(sih, PMU_RES_PENDING, 0, 0));
6482 	bcm_bprintf(b, "pmu_timer1 %d\n", si_ccreg(sih, PMU_TIMER, 0, 0));
6483 	bcm_bprintf(b, "min_res_mask 0x%x\n", si_ccreg(sih, MINRESMASKREG, 0, 0));
6484 	bcm_bprintf(b, "max_res_mask 0x%x\n", si_ccreg(sih, MAXRESMASKREG, 0, 0));
6485 
6486 	pmu_chip_reg = (pmu_cap & 0xf8000000);
6487 	pmu_chip_reg = pmu_chip_reg >> 27;
6488 	bcm_bprintf(b, "si_pmu_chipcontrol: ");
6489 	for (i = 0; i < pmu_chip_reg; i++) {
6490 		bcm_bprintf(b, "[%d]=0x%x ", i, si_pmu_chipcontrol(sih, i, 0, 0));
6491 	}
6492 
6493 	pmu_chip_reg = (pmu_cap & 0x07c00000);
6494 	pmu_chip_reg = pmu_chip_reg >> 22;
6495 	bcm_bprintf(b, "\nsi_pmu_vregcontrol: ");
6496 	for (i = 0; i < pmu_chip_reg; i++) {
6497 		bcm_bprintf(b, "[%d]=0x%x ", i, si_pmu_vreg_control(sih, i, 0, 0));
6498 	}
6499 	pmu_chip_reg = (pmu_cap & 0x003e0000);
6500 	pmu_chip_reg = pmu_chip_reg >> 17;
6501 	bcm_bprintf(b, "\nsi_pmu_pllcontrol: ");
6502 	for (i = 0; i < pmu_chip_reg; i++) {
6503 		bcm_bprintf(b, "[%d]=0x%x ", i, si_pmu_pllcontrol(sih, i, 0, 0));
6504 	}
6505 	pmu_chip_reg = (pmu_cap & 0x0001e000);
6506 	pmu_chip_reg = pmu_chip_reg >> 13;
6507 	bcm_bprintf(b, "\nsi_pmu_res u/d timer: ");
6508 	for (i = 0; i < pmu_chip_reg; i++) {
6509 		si_corereg(sih, SI_CC_IDX, RSRCTABLEADDR, ~0, i);
6510 		bcm_bprintf(b, "[%d]=0x%x ", i, si_corereg(sih, SI_CC_IDX, RSRCUPDWNTIME, 0, 0));
6511 	}
6512 	pmu_chip_reg = (pmu_cap & 0x00001f00);
6513 	pmu_chip_reg = pmu_chip_reg >> 8;
6514 	bcm_bprintf(b, "\nsi_pmu_res dep_mask: ");
6515 	for (i = 0; i < pmu_chip_reg; i++) {
6516 		si_corereg(sih, SI_CC_IDX, RSRCTABLEADDR, ~0, i);
6517 		bcm_bprintf(b, "[%d]=0x%x ", i, si_corereg(sih, SI_CC_IDX, PMU_RES_DEP_MASK, 0, 0));
6518 	}
6519 	bcm_bprintf(b, "\n");
6520 }
6521 
6522 int
6523 si_dump_pcieregs(const si_t *sih, struct bcmstrbuf *b)
6524 {
6525 	const si_info_t *sii = SI_INFO(sih);
6526 
6527 	if (!PCIE_GEN1(sii) && !PCIE_GEN2(sii))
6528 		return BCME_ERROR;
6529 
6530 	return pcicore_dump_pcieregs(sii->pch, b);
6531 }
6532 
6533 #endif /* BCMDBG || BCMDBG_DUMP || WLTEST */
6534 
6535 #if defined(BCMDBG) || defined(BCMDBG_DUMP) || defined(BCMDBG_PHYDUMP)
6536 void
6537 si_dump(const si_t *sih, struct bcmstrbuf *b)
6538 {
6539 	const si_info_t *sii = SI_INFO(sih);
6540 	const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
6541 	uint i;
6542 
6543 	bcm_bprintf(b, "si %p chip 0x%x chiprev 0x%x boardtype 0x%x boardvendor 0x%x bus %d\n",
6544 		OSL_OBFUSCATE_BUF(sii), sih->chip, sih->chiprev,
6545 		sih->boardtype, sih->boardvendor, sih->bustype);
6546 	bcm_bprintf(b, "osh %p curmap %p\n",
6547 		OSL_OBFUSCATE_BUF(sii->osh), OSL_OBFUSCATE_BUF(sii->curmap));
6548 
6549 	if (CHIPTYPE(sih->socitype) == SOCI_SB)
6550 		bcm_bprintf(b, "sonicsrev %d ", sih->socirev);
6551 	bcm_bprintf(b, "ccrev %d buscoretype 0x%x buscorerev %d curidx %d\n",
6552 	            CCREV(sih->ccrev), sih->buscoretype, sih->buscorerev, sii->curidx);
6553 
6554 #ifdef	BCMDBG
6555 	if ((BUSTYPE(sih->bustype) == PCI_BUS) && (sii->pch))
6556 		pcicore_dump(sii->pch, b);
6557 #endif
6558 
6559 	bcm_bprintf(b, "cores:  ");
6560 	for (i = 0; i < sii->numcores; i++)
6561 		bcm_bprintf(b, "0x%x ", cores_info->coreid[i]);
6562 	bcm_bprintf(b, "\n");
6563 }
6564 
6565 void
6566 si_ccreg_dump(si_t *sih, struct bcmstrbuf *b)
6567 {
6568 	const si_info_t *sii = SI_INFO(sih);
6569 	uint origidx;
6570 	uint i;
6571 	bcm_int_bitmask_t intr_val;
6572 	chipcregs_t *cc;
6573 
6574 	/* only support corerev 23 for now */
6575 	if (CCREV(sih->ccrev) != 23)
6576 		return;
6577 
6578 	origidx = sii->curidx;
6579 
6580 	INTR_OFF(sii, &intr_val);
6581 
6582 	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
6583 	ASSERT(cc);
6584 
6585 	bcm_bprintf(b, "\n===cc(rev %d) registers(offset val)===\n", CCREV(sih->ccrev));
6586 	for (i = 0; i <= 0xc4; i += 4) {
6587 		if (i == 0x4c) {
6588 			bcm_bprintf(b, "\n");
6589 			continue;
6590 		}
6591 		bcm_bprintf(b, "0x%x\t0x%x\n", i, *(uint32 *)((uintptr)cc + i));
6592 	}
6593 
6594 	bcm_bprintf(b, "\n");
6595 
6596 	for (i = 0x1e0; i <= 0x1e4; i += 4) {
6597 		bcm_bprintf(b, "0x%x\t0x%x\n", i, *(uint32 *)((uintptr)cc + i));
6598 	}
6599 	bcm_bprintf(b, "\n");
6600 
6601 	if (sih->cccaps & CC_CAP_PMU) {
6602 		for (i = 0x600; i <= 0x660; i += 4) {
6603 			bcm_bprintf(b, "0x%x\t0x%x\n", i, *(uint32 *)((uintptr)cc + i));
6604 		}
6605 	}
6606 	bcm_bprintf(b, "\n");
6607 
6608 	si_setcoreidx(sih, origidx);
6609 	INTR_RESTORE(sii, &intr_val);
6610 }
6611 
6612 /** dump dynamic clock control related registers */
6613 void
6614 si_clkctl_dump(si_t *sih, struct bcmstrbuf *b)
6615 {
6616 	const si_info_t *sii = SI_INFO(sih);
6617 	chipcregs_t *cc;
6618 	uint origidx;
6619 	bcm_int_bitmask_t intr_val;
6620 
6621 	if (!(sih->cccaps & CC_CAP_PWR_CTL))
6622 		return;
6623 
6624 	INTR_OFF(sii, &intr_val);
6625 	origidx = sii->curidx;
6626 	if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL)
6627 		goto done;
6628 
6629 	bcm_bprintf(b, "pll_on_delay 0x%x fref_sel_delay 0x%x ",
6630 		cc->pll_on_delay, cc->fref_sel_delay);
6631 	if ((CCREV(sih->ccrev) >= 6) && (CCREV(sih->ccrev) < 10))
6632 		bcm_bprintf(b, "slow_clk_ctl 0x%x ", cc->slow_clk_ctl);
6633 	if (CCREV(sih->ccrev) >= 10) {
6634 		bcm_bprintf(b, "system_clk_ctl 0x%x ", cc->system_clk_ctl);
6635 		bcm_bprintf(b, "clkstatestretch 0x%x ", cc->clkstatestretch);
6636 	}
6637 
6638 	if (BUSTYPE(sih->bustype) == PCI_BUS)
6639 		bcm_bprintf(b, "gpioout 0x%x gpioouten 0x%x ",
6640 		            OSL_PCI_READ_CONFIG(sii->osh, PCI_GPIO_OUT, sizeof(uint32)),
6641 		            OSL_PCI_READ_CONFIG(sii->osh, PCI_GPIO_OUTEN, sizeof(uint32)));
6642 
6643 	if (sih->cccaps & CC_CAP_PMU) {
6644 		/* dump some PMU register ? */
6645 	}
6646 	bcm_bprintf(b, "\n");
6647 
6648 	si_setcoreidx(sih, origidx);
6649 done:
6650 	INTR_RESTORE(sii, &intr_val);
6651 }
6652 
6653 int
6654 si_gpiodump(si_t *sih, struct bcmstrbuf *b)
6655 {
6656 	const si_info_t *sii = SI_INFO(sih);
6657 	uint origidx;
6658 	bcm_int_bitmask_t intr_val;
6659 	chipcregs_t *cc;
6660 
6661 	INTR_OFF(sii, &intr_val);
6662 
6663 	origidx = si_coreidx(sih);
6664 
6665 	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
6666 	ASSERT(cc);
6667 
6668 	bcm_bprintf(b, "GPIOregs\t");
6669 
6670 	bcm_bprintf(b, "gpioin 0x%x ", R_REG(sii->osh, &cc->gpioin));
6671 	bcm_bprintf(b, "gpioout 0x%x ", R_REG(sii->osh, &cc->gpioout));
6672 	bcm_bprintf(b, "gpioouten 0x%x ", R_REG(sii->osh, &cc->gpioouten));
6673 	bcm_bprintf(b, "gpiocontrol 0x%x ", R_REG(sii->osh, &cc->gpiocontrol));
6674 	bcm_bprintf(b, "gpiointpolarity 0x%x ", R_REG(sii->osh, &cc->gpiointpolarity));
6675 	bcm_bprintf(b, "gpiointmask 0x%x ", R_REG(sii->osh, &cc->gpiointmask));
6676 
6677 	bcm_bprintf(b, "\n");
6678 
6679 	/* restore the original index */
6680 	si_setcoreidx(sih, origidx);
6681 
6682 	INTR_RESTORE(sii, &intr_val);
6683 	return 0;
6684 
6685 }
6686 #endif /* BCMDBG || BCMDBG_DUMP || BCMDBG_PHYDUMP */
6687 
6688 #endif /* !defined(BCMDONGLEHOST) */
6689 
6690 /** change logical "focus" to the gpio core for optimized access */
6691 volatile void *
6692 si_gpiosetcore(si_t *sih)
6693 {
6694 	return (si_setcoreidx(sih, SI_CC_IDX));
6695 }
6696 
6697 /**
6698  * mask & set gpiocontrol bits.
6699  * If a gpiocontrol bit is set to 0, chipcommon controls the corresponding GPIO pin.
6700  * If a gpiocontrol bit is set to 1, the GPIO pin is no longer a GPIO and becomes dedicated
6701  *   to some chip-specific purpose.
6702  */
6703 uint32
6704 BCMPOSTTRAPFN(si_gpiocontrol)(si_t *sih, uint32 mask, uint32 val, uint8 priority)
6705 {
6706 	uint regoff;
6707 
6708 	regoff = 0;
6709 
6710 	/* gpios could be shared on router platforms
6711 	 * ignore reservation if it's high priority (e.g., test apps)
6712 	 */
6713 	if ((priority != GPIO_HI_PRIORITY) &&
6714 	    (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
6715 		mask = priority ? (si_gpioreservation & mask) :
6716 			((si_gpioreservation | mask) & ~(si_gpioreservation));
6717 		val &= mask;
6718 	}
6719 
6720 	regoff = OFFSETOF(chipcregs_t, gpiocontrol);
6721 	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
6722 }
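/*
 * Hypothetical usage sketch: to hand GPIO pin 7 back to chipcommon GPIO control
 * regardless of any reservation, a test utility might call
 *
 *	si_gpiocontrol(sih, 1 << 7, 0, GPIO_HI_PRIORITY);
 *
 * (mask selects the pin; val 0 means chipcommon drives it as a plain GPIO).
 */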
6723 
6724 /** mask&set gpio output enable bits */
6725 uint32
6726 BCMPOSTTRAPFN(si_gpioouten)(si_t *sih, uint32 mask, uint32 val, uint8 priority)
6727 {
6728 	uint regoff;
6729 
6730 	regoff = 0;
6731 
6732 	/* gpios could be shared on router platforms
6733 	 * ignore reservation if it's high priority (e.g., test apps)
6734 	 */
6735 	if ((priority != GPIO_HI_PRIORITY) &&
6736 	    (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
6737 		mask = priority ? (si_gpioreservation & mask) :
6738 			((si_gpioreservation | mask) & ~(si_gpioreservation));
6739 		val &= mask;
6740 	}
6741 
6742 	regoff = OFFSETOF(chipcregs_t, gpioouten);
6743 	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
6744 }
6745 
6746 /** mask&set gpio output bits */
6747 uint32
6748 BCMPOSTTRAPFN(si_gpioout)(si_t *sih, uint32 mask, uint32 val, uint8 priority)
6749 {
6750 	uint regoff;
6751 
6752 	regoff = 0;
6753 
6754 	/* gpios could be shared on router platforms
6755 	 * ignore reservation if it's high priority (e.g., test apps)
6756 	 */
6757 	if ((priority != GPIO_HI_PRIORITY) &&
6758 	    (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
6759 		mask = priority ? (si_gpioreservation & mask) :
6760 			((si_gpioreservation | mask) & ~(si_gpioreservation));
6761 		val &= mask;
6762 	}
6763 
6764 	regoff = OFFSETOF(chipcregs_t, gpioout);
6765 	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
6766 }
6767 
6768 /** reserve one gpio */
6769 uint32
6770 si_gpioreserve(const si_t *sih, uint32 gpio_bitmask, uint8 priority)
6771 {
6772 	/* only cores on SI_BUS share GPIOs and only application users need to
6773 	 * reserve/release GPIO
6774 	 */
6775 	if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) {
6776 		ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority));
6777 		return 0xffffffff;
6778 	}
6779 	/* make sure only one bit is set */
6780 	if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
6781 		ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
6782 		return 0xffffffff;
6783 	}
6784 
6785 	/* already reserved */
6786 	if (si_gpioreservation & gpio_bitmask)
6787 		return 0xffffffff;
6788 	/* set reservation */
6789 	si_gpioreservation |= gpio_bitmask;
6790 
6791 	return si_gpioreservation;
6792 }
6793 
6794 /**
6795  * release one gpio.
6796  *
6797  * releasing the gpio doesn't change the current value on the GPIO; the last written
6798  * value persists until someone overwrites it.
6799  */
6800 uint32
6801 si_gpiorelease(const si_t *sih, uint32 gpio_bitmask, uint8 priority)
6802 {
6803 	/* only cores on SI_BUS share GPIOs and only application users need to
6804 	 * reserve/release GPIO
6805 	 */
6806 	if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) {
6807 		ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority));
6808 		return 0xffffffff;
6809 	}
6810 	/* make sure only one bit is set */
6811 	if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
6812 		ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
6813 		return 0xffffffff;
6814 	}
6815 
6816 	/* already released */
6817 	if (!(si_gpioreservation & gpio_bitmask))
6818 		return 0xffffffff;
6819 
6820 	/* clear reservation */
6821 	si_gpioreservation &= ~gpio_bitmask;
6822 
6823 	return si_gpioreservation;
6824 }
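
/*
 * Usage sketch (illustrative only, compiled out): the reserve/drive/release
 * sequence an application-priority user is expected to follow. As noted in
 * the comment above, releasing the pin does not change its level; the last
 * value written persists. GPIO_APP_PRIORITY is assumed to be the
 * application-priority constant from siutils.h.
 */
#if 0
static void
example_gpio_drive_high(si_t *sih, uint gpio)
{
	uint32 bit = 1u << gpio;

	if (si_gpioreserve(sih, bit, GPIO_APP_PRIORITY) == 0xffffffff)
		return;					/* already reserved or bad mask */
	si_gpioouten(sih, bit, bit, GPIO_APP_PRIORITY);	/* enable output */
	si_gpioout(sih, bit, bit, GPIO_APP_PRIORITY);	/* drive high */
	si_gpiorelease(sih, bit, GPIO_APP_PRIORITY);	/* pin stays high */
}
#endif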
6825 
6826 /* return the current gpioin register value */
6827 uint32
6828 si_gpioin(si_t *sih)
6829 {
6830 	uint regoff;
6831 
6832 	regoff = OFFSETOF(chipcregs_t, gpioin);
6833 	return (si_corereg(sih, SI_CC_IDX, regoff, 0, 0));
6834 }
6835 
6836 /* mask&set gpio interrupt polarity bits */
6837 uint32
6838 si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority)
6839 {
6840 	uint regoff;
6841 
6842 	/* gpios could be shared on router platforms */
6843 	if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
6844 		mask = priority ? (si_gpioreservation & mask) :
6845 			((si_gpioreservation | mask) & ~(si_gpioreservation));
6846 		val &= mask;
6847 	}
6848 
6849 	regoff = OFFSETOF(chipcregs_t, gpiointpolarity);
6850 	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
6851 }
6852 
6853 /* mask&set gpio interrupt mask bits */
6854 uint32
6855 si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority)
6856 {
6857 	uint regoff;
6858 
6859 	/* gpios could be shared on router platforms */
6860 	if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
6861 		mask = priority ? (si_gpioreservation & mask) :
6862 			((si_gpioreservation | mask) & ~(si_gpioreservation));
6863 		val &= mask;
6864 	}
6865 
6866 	regoff = OFFSETOF(chipcregs_t, gpiointmask);
6867 	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
6868 }
6869 
6870 uint32
6871 si_gpioeventintmask(si_t *sih, uint32 mask, uint32 val, uint8 priority)
6872 {
6873 	uint regoff;
6874 	/* gpios could be shared on router platforms */
6875 	if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
6876 		mask = priority ? (si_gpioreservation & mask) :
6877 			((si_gpioreservation | mask) & ~(si_gpioreservation));
6878 		val &= mask;
6879 	}
6880 	regoff = OFFSETOF(chipcregs_t, gpioeventintmask);
6881 	return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
6882 }
6883 
6884 uint32
6885 si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val)
6886 {
6887 	uint offs;
6888 
6889 	if (CCREV(sih->ccrev) < 20)
6890 		return 0xffffffff;
6891 
6892 	offs = (updown ? OFFSETOF(chipcregs_t, gpiopulldown) : OFFSETOF(chipcregs_t, gpiopullup));
6893 	return (si_corereg(sih, SI_CC_IDX, offs, mask, val));
6894 }
6895 
6896 uint32
6897 si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val)
6898 {
6899 	uint offs;
6900 
6901 	if (CCREV(sih->ccrev) < 11)
6902 		return 0xffffffff;
6903 
6904 	if (regtype == GPIO_REGEVT)
6905 		offs = OFFSETOF(chipcregs_t, gpioevent);
6906 	else if (regtype == GPIO_REGEVT_INTMSK)
6907 		offs = OFFSETOF(chipcregs_t, gpioeventintmask);
6908 	else if (regtype == GPIO_REGEVT_INTPOL)
6909 		offs = OFFSETOF(chipcregs_t, gpioeventintpolarity);
6910 	else
6911 		return 0xffffffff;
6912 
6913 	return (si_corereg(sih, SI_CC_IDX, offs, mask, val));
6914 }
6915 
6916 uint32
6917 BCMATTACHFN(si_gpio_int_enable)(si_t *sih, bool enable)
6918 {
6919 	uint offs;
6920 
6921 	if (CCREV(sih->ccrev) < 11)
6922 		return 0xffffffff;
6923 
6924 	offs = OFFSETOF(chipcregs_t, intmask);
6925 	return (si_corereg(sih, SI_CC_IDX, offs, CI_GPIO, (enable ? CI_GPIO : 0)));
6926 }
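
/*
 * Usage sketch (illustrative only, compiled out): arming a GPIO interrupt on
 * one pin with the helpers above. 'gpio' is a hypothetical pin number and
 * GPIO_APP_PRIORITY is assumed to be the application-priority constant from
 * siutils.h; polarity is left at 0 here and is board-specific in practice.
 */
#if 0
static void
example_gpio_int_setup(si_t *sih, uint gpio)
{
	uint32 bit = 1u << gpio;

	si_gpiocontrol(sih, bit, 0, GPIO_APP_PRIORITY);		/* chipcommon owns the pin */
	si_gpioouten(sih, bit, 0, GPIO_APP_PRIORITY);		/* input direction */
	si_gpiointpolarity(sih, bit, 0, GPIO_APP_PRIORITY);	/* default polarity */
	si_gpiointmask(sih, bit, bit, GPIO_APP_PRIORITY);	/* unmask this pin */
	si_gpio_int_enable(sih, TRUE);				/* gate GPIO ints out of chipc */
}
#endif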
6927 
6928 #if !defined(BCMDONGLEHOST)
6929 void
6930 si_gci_shif_config_wake_pin(si_t *sih, uint8 gpio_n, uint8 wake_events,
6931 		bool gci_gpio)
6932 {
6933 	uint8 chipcontrol = 0;
6934 	uint32 gci_wakset;
6935 
6936 	switch (CHIPID(sih->chip)) {
6937 		case BCM4376_CHIP_GRPID :
6938 		case BCM4378_CHIP_GRPID :
6939 			{
6940 				if (!gci_gpio) {
6941 					chipcontrol = (1 << GCI_GPIO_CHIPCTRL_ENAB_EXT_GPIO_BIT);
6942 				}
6943 				chipcontrol |= (1 << GCI_GPIO_CHIPCTRL_PULLUP_BIT);
6944 				chipcontrol |= (1 << GCI_GPIO_CHIPCTRL_INVERT_BIT);
6945 				si_gci_gpio_chipcontrol(sih, gpio_n,
6946 					(chipcontrol | (1 << GCI_GPIO_CHIPCTRL_ENAB_IN_BIT)));
6947 
6948 				/* enable gci gpio int/wake events */
6949 				si_gci_gpio_intmask(sih, gpio_n, wake_events, wake_events);
6950 				si_gci_gpio_wakemask(sih, gpio_n, wake_events, wake_events);
6951 
6952 				/* clear the existing status bits */
6953 				si_gci_gpio_status(sih, gpio_n,
6954 						GCI_GPIO_STS_CLEAR, GCI_GPIO_STS_CLEAR);
6955 
6956 				/* Enable gci2wl_wake for 4378 */
6957 				si_pmu_chipcontrol(sih, PMU_CHIPCTL2,
6958 						CC2_4378_GCI2WAKE_MASK, CC2_4378_GCI2WAKE_MASK);
6959 
6960 				/* enable gci int/wake events */
6961 				gci_wakset = (GCI_INTSTATUS_GPIOWAKE) | (GCI_INTSTATUS_GPIOINT);
6962 
6963 				si_gci_indirect(sih, 0,	GCI_OFFSETOF(sih, gci_intmask),
6964 						gci_wakset, gci_wakset);
6965 				/* Enable wake on GciWake */
6966 				si_gci_indirect(sih, 0,	GCI_OFFSETOF(sih, gci_wakemask),
6967 						gci_wakset, gci_wakset);
6968 				break;
6969 			}
6970 		case BCM4385_CHIP_GRPID :
6971 		case BCM4387_CHIP_GRPID :
6972 			{
6973 				if (!gci_gpio) {
6974 					chipcontrol = (1 << GCI_GPIO_CHIPCTRL_ENAB_EXT_GPIO_BIT);
6975 				}
6976 				chipcontrol |= (1 << GCI_GPIO_CHIPCTRL_PULLUP_BIT);
6977 				chipcontrol |= (1 << GCI_GPIO_CHIPCTRL_INVERT_BIT);
6978 				si_gci_gpio_chipcontrol(sih, gpio_n,
6979 					(chipcontrol | (1 << GCI_GPIO_CHIPCTRL_ENAB_IN_BIT)));
6980 
6981 				/* enable gci gpio int/wake events */
6982 				si_gci_gpio_intmask(sih, gpio_n, wake_events, wake_events);
6983 				si_gci_gpio_wakemask(sih, gpio_n, wake_events, wake_events);
6984 
6985 				/* clear the existing status bits */
6986 				si_gci_gpio_status(sih, gpio_n,
6987 						GCI_GPIO_STS_CLEAR, GCI_GPIO_STS_CLEAR);
6988 
6989 				/* Enable gci2wl_wake for 4387 */
6990 				si_pmu_chipcontrol(sih, PMU_CHIPCTL2,
6991 						CC2_4387_GCI2WAKE_MASK, CC2_4387_GCI2WAKE_MASK);
6992 
6993 				/* enable gci int/wake events */
6994 				gci_wakset = (GCI_INTSTATUS_GPIOWAKE) | (GCI_INTSTATUS_GPIOINT);
6995 
6996 				si_gci_indirect(sih, 0,	GCI_OFFSETOF(sih, gci_intmask),
6997 						gci_wakset, gci_wakset);
6998 				/* Enable wake on GciWake */
6999 				si_gci_indirect(sih, 0,	GCI_OFFSETOF(sih, gci_wakemask),
7000 						gci_wakset, gci_wakset);
7001 				break;
7002 			}
7003 		default:;
7004 	}
7005 }
7006 
7007 void
7008 si_shif_int_enable(si_t *sih, uint8 gpio_n, uint8 wake_events, bool enable)
7009 {
7010 	if (enable) {
7011 		si_gci_gpio_intmask(sih, gpio_n, wake_events, wake_events);
7012 		si_gci_gpio_wakemask(sih, gpio_n, wake_events, wake_events);
7013 	} else {
7014 		si_gci_gpio_intmask(sih, gpio_n, wake_events, 0);
7015 		si_gci_gpio_wakemask(sih, gpio_n, wake_events, 0);
7016 	}
7017 }
7018 #endif /* !defined(BCMDONGLEHOST) */
7019 
7020 /** Return the size of the specified SYSMEM bank */
7021 static uint
7022 sysmem_banksize(const si_info_t *sii, sysmemregs_t *regs, uint8 idx)
7023 {
7024 	uint banksize, bankinfo;
7025 	uint bankidx = idx;
7026 
7027 	W_REG(sii->osh, &regs->bankidx, bankidx);
7028 	bankinfo = R_REG(sii->osh, &regs->bankinfo);
7029 	banksize = SYSMEM_BANKINFO_SZBASE * ((bankinfo & SYSMEM_BANKINFO_SZMASK) + 1);
7030 	return banksize;
7031 }
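
/*
 * Worked example (illustrative only, compiled out): the size encoding above
 * is linear in the bankinfo size field. The numeric constants below are
 * assumptions made for illustration; the authoritative values are the
 * SYSMEM_BANKINFO_* definitions in sbsysmem.h used by the real code.
 */
#if 0
static uint
example_sysmem_banksize(uint bankinfo)
{
	const uint szbase = 8192;	/* assumed bytes per size unit */
	const uint szmask = 0x7f;	/* assumed size-field mask */

	/* e.g. a size field of 0x0f gives 8192 * 16 = 128 KB */
	return szbase * ((bankinfo & szmask) + 1);
}
#endif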
7032 
7033 /** Return the RAM size of the SYSMEM core */
7034 uint32
7035 si_sysmem_size(si_t *sih)
7036 {
7037 	const si_info_t *sii = SI_INFO(sih);
7038 	uint origidx;
7039 	bcm_int_bitmask_t intr_val;
7040 
7041 	sysmemregs_t *regs;
7042 	bool wasup;
7043 	uint32 coreinfo;
7044 	uint memsize = 0;
7045 	uint8 i;
7046 	uint nb, nrb;
7047 
7048 	/* Block ints and save current core */
7049 	INTR_OFF(sii, &intr_val);
7050 	origidx = si_coreidx(sih);
7051 
7052 	/* Switch to SYSMEM core */
7053 	if (!(regs = si_setcore(sih, SYSMEM_CORE_ID, 0)))
7054 		goto done;
7055 
7056 	/* Get info for determining size */
7057 	if (!(wasup = si_iscoreup(sih)))
7058 		si_core_reset(sih, 0, 0);
7059 	coreinfo = R_REG(sii->osh, &regs->coreinfo);
7060 
7061 	/* Number of ROM banks; SW needs to skip the ROM banks. */
7062 	if (si_corerev(sih) < 12) {
7063 		nrb = (coreinfo & SYSMEM_SRCI_ROMNB_MASK) >> SYSMEM_SRCI_ROMNB_SHIFT;
7064 		nb = (coreinfo & SYSMEM_SRCI_SRNB_MASK) >> SYSMEM_SRCI_SRNB_SHIFT;
7065 	} else {
7066 		nrb = (coreinfo & SYSMEM_SRCI_NEW_ROMNB_MASK) >> SYSMEM_SRCI_NEW_ROMNB_SHIFT;
7067 		nb = (coreinfo & SYSMEM_SRCI_NEW_SRNB_MASK) >> SYSMEM_SRCI_NEW_SRNB_SHIFT;
7068 	}
7069 	for (i = 0; i < nb; i++)
7070 		memsize += sysmem_banksize(sii, regs, i + nrb);
7071 
7072 	si_setcoreidx(sih, origidx);
7073 
7074 done:
7075 	INTR_RESTORE(sii, &intr_val);
7076 
7077 	return memsize;
7078 }
7079 
7080 /** Return the size of the specified SOCRAM bank */
7081 static uint
7082 socram_banksize(const si_info_t *sii, sbsocramregs_t *regs, uint8 idx, uint8 mem_type)
7083 {
7084 	uint banksize, bankinfo;
7085 	uint bankidx = idx | (mem_type << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
7086 
7087 	ASSERT(mem_type <= SOCRAM_MEMTYPE_DEVRAM);
7088 
7089 	W_REG(sii->osh, &regs->bankidx, bankidx);
7090 	bankinfo = R_REG(sii->osh, &regs->bankinfo);
7091 	banksize = SOCRAM_BANKINFO_SZBASE * ((bankinfo & SOCRAM_BANKINFO_SZMASK) + 1);
7092 	return banksize;
7093 }
7094 
7095 void si_socram_set_bankpda(si_t *sih, uint32 bankidx, uint32 bankpda)
7096 {
7097 	const si_info_t *sii = SI_INFO(sih);
7098 	uint origidx;
7099 	bcm_int_bitmask_t intr_val;
7100 	sbsocramregs_t *regs;
7101 	bool wasup;
7102 	uint corerev;
7103 
7104 	/* Block ints and save current core */
7105 	INTR_OFF(sii, &intr_val);
7106 	origidx = si_coreidx(sih);
7107 
7108 	/* Switch to SOCRAM core */
7109 	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
7110 		goto done;
7111 
7112 	if (!(wasup = si_iscoreup(sih)))
7113 		si_core_reset(sih, 0, 0);
7114 
7115 	corerev = si_corerev(sih);
7116 	if (corerev >= 16) {
7117 		W_REG(sii->osh, &regs->bankidx, bankidx);
7118 		W_REG(sii->osh, &regs->bankpda, bankpda);
7119 	}
7120 
7121 	/* Return to previous state and core */
7122 	if (!wasup)
7123 		si_core_disable(sih, 0);
7124 	si_setcoreidx(sih, origidx);
7125 
7126 done:
7127 	INTR_RESTORE(sii, &intr_val);
7128 }
7129 
7130 /** Return the RAM size of the SOCRAM core */
7131 uint32
7132 si_socram_size(si_t *sih)
7133 {
7134 	const si_info_t *sii = SI_INFO(sih);
7135 	uint origidx;
7136 	bcm_int_bitmask_t intr_val;
7137 
7138 	sbsocramregs_t *regs;
7139 	bool wasup;
7140 	uint corerev;
7141 	uint32 coreinfo;
7142 	uint memsize = 0;
7143 
7144 	/* Block ints and save current core */
7145 	INTR_OFF(sii, &intr_val);
7146 	origidx = si_coreidx(sih);
7147 
7148 	/* Switch to SOCRAM core */
7149 	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
7150 		goto done;
7151 
7152 	/* Get info for determining size */
7153 	if (!(wasup = si_iscoreup(sih)))
7154 		si_core_reset(sih, 0, 0);
7155 	corerev = si_corerev(sih);
7156 	coreinfo = R_REG(sii->osh, &regs->coreinfo);
7157 
7158 	/* Calculate size from coreinfo based on rev */
7159 	if (corerev == 0)
7160 		memsize = 1 << (16 + (coreinfo & SRCI_MS0_MASK));
7161 	else if (corerev < 3) {
7162 		memsize = 1 << (SR_BSZ_BASE + (coreinfo & SRCI_SRBSZ_MASK));
7163 		memsize *= (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
7164 	} else if ((corerev <= 7) || (corerev == 12)) {
7165 		uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
7166 		uint bsz = (coreinfo & SRCI_SRBSZ_MASK);
7167 		uint lss = (coreinfo & SRCI_LSS_MASK) >> SRCI_LSS_SHIFT;
7168 		if (lss != 0)
7169 			nb --;
7170 		memsize = nb * (1 << (bsz + SR_BSZ_BASE));
7171 		if (lss != 0)
7172 			memsize += (1 << ((lss - 1) + SR_BSZ_BASE));
7173 	} else {
7174 		uint8 i;
7175 		uint nb;
7176 		/* length of SRAM Banks increased for corerev greater than 23 */
7177 		if (corerev >= 23) {
7178 			nb = (coreinfo & (SRCI_SRNB_MASK | SRCI_SRNB_MASK_EXT)) >> SRCI_SRNB_SHIFT;
7179 		} else {
7180 			nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
7181 		}
7182 		for (i = 0; i < nb; i++)
7183 			memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_RAM);
7184 	}
7185 
7186 	/* Return to previous state and core */
7187 	if (!wasup)
7188 		si_core_disable(sih, 0);
7189 	si_setcoreidx(sih, origidx);
7190 
7191 done:
7192 	INTR_RESTORE(sii, &intr_val);
7193 
7194 	return memsize;
7195 }
7196 
7197 /* Return true if bus MPU is present */
7198 bool
7199 si_is_bus_mpu_present(si_t *sih)
7200 {
7201 	uint origidx, newidx = NODEV_CORE_ID;
7202 	sysmemregs_t *sysmemregs = NULL;
7203 	cr4regs_t *cr4regs;
7204 	const si_info_t *sii = SI_INFO(sih);
7205 	uint ret = 0;
7206 	bool wasup;
7207 
7208 	origidx = si_coreidx(sih);
7209 
7210 	cr4regs = si_setcore(sih, ARMCR4_CORE_ID, 0);
7211 	if (cr4regs) {
7212 		/* ARMCR4 */
7213 		newidx = ARMCR4_CORE_ID;
7214 	} else {
7215 		sysmemregs = si_setcore(sih, SYSMEM_CORE_ID, 0);
7216 		if (sysmemregs) {
7217 			/* ARMCA7 */
7218 			newidx = SYSMEM_CORE_ID;
7219 		}
7220 	}
7221 
7222 	if (newidx != NODEV_CORE_ID) {
7223 		if (!(wasup = si_iscoreup(sih))) {
7224 			si_core_reset(sih, 0, 0);
7225 		}
7226 		if (newidx == ARMCR4_CORE_ID) {
7227 			/* ARMCR4 */
7228 			ret = R_REG(sii->osh, &cr4regs->corecapabilities) & CAP_MPU_MASK;
7229 		} else {
7230 			/* ARMCA7 */
7231 			ret = R_REG(sii->osh, &sysmemregs->mpucapabilities) &
7232 				ACC_MPU_REGION_CNT_MASK;
7233 		}
7234 		if (!wasup) {
7235 			si_core_disable(sih, 0);
7236 		}
7237 	}
7238 
7239 	si_setcoreidx(sih, origidx);
7240 
7241 	return ret ? TRUE : FALSE;
7242 }
7243 
7244 #if defined(BCMDONGLEHOST)
7245 
7246 /** Return the TCM-RAM size of the ARMCR4 core. */
7247 uint32
7248 si_tcm_size(si_t *sih)
7249 {
7250 	const si_info_t *sii = SI_INFO(sih);
7251 	uint origidx;
7252 	bcm_int_bitmask_t intr_val;
7253 	volatile uint8 *regs;
7254 	bool wasup;
7255 	uint32 corecap;
7256 	uint memsize = 0;
7257 	uint banku_size = 0;
7258 	uint32 nab = 0;
7259 	uint32 nbb = 0;
7260 	uint32 totb = 0;
7261 	uint32 bxinfo = 0;
7262 	uint32 idx = 0;
7263 	volatile uint32 *arm_cap_reg;
7264 	volatile uint32 *arm_bidx;
7265 	volatile uint32 *arm_binfo;
7266 
7267 	/* Block ints and save current core */
7268 	INTR_OFF(sii, &intr_val);
7269 	origidx = si_coreidx(sih);
7270 
7271 	/* Switch to CR4 core */
7272 	if (!(regs = si_setcore(sih, ARMCR4_CORE_ID, 0)))
7273 		goto done;
7274 
7275 	/* Get info for determining size. If in reset, come out of reset,
7276 	 * but remain in halt
7277 	 */
7278 	if (!(wasup = si_iscoreup(sih)))
7279 		si_core_reset(sih, SICF_CPUHALT, SICF_CPUHALT);
7280 
7281 	arm_cap_reg = (volatile uint32 *)(regs + SI_CR4_CAP);
7282 	corecap = R_REG(sii->osh, arm_cap_reg);
7283 
7284 	nab = (corecap & ARMCR4_TCBANB_MASK) >> ARMCR4_TCBANB_SHIFT;
7285 	nbb = (corecap & ARMCR4_TCBBNB_MASK) >> ARMCR4_TCBBNB_SHIFT;
7286 	totb = nab + nbb;
7287 
7288 	arm_bidx = (volatile uint32 *)(regs + SI_CR4_BANKIDX);
7289 	arm_binfo = (volatile uint32 *)(regs + SI_CR4_BANKINFO);
7290 	for (idx = 0; idx < totb; idx++) {
7291 		W_REG(sii->osh, arm_bidx, idx);
7292 
7293 		bxinfo = R_REG(sii->osh, arm_binfo);
7294 		if (bxinfo & ARMCR4_BUNITSZ_MASK) {
7295 			banku_size = ARMCR4_BSZ_1K;
7296 		} else {
7297 			banku_size = ARMCR4_BSZ_8K;
7298 		}
7299 		memsize += ((bxinfo & ARMCR4_BSZ_MASK) + 1) * banku_size;
7300 	}
7301 
7302 	/* Return to previous state and core */
7303 	if (!wasup)
7304 		si_core_disable(sih, 0);
7305 	si_setcoreidx(sih, origidx);
7306 
7307 done:
7308 	INTR_RESTORE(sii, &intr_val);
7309 
7310 	return memsize;
7311 }
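
/*
 * Worked example (illustrative only, compiled out): each ARMCR4 TCM bank
 * reports its size as (count + 1) units, where the unit is 1 KB or 8 KB
 * depending on the bank-unit-size flag. The mask and bit values below are
 * assumptions for illustration; the real ones are the ARMCR4_* definitions
 * used in si_tcm_size() above.
 */
#if 0
static uint
example_cr4_bank_bytes(uint32 bxinfo)
{
	const uint32 count_mask = 0x3f;				/* assumed bank-count mask */
	const uint unit = (bxinfo & 0x40) ? 1024 : 8192;	/* assumed unit-size flag */

	/* e.g. count field 0x3f with 8 KB units -> 64 * 8 KB = 512 KB */
	return ((bxinfo & count_mask) + 1) * unit;
}
#endif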
7312 
7313 bool
7314 si_has_flops(si_t *sih)
7315 {
7316 	uint origidx, cr4_rev;
7317 
7318 	/* Find out CR4 core revision */
7319 	origidx = si_coreidx(sih);
7320 	if (si_setcore(sih, ARMCR4_CORE_ID, 0)) {
7321 		cr4_rev = si_corerev(sih);
7322 		si_setcoreidx(sih, origidx);
7323 
7324 		if (cr4_rev == 1 || cr4_rev >= 3)
7325 			return TRUE;
7326 	}
7327 	return FALSE;
7328 }
7329 #endif /* BCMDONGLEHOST */
7330 
7331 uint32
7332 si_socram_srmem_size(si_t *sih)
7333 {
7334 	const si_info_t *sii = SI_INFO(sih);
7335 	uint origidx;
7336 	bcm_int_bitmask_t intr_val;
7337 
7338 	sbsocramregs_t *regs;
7339 	bool wasup;
7340 	uint corerev;
7341 	uint32 coreinfo;
7342 	uint memsize = 0;
7343 
7344 	/* Block ints and save current core */
7345 	INTR_OFF(sii, &intr_val);
7346 	origidx = si_coreidx(sih);
7347 
7348 	/* Switch to SOCRAM core */
7349 	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
7350 		goto done;
7351 
7352 	/* Get info for determining size */
7353 	if (!(wasup = si_iscoreup(sih)))
7354 		si_core_reset(sih, 0, 0);
7355 	corerev = si_corerev(sih);
7356 	coreinfo = R_REG(sii->osh, &regs->coreinfo);
7357 
7358 	/* Calculate size from coreinfo based on rev */
7359 	if (corerev >= 16) {
7360 		uint8 i;
7361 		uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
7362 		for (i = 0; i < nb; i++) {
7363 			W_REG(sii->osh, &regs->bankidx, i);
7364 			if (R_REG(sii->osh, &regs->bankinfo) & SOCRAM_BANKINFO_RETNTRAM_MASK)
7365 				memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_RAM);
7366 		}
7367 	}
7368 
7369 	/* Return to previous state and core */
7370 	if (!wasup)
7371 		si_core_disable(sih, 0);
7372 	si_setcoreidx(sih, origidx);
7373 
7374 done:
7375 	INTR_RESTORE(sii, &intr_val);
7376 
7377 	return memsize;
7378 }
7379 
7380 #if !defined(BCMDONGLEHOST)
7381 static bool
7382 BCMPOSTTRAPFN(si_seci_uart)(const si_t *sih)
7383 {
7384 	return (sih->cccaps_ext & CC_CAP_EXT_SECI_PUART_PRESENT);
7385 }
7386 
7387 /** seci clock enable/disable */
7388 static void
7389 BCMPOSTTRAPFN(si_seci_clkreq)(si_t *sih, bool enable)
7390 {
7391 	uint32 clk_ctl_st;
7392 	uint32 offset;
7393 	uint32 val;
7394 	pmuregs_t *pmu;
7395 	uint32 origidx = 0;
7396 	const si_info_t *sii = SI_INFO(sih);
7397 #ifdef SECI_UART
7398 	bool fast;
7399 	chipcregs_t *cc = seci_set_core(sih, &origidx, &fast);
7400 	ASSERT(cc);
7401 #endif /* SECI_UART */
7402 	if (!si_seci(sih) && !si_seci_uart(sih))
7403 		return;
7404 	offset = OFFSETOF(chipcregs_t, clk_ctl_st);
7405 	clk_ctl_st = si_corereg(sih, 0, offset, 0, 0);
7406 
7407 	if (enable && !(clk_ctl_st & CLKCTL_STS_SECI_CLK_REQ)) {
7408 		val = CLKCTL_STS_SECI_CLK_REQ | CLKCTL_STS_HT_AVAIL_REQ;
7409 #ifdef SECI_UART
7410 		/* Restore the fast UART function select when enabling */
7411 		if (fast_uart_init) {
7412 			si_gci_set_functionsel(sih, fast_uart_tx, fast_uart_functionsel);
7413 			if (fuart_pullup_rx_cts_enab) {
7414 				si_gci_set_functionsel(sih, fast_uart_rx, fast_uart_functionsel);
7415 				si_gci_set_functionsel(sih, fast_uart_cts_in,
7416 					fast_uart_functionsel);
7417 			}
7418 		}
7419 #endif /* SECI_UART */
7420 	} else if (!enable && (clk_ctl_st & CLKCTL_STS_SECI_CLK_REQ)) {
7421 		val = 0;
7422 #ifdef SECI_UART
7423 		if (force_seci_clk) {
7424 			return;
7425 		}
7426 #endif /* SECI_UART */
7427 	} else {
7428 		return;
7429 	}
7430 #ifdef SECI_UART
7431 	/* park the fast UART as PULL UP when disabling the clocks to avoid sending
7432 	 * breaks to the host
7433 	 */
7434 	if (!enable && fast_uart_init) {
7435 		si_gci_set_functionsel(sih, fast_uart_tx, fast_uart_pup);
7436 		if (fuart_pullup_rx_cts_enab) {
7437 			W_REG(sii->osh, &cc->SECI_status, SECI_STAT_BI);
7438 			si_gci_set_functionsel(sih, fast_uart_rx, fast_uart_pup);
7439 			si_gci_set_functionsel(sih, fast_uart_cts_in, fast_uart_pup);
7440 			SPINWAIT(!(R_REG(sii->osh, &cc->SECI_status) & SECI_STAT_BI), 1000);
7441 		}
7442 	}
7443 #endif /* SECI_UART */
7444 
7445 	/* Setting/clearing bit 4 along with bit 8 of the 0x1e0 block: the core requests that
7446 	 * the PMU set the device state such that the HT clock will be available on short notice.
7447 	 */
7448 	si_corereg(sih, SI_CC_IDX, offset,
7449 		CLKCTL_STS_SECI_CLK_REQ | CLKCTL_STS_HT_AVAIL_REQ, val);
7450 
7451 	if (!enable)
7452 		return;
7453 #ifndef SECI_UART
7454 	/* Remember original core before switch to chipc/pmu */
7455 	origidx = si_coreidx(sih);
7456 #endif
7457 
7458 	if (AOB_ENAB(sih)) {
7459 		pmu = si_setcore(sih, PMU_CORE_ID, 0);
7460 	} else {
7461 		pmu = si_setcoreidx(sih, SI_CC_IDX);
7462 	}
7463 	ASSERT(pmu != NULL);
7464 	(void)si_pmu_wait_for_steady_state(sih, sii->osh, pmu);
7465 	/* Return to original core */
7466 	si_setcoreidx(sih, origidx);
7467 
7468 	SPINWAIT(!(si_corereg(sih, 0, offset, 0, 0) & CLKCTL_STS_SECI_CLK_AVAIL),
7469 	        PMU_MAX_TRANSITION_DLY);
7470 
7471 	clk_ctl_st = si_corereg(sih, 0, offset, 0, 0);
7472 	if (enable) {
7473 		if (!(clk_ctl_st & CLKCTL_STS_SECI_CLK_AVAIL)) {
7474 			SI_ERROR(("SECI clock is not available\n"));
7475 			ASSERT(0);
7476 			return;
7477 		}
7478 	}
7479 }
7480 
7481 #if defined(BCMECICOEX) || defined(SECI_UART)
7482 static chipcregs_t *
7483 BCMPOSTTRAPFN(seci_set_core)(si_t *sih, uint32 *origidx, bool *fast)
7484 {
7485 	chipcregs_t *cc;
7486 	const si_info_t *sii = SI_INFO(sih);
7487 	*fast = SI_FAST(sii);
7488 
7489 	if (!*fast) {
7490 		*origidx = sii->curidx;
7491 		cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
7492 	} else {
7493 		*origidx = 0;
7494 		cc = (chipcregs_t *)CCREGS_FAST(sii);
7495 	}
7496 	return cc;
7497 }
7498 
7499 static chipcregs_t *
7500 BCMPOSTTRAPFN(si_seci_access_preamble)(si_t *sih, const si_info_t *sii, uint32 *origidx, bool *fast)
7501 {
7502 	chipcregs_t *cc = seci_set_core(sih, origidx, fast);
7503 
7504 	if (cc) {
7505 		if (((R_REG(sii->osh, &cc->clk_ctl_st) & CCS_SECICLKREQ) != CCS_SECICLKREQ)) {
7506 			/* enable SECI clock */
7507 			si_seci_clkreq(sih, TRUE);
7508 		}
7509 	}
7510 	return cc;
7511 }
7512 #endif /* BCMECICOEX||SECI_UART */
7513 #ifdef SECI_UART
7514 
7515 uint32
7516 BCMPOSTTRAPFN(si_seci_access)(si_t *sih, uint32 val, int access)
7517 {
7518 	uint32 origidx;
7519 	bool fast;
7520 	const si_info_t *sii = SI_INFO(sih);
7521 	chipcregs_t *cc;
7522 	bcm_int_bitmask_t intr_val;
7523 	uint32 offset, retval = 1;
7524 
7525 	if (!si_seci_uart(sih))
7526 		return 0;
7527 
7528 	INTR_OFF(sii, &intr_val);
7529 	if (!(cc = si_seci_access_preamble(sih, sii, &origidx, &fast)))
7530 		goto exit;
7531 
7532 	switch (access) {
7533 	case SECI_ACCESS_STATUSMASK_SET:
7534 		offset = OFFSETOF(chipcregs_t, SECI_statusmask);
7535 		retval = si_corereg(sih, SI_CC_IDX, offset, ALLONES_32, val);
7536 		break;
7537 	case SECI_ACCESS_STATUSMASK_GET:
7538 		offset = OFFSETOF(chipcregs_t, SECI_statusmask);
7539 		retval = si_corereg(sih, SI_CC_IDX, offset, 0, 0);
7540 		break;
7541 	case SECI_ACCESS_INTRS:
7542 		offset = OFFSETOF(chipcregs_t, SECI_status);
7543 		retval = si_corereg(sih, SI_CC_IDX, offset,
7544 		                    ALLONES_32, ALLONES_32);
7545 		break;
7546 	case SECI_ACCESS_UART_CTS:
7547 		offset = OFFSETOF(chipcregs_t, seci_uart_msr);
7548 		retval = si_corereg(sih, SI_CC_IDX, offset, 0, 0);
7549 		retval = retval & SECI_UART_MSR_CTS_STATE;
7550 		break;
7551 	case SECI_ACCESS_UART_RTS:
7552 		offset = OFFSETOF(chipcregs_t, seci_uart_mcr);
7553 		if (val) {
7554 			/* clear forced flow control; enable auto rts */
7555 			retval = si_corereg(sih, SI_CC_IDX, offset,
7556 			           SECI_UART_MCR_PRTS |  SECI_UART_MCR_AUTO_RTS,
7557 			           SECI_UART_MCR_AUTO_RTS);
7558 		} else {
7559 			/* set forced flow control; clear auto rts */
7560 			retval = si_corereg(sih, SI_CC_IDX, offset,
7561 			           SECI_UART_MCR_PRTS |  SECI_UART_MCR_AUTO_RTS,
7562 			           SECI_UART_MCR_PRTS);
7563 		}
7564 		break;
7565 	case SECI_ACCESS_UART_RXEMPTY:
7566 		offset = OFFSETOF(chipcregs_t, SECI_status);
7567 		retval = si_corereg(sih, SI_CC_IDX, offset, 0, 0);
7568 		retval = (retval & SECI_STAT_SRFE) == SECI_STAT_SRFE;
7569 		break;
7570 	case SECI_ACCESS_UART_GETC:
7571 		/* assumes caller checked for nonempty rx FIFO */
7572 		offset = OFFSETOF(chipcregs_t, seci_uart_data);
7573 		retval = si_corereg(sih, SI_CC_IDX, offset, 0, 0) & 0xff;
7574 		break;
7575 	case SECI_ACCESS_UART_TXFULL:
7576 		offset = OFFSETOF(chipcregs_t, SECI_status);
7577 		retval = si_corereg(sih, SI_CC_IDX, offset, 0, 0);
7578 		retval = (retval & SECI_STAT_STFF) == SECI_STAT_STFF;
7579 		break;
7580 	case SECI_ACCESS_UART_PUTC:
7581 		/* This register must not do a RMW otherwise it will affect the RX FIFO */
7582 		W_REG(sii->osh, &cc->seci_uart_data, (uint32)(val & 0xff));
7583 		retval = 0;
7584 		break;
7585 	default:
7586 		ASSERT(0);
7587 	}
7588 
7589 exit:
7590 	/* restore previous core */
7591 	if (!fast)
7592 		si_setcoreidx(sih, origidx);
7593 
7594 	INTR_RESTORE(sii, &intr_val);
7595 
7596 	return retval;
7597 }
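
/*
 * Usage sketch (illustrative only, compiled out): a typical polled echo loop
 * built on si_seci_access(). Note the ordering the accessor expects: check
 * RXEMPTY before GETC and TXFULL before PUTC, since GETC assumes a non-empty
 * RX FIFO.
 */
#if 0
static void
example_seci_uart_echo(si_t *sih)
{
	while (!si_seci_access(sih, 0, SECI_ACCESS_UART_RXEMPTY)) {
		uint32 c = si_seci_access(sih, 0, SECI_ACCESS_UART_GETC);

		if (!si_seci_access(sih, 0, SECI_ACCESS_UART_TXFULL))
			(void)si_seci_access(sih, c, SECI_ACCESS_UART_PUTC);
	}
}
#endif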
7598 
7599 void si_seci_clk_force(si_t *sih, bool val)
7600 {
7601 	force_seci_clk = val;
7602 	if (force_seci_clk) {
7603 		si_seci_clkreq(sih, TRUE);
7604 	} else {
7605 		si_seci_down(sih);
7606 	}
7607 }
7608 
7609 bool si_seci_clk_force_status(si_t *sih)
7610 {
7611 	return force_seci_clk;
7612 }
7613 #endif /* SECI_UART */
7614 
7615 /** SECI Init routine, pass in seci_mode */
7616 volatile void *
7617 BCMINITFN(si_seci_init)(si_t *sih, uint8  seci_mode)
7618 {
7619 	uint32 origidx = 0;
7620 	uint32 offset;
7621 	const si_info_t *sii;
7622 	volatile void *ptr;
7623 	chipcregs_t *cc;
7624 	bool fast;
7625 	uint32 seci_conf;
7626 
7627 	if (sih->ccrev < 35)
7628 		return NULL;
7629 
7630 #ifdef SECI_UART
7631 	if (seci_mode == SECI_MODE_UART) {
7632 		if (!si_seci_uart(sih))
7633 			return NULL;
7634 	}
7635 	else {
7636 #endif /* SECI_UART */
7637 	if (!si_seci(sih))
7638 		return NULL;
7639 #ifdef SECI_UART
7640 	}
7641 #endif /* SECI_UART */
7642 
7643 	if (seci_mode > SECI_MODE_MASK)
7644 		return NULL;
7645 
7646 	sii = SI_INFO(sih);
7647 	fast = SI_FAST(sii);
7648 	if (!fast) {
7649 		origidx = sii->curidx;
7650 		if ((ptr = si_setcore(sih, CC_CORE_ID, 0)) == NULL)
7651 			return NULL;
7652 	} else if ((ptr = CCREGS_FAST(sii)) == NULL)
7653 		return NULL;
7654 	cc = (chipcregs_t *)ptr;
7655 	ASSERT(cc);
7656 
7657 	/* enable SECI clock */
7658 	if (seci_mode != SECI_MODE_LEGACY_3WIRE_WLAN)
7659 		si_seci_clkreq(sih, TRUE);
7660 
7661 	/* put the SECI in reset */
7662 	seci_conf = R_REG(sii->osh, &cc->SECI_config);
7663 	seci_conf &= ~SECI_ENAB_SECI_ECI;
7664 	W_REG(sii->osh, &cc->SECI_config, seci_conf);
7665 	seci_conf = SECI_RESET;
7666 	W_REG(sii->osh, &cc->SECI_config, seci_conf);
7667 
7668 	/* set force-low, and set EN_SECI for all non-legacy modes */
7669 	seci_conf |= SECI_ENAB_SECIOUT_DIS;
7670 	if ((seci_mode == SECI_MODE_UART) || (seci_mode == SECI_MODE_SECI) ||
7671 	    (seci_mode == SECI_MODE_HALF_SECI))
7672 	{
7673 		seci_conf |= SECI_ENAB_SECI_ECI;
7674 	}
7675 	W_REG(sii->osh, &cc->SECI_config, seci_conf);
7676 
7677 	if (seci_mode != SECI_MODE_LEGACY_3WIRE_WLAN) {
7678 		/* take seci out of reset */
7679 		seci_conf = R_REG(sii->osh, &cc->SECI_config);
7680 		seci_conf &= ~(SECI_RESET);
7681 		W_REG(sii->osh, &cc->SECI_config, seci_conf);
7682 	}
7683 	/* set UART/SECI baud rate */
7684 	/* hard-coded at 4MBaud for now */
7685 	if ((seci_mode == SECI_MODE_UART) || (seci_mode == SECI_MODE_SECI) ||
7686 	    (seci_mode == SECI_MODE_HALF_SECI)) {
7687 		offset = OFFSETOF(chipcregs_t, seci_uart_bauddiv);
7688 		si_corereg(sih, SI_CC_IDX, offset, 0xFF, 0xFF); /* 4MBaud */
7689 		if ((CHIPID(sih->chip) == BCM4360_CHIP_ID) ||
7690 			(CHIPID(sih->chip) == BCM43460_CHIP_ID) ||
7691 			(CHIPID(sih->chip) == BCM43526_CHIP_ID) ||
7692 			(CHIPID(sih->chip) == BCM4352_CHIP_ID)) {
7693 			/* MAC clk is 160MHz */
7694 			offset = OFFSETOF(chipcregs_t, seci_uart_bauddiv);
7695 			si_corereg(sih, SI_CC_IDX, offset, 0xFF, 0xFE);
7696 			offset = OFFSETOF(chipcregs_t, seci_uart_baudadj);
7697 			si_corereg(sih, SI_CC_IDX, offset, 0xFF, 0x44);
7698 			offset = OFFSETOF(chipcregs_t, seci_uart_mcr);
7699 			si_corereg(sih, SI_CC_IDX, offset,
7700 				0xFF, SECI_UART_MCR_BAUD_ADJ_EN); /* 0x81 */
7701 		}
7702 #ifdef SECI_UART
7703 		else if (CCREV(sih->ccrev) >= 62) {
7704 			/* rx FIFO level at which an interrupt is generated */
7705 			offset = OFFSETOF(chipcregs_t, eci.ge35.eci_uartfifolevel);
7706 			si_corereg(sih, SI_CC_IDX, offset, 0xff, 0x01);
7707 			offset = OFFSETOF(chipcregs_t, seci_uart_mcr);
7708 			si_corereg(sih, SI_CC_IDX, offset, SECI_UART_MCR_AUTO_RTS,
7709 				SECI_UART_MCR_AUTO_RTS);
7710 		}
7711 #endif /* SECI_UART */
7712 		else {
7713 			/* 4336 MAC clk is 80MHz */
7714 			offset = OFFSETOF(chipcregs_t, seci_uart_baudadj);
7715 			si_corereg(sih, SI_CC_IDX, offset, 0xFF, 0x22);
7716 			offset = OFFSETOF(chipcregs_t, seci_uart_mcr);
7717 			si_corereg(sih, SI_CC_IDX, offset,
7718 				0xFF, SECI_UART_MCR_BAUD_ADJ_EN); /* 0x80 */
7719 		}
7720 
7721 		/* LCR/MCR settings */
7722 		offset = OFFSETOF(chipcregs_t, seci_uart_lcr);
7723 		si_corereg(sih, SI_CC_IDX, offset, 0xFF,
7724 			(SECI_UART_LCR_RX_EN | SECI_UART_LCR_TXO_EN)); /* 0x28 */
7725 		offset = OFFSETOF(chipcregs_t, seci_uart_mcr);
7726 			si_corereg(sih, SI_CC_IDX, offset,
7727 			SECI_UART_MCR_TX_EN, SECI_UART_MCR_TX_EN); /* 0x01 */
7728 
7729 #ifndef SECI_UART
7730 		/* Give control of ECI output regs to MAC core */
7731 		offset = OFFSETOF(chipcregs_t, eci.ge35.eci_controllo);
7732 		si_corereg(sih, SI_CC_IDX, offset, ALLONES_32, ECI_MACCTRLLO_BITS);
7733 		offset = OFFSETOF(chipcregs_t, eci.ge35.eci_controlhi);
7734 		si_corereg(sih, SI_CC_IDX, offset, 0xFFFF, ECI_MACCTRLHI_BITS);
7735 #endif /* SECI_UART */
7736 	}
7737 
7738 	/* set the seci mode in seci conf register */
7739 	seci_conf = R_REG(sii->osh, &cc->SECI_config);
7740 	seci_conf &= ~(SECI_MODE_MASK << SECI_MODE_SHIFT);
7741 	seci_conf |= (seci_mode << SECI_MODE_SHIFT);
7742 	W_REG(sii->osh, &cc->SECI_config, seci_conf);
7743 
7744 	/* Clear force-low bit */
7745 	seci_conf = R_REG(sii->osh, &cc->SECI_config);
7746 	seci_conf &= ~SECI_ENAB_SECIOUT_DIS;
7747 	W_REG(sii->osh, &cc->SECI_config, seci_conf);
7748 
7749 	/* restore previous core */
7750 	if (!fast)
7751 		si_setcoreidx(sih, origidx);
7752 
7753 	return ptr;
7754 }
7755 
7756 #ifdef BCMECICOEX
7757 #define NOTIFY_BT_FM_DISABLE(sih, val) \
7758 	si_eci_notify_bt((sih), ECI_OUT_FM_DISABLE_MASK(CCREV(sih->ccrev)), \
7759 			 ((val) << ECI_OUT_FM_DISABLE_SHIFT(CCREV(sih->ccrev))), FALSE)
7760 
7761 /** Query OTP to see if FM is disabled */
7762 static int
7763 BCMINITFN(si_query_FMDisabled_from_OTP)(si_t *sih, uint16 *FMDisabled)
7764 {
7765 	int error = BCME_OK;
7766 	uint bitoff = 0;
7767 	bool wasup;
7768 	void *oh;
7769 	uint32 min_res_mask = 0;
7770 
7771 	/* If there is a bit for this chip, check it */
7772 	if (bitoff) {
7773 		if (!(wasup = si_is_otp_powered(sih))) {
7774 			si_otp_power(sih, TRUE, &min_res_mask);
7775 		}
7776 
7777 		if ((oh = otp_init(sih)) != NULL)
7778 			*FMDisabled = !otp_read_bit(oh, OTP4325_FM_DISABLED_OFFSET);
7779 		else
7780 			error = BCME_NOTFOUND;
7781 
7782 		if (!wasup) {
7783 			si_otp_power(sih, FALSE, &min_res_mask);
7784 		}
7785 	}
7786 
7787 	return error;
7788 }
7789 
7790 bool
7791 si_eci(const si_t *sih)
7792 {
7793 	return (!!(sih->cccaps & CC_CAP_ECI));
7794 }
7795 
7796 bool
7797 BCMPOSTTRAPFN(si_seci)(const si_t *sih)
7798 {
7799 	return (sih->cccaps_ext & CC_CAP_EXT_SECI_PRESENT);
7800 }
7801 
7802 bool
7803 si_gci(const si_t *sih)
7804 {
7805 	return (sih->cccaps_ext & CC_CAP_EXT_GCI_PRESENT);
7806 }
7807 
7808 bool
7809 si_sraon(const si_t *sih)
7810 {
7811 	return (sih->cccaps_ext & CC_CAP_SR_AON_PRESENT);
7812 }
7813 
7814 /** ECI Init routine */
7815 int
7816 BCMINITFN(si_eci_init)(si_t *sih)
7817 {
7818 	uint32 origidx = 0;
7819 	const si_info_t *sii;
7820 	chipcregs_t *cc;
7821 	bool fast;
7822 	uint16 FMDisabled = FALSE;
7823 
7824 	/* check for ECI capability */
7825 	if (!(sih->cccaps & CC_CAP_ECI))
7826 		return BCME_ERROR;
7827 
7828 	sii = SI_INFO(sih);
7829 	fast = SI_FAST(sii);
7830 	if (!fast) {
7831 		origidx = sii->curidx;
7832 		if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL)
7833 			return BCME_ERROR;
7834 	} else if ((cc = (chipcregs_t *)CCREGS_FAST(sii)) == NULL)
7835 		return BCME_ERROR;
7836 	ASSERT(cc);
7837 
7838 	/* disable level based interrupts */
7839 	if (CCREV(sih->ccrev) < 35) {
7840 		W_REG(sii->osh, &cc->eci.lt35.eci_intmaskhi, 0x0);
7841 		W_REG(sii->osh, &cc->eci.lt35.eci_intmaskmi, 0x0);
7842 		W_REG(sii->osh, &cc->eci.lt35.eci_intmasklo, 0x0);
7843 	} else {
7844 		W_REG(sii->osh, &cc->eci.ge35.eci_intmaskhi, 0x0);
7845 		W_REG(sii->osh, &cc->eci.ge35.eci_intmasklo, 0x0);
7846 	}
7847 
7848 	/* Assign eci_output bits between 'wl' and dot11mac */
7849 	if (CCREV(sih->ccrev) < 35) {
7850 		W_REG(sii->osh, &cc->eci.lt35.eci_control, ECI_MACCTRL_BITS);
7851 	} else {
7852 		W_REG(sii->osh, &cc->eci.ge35.eci_controllo, ECI_MACCTRLLO_BITS);
7853 		W_REG(sii->osh, &cc->eci.ge35.eci_controlhi, ECI_MACCTRLHI_BITS);
7854 	}
7855 
7856 	/* enable only edge based interrupts
7857 	 * only a toggle on bit 62 triggers an interrupt
7858 	 */
7859 	if (CCREV(sih->ccrev) < 35) {
7860 		W_REG(sii->osh, &cc->eci.lt35.eci_eventmaskhi, 0x0);
7861 		W_REG(sii->osh, &cc->eci.lt35.eci_eventmaskmi, 0x0);
7862 		W_REG(sii->osh, &cc->eci.lt35.eci_eventmasklo, 0x0);
7863 	} else {
7864 		W_REG(sii->osh, &cc->eci.ge35.eci_eventmaskhi, 0x0);
7865 		W_REG(sii->osh, &cc->eci.ge35.eci_eventmasklo, 0x0);
7866 	}
7867 
7868 	/* restore previous core */
7869 	if (!fast)
7870 		si_setcoreidx(sih, origidx);
7871 
7872 	/* if FM disabled in OTP, let BT know */
7873 	if (!si_query_FMDisabled_from_OTP(sih, &FMDisabled)) {
7874 		if (FMDisabled) {
7875 			NOTIFY_BT_FM_DISABLE(sih, 1);
7876 		}
7877 	}
7878 
7879 	return 0;
7880 }
7881 
7882 /** Write values to BT on eci_output. */
7883 void
7884 si_eci_notify_bt(si_t *sih, uint32 mask, uint32 val, bool is_interrupt)
7885 {
7886 	uint32 offset;
7887 
7888 	if ((sih->cccaps & CC_CAP_ECI) ||
7889 		(si_seci(sih)))
7890 	{
7891 		/* ECI or SECI mode */
7892 		/* Clear interrupt bit by default */
7893 		if (is_interrupt) {
7894 			si_corereg(sih, SI_CC_IDX,
7895 			   (CCREV(sih->ccrev) < 35 ?
7896 			    OFFSETOF(chipcregs_t, eci.lt35.eci_output) :
7897 			    OFFSETOF(chipcregs_t, eci.ge35.eci_outputlo)),
7898 			   (1 << 30), 0);
7899 		}
7900 
7901 		if (CCREV(sih->ccrev) >= 35) {
7902 			if ((mask & 0xFFFF0000) == ECI48_OUT_MASKMAGIC_HIWORD) {
7903 				offset = OFFSETOF(chipcregs_t, eci.ge35.eci_outputhi);
7904 				mask = mask & ~0xFFFF0000;
7905 			} else {
7906 				offset = OFFSETOF(chipcregs_t, eci.ge35.eci_outputlo);
7907 				mask = mask | (1<<30);
7908 				val = val & ~(1 << 30);
7909 			}
7910 		} else {
7911 			offset = OFFSETOF(chipcregs_t, eci.lt35.eci_output);
7912 			val = val & ~(1 << 30);
7913 		}
7914 
7915 		si_corereg(sih, SI_CC_IDX, offset, mask, val);
7916 
7917 		/* Set interrupt bit if needed */
7918 		if (is_interrupt) {
7919 			si_corereg(sih, SI_CC_IDX,
7920 			   (CCREV(sih->ccrev) < 35 ?
7921 			    OFFSETOF(chipcregs_t, eci.lt35.eci_output) :
7922 			    OFFSETOF(chipcregs_t, eci.ge35.eci_outputlo)),
7923 			   (1 << 30), (1 << 30));
7924 		}
7925 	} else if (sih->cccaps_ext & CC_CAP_EXT_GCI_PRESENT) {
7926 		/* GCI Mode */
7927 		if ((mask & 0xFFFF0000) == ECI48_OUT_MASKMAGIC_HIWORD) {
7928 			mask = mask & ~0xFFFF0000;
7929 			si_gci_direct(sih, GCI_OFFSETOF(sih, gci_output[1]), mask, val);
7930 		}
7931 	}
7932 }
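
/*
 * Usage note (illustrative only, compiled out): callers such as the
 * NOTIFY_BT_FM_DISABLE() macro above pass is_interrupt = FALSE and only
 * update the masked payload bits. Passing TRUE makes si_eci_notify_bt()
 * additionally clear and then set eci_output bit 30 around the write, so BT
 * observes a 0->1 transition on that bit along with the new payload.
 */
#if 0
static void
example_notify_bt_with_interrupt(si_t *sih, uint32 mask, uint32 val)
{
	/* the bit-30 pulse is handled inside si_eci_notify_bt() itself */
	si_eci_notify_bt(sih, mask, val, TRUE);
}
#endif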
7933 
7934 static void
7935 BCMPOSTTRAPFN(seci_restore_coreidx)(si_t *sih, uint32 origidx, bool fast)
7936 {
7937 	if (!fast)
7938 		si_setcoreidx(sih, origidx);
7939 	return;
7940 }
7941 
7942 void
7943 BCMPOSTTRAPFN(si_seci_down)(si_t *sih)
7944 {
7945 	uint32 origidx;
7946 	bool fast;
7947 	const si_info_t *sii = SI_INFO(sih);
7948 	const chipcregs_t *cc;
7949 	uint32 offset;
7950 
7951 	if (!si_seci(sih) && !si_seci_uart(sih))
7952 		return;
7953 	/* Don't proceed if request is already made to bring down the clock */
7954 	offset = OFFSETOF(chipcregs_t, clk_ctl_st);
7955 	if (!(si_corereg(sih, 0, offset, 0, 0) & CLKCTL_STS_SECI_CLK_REQ))
7956 		return;
7957 	if (!(cc = si_seci_access_preamble(sih, sii, &origidx, &fast)))
7958 	    goto exit;
7959 
7960 exit:
7961 	/* bring down the clock if up */
7962 	si_seci_clkreq(sih, FALSE);
7963 
7964 	/* restore previous core */
7965 	seci_restore_coreidx(sih, origidx, fast);
7966 }
7967 
7968 void
7969 si_seci_upd(si_t *sih, bool enable)
7970 {
7971 	uint32 origidx = 0;
7972 	const si_info_t *sii = SI_INFO(sih);
7973 	chipcregs_t *cc;
7974 	bool fast;
7975 	uint32 regval, seci_ctrl;
7976 	bcm_int_bitmask_t intr_val;
7977 
7978 	if (!si_seci(sih))
7979 		return;
7980 
7981 	fast = SI_FAST(sii);
7982 	INTR_OFF(sii, &intr_val);
7983 	if (!fast) {
7984 		origidx = sii->curidx;
7985 		if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL)
7986 			goto exit;
7987 	} else if ((cc = (chipcregs_t *)CCREGS_FAST(sii)) == NULL)
7988 		goto exit;
7989 
7990 	ASSERT(cc);
7991 
7992 	/* Select SECI based on enable input */
7993 	if ((CHIPID(sih->chip) == BCM4352_CHIP_ID) || (CHIPID(sih->chip) == BCM4360_CHIP_ID)) {
7994 		regval = R_REG(sii->osh, &cc->chipcontrol);
7995 
7996 		seci_ctrl = CCTRL4360_SECI_ON_GPIO01;
7997 
7998 		if (enable) {
7999 			regval |= seci_ctrl;
8000 		} else {
8001 			regval &= ~seci_ctrl;
8002 		}
8003 		W_REG(sii->osh, &cc->chipcontrol, regval);
8004 
8005 		if (enable) {
8006 			/* Send ECI update to BT */
8007 			regval = R_REG(sii->osh, &cc->SECI_config);
8008 			regval |= SECI_UPD_SECI;
8009 			W_REG(sii->osh, &cc->SECI_config, regval);
8010 			SPINWAIT((R_REG(sii->osh, &cc->SECI_config) & SECI_UPD_SECI), 1000);
8011 			/* Request ECI update from BT */
8012 			W_REG(sii->osh, &cc->seci_uart_data, SECI_SLIP_ESC_CHAR);
8013 			W_REG(sii->osh, &cc->seci_uart_data, SECI_REFRESH_REQ);
8014 		}
8015 	}
8016 
8017 exit:
8018 	/* restore previous core */
8019 	if (!fast)
8020 		si_setcoreidx(sih, origidx);
8021 
8022 	INTR_RESTORE(sii, &intr_val);
8023 }
8024 
8025 void *
8026 BCMINITFN(si_gci_init)(si_t *sih)
8027 {
8028 #ifdef HNDGCI
8029 	const si_info_t *sii = SI_INFO(sih);
8030 #endif /* HNDGCI */
8031 
8032 	if (sih->cccaps_ext & CC_CAP_EXT_GCI_PRESENT)
8033 	{
8034 		si_gci_reset(sih);
8035 
8036 		if (sih->boardflags4 & BFL4_BTCOEX_OVER_SECI) {
8037 			si_gci_seci_init(sih);
8038 		}
8039 
8040 		/* Set GCI Control bits 40 - 47 to be SW Controlled. These bits
8041 		 * contain WL channel info and are sent to BT.
8042 		 */
8043 		si_gci_direct(sih, GCI_OFFSETOF(sih, gci_control_1),
8044 			GCI_WL_CHN_INFO_MASK, GCI_WL_CHN_INFO_MASK);
8045 	}
8046 #ifdef HNDGCI
8047 	hndgci_init(sih, sii->osh, HND_GCI_PLAIN_UART_MODE,
8048 		GCI_UART_BR_115200);
8049 #endif /* HNDGCI */
8050 
8051 	return (NULL);
8052 }
8053 #endif /* BCMECICOEX */
8054 #endif /* !(BCMDONGLEHOST) */
8055 
8056 /**
8057  * For boards that use GPIO(8) as the Bluetooth Coex TX_WLAN pin,
8058  * when GPIOControl for Pin 8 is with ChipCommon core,
8059  * if UART_TX_1 (bit 5: Chipc capabilities) strapping option is set, then
8060  * GPIO pin 8 is driven by Uart0MCR:2 rather than GPIOOut:8. To drive this pin
8061  * low, one has to set Uart0MCR:2 to 1. This is required when the BTC is disabled,
8062  * or the driver goes down. Refer to PR35488.
8063  */
8064 void
8065 si_btcgpiowar(si_t *sih)
8066 {
8067 	const si_info_t *sii = SI_INFO(sih);
8068 	uint origidx;
8069 	bcm_int_bitmask_t intr_val;
8070 	chipcregs_t *cc;
8071 
8072 	/* Make sure that there is ChipCommon core present &&
8073 	 * UART_TX is strapped to 1
8074 	 */
8075 	if (!(sih->cccaps & CC_CAP_UARTGPIO))
8076 		return;
8077 
8078 	/* si_corereg cannot be used as we have to guarantee 8-bit read/writes */
8079 	INTR_OFF(sii, &intr_val);
8080 
8081 	origidx = si_coreidx(sih);
8082 
8083 	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
8084 	ASSERT(cc != NULL);
8085 
8086 	W_REG(sii->osh, &cc->uart0mcr, R_REG(sii->osh, &cc->uart0mcr) | 0x04);
8087 
8088 	/* restore the original index */
8089 	si_setcoreidx(sih, origidx);
8090 
8091 	INTR_RESTORE(sii, &intr_val);
8092 }
8093 
8094 void
8095 si_chipcontrl_restore(si_t *sih, uint32 val)
8096 {
8097 	const si_info_t *sii = SI_INFO(sih);
8098 	chipcregs_t *cc;
8099 	uint origidx = si_coreidx(sih);
8100 
8101 	if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
8102 		SI_ERROR(("si_chipcontrl_restore: Failed to find CORE ID!\n"));
8103 		return;
8104 	}
8105 	W_REG(sii->osh, &cc->chipcontrol, val);
8106 	si_setcoreidx(sih, origidx);
8107 }
8108 
8109 uint32
8110 si_chipcontrl_read(si_t *sih)
8111 {
8112 	const si_info_t *sii = SI_INFO(sih);
8113 	chipcregs_t *cc;
8114 	uint origidx = si_coreidx(sih);
8115 	uint32 val;
8116 
8117 	if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
8118 		SI_ERROR(("si_chipcontrl_read: Failed to find CORE ID!\n"));
8119 		return -1;
8120 	}
8121 	val = R_REG(sii->osh, &cc->chipcontrol);
8122 	si_setcoreidx(sih, origidx);
8123 	return val;
8124 }
8125 
8126 /** switch muxed pins, on: SROM, off: FEMCTRL. Called for a family of ac chips, not just 4360. */
8127 void
8128 si_chipcontrl_srom4360(si_t *sih, bool on)
8129 {
8130 	const si_info_t *sii = SI_INFO(sih);
8131 	chipcregs_t *cc;
8132 	uint origidx = si_coreidx(sih);
8133 	uint32 val;
8134 
8135 	if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
8136 		SI_ERROR(("si_chipcontrl_srom4360: Failed to find CORE ID!\n"));
8137 		return;
8138 	}
8139 	val = R_REG(sii->osh, &cc->chipcontrol);
8140 
8141 	if (on) {
8142 		val &= ~(CCTRL4360_SECI_MODE |
8143 			CCTRL4360_BTSWCTRL_MODE |
8144 			CCTRL4360_EXTRA_FEMCTRL_MODE |
8145 			CCTRL4360_BT_LGCY_MODE |
8146 			CCTRL4360_CORE2FEMCTRL4_ON);
8147 
8148 		W_REG(sii->osh, &cc->chipcontrol, val);
8149 	} else {
8150 		/* huh, nothing here? */
8151 	}
8152 
8153 	si_setcoreidx(sih, origidx);
8154 }
8155 
8156 /**
8157  * The SROM clock is derived from the backplane clock. Chips with a fast
8158  * backplane clock require a higher-than-POR-default clock divisor ratio for the SROM clock.
8159  */
8160 void
8161 si_srom_clk_set(si_t *sih)
8162 {
8163 	const si_info_t *sii = SI_INFO(sih);
8164 	chipcregs_t *cc;
8165 	uint origidx = si_coreidx(sih);
8166 	uint32 val;
8167 	uint32 divisor = 1;
8168 
8169 	if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
8170 		SI_ERROR(("si_srom_clk_set: Failed to find CORE ID!\n"));
8171 		return;
8172 	}
8173 
8174 	val = R_REG(sii->osh, &cc->clkdiv2);
8175 	ASSERT(0);
8176 
8177 	W_REG(sii->osh, &cc->clkdiv2, ((val & ~CLKD2_SROM) | divisor));
8178 	si_setcoreidx(sih, origidx);
8179 }
8180 
8181 void
8182 si_pmu_avb_clk_set(si_t *sih, osl_t *osh, bool set_flag)
8183 {
8184 #if !defined(BCMDONGLEHOST)
8185 	switch (CHIPID(sih->chip)) {
8186 		case BCM43460_CHIP_ID:
8187 		case BCM4360_CHIP_ID:
8188 			si_pmu_avbtimer_enable(sih, osh, set_flag);
8189 			break;
8190 		default:
8191 			break;
8192 	}
8193 #endif
8194 }
8195 
8196 void
8197 si_btc_enable_chipcontrol(si_t *sih)
8198 {
8199 	const si_info_t *sii = SI_INFO(sih);
8200 	chipcregs_t *cc;
8201 	uint origidx = si_coreidx(sih);
8202 
8203 	if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
8204 		SI_ERROR(("si_btc_enable_chipcontrol: Failed to find CORE ID!\n"));
8205 		return;
8206 	}
8207 
8208 	/* BT fix */
8209 	W_REG(sii->osh, &cc->chipcontrol,
8210 		R_REG(sii->osh, &cc->chipcontrol) | CC_BTCOEX_EN_MASK);
8211 
8212 	si_setcoreidx(sih, origidx);
8213 }
8214 
8215 /** cache device removed state */
8216 void si_set_device_removed(si_t *sih, bool status)
8217 {
8218 	si_info_t *sii = SI_INFO(sih);
8219 
8220 	sii->device_removed = status;
8221 }
8222 
8223 /** check if the device is removed */
8224 bool
8225 si_deviceremoved(const si_t *sih)
8226 {
8227 	uint32 w;
8228 	const si_info_t *sii = SI_INFO(sih);
8229 
8230 	if (sii->device_removed) {
8231 		return TRUE;
8232 	}
8233 
8234 	switch (BUSTYPE(sih->bustype)) {
8235 	case PCI_BUS:
8236 		ASSERT(SI_INFO(sih)->osh != NULL);
8237 		w = OSL_PCI_READ_CONFIG(SI_INFO(sih)->osh, PCI_CFG_VID, sizeof(uint32));
8238 		if ((w & 0xFFFF) != VENDOR_BROADCOM)
8239 			return TRUE;
8240 		break;
8241 	default:
8242 		break;
8243 	}
8244 	return FALSE;
8245 }
8246 
8247 bool
8248 si_is_warmboot(void)
8249 {
8250 
8251 	return FALSE;
8252 }
8253 
8254 bool
8255 si_is_sprom_available(si_t *sih)
8256 {
8257 	if (CCREV(sih->ccrev) >= 31) {
8258 		const si_info_t *sii;
8259 		uint origidx;
8260 		chipcregs_t *cc;
8261 		uint32 sromctrl;
8262 
8263 		if ((sih->cccaps & CC_CAP_SROM) == 0)
8264 			return FALSE;
8265 
8266 		sii = SI_INFO(sih);
8267 		origidx = sii->curidx;
8268 		cc = si_setcoreidx(sih, SI_CC_IDX);
8269 		ASSERT(cc);
8270 		sromctrl = R_REG(sii->osh, &cc->sromcontrol);
8271 		si_setcoreidx(sih, origidx);
8272 		return (sromctrl & SRC_PRESENT);
8273 	}
8274 
8275 	switch (CHIPID(sih->chip)) {
8276 	case BCM4369_CHIP_GRPID:
8277 		if (CHIPREV(sih->chiprev) == 0) {
8278 			/* WAR for 4369a0: HW4369-1729. no sprom, default to otp always. */
8279 			return 0;
8280 		} else {
8281 			return (sih->chipst & CST4369_SPROM_PRESENT) != 0;
8282 		}
8283 		break;
8284 	CASE_BCM43602_CHIP:
8285 		return (sih->chipst & CST43602_SPROM_PRESENT) != 0;
8286 	case BCM43012_CHIP_ID:
8287 	case BCM43013_CHIP_ID:
8288 	case BCM43014_CHIP_ID:
8289 		return FALSE;
8290 	case BCM4362_CHIP_GRPID:
8291 		return (sih->chipst & CST4362_SPROM_PRESENT) != 0;
8292 	case BCM4376_CHIP_GRPID:
8293 	case BCM4378_CHIP_GRPID:
8294 		return (sih->chipst & CST4378_SPROM_PRESENT) != 0;
8295 	case BCM4385_CHIP_GRPID:
8296 	case BCM4387_CHIP_GRPID:
8297 		return (sih->chipst & CST4387_SPROM_PRESENT) != 0;
8298 	case BCM4388_CHIP_GRPID:
8299 	case BCM4389_CHIP_GRPID:
8300 	case BCM4397_CHIP_GRPID:
8301 		/* 4389 supports only OTP */
8302 		return FALSE;
8303 	default:
8304 		return TRUE;
8305 	}
8306 }
8307 
8308 bool
8309 si_is_sflash_available(const si_t *sih)
8310 {
8311 	switch (CHIPID(sih->chip)) {
8312 	case BCM4387_CHIP_ID:
8313 		return (sih->chipst & CST4387_SFLASH_PRESENT) != 0;
8314 	default:
8315 		return FALSE;
8316 	}
8317 }
8318 
8319 #if !defined(BCMDONGLEHOST)
8320 bool
8321 si_is_otp_disabled(const si_t *sih)
8322 {
8323 	switch (CHIPID(sih->chip)) {
8324 	case BCM4360_CHIP_ID:
8325 	case BCM43526_CHIP_ID:
8326 	case BCM43460_CHIP_ID:
8327 	case BCM4352_CHIP_ID:
8328 	case BCM43602_CHIP_ID:
8329 		/* 4360 OTP is always powered and enabled */
8330 		return FALSE;
8331 	/* These chips always have their OTP on */
8332 	case BCM43012_CHIP_ID:
8333 	case BCM43013_CHIP_ID:
8334 	case BCM43014_CHIP_ID:
8335 	case BCM4369_CHIP_GRPID:
8336 	case BCM4362_CHIP_GRPID:
8337 	case BCM4376_CHIP_GRPID:
8338 	case BCM4378_CHIP_GRPID:
8339 	case BCM4385_CHIP_GRPID:
8340 	case BCM4387_CHIP_GRPID:
8341 	case BCM4388_CHIP_GRPID:
8342 	case BCM4389_CHIP_GRPID:
8343 	case BCM4397_CHIP_GRPID:
8344 	default:
8345 		return FALSE;
8346 	}
8347 }
8348 
8349 bool
8350 si_is_otp_powered(si_t *sih)
8351 {
8352 	if (PMUCTL_ENAB(sih))
8353 		return si_pmu_is_otp_powered(sih, si_osh(sih));
8354 	return TRUE;
8355 }
8356 
8357 void
8358 si_otp_power(si_t *sih, bool on, uint32* min_res_mask)
8359 {
8360 	if (PMUCTL_ENAB(sih))
8361 		si_pmu_otp_power(sih, si_osh(sih), on, min_res_mask);
8362 	OSL_DELAY(1000);
8363 }
8364 
8365 /* Return BCME_NOTFOUND if the card doesn't have CIS format nvram */
8366 int
8367 si_cis_source(const si_t *sih)
8368 {
8369 	/* Most PCI chips use SROM format instead of CIS */
8370 	if (BUSTYPE(sih->bustype) == PCI_BUS) {
8371 		return BCME_NOTFOUND;
8372 	}
8373 
8374 	switch (CHIPID(sih->chip)) {
8375 	case BCM4360_CHIP_ID:
8376 	case BCM43460_CHIP_ID:
8377 	case BCM4352_CHIP_ID:
8378 	case BCM43526_CHIP_ID: {
8379 		if ((sih->chipst & CST4360_OTP_ENABLED))
8380 			return CIS_OTP;
8381 		return CIS_DEFAULT;
8382 	}
8383 	CASE_BCM43602_CHIP:
8384 		if (sih->chipst & CST43602_SPROM_PRESENT) {
8385 			/* Don't support CIS formatted SROM, use 'real' SROM format instead */
8386 			return BCME_NOTFOUND;
8387 		}
8388 		return CIS_OTP;
8389 	case BCM43012_CHIP_ID:
8390 	case BCM43013_CHIP_ID:
8391 	case BCM43014_CHIP_ID:
8392 		return CIS_OTP;
8393 	case BCM4369_CHIP_GRPID:
8394 		if (CHIPREV(sih->chiprev) == 0) {
8395 			/* WAR for 4369a0: HW4369-1729 */
8396 			return CIS_OTP;
8397 		} else if (sih->chipst & CST4369_SPROM_PRESENT) {
8398 			return CIS_SROM;
8399 		}
8400 		return CIS_OTP;
8401 	case BCM4362_CHIP_GRPID:
8402 		return ((sih->chipst & CST4362_SPROM_PRESENT)? CIS_SROM : CIS_OTP);
8403 	case BCM4376_CHIP_GRPID:
8404 	case BCM4378_CHIP_GRPID:
8405 		if (sih->chipst & CST4378_SPROM_PRESENT)
8406 			return CIS_SROM;
8407 		return CIS_OTP;
8408 	case BCM4385_CHIP_GRPID:
8409 	case BCM4387_CHIP_GRPID:
8410 		if (sih->chipst & CST4387_SPROM_PRESENT)
8411 			return CIS_SROM;
8412 		return CIS_OTP;
8413 	case BCM4388_CHIP_GRPID:
8414 	case BCM4389_CHIP_GRPID:
8415 	case BCM4397_CHIP_GRPID:
8416 		/* 4389 supports only OTP */
8417 		return CIS_OTP;
8418 	default:
8419 		return CIS_DEFAULT;
8420 	}
8421 }
8422 
8423 uint16 BCMATTACHFN(si_fabid)(si_t *sih)
8424 {
8425 	uint32 data;
8426 	uint16 fabid = 0;
8427 
8428 	switch (CHIPID(sih->chip)) {
8429 		CASE_BCM43602_CHIP:
8430 		case BCM43012_CHIP_ID:
8431 		case BCM43013_CHIP_ID:
8432 		case BCM43014_CHIP_ID:
8433 		case BCM4369_CHIP_GRPID:
8434 		case BCM4362_CHIP_GRPID:
8435 		case BCM4376_CHIP_GRPID:
8436 		case BCM4378_CHIP_GRPID:
8437 		case BCM4385_CHIP_GRPID:
8438 		case BCM4387_CHIP_GRPID:
8439 		case BCM4388_CHIP_GRPID:
8440 		case BCM4389_CHIP_GRPID:
8441 		case BCM4397_CHIP_GRPID:
8442 			data = si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, fabid),	0, 0);
8443 			fabid = data & 0xf;
8444 			break;
8445 
8446 		default:
8447 			break;
8448 	}
8449 
8450 	return fabid;
8451 }
8452 #endif /* !defined(BCMDONGLEHOST) */
8453 
8454 uint32 BCMATTACHFN(si_get_sromctl)(si_t *sih)
8455 {
8456 	chipcregs_t *cc;
8457 	uint origidx = si_coreidx(sih);
8458 	uint32 sromctl;
8459 	osl_t *osh = si_osh(sih);
8460 
8461 	cc = si_setcoreidx(sih, SI_CC_IDX);
8462 	ASSERT((uintptr)cc);
8463 
8464 	sromctl = R_REG(osh, &cc->sromcontrol);
8465 
8466 	/* return to the original core */
8467 	si_setcoreidx(sih, origidx);
8468 	return sromctl;
8469 }
8470 
8471 int BCMATTACHFN(si_set_sromctl)(si_t *sih, uint32 value)
8472 {
8473 	chipcregs_t *cc;
8474 	uint origidx = si_coreidx(sih);
8475 	osl_t *osh = si_osh(sih);
8476 	int ret = BCME_OK;
8477 
8478 	cc = si_setcoreidx(sih, SI_CC_IDX);
8479 	ASSERT((uintptr)cc);
8480 
8481 	/* get chipcommon rev */
8482 	if (si_corerev(sih) >= 32) {
8483 		/* SpromCtrl is only accessible if CoreCapabilities.SpromSupported and
8484 		 * SpromPresent is 1.
8485 		 */
8486 		if ((R_REG(osh, &cc->capabilities) & CC_CAP_SROM) != 0 &&
8487 		     (R_REG(osh, &cc->sromcontrol) & SRC_PRESENT)) {
8488 			W_REG(osh, &cc->sromcontrol, value);
8489 		} else {
8490 			ret = BCME_NODEVICE;
8491 		}
8492 	} else {
8493 		ret = BCME_UNSUPPORTED;
8494 	}
8495 
8496 	/* return to the original core */
8497 	si_setcoreidx(sih, origidx);
8498 
8499 	return ret;
8500 }
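
/*
 * Usage sketch (illustrative only, compiled out): a read-modify-write of the
 * SROM control register using the two accessors above. 'some_bit' is a
 * placeholder for whichever sromcontrol field a caller actually needs; the
 * BCME_* return code from si_set_sromctl() is propagated unchanged.
 */
#if 0
static int
example_sromctl_set_bit(si_t *sih, uint32 some_bit)
{
	uint32 sromctl = si_get_sromctl(sih);

	return si_set_sromctl(sih, sromctl | some_bit);
}
#endif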
8501 
8502 uint
8503 BCMPOSTTRAPFN(si_core_wrapperreg)(si_t *sih, uint32 coreidx, uint32 offset, uint32 mask, uint32 val)
8504 {
8505 	uint origidx;
8506 	bcm_int_bitmask_t intr_val;
8507 	uint ret_val;
8508 	const si_info_t *sii = SI_INFO(sih);
8509 
8510 	origidx = si_coreidx(sih);
8511 
8512 	INTR_OFF(sii, &intr_val);
8513 	/* Validate the core idx */
8514 	si_setcoreidx(sih, coreidx);
8515 
8516 	ret_val = si_wrapperreg(sih, offset, mask, val);
8517 
8518 	/* return to the original core */
8519 	si_setcoreidx(sih, origidx);
8520 	INTR_RESTORE(sii, &intr_val);
8521 	return ret_val;
8522 }
8523 
8524 #if !defined(BCMDONGLEHOST)
8525 static void
8526 si_pmu_sr_upd(si_t *sih)
8527 {
8528 #if defined(SAVERESTORE)
8529 	if (SR_ENAB()) {
8530 		const si_info_t *sii = SI_INFO(sih);
8531 
8532 		/* min_mask is updated after SR code is downloaded to txfifo */
8533 		if (PMUCTL_ENAB(sih))
8534 			si_pmu_res_minmax_update(sih, sii->osh);
8535 	}
8536 #endif
8537 }
8538 
8539 /**
8540  * Make sure that the res mask is minimal to save power, and also indicate
8541  * the SR logic specifically to the host.
8542  */
8543 void
8544 si_update_masks(si_t *sih)
8545 {
8546 	const si_info_t *sii = SI_INFO(sih);
8547 
8548 	switch (CHIPID(sih->chip)) {
8549 	case BCM4369_CHIP_GRPID:
8550 	CASE_BCM43602_CHIP:
8551 	case BCM4362_CHIP_GRPID:
8552 	case BCM4376_CHIP_GRPID:
8553 	case BCM4378_CHIP_GRPID:
8554 	case BCM4385_CHIP_GRPID:
8555 	case BCM4387_CHIP_GRPID:
8556 	case BCM4388_CHIP_GRPID:
8557 	case BCM4389_CHIP_GRPID:
8558 	case BCM4397_CHIP_GRPID:
8559 		/* Assumes SR engine has been enabled */
8560 		if (PMUCTL_ENAB(sih))
8561 			si_pmu_res_minmax_update(sih, sii->osh);
8562 		break;
8563 	case BCM43012_CHIP_ID:
8564 	case BCM43013_CHIP_ID:
8565 	case BCM43014_CHIP_ID:
8566 		/* min_mask is updated after SR code is downloaded to txfifo */
8567 		si_pmu_sr_upd(sih);
8568 		PMU_REG(sih, mac_res_req_timer, ~0x0, PMU43012_MAC_RES_REQ_TIMER);
8569 		PMU_REG(sih, mac_res_req_mask, ~0x0, PMU43012_MAC_RES_REQ_MASK);
8570 		break;
8571 
8572 	default:
8573 		ASSERT(0);
8574 	break;
8575 	}
8576 }
8577 
8578 void
8579 si_force_islanding(si_t *sih, bool enable)
8580 {
8581 	switch (CHIPID(sih->chip)) {
8582 	case BCM43012_CHIP_ID:
8583 	case BCM43013_CHIP_ID:
8584 	case BCM43014_CHIP_ID: {
8585 		if (enable) {
8586 			/* Turn on the islands */
8587 			si_pmu_chipcontrol(sih, CHIPCTRLREG2, 0x00000053, 0x0);
8588 #ifdef USE_MEMLPLDO
8589 			/* Force vddm pwrsw always on */
8590 			si_pmu_chipcontrol(sih, CHIPCTRLREG2, 0x000003, 0x000003);
8591 #endif
8592 #ifdef BCMQT
8593 			/* Turn off the islands */
8594 			si_pmu_chipcontrol(sih, CHIPCTRLREG2, 0x000050, 0x000050);
8595 #endif
8596 		} else {
8597 			/* Turn off the islands */
8598 			si_pmu_chipcontrol(sih, CHIPCTRLREG2, 0x000050, 0x000050);
8599 		}
8600 	}
8601 	break;
8602 
8603 	default:
8604 		ASSERT(0);
8605 	break;
8606 	}
8607 }
8608 
8609 #endif /* !defined(BCMDONGLEHOST) */
8610 
8611 /* Clean up the timer from the host when the ARM has been halted
8612  * without a chance to clean up its own resources.
8613  * If not cleaned up, an interrupt from a software timer can still
8614  * request the HT clock while the ARM is halted.
8615  */
8616 uint32
8617 si_pmu_res_req_timer_clr(si_t *sih)
8618 {
8619 	uint32 mask;
8620 
8621 	mask = PRRT_REQ_ACTIVE | PRRT_INTEN | PRRT_HT_REQ;
8622 	mask <<= 14;
8623 	/* clear mask bits */
8624 	pmu_corereg(sih, SI_CC_IDX, res_req_timer, mask, 0);
8625 	/* readback to ensure write completes */
8626 	return pmu_corereg(sih, SI_CC_IDX, res_req_timer, 0, 0);
8627 }
8628 
8629 /** turn on/off rfldo */
8630 void
8631 si_pmu_rfldo(si_t *sih, bool on)
8632 {
8633 #if !defined(BCMDONGLEHOST)
8634 	switch (CHIPID(sih->chip)) {
8635 	case BCM4360_CHIP_ID:
8636 	case BCM4352_CHIP_ID:
8637 	case BCM43526_CHIP_ID: {
8638 	CASE_BCM43602_CHIP:
8639 		si_pmu_vreg_control(sih, PMU_VREG_0, RCTRL4360_RFLDO_PWR_DOWN,
8640 			on ? 0 : RCTRL4360_RFLDO_PWR_DOWN);
8641 		break;
8642 	}
8643 	default:
8644 		ASSERT(0);
8645 	break;
8646 	}
8647 #endif
8648 }
8649 
8650 /* The caller of this function should make sure it is on the PCIE core.
8651  * Used in pciedev.c.
8652  */
8653 void
8654 si_pcie_disable_oobselltr(const si_t *sih)
8655 {
8656 	ASSERT(si_coreid(sih) == PCIE2_CORE_ID);
8657 	if (PCIECOREREV(sih->buscorerev) >= 23)
8658 		si_wrapperreg(sih, AI_OOBSELIND74, ~0, 0);
8659 	else
8660 		si_wrapperreg(sih, AI_OOBSELIND30, ~0, 0);
8661 }
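/* Illustrative usage sketch (not part of the driver): as the comment above notes, the
 * caller must already be on the PCIe core, so a minimal caller switches cores, makes
 * the call, and restores the original core. The SI_UTILS_USAGE_EXAMPLES guard and the
 * si_example_* name are hypothetical, example-only additions.
 */
#ifdef SI_UTILS_USAGE_EXAMPLES
static void
si_example_disable_oobselltr(si_t *sih)
{
	uint origidx = si_coreidx(sih);

	/* switch to the PCIe gen2 core first, as the ASSERT above requires */
	if (si_setcore(sih, PCIE2_CORE_ID, 0) != NULL) {
		si_pcie_disable_oobselltr(sih);
	}

	/* return to the original core */
	si_setcoreidx(sih, origidx);
}
#endif /* SI_UTILS_USAGE_EXAMPLES */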
8662 
8663 void
8664 si_pcie_ltr_war(const si_t *sih)
8665 {
8666 #if !defined(BCMDONGLEHOST)
8667 	const si_info_t *sii = SI_INFO(sih);
8668 
8669 	if (PCIE_GEN2(sii))
8670 		pcie_ltr_war(sii->pch, si_pcieltrenable(sih, 0, 0));
8671 #endif /* !defined(BCMDONGLEHOST) */
8672 }
8673 
8674 void
8675 si_pcie_hw_LTR_war(const si_t *sih)
8676 {
8677 #if !defined(BCMDONGLEHOST)
8678 	const si_info_t *sii = SI_INFO(sih);
8679 
8680 	if (PCIE_GEN2(sii))
8681 		pcie_hw_LTR_war(sii->pch);
8682 #endif /* !defined(BCMDONGLEHOST) */
8683 }
8684 
8685 void
8686 si_pciedev_reg_pm_clk_period(const si_t *sih)
8687 {
8688 #if !defined(BCMDONGLEHOST)
8689 	const si_info_t *sii = SI_INFO(sih);
8690 
8691 	if (PCIE_GEN2(sii))
8692 		pciedev_reg_pm_clk_period(sii->pch);
8693 #endif /* !defined(BCMDONGLEHOST) */
8694 }
8695 
8696 void
8697 si_pciedev_crwlpciegen2(const si_t *sih)
8698 {
8699 #if !defined(BCMDONGLEHOST)
8700 	const si_info_t *sii = SI_INFO(sih);
8701 
8702 	if (PCIE_GEN2(sii))
8703 		pciedev_crwlpciegen2(sii->pch);
8704 #endif /* !defined(BCMDONGLEHOST) */
8705 }
8706 
8707 void
8708 si_pcie_prep_D3(const si_t *sih, bool enter_D3)
8709 {
8710 #if !defined(BCMDONGLEHOST)
8711 	const si_info_t *sii = SI_INFO(sih);
8712 
8713 	if (PCIE_GEN2(sii))
8714 		pciedev_prep_D3(sii->pch, enter_D3);
8715 #endif /* !defined(BCMDONGLEHOST) */
8716 }
8717 
8718 #if !defined(BCMDONGLEHOST)
8719 uint
8720 BCMPOSTTRAPFN(si_corereg_ifup)(si_t *sih, uint core_id, uint regoff, uint mask, uint val)
8721 {
8722 	bool isup;
8723 	volatile void *regs;
8724 	uint origidx, ret_val, coreidx;
8725 
8726 	/* Remember original core before switch to chipc */
8727 	origidx = si_coreidx(sih);
8728 	regs = si_setcore(sih, core_id, 0);
8729 	BCM_REFERENCE(regs);
8730 	ASSERT(regs != NULL);
8731 
8732 	coreidx = si_coreidx(sih);
8733 
8734 	isup = si_iscoreup(sih);
8735 	if (isup == TRUE) {
8736 		ret_val = si_corereg(sih, coreidx, regoff, mask, val);
8737 	} else {
8738 		ret_val = 0;
8739 	}
8740 
8741 	/* Return to original core */
8742 	si_setcoreidx(sih, origidx);
8743 	return ret_val;
8744 }
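/* Illustrative usage sketch (not part of the driver): si_corereg_ifup() only touches
 * the register when the target core is up, so the caller needs no separate
 * si_iscoreup() check. The MEM_CLK_GATE_BIT write mirrors si_lowpwr_opt() further
 * below; the SI_UTILS_USAGE_EXAMPLES guard and si_example_* name are hypothetical.
 */
#ifdef SI_UTILS_USAGE_EXAMPLES
static void
si_example_corereg_ifup(si_t *sih)
{
	uint mask = (0x1 << MEM_CLK_GATE_BIT);
	uint val = (0x1 << MEM_CLK_GATE_BIT);

	/* returns 0 (and writes nothing) if the SOCRAM core is not up */
	(void)si_corereg_ifup(sih, SOCRAM_CORE_ID, SI_PWR_CTL_ST, mask, val);
}
#endif /* SI_UTILS_USAGE_EXAMPLES */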
8745 
8746 /* 43012 specific low power settings.
8747  * See http://confluence.broadcom.com/display/WLAN/BCM43012+Low+Power+Settings.
8748  * See 47xxtcl/43012.tcl proc lp_enable.
8749  */
8750 void si_43012_lp_enable(si_t *sih)
8751 {
8752 	const si_info_t *sii = SI_INFO(sih);
8753 	bcm_int_bitmask_t intr_val;
8754 	uint origidx;
8755 	int count;
8756 	gciregs_t *gciregs;
8757 
8758 	/* Block ints and save current core */
8759 	INTR_OFF(sii, &intr_val);
8760 	origidx = si_coreidx(sih);
8761 
8762 	/* Enable radiodig clk gating */
8763 	si_pmu_chipcontrol(sih, CHIPCTRLREG5, PMUCCTL05_43012_RADIO_DIG_CLK_GATING_EN,
8764 			PMUCCTL05_43012_RADIO_DIG_CLK_GATING_EN);
8765 
8766 	/* Disable SPM clock */
8767 	si_pmu_chipcontrol(sih, CHIPCTRLREG5, PMUCCTL05_43012_DISABLE_SPM_CLK,
8768 			PMUCCTL05_43012_DISABLE_SPM_CLK);
8769 
8770 	/* Enable access of radiodig registers using async apb interface */
8771 	si_pmu_chipcontrol(sih, CHIPCTRLREG6, PMUCCTL06_43012_GCI2RDIG_USE_ASYNCAPB,
8772 			PMUCCTL06_43012_GCI2RDIG_USE_ASYNCAPB);
8773 
8774 	/* Remove SFLASH clock request (which is default on for boot-from-flash support) */
8775 	CHIPC_REG(sih, clk_ctl_st, CCS_SFLASH_CLKREQ | CCS_HQCLKREQ, CCS_HQCLKREQ);
8776 
8777 	/* Switch to GCI core */
8778 	if (!(gciregs = si_setcore(sih, GCI_CORE_ID, 0))) {
8779 		goto done;
8780 	}
8781 
8782 	/* GCIForceRegClk Off */
8783 	if (!(sih->lpflags & LPFLAGS_SI_GCI_FORCE_REGCLK_DISABLE)) {
8784 		si_gci_direct(sih, GET_GCI_OFFSET(sih, gci_corectrl),
8785 				GCI_CORECTRL_FORCEREGCLK_MASK, 0);
8786 	}
8787 
8788 	/* Disable the sflash pad */
8789 	if (!(sih->lpflags & LPFLAGS_SI_SFLASH_DISABLE)) {
8790 		si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_03,
8791 			CC_GCI_03_LPFLAGS_SFLASH_MASK, CC_GCI_03_LPFLAGS_SFLASH_VAL);
8792 	}
8793 
8794 	/* Input disable all LHL I/O pins */
8795 	for (count = 0; count < GPIO_CTRL_REG_COUNT; count++) {
8796 		OR_REG(sii->osh, &gciregs->gpio_ctrl_iocfg_p_adr[count],
8797 			GPIO_CTRL_REG_DISABLE_INTERRUPT);
8798 	}
8799 
8800 	/* Power down BT LDO3p3 */
8801 	if (!(sih->lpflags & LPFLAGS_SI_BTLDO3P3_DISABLE)) {
8802 		si_pmu_chipcontrol(sih, CHIPCTRLREG2, PMUCCTL02_43012_BTLDO3P3_PU_FORCE_OFF,
8803 				PMUCCTL02_43012_BTLDO3P3_PU_FORCE_OFF);
8804 	}
8805 
8806 done:
8807 	si_setcoreidx(sih, origidx);
8808 	INTR_RESTORE(sii, &intr_val);
8809 }
8810 
8811 /** this function is called from the BMAC during (re) initialisation */
8812 void
8813 si_lowpwr_opt(si_t *sih)
8814 {
8815 	uint mask, val;
8816 
8817 	/* 43602 chip (all revision) related changes */
8818 	if (BCM43602_CHIP(sih->chip)) {
8819 		uint hosti = si_chip_hostif(sih);
8820 		uint origidx = si_coreidx(sih);
8821 		volatile void *regs;
8822 
8823 		regs = si_setcore(sih, CC_CORE_ID, 0);
8824 		BCM_REFERENCE(regs);
8825 		ASSERT(regs != NULL);
8826 
8827 		/* disable usb app clk */
8828 		/* Can be done any time. If it is not USB, then do it. In case */
8829 		/* of USB, do not write it */
8830 		if (hosti != CHIP_HOSTIF_USBMODE && !BCM43602_CHIP(sih->chip)) {
8831 			si_pmu_chipcontrol(sih, PMU_CHIPCTL5, (1 << USBAPP_CLK_BIT), 0);
8832 		}
8833 		/* disable pcie clks */
8834 		if (hosti != CHIP_HOSTIF_PCIEMODE) {
8835 			si_pmu_chipcontrol(sih, PMU_CHIPCTL5, (1 << PCIE_CLK_BIT), 0);
8836 		}
8837 
8838 		/* disable armcr4 debug clk */
8839 		/* Can be done anytime as long as driver is functional. */
8840 	/* In TCL, the dhalt command needs to be changed to undo this */
8841 		switch (CHIPID(sih->chip)) {
8842 			CASE_BCM43602_CHIP:
8843 				si_pmu_chipcontrol(sih, PMU_CHIPCTL3,
8844 					PMU43602_CC3_ARMCR4_DBG_CLK, 0);
8845 				break;
8846 			case BCM4369_CHIP_GRPID:
8847 			case BCM4362_CHIP_GRPID:
8848 				{
8849 					uint32 tapsel =	si_corereg(sih, SI_CC_IDX,
8850 						OFFSETOF(chipcregs_t, jtagctrl), 0, 0)
8851 						& JCTRL_TAPSEL_BIT;
8852 					/* SWD: if tap sel bit set, */
8853 					/* enable armcr4 debug clock */
8854 					si_pmu_chipcontrol(sih, PMU_CHIPCTL5,
8855 						(1 << ARMCR4_DBG_CLK_BIT),
8856 						tapsel?(1 << ARMCR4_DBG_CLK_BIT):0);
8857 				}
8858 				break;
8859 			default:
8860 				si_pmu_chipcontrol(sih, PMU_CHIPCTL5,
8861 					(1 << ARMCR4_DBG_CLK_BIT), 0);
8862 				break;
8863 		}
8864 
8865 		/* Power down unused BBPLL ch-6(pcie_tl_clk) and ch-5(sample-sync-clk), */
8866 		/* valid in all modes, ch-5 needs to be reenabled for sample-capture */
8867 	/* this needs to be done at the beginning of the pmu init path, not in the */
8868 	/* pcie driver. Enable the sample-sync-clk in the sample capture function */
8869 		if (BCM43602_CHIP(sih->chip)) {
8870 			/* configure open loop PLL parameters, open loop is used during S/R */
8871 			val = (3 << PMU1_PLL0_PC1_M1DIV_SHIFT) | (6 << PMU1_PLL0_PC1_M2DIV_SHIFT) |
8872 			      (6 << PMU1_PLL0_PC1_M3DIV_SHIFT) | (8 << PMU1_PLL0_PC1_M4DIV_SHIFT);
8873 			si_pmu_pllcontrol(sih, PMU1_PLL0_PLLCTL4, ~0, val);
8874 			si_pmu_pllupd(sih);
8875 			si_pmu_chipcontrol(sih, PMU_CHIPCTL2,
8876 			  PMU43602_CC2_PCIE_CLKREQ_L_WAKE_EN |  PMU43602_CC2_PMU_WAKE_ALP_AVAIL_EN,
8877 			  PMU43602_CC2_PCIE_CLKREQ_L_WAKE_EN |  PMU43602_CC2_PMU_WAKE_ALP_AVAIL_EN);
8878 		}
8879 
8880 		/* Return to original core */
8881 		si_setcoreidx(sih, origidx);
8882 	}
8883 	if ((CHIPID(sih->chip) == BCM43012_CHIP_ID) ||
8884 		(CHIPID(sih->chip) == BCM43013_CHIP_ID) ||
8885 		(CHIPID(sih->chip) == BCM43014_CHIP_ID)) {
8886 		/* Enable memory standby based on lpflags */
8887 		if (sih->lpflags & LPFLAGS_SI_GLOBAL_DISABLE) {
8888 			SI_MSG(("si_lowpwr_opt: Disable lower power configuration!\n"));
8889 			goto exit;
8890 		}
8891 
8892 		SI_MSG(("si_lowpwr_opt: Enable lower power configuration!\n"));
8893 
8894 		/* Enable mem clk gating */
8895 		mask = (0x1 << MEM_CLK_GATE_BIT);
8896 		val = (0x1 << MEM_CLK_GATE_BIT);
8897 
8898 		si_corereg_ifup(sih, SDIOD_CORE_ID, SI_PWR_CTL_ST, mask, val);
8899 		si_corereg_ifup(sih, SOCRAM_CORE_ID, SI_PWR_CTL_ST, mask, val);
8900 
8901 		si_43012_lp_enable(sih);
8902 	}
8903 exit:
8904 	return;
8905 }
8906 #endif /* !defined(BCMDONGLEHOST) */
8907 
8908 #if defined(AXI_TIMEOUTS) || defined(AXI_TIMEOUTS_NIC)
8909 uint32
8910 BCMPOSTTRAPFN(si_clear_backplane_to_per_core)(si_t *sih, uint coreid, uint coreunit, void *wrap)
8911 {
8912 	if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
8913 		(CHIPTYPE(sih->socitype) == SOCI_DVTBUS)) {
8914 		return ai_clear_backplane_to_per_core(sih, coreid, coreunit, wrap);
8915 	}
8916 	return AXI_WRAP_STS_NONE;
8917 }
8918 #endif /* AXI_TIMEOUTS || AXI_TIMEOUTS_NIC */
8919 
8920 uint32
8921 BCMPOSTTRAPFN(si_clear_backplane_to)(si_t *sih)
8922 {
8923 	if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
8924 		(CHIPTYPE(sih->socitype) == SOCI_DVTBUS)) {
8925 		return ai_clear_backplane_to(sih);
8926 	}
8927 
8928 	return 0;
8929 }
8930 
8931 void
8932 BCMATTACHFN(si_update_backplane_timeouts)(const si_t *sih, bool enable, uint32 timeout_exp,
8933 	uint32 cid)
8934 {
8935 #if defined(AXI_TIMEOUTS) || defined(AXI_TIMEOUTS_NIC)
8936 	/* Enable only for AXI */
8937 	if (CHIPTYPE(sih->socitype) != SOCI_AI) {
8938 		return;
8939 	}
8940 
8941 	ai_update_backplane_timeouts(sih, enable, timeout_exp, cid);
8942 #endif /* AXI_TIMEOUTS  || AXI_TIMEOUTS_NIC */
8943 }
8944 
8945 /*
8946  * This routine adds the AXI timeouts for
8947  * chipcommon, pcie and ARM slave wrappers
8948  */
8949 void
8950 si_slave_wrapper_add(si_t *sih)
8951 {
8952 #if defined(AXI_TIMEOUTS) || defined(AXI_TIMEOUTS_NIC)
8953 	uint32 axi_to = 0;
8954 
8955 	/* Enable only for AXI */
8956 	if ((CHIPTYPE(sih->socitype) != SOCI_AI) &&
8957 		(CHIPTYPE(sih->socitype) != SOCI_DVTBUS)) {
8958 		return;
8959 	}
8960 
8961 	axi_to = AXI_TO_VAL;
8962 
8963 	/* All required slave wrappers are added in ai_scan */
8964 	ai_update_backplane_timeouts(sih, TRUE, axi_to, 0);
8965 
8966 #ifdef DISABLE_PCIE2_AXI_TIMEOUT
8967 	ai_update_backplane_timeouts(sih, FALSE, 0, PCIE_CORE_ID);
8968 	ai_update_backplane_timeouts(sih, FALSE, 0, PCIE2_CORE_ID);
8969 #endif
8970 
8971 #endif /* AXI_TIMEOUTS  || AXI_TIMEOUTS_NIC */
8972 
8973 }
8974 
8975 #ifndef BCMDONGLEHOST
8976 /* read from pcie space using back plane  indirect access */
8977 /* Set Below mask for reading 1, 2, 4 bytes in single read */
8978 /* #define	SI_BPIND_1BYTE		0x1 */
8979 /* #define	SI_BPIND_2BYTE		0x3 */
8980 /* #define	SI_BPIND_4BYTE		0xF */
8981 int
8982 BCMPOSTTRAPFN(si_bpind_access)(si_t *sih, uint32 addr_high, uint32 addr_low,
8983 		int32 * data, bool read, uint32 us_timeout)
8984 {
8985 
8986 	uint32 status = 0;
8987 	uint8 mask = SI_BPIND_4BYTE;
8988 	int ret_val = BCME_OK;
8989 
8990 	/* Program Address low and high fields */
8991 	si_ccreg(sih, OFFSETOF(chipcregs_t, bp_addrlow), ~0, addr_low);
8992 	si_ccreg(sih, OFFSETOF(chipcregs_t, bp_addrhigh), ~0, addr_high);
8993 
8994 	if (read) {
8995 		/* Start the read */
8996 		si_ccreg(sih, OFFSETOF(chipcregs_t, bp_indaccess), ~0,
8997 			CC_BP_IND_ACCESS_START_MASK | mask);
8998 	} else {
8999 		/* Write the data and force the trigger */
9000 		si_ccreg(sih, OFFSETOF(chipcregs_t, bp_data), ~0, *data);
9001 		si_ccreg(sih, OFFSETOF(chipcregs_t, bp_indaccess), ~0,
9002 			CC_BP_IND_ACCESS_START_MASK |
9003 			CC_BP_IND_ACCESS_RDWR_MASK | mask);
9004 
9005 	}
9006 
9007 	/* Wait for status to be cleared */
9008 	SPINWAIT(((status = si_ccreg(sih, OFFSETOF(chipcregs_t, bp_indaccess), 0, 0)) &
9009 		CC_BP_IND_ACCESS_START_MASK), us_timeout);
9010 
9011 	if (status & (CC_BP_IND_ACCESS_START_MASK | CC_BP_IND_ACCESS_ERROR_MASK)) {
9012 		ret_val = BCME_ERROR;
9013 		SI_ERROR(("Action Failed for address 0x%08x:0x%08x \t status: 0x%x\n",
9014 			addr_high, addr_low, status));
9015 	/* For ATE, Stop execution here, to catch BPind timeout */
9016 #ifdef ATE_BUILD
9017 		hnd_die();
9018 #endif /* ATE_BUILD */
9019 	} else {
9020 		/* read data */
9021 		if (read)
9022 			*data = si_ccreg(sih, OFFSETOF(chipcregs_t, bp_data), 0, 0);
9023 	}
9024 
9025 	return ret_val;
9026 }
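/* Illustrative usage sketch (not part of the driver): a 4-byte backplane-indirect read
 * through si_bpind_access(), with the 64-bit backplane address split into the high/low
 * words the routine expects. The 100us timeout is a placeholder value, and the
 * SI_UTILS_USAGE_EXAMPLES guard and si_example_* name are hypothetical, example-only.
 */
#ifdef SI_UTILS_USAGE_EXAMPLES
static int
si_example_bpind_read(si_t *sih, uint32 addr_high, uint32 addr_low, int32 *data)
{
	/* read == TRUE; spinwait up to 100us for the access to complete */
	return si_bpind_access(sih, addr_high, addr_low, data, TRUE, 100);
}
#endif /* SI_UTILS_USAGE_EXAMPLES */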
9027 #endif /* !BCMDONGLEHOST */
9028 
9029 void
9030 si_pll_sr_reinit(si_t *sih)
9031 {
9032 #if !defined(BCMDONGLEHOST) && !defined(DONGLEBUILD)
9033 	osl_t *osh = si_osh(sih);
9034 	const si_info_t *sii = SI_INFO(sih);
9035 	uint32 data;
9036 
9037 	/* disable PLL open loop operation */
9038 	switch (CHIPID(sih->chip)) {
9039 		case BCM43602_CHIP_ID:
9040 			/* read back the pll openloop state */
9041 			data = si_pmu_pllcontrol(sih, PMU1_PLL0_PLLCTL8, 0, 0);
9042 			/* check current pll mode */
9043 			if ((data & PMU1_PLLCTL8_OPENLOOP_MASK) == 0) {
9044 				/* no POR; PLL and saverestore init are not required */
9045 				return;
9046 			}
9047 			si_pmu_pll_init(sih, osh, sii->xtalfreq);
9048 			si_pmu_pllcontrol(sih, PMU1_PLL0_PLLCTL8, PMU1_PLLCTL8_OPENLOOP_MASK, 0);
9049 			si_pmu_pllupd(sih);
9050 			/* allow PLL to settle after config PLL for closeloop operation */
9051 			OSL_DELAY(100);
9052 			break;
9053 		default:
9054 			/* any unsupported chip bail */
9055 			return;
9056 	}
9057 	si_pmu_init(sih, osh);
9058 	si_pmu_chip_init(sih, osh);
9059 #if defined(BCMPMU_STATS)
9060 	if (PMU_STATS_ENAB()) {
9061 		si_pmustatstimer_init(sih);
9062 	}
9063 #endif /* BCMPMU_STATS */
9064 #if defined(SR_ESSENTIALS)
9065 	/* The module can be powered down during the D3 state, so this is
9066 	 * needed before si_pmu_res_init() in order to use sr_isenab().
9067 	 * A full dongle build may not need to reinit saverestore.
9068 	 */
9069 	if (SR_ESSENTIALS_ENAB()) {
9070 		sr_save_restore_init(sih);
9071 	}
9072 #endif /* SR_ESSENTIALS */
9073 	si_pmu_res_init(sih, sii->osh);
9074 	si_pmu_swreg_init(sih, osh);
9075 	si_lowpwr_opt(sih);
9076 #endif /* !BCMDONGLEHOST && !DONGLEBUILD */
9077 }
9078 
9079 void
9080 BCMATTACHFN(si_pll_closeloop)(si_t *sih)
9081 {
9082 #if !defined(BCMDONGLEHOST) && !defined(DONGLEBUILD) || defined(SAVERESTORE)
9083 	uint32 data;
9084 
9085 	BCM_REFERENCE(data);
9086 
9087 	/* disable PLL open loop operation */
9088 	switch (CHIPID(sih->chip)) {
9089 #if !defined(BCMDONGLEHOST) && !defined(DONGLEBUILD)
9090 		/* Don't apply those changes to FULL DONGLE mode since the
9091 		 * behaviour was not verified
9092 		 */
9093 		case BCM43602_CHIP_ID:
9094 			/* read back the pll openloop state */
9095 			data = si_pmu_pllcontrol(sih, PMU1_PLL0_PLLCTL8, 0, 0);
9096 			/* current mode is openloop (possible POR) */
9097 			if ((data & PMU1_PLLCTL8_OPENLOOP_MASK) != 0) {
9098 				si_pmu_pllcontrol(sih, PMU1_PLL0_PLLCTL8,
9099 					PMU1_PLLCTL8_OPENLOOP_MASK, 0);
9100 				si_pmu_pllupd(sih);
9101 				/* allow PLL to settle after config PLL for closeloop operation */
9102 				OSL_DELAY(100);
9103 			}
9104 			break;
9105 #endif /* !BCMDONGLEHOST && !DONGLEBUILD */
9106 		case BCM4369_CHIP_GRPID:
9107 		case BCM4362_CHIP_GRPID:
9108 		case BCM4376_CHIP_GRPID:
9109 		case BCM4378_CHIP_GRPID:
9110 		case BCM4385_CHIP_GRPID:
9111 		case BCM4387_CHIP_GRPID:
9112 		case BCM4388_CHIP_GRPID:
9113 		case BCM4389_CHIP_GRPID:
9114 		case BCM4397_CHIP_GRPID:
9115 			si_pmu_chipcontrol(sih, PMU_CHIPCTL1,
9116 				PMU_CC1_ENABLE_CLOSED_LOOP_MASK, PMU_CC1_ENABLE_CLOSED_LOOP);
9117 			break;
9118 		default:
9119 			/* any unsupported chip bail */
9120 			return;
9121 	}
9122 #endif /* !BCMDONGLEHOST && !DONGLEBUILD || SAVERESTORE */
9123 }
9124 
9125 #if !defined(BCMDONGLEHOST)
9126 void
9127 BCMPOSTTRAPFN(si_introff)(const si_t *sih, bcm_int_bitmask_t *intr_val)
9128 {
9129 	const si_info_t *sii = SI_INFO(sih);
9130 	INTR_OFF(sii, intr_val);
9131 }
9132 
9133 void
9134 BCMPOSTTRAPFN(si_intrrestore)(const si_t *sih, bcm_int_bitmask_t *intr_val)
9135 {
9136 	const si_info_t *sii = SI_INFO(sih);
9137 	INTR_RESTORE(sii, intr_val);
9138 }
9139 
9140 bool
9141 si_get_nvram_rfldo3p3_war(const si_t *sih)
9142 {
9143 	const si_info_t *sii = SI_INFO(sih);
9144 	return sii->rfldo3p3_war;
9145 }
9146 
9147 void
9148 si_nvram_res_masks(const si_t *sih, uint32 *min_mask, uint32 *max_mask)
9149 {
9150 	const si_info_t *sii = SI_INFO(sih);
9151 	/* Apply nvram override to min mask */
9152 	if (sii->min_mask_valid == TRUE) {
9153 		SI_MSG(("Applying rmin=%d to min_mask\n", sii->nvram_min_mask));
9154 		*min_mask = sii->nvram_min_mask;
9155 	}
9156 	/* Apply nvram override to max mask */
9157 	if (sii->max_mask_valid == TRUE) {
9158 		SI_MSG(("Applying rmax=%d to max_mask\n", sii->nvram_max_mask));
9159 		*max_mask = sii->nvram_max_mask;
9160 	}
9161 }
9162 
9163 uint8
9164 si_getspurmode(const si_t *sih)
9165 {
9166 	const si_info_t *sii = SI_INFO(sih);
9167 	return sii->spurmode;
9168 }
9169 
9170 uint32
9171 si_xtalfreq(const si_t *sih)
9172 {
9173 	const si_info_t *sii = SI_INFO(sih);
9174 	return sii->xtalfreq;
9175 }
9176 
9177 uint32
9178 si_get_openloop_dco_code(const si_t *sih)
9179 {
9180 	const si_info_t *sii = SI_INFO(sih);
9181 	return sii->openloop_dco_code;
9182 }
9183 
9184 void
9185 si_set_openloop_dco_code(si_t *sih, uint32 _openloop_dco_code)
9186 {
9187 	si_info_t *sii = SI_INFO(sih);
9188 	sii->openloop_dco_code = _openloop_dco_code;
9189 }
9190 
9191 uint32
9192 BCMPOSTTRAPFN(si_get_armpllclkfreq)(const si_t *sih)
9193 {
9194 	const si_info_t *sii = SI_INFO(sih);
9195 	uint32 armpllclkfreq = ARMPLL_FREQ_400MHZ;
9196 	BCM_REFERENCE(sii);
9197 
9198 #ifdef DONGLEBUILD
9199 	uint32 armpllclk_max;
9200 
9201 #if defined(__ARM_ARCH_7R__)
9202 	armpllclk_max = ARMPLL_FREQ_400MHZ;
9203 #elif defined(__ARM_ARCH_7A__)
9204 	armpllclk_max = ARMPLL_FREQ_1000MHZ;
9205 #else
9206 #error "Unknown CPU architecture for armpllclkfreq!"
9207 #endif
9208 
9209 	armpllclkfreq = (sii->armpllclkfreq) ? sii->armpllclkfreq : armpllclk_max;
9210 
9211 	SI_MSG(("armpllclkfreq = %d\n", armpllclkfreq));
9212 #endif /* DONGLEBUILD */
9213 
9214 	return armpllclkfreq;
9215 }
9216 
9217 uint8
9218 BCMPOSTTRAPFN(si_get_ccidiv)(const si_t *sih)
9219 {
9220 	const si_info_t *sii = SI_INFO(sih);
9221 	uint8 ccidiv = 0xFF;
9222 	BCM_REFERENCE(sii);
9223 
9224 #ifdef DONGLEBUILD
9225 	ccidiv = (sii->ccidiv) ? sii->ccidiv : CCIDIV_3_TO_1;
9226 #endif /* DONGLEBUILD */
9227 
9228 	return ccidiv;
9229 }
9230 #ifdef DONGLEBUILD
9231 uint32
9232 BCMATTACHFN(si_wrapper_dump_buf_size)(const si_t *sih)
9233 {
9234 	if (CHIPTYPE(sih->socitype) == SOCI_AI)
9235 		return ai_wrapper_dump_buf_size(sih);
9236 	return 0;
9237 }
9238 
9239 uint32
9240 BCMPOSTTRAPFN(si_wrapper_dump_binary)(const si_t *sih, uchar *p)
9241 {
9242 	if (CHIPTYPE(sih->socitype) == SOCI_AI)
9243 		return ai_wrapper_dump_binary(sih, p);
9244 	return 0;
9245 }
9246 
9247 #if defined(ETD) && !defined(ETD_DISABLED)
9248 uint32
9249 BCMPOSTTRAPFN(si_wrapper_dump_last_timeout)(const si_t *sih, uint32 *error, uint32 *core,
9250 	uint32 *ba, uchar *p)
9251 {
9252 	if (CHIPTYPE(sih->socitype) == SOCI_AI)
9253 		return ai_wrapper_dump_last_timeout(sih, error, core, ba, p);
9254 	return 0;
9255 }
9256 #endif /* ETD && !ETD_DISABLED */
9257 #endif /* DONGLEBUILD */
9258 #endif /* !BCMDONGLEHOST */
9259 
9260 uint32
9261 BCMPOSTTRAPFN(si_findcoreidx_by_axiid)(const si_t *sih, uint32 axiid)
9262 {
9263 	if (CHIPTYPE(sih->socitype) == SOCI_AI)
9264 		return ai_findcoreidx_by_axiid(sih, axiid);
9265 	return 0;
9266 }
9267 
9268 void
9269 BCMPOSTTRAPFN(si_wrapper_get_last_error)(const si_t *sih, uint32 *error_status, uint32 *core,
9270 	uint32 *lo, uint32 *hi, uint32 *id)
9271 {
9272 #if defined(AXI_TIMEOUTS_NIC) || defined(AXI_TIMEOUTS)
9273 	if (CHIPTYPE(sih->socitype) == SOCI_AI)
9274 		ai_wrapper_get_last_error(sih, error_status, core, lo, hi, id);
9275 #endif /* (AXI_TIMEOUTS_NIC) || (AXI_TIMEOUTS) */
9276 	return;
9277 }
9278 
9279 uint32
9280 si_get_axi_timeout_reg(const si_t *sih)
9281 {
9282 #if defined(AXI_TIMEOUTS_NIC) || defined(AXI_TIMEOUTS)
9283 	if (CHIPTYPE(sih->socitype) == SOCI_AI) {
9284 		return ai_get_axi_timeout_reg();
9285 	}
9286 #endif /* (AXI_TIMEOUTS_NIC) || (AXI_TIMEOUTS) */
9287 	return 0;
9288 }
9289 
9290 #if defined(BCMSRPWR) && !defined(BCMSRPWR_DISABLED)
9291 bool _bcmsrpwr = TRUE;
9292 #else
9293 bool _bcmsrpwr = FALSE;
9294 #endif
9295 
9296 #define PWRREQ_OFFSET(sih)	OFFSETOF(chipcregs_t, powerctl)
9297 
9298 static void
9299 BCMPOSTTRAPFN(si_corereg_pciefast_write)(const si_t *sih, uint regoff, uint val)
9300 {
9301 	volatile uint32 *r = NULL;
9302 	const si_info_t *sii = SI_INFO(sih);
9303 
9304 	ASSERT((BUSTYPE(sih->bustype) == PCI_BUS));
9305 
9306 	r = (volatile uint32 *)((volatile char *)sii->curmap +
9307 		PCI_16KB0_PCIREGS_OFFSET + regoff);
9308 
9309 	W_REG(sii->osh, r, val);
9310 }
9311 
9312 static uint
9313 BCMPOSTTRAPFN(si_corereg_pciefast_read)(const si_t *sih, uint regoff)
9314 {
9315 	volatile uint32 *r = NULL;
9316 	const si_info_t *sii = SI_INFO(sih);
9317 
9318 	ASSERT((BUSTYPE(sih->bustype) == PCI_BUS));
9319 
9320 	r = (volatile uint32 *)((volatile char *)sii->curmap +
9321 		PCI_16KB0_PCIREGS_OFFSET + regoff);
9322 
9323 	return R_REG(sii->osh, r);
9324 }
9325 
9326 uint32
9327 BCMPOSTTRAPFN(si_srpwr_request)(const si_t *sih, uint32 mask, uint32 val)
9328 {
9329 	const si_info_t *sii = SI_INFO(sih);
9330 	uint32 r, offset = (BUSTYPE(sih->bustype) == SI_BUS) ?
9331 		OFFSETOF(chipcregs_t, powerctl) : PWRREQ_OFFSET(sih);
9332 	uint32 mask2 = mask;
9333 	uint32 val2 = val;
9334 	volatile uint32 *fast_srpwr_addr = (volatile uint32 *)((uintptr)SI_ENUM_BASE(sih)
9335 					 + (uintptr)offset);
9336 
9337 	if (FWSIGN_ENAB()) {
9338 		return 0;
9339 	}
9340 
9341 	if (mask || val) {
9342 		mask <<= SRPWR_REQON_SHIFT;
9343 		val  <<= SRPWR_REQON_SHIFT;
9344 
9345 		/* Return if requested power request is already set */
9346 		if (BUSTYPE(sih->bustype) == SI_BUS) {
9347 			r = R_REG(sii->osh, fast_srpwr_addr);
9348 		} else {
9349 			r = si_corereg_pciefast_read(sih, offset);
9350 		}
9351 
9352 		if ((r & mask) == val) {
9353 			return r;
9354 		}
9355 
9356 		r = (r & ~mask) | val;
9357 
9358 		if (BUSTYPE(sih->bustype) == SI_BUS) {
9359 			W_REG(sii->osh, fast_srpwr_addr, r);
9360 			r = R_REG(sii->osh, fast_srpwr_addr);
9361 		} else {
9362 			si_corereg_pciefast_write(sih, offset, r);
9363 			r = si_corereg_pciefast_read(sih, offset);
9364 		}
9365 
9366 		if (val2) {
9367 			if ((r & (mask2 << SRPWR_STATUS_SHIFT)) ==
9368 			(val2 << SRPWR_STATUS_SHIFT)) {
9369 				return r;
9370 			}
9371 			si_srpwr_stat_spinwait(sih, mask2, val2);
9372 		}
9373 	} else {
9374 		if (BUSTYPE(sih->bustype) == SI_BUS) {
9375 			r = R_REG(sii->osh, fast_srpwr_addr);
9376 		} else {
9377 			r = si_corereg_pciefast_read(sih, offset);
9378 		}
9379 	}
9380 
9381 	return r;
9382 }
9383 
9384 #ifdef CORE_PWRUP_WAR
9385 uint32
9386 BCMPOSTTRAPFN(si_srpwr_request_on_rev80)(si_t *sih, uint32 mask, uint32 val, uint32 ucode_awake)
9387 {
9388 	const si_info_t *sii = SI_INFO(sih);
9389 	uint32 r, offset = OFFSETOF(chipcregs_t, powerctl); /* Same 0x1e8 per core */
9390 	uint cidx = (BUSTYPE(sih->bustype) == SI_BUS) ? SI_CC_IDX : sih->buscoreidx;
9391 	uint32 mask2 = mask;
9392 	uint32 val2 = val;
9393 	volatile uint32 *fast_srpwr_addr = (volatile uint32 *)((uintptr)SI_ENUM_BASE(sih)
9394 					 + (uintptr)offset);
9395 	if (mask || val) {
9396 		mask <<= SRPWR_REQON_SHIFT;
9397 		val  <<= SRPWR_REQON_SHIFT;
9398 
9399 		/* Return if requested power request is already set */
9400 		if (BUSTYPE(sih->bustype) == SI_BUS) {
9401 			r = R_REG(sii->osh, fast_srpwr_addr);
9402 		} else {
9403 			r = si_corereg(sih, cidx, offset, 0, 0);
9404 		}
9405 
9406 		if ((r & mask) == val) {
9407 			W_REG(sii->osh, fast_srpwr_addr, r);
9408 			return r;
9409 		}
9410 
9411 		r = (r & ~mask) | val;
9412 
9413 		if (BUSTYPE(sih->bustype) == SI_BUS) {
9414 			W_REG(sii->osh, fast_srpwr_addr, r);
9415 			r = R_REG(sii->osh, fast_srpwr_addr);
9416 		} else {
9417 			r = si_corereg(sih, cidx, offset, ~0, r);
9418 		}
9419 
9420 		if (val2) {
9421 
9422 			/*
9423 			 * When ucode is not requested to be awake by FW,
9424 			 * the power status may indicate ON due to FW or
9425 			 * ucode's earlier power down request is not
9426 			 * honored yet. In such case, FW will find the
9427 			 * power status high at this stage, but as it is in
9428 			 * transition (from ON to OFF), it may go down any
9429 			 * time and lead to AXI slave error. Hence we need
9430 			 * a fixed delay to cross any such transition state.
9431 			 */
9432 			if (ucode_awake == 0) {
9433 				hnd_delay(SRPWR_UP_DOWN_DELAY);
9434 			}
9435 
9436 			if ((r & (mask2 << SRPWR_STATUS_SHIFT)) ==
9437 			(val2 << SRPWR_STATUS_SHIFT)) {
9438 				return r;
9439 			}
9440 			si_srpwr_stat_spinwait(sih, mask2, val2);
9441 		}
9442 	} else {
9443 		if (BUSTYPE(sih->bustype) == SI_BUS) {
9444 			r = R_REG(sii->osh, fast_srpwr_addr);
9445 		} else {
9446 			r = si_corereg(sih, cidx, offset, 0, 0);
9447 		}
9448 		SPINWAIT(((R_REG(sii->osh, fast_srpwr_addr) &
9449 				(mask2 << SRPWR_REQON_SHIFT)) != 0),
9450 				PMU_MAX_TRANSITION_DLY);
9451 	}
9452 
9453 	return r;
9454 }
9455 #endif /* CORE_PWRUP_WAR */
9456 
9457 uint32
9458 BCMPOSTTRAPFN(si_srpwr_stat_spinwait)(const si_t *sih, uint32 mask, uint32 val)
9459 {
9460 	const si_info_t *sii = SI_INFO(sih);
9461 	uint32 r, offset = (BUSTYPE(sih->bustype) == SI_BUS) ?
9462 		OFFSETOF(chipcregs_t, powerctl) : PWRREQ_OFFSET(sih);
9463 	volatile uint32 *fast_srpwr_addr = (volatile uint32 *)((uintptr)SI_ENUM_BASE(sih)
9464 					 + (uintptr)offset);
9465 
9466 	if (FWSIGN_ENAB()) {
9467 		return 0;
9468 	}
9469 	ASSERT(mask);
9470 	ASSERT(val);
9471 
9472 	/* spinwait on pwrstatus */
9473 	mask <<= SRPWR_STATUS_SHIFT;
9474 	val <<= SRPWR_STATUS_SHIFT;
9475 
9476 	if (BUSTYPE(sih->bustype) == SI_BUS) {
9477 		SPINWAIT(((R_REG(sii->osh, fast_srpwr_addr) & mask) != val),
9478 			PMU_MAX_TRANSITION_DLY);
9479 		r = R_REG(sii->osh, fast_srpwr_addr) & mask;
9480 		ASSERT(r == val);
9481 	} else {
9482 		SPINWAIT(((si_corereg_pciefast_read(sih, offset) & mask) != val),
9483 			PMU_MAX_TRANSITION_DLY);
9484 		r = si_corereg_pciefast_read(sih, offset) & mask;
9485 		ASSERT(r == val);
9486 	}
9487 
9488 	r = (r >> SRPWR_STATUS_SHIFT) & SRPWR_DMN_ALL_MASK(sih);
9489 
9490 	return r;
9491 }
9492 
9493 uint32
9494 si_srpwr_stat(si_t *sih)
9495 {
9496 	uint32 r, offset = (BUSTYPE(sih->bustype) == SI_BUS) ?
9497 		OFFSETOF(chipcregs_t, powerctl) : PWRREQ_OFFSET(sih);
9498 	uint cidx = (BUSTYPE(sih->bustype) == SI_BUS) ? SI_CC_IDX : sih->buscoreidx;
9499 
9500 	if (BUSTYPE(sih->bustype) == SI_BUS) {
9501 		r = si_corereg(sih, cidx, offset, 0, 0);
9502 	} else {
9503 		r = si_corereg_pciefast_read(sih, offset);
9504 	}
9505 
9506 	r = (r >> SRPWR_STATUS_SHIFT) & SRPWR_DMN_ALL_MASK(sih);
9507 
9508 	return r;
9509 }
9510 
9511 uint32
9512 si_srpwr_domain(si_t *sih)
9513 {
9514 	uint32 r, offset = (BUSTYPE(sih->bustype) == SI_BUS) ?
9515 		OFFSETOF(chipcregs_t, powerctl) : PWRREQ_OFFSET(sih);
9516 	uint cidx = (BUSTYPE(sih->bustype) == SI_BUS) ? SI_CC_IDX : sih->buscoreidx;
9517 
9518 	if (FWSIGN_ENAB()) {
9519 		return 0;
9520 	}
9521 
9522 	if (BUSTYPE(sih->bustype) == SI_BUS) {
9523 		r = si_corereg(sih, cidx, offset, 0, 0);
9524 	} else {
9525 		r = si_corereg_pciefast_read(sih, offset);
9526 	}
9527 
9528 	r = (r >> SRPWR_DMN_ID_SHIFT) & SRPWR_DMN_ID_MASK;
9529 
9530 	return r;
9531 }
9532 
9533 uint8
9534 si_srpwr_domain_wl(si_t *sih)
9535 {
9536 	return SRPWR_DMN1_ARMBPSD;
9537 }
9538 
9539 bool
9540 si_srpwr_cap(si_t *sih)
9541 {
9542 	if (FWSIGN_ENAB()) {
9543 		return FALSE;
9544 	}
9545 
9546 	/* If domain ID is non-zero, chip supports power domain control */
9547 	return si_srpwr_domain(sih) != 0 ? TRUE : FALSE;
9548 }
9549 
9550 uint32
9551 BCMPOSTTRAPFN(si_srpwr_domain_all_mask)(const si_t *sih)
9552 {
9553 	uint32 mask = SRPWR_DMN0_PCIE_MASK |
9554 	              SRPWR_DMN1_ARMBPSD_MASK |
9555 	              SRPWR_DMN2_MACAUX_MASK |
9556 	              SRPWR_DMN3_MACMAIN_MASK;
9557 
9558 	if (si_scan_core_present(sih)) {
9559 		mask |= SRPWR_DMN4_MACSCAN_MASK;
9560 	}
9561 
9562 	return mask;
9563 }
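/* Illustrative usage sketch (not part of the driver): requesting and releasing the MAC
 * main save/restore power domain with si_srpwr_request(). Passing mask == 0 and
 * val == 0 simply reads back the power control register; a non-zero val makes the
 * routine spinwait for the power status, as shown above. The SI_UTILS_USAGE_EXAMPLES
 * guard and si_example_* name are hypothetical, example-only additions.
 */
#ifdef SI_UTILS_USAGE_EXAMPLES
static void
si_example_srpwr_request(si_t *sih)
{
	if (!si_srpwr_cap(sih))
		return;

	/* request the MAC main domain on; si_srpwr_request() waits for the status bit */
	(void)si_srpwr_request(sih, SRPWR_DMN3_MACMAIN_MASK, SRPWR_DMN3_MACMAIN_MASK);

	/* ... access MAC main registers here ... */

	/* drop the power request again */
	(void)si_srpwr_request(sih, SRPWR_DMN3_MACMAIN_MASK, 0);
}
#endif /* SI_UTILS_USAGE_EXAMPLES */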
9564 
9565 uint32
9566 si_srpwr_bt_status(si_t *sih)
9567 {
9568 	uint32 r;
9569 	uint32 offset = (BUSTYPE(sih->bustype) == SI_BUS) ?
9570 		OFFSETOF(chipcregs_t, powerctl) : PWRREQ_OFFSET(sih);
9571 	uint32 cidx = (BUSTYPE(sih->bustype) == SI_BUS) ? SI_CC_IDX : sih->buscoreidx;
9572 
9573 	if (BUSTYPE(sih->bustype) == SI_BUS) {
9574 		r = si_corereg(sih, cidx, offset, 0, 0);
9575 	} else {
9576 		r = si_corereg_pciefast_read(sih, offset);
9577 	}
9578 
9579 	r = (r >> SRPWR_BT_STATUS_SHIFT) & SRPWR_BT_STATUS_MASK;
9580 
9581 	return r;
9582 }
9583 /* Utility API to read/write the raw registers with absolute address.
9584  * This function can be invoked from either FW or host driver.
9585  */
9586 uint32
9587 si_raw_reg(const si_t *sih, uint32 reg, uint32 val, uint32 write_req)
9588 {
9589 	const si_info_t *sii = SI_INFO(sih);
9590 	uint32 address_space = reg & ~0xFFF;
9591 	volatile uint32 * addr = (void*)(uintptr)(reg);
9592 	uint32 prev_value = 0;
9593 	uint32 cfg_reg = 0;
9594 
9595 	if (sii == NULL) {
9596 		return 0;
9597 	}
9598 
9599 	/* No need to translate the absolute address on SI bus */
9600 	if (BUSTYPE(sih->bustype) == SI_BUS) {
9601 		goto skip_cfg;
9602 	}
9603 
9604 	/* This API supports only the PCI host interface */
9605 	if (BUSTYPE(sih->bustype) != PCI_BUS) {
9606 		return ID32_INVALID;
9607 	}
9608 
9609 	if (PCIE_GEN2(sii)) {
9610 		/* Use the BAR0 secondary window for PCIe Gen2.
9611 		 * Set the secondary BAR0 Window to current register of interest
9612 		 */
9613 		addr = (volatile uint32*)(((volatile uint8*)sii->curmap) +
9614 			PCI_SEC_BAR0_WIN_OFFSET + (reg & 0xfff));
9615 		cfg_reg = PCIE2_BAR0_CORE2_WIN;
9616 
9617 	} else {
9618 		/* PCIe Gen1 does not have a secondary BAR0 window;
9619 		 * reuse BAR0 WIN2.
9620 		 */
9621 		addr = (volatile uint32*)(((volatile uint8*)sii->curmap) +
9622 			PCI_BAR0_WIN2_OFFSET + (reg & 0xfff));
9623 		cfg_reg = PCI_BAR0_WIN2;
9624 	}
9625 
9626 	prev_value = OSL_PCI_READ_CONFIG(sii->osh, cfg_reg, 4);
9627 
9628 	if (prev_value != address_space) {
9629 		OSL_PCI_WRITE_CONFIG(sii->osh, cfg_reg,
9630 			sizeof(uint32), address_space);
9631 	} else {
9632 		prev_value = 0;
9633 	}
9634 
9635 skip_cfg:
9636 	if (write_req) {
9637 		W_REG(sii->osh, addr, val);
9638 	} else {
9639 		val = R_REG(sii->osh, addr);
9640 	}
9641 
9642 	if (prev_value) {
9643 		/* Restore BAR0 WIN2 for PCIE GEN1 devices */
9644 		OSL_PCI_WRITE_CONFIG(sii->osh,
9645 			cfg_reg, sizeof(uint32), prev_value);
9646 	}
9647 
9648 	return val;
9649 }
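/* Illustrative usage sketch (not part of the driver): reading and writing a register
 * by absolute backplane address through si_raw_reg(); a non-zero write_req performs a
 * write, zero performs a read. The register address is a placeholder supplied by the
 * caller, and the SI_UTILS_USAGE_EXAMPLES guard and si_example_* name are hypothetical.
 */
#ifdef SI_UTILS_USAGE_EXAMPLES
static uint32
si_example_raw_reg(const si_t *sih, uint32 reg_addr)
{
	/* write 0 to the register, then read it back */
	(void)si_raw_reg(sih, reg_addr, 0, 1);
	return si_raw_reg(sih, reg_addr, 0, 0);
}
#endif /* SI_UTILS_USAGE_EXAMPLES */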
9650 
9651 #ifdef DONGLEBUILD
9652 /* If the logs can be gathered, the host can be notified whether to take logs or not */
9653 bool
9654 BCMPOSTTRAPFN(si_check_enable_backplane_log)(const si_t *sih)
9655 {
9656 	if (CHIPTYPE(sih->socitype) == SOCI_AI) {
9657 		return ai_check_enable_backplane_log(sih);
9658 	}
9659 	return TRUE;
9660 }
9661 #endif /* DONGLEBUILD */
9662 
9663 uint8
9664 si_lhl_ps_mode(const si_t *sih)
9665 {
9666 	const si_info_t *sii = SI_INFO(sih);
9667 	return sii->lhl_ps_mode;
9668 }
9669 
9670 uint8
9671 si_hib_ext_wakeup_isenab(const si_t *sih)
9672 {
9673 	const si_info_t *sii = SI_INFO(sih);
9674 	return sii->hib_ext_wakeup_enab;
9675 }
9676 
9677 static void
9678 BCMATTACHFN(si_oob_war_BT_F1)(si_t *sih)
9679 {
9680 	uint origidx = si_coreidx(sih);
9681 	volatile void *regs;
9682 
9683 	if (FWSIGN_ENAB()) {
9684 		return;
9685 	}
9686 	regs = si_setcore(sih, AXI2AHB_BRIDGE_ID, 0);
9687 	ASSERT(regs);
9688 	BCM_REFERENCE(regs);
9689 
9690 	si_wrapperreg(sih, AI_OOBSELINA30, 0xF00, 0x300);
9691 
9692 	si_setcoreidx(sih, origidx);
9693 }
9694 
9695 #ifndef BCMDONGLEHOST
9696 
9697 #define RF_SW_CTRL_ELNABYP_ANT_MASK	0x000CC330
9698 
9699 /* These are the outputs to the rfem which go out via the CLB */
9700 #define RF_SW_CTRL_ELNABYP_2G0_MASK	0x00000010
9701 #define RF_SW_CTRL_ELNABYP_5G0_MASK	0x00000020
9702 #define RF_SW_CTRL_ELNABYP_2G1_MASK	0x00004000
9703 #define RF_SW_CTRL_ELNABYP_5G1_MASK	0x00008000
9704 
9705 /* Feedback values go into the phy from CLB output
9706  * The polarity of the feedback is opposite to the elnabyp signal going out to the rfem
9707  */
9708 #define RF_SW_CTRL_ELNABYP_2G0_MASK_FB	0x00000100
9709 #define RF_SW_CTRL_ELNABYP_5G0_MASK_FB	0x00000200
9710 #define RF_SW_CTRL_ELNABYP_2G1_MASK_FB	0x00040000
9711 #define RF_SW_CTRL_ELNABYP_5G1_MASK_FB	0x00080000
9712 
9713 /* The elnabyp override values for each rfem */
9714 #define ELNABYP_IOVAR_2G0_VALUE_MASK	0x01
9715 #define ELNABYP_IOVAR_5G0_VALUE_MASK	0x02
9716 #define ELNABYP_IOVAR_2G1_VALUE_MASK	0x04
9717 #define ELNABYP_IOVAR_5G1_VALUE_MASK	0x08
9718 
9719 /* The elnabyp override enables for each rfem
9720  * The values are 'don't care' if the corresponding enables are 0
9721  */
9722 #define ELNABYP_IOVAR_2G0_ENABLE_MASK	0x10
9723 #define ELNABYP_IOVAR_5G0_ENABLE_MASK	0x20
9724 #define ELNABYP_IOVAR_2G1_ENABLE_MASK	0x40
9725 #define ELNABYP_IOVAR_5G1_ENABLE_MASK	0x80
9726 
9727 #define ANTENNA_0_ENABLE	0x00000044
9728 #define ANTENNA_1_ENABLE	0x20000000
9729 #define RFFE_CTRL_START		0x80000000
9730 #define RFFE_CTRL_READ		0x40000000
9731 #define RFFE_CTRL_RFEM_SEL	0x08000000
9732 #define RFFE_MISC_EN_PHYCYCLES	0x00000002
9733 
9734 void
9735 si_rffe_rfem_init(si_t *sih)
9736 {
9737 	ASSERT(GCI_OFFSETOF(sih, gci_chipctrl) == OFFSETOF(gciregs_t, gci_chipctrl));
9738 	/* Enable RFFE clock
9739 	 * GCI Chip Control reg 15 - Bits 29 & 30 (Global 509 & 510)
9740 	 */
9741 	si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_15, ALLONES_32, 0x60000000);
9742 	/* SDATA0/1 rf_sw_ctrl pull down
9743 	 * GCI chip control reg 23 - Bits 29 & 30 (Global 765 & 766)
9744 	 */
9745 	si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_23, 0x3 << 29, 0x3 << 29);
9746 	/* RFFE Clk Ctrl Reg */
9747 	si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_clk_ctrl), ALLONES_32, 0x101);
9748 
9749 	/* Disable override control of RFFE controller and enable phy control */
9750 	si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_change_detect_ovr_wlmc), ALLONES_32, 0);
9751 	si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_change_detect_ovr_wlac), ALLONES_32, 0);
9752 	si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_change_detect_ovr_wlsc), ALLONES_32, 0);
9753 	si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_change_detect_ovr_btmc), ALLONES_32, 0);
9754 	si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_change_detect_ovr_btsc), ALLONES_32, 0);
9755 
9756 	/* reg address = 0x16, deviceID of rffe dev1 = 0xE, deviceID of dev0 = 0xC,
9757 	 * last_mux_ctrl = 0, disable_preemption = 0 (1 for 4387b0), tssi_mask = 3, tssi_en = 0,
9758 	 * rffe_disable_line1 = 0, enable rffe_en_phyaccess = 1,
9759 	 * disable BRCM proprietary reg0 wr = 0
9760 	 */
9761 	if (sih->ccrev == 68) {
9762 		si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_misc_ctrl), ALLONES_32, 0x0016EC72);
9763 	} else {
9764 		si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_misc_ctrl), ALLONES_32, 0x0016EC32);
9765 	}
9766 
9767 	/* Enable Dual RFFE Master: rffe_single_master = 0
9768 	 * Use Master0 SW interface only : rffe_dis_sw_intf_m1 = 1
9769 	 */
9770 	if (sih->ccrev >= 71) {
9771 		si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_clk_ctrl),
9772 			1u << 20u | 1u << 26u, 1u << 26u);
9773 	}
9774 
9775 	/* Enable antenna access for both cores */
9776 	si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_03, ALLONES_32, ANTENNA_0_ENABLE);
9777 	si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_02, ALLONES_32, ANTENNA_1_ENABLE);
9778 }
9779 
9780 void
9781 si_rffe_set_debug_mode(si_t *sih, bool enable)
9782 {
9783 	uint32 misc_ctrl_set = 0;
9784 	/* Enable/Disable rffe_en_phyaccess bit */
9785 	if (!enable) {
9786 		misc_ctrl_set = RFFE_MISC_EN_PHYCYCLES;
9787 	}
9788 	si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_misc_ctrl), RFFE_MISC_EN_PHYCYCLES,
9789 		misc_ctrl_set);
9790 
9791 	sih->rffe_debug_mode =  enable;
9792 }
9793 
9794 bool
9795 si_rffe_get_debug_mode(si_t *sih)
9796 {
9797 	return sih->rffe_debug_mode;
9798 }
9799 
9800 int8
9801 si_rffe_get_elnabyp_mode(si_t *sih)
9802 {
9803 	return sih->rffe_elnabyp_mode;
9804 }
9805 
9806 int
9807 si_rffe_set_elnabyp_mode(si_t *sih, uint8 mode)
9808 {
9809 	int ret = BCME_OK;
9810 	uint32 elnabyp_ovr_val = 0;
9811 	uint32 elnabyp_ovr_en  = 0;
9812 
9813 	if ((mode & ELNABYP_IOVAR_2G0_VALUE_MASK) && (mode & ELNABYP_IOVAR_2G0_ENABLE_MASK)) {
9814 		elnabyp_ovr_val |= RF_SW_CTRL_ELNABYP_2G0_MASK;
9815 	} else if (mode & ELNABYP_IOVAR_2G0_ENABLE_MASK) {
9816 		elnabyp_ovr_val |= RF_SW_CTRL_ELNABYP_2G0_MASK_FB;
9817 	}
9818 	if ((mode & ELNABYP_IOVAR_5G0_VALUE_MASK) && (mode & ELNABYP_IOVAR_5G0_ENABLE_MASK)) {
9819 		elnabyp_ovr_val |= RF_SW_CTRL_ELNABYP_5G0_MASK;
9820 	} else if (mode & ELNABYP_IOVAR_5G0_ENABLE_MASK) {
9821 		elnabyp_ovr_val |= RF_SW_CTRL_ELNABYP_5G0_MASK_FB;
9822 	}
9823 	if ((mode & ELNABYP_IOVAR_2G1_VALUE_MASK) && (mode & ELNABYP_IOVAR_2G1_ENABLE_MASK)) {
9824 		elnabyp_ovr_val |= RF_SW_CTRL_ELNABYP_2G1_MASK;
9825 	} else if (mode & ELNABYP_IOVAR_2G1_ENABLE_MASK) {
9826 		elnabyp_ovr_val |= RF_SW_CTRL_ELNABYP_2G1_MASK_FB;
9827 	}
9828 	if ((mode & ELNABYP_IOVAR_5G1_VALUE_MASK) && (mode & ELNABYP_IOVAR_5G1_ENABLE_MASK)) {
9829 		elnabyp_ovr_val |= RF_SW_CTRL_ELNABYP_5G1_MASK;
9830 	} else if (mode & ELNABYP_IOVAR_5G1_ENABLE_MASK) {
9831 		elnabyp_ovr_val |= RF_SW_CTRL_ELNABYP_5G1_MASK_FB;
9832 	}
9833 
9834 	if (mode & ELNABYP_IOVAR_2G0_ENABLE_MASK) {
9835 		elnabyp_ovr_en |= (RF_SW_CTRL_ELNABYP_2G0_MASK | RF_SW_CTRL_ELNABYP_2G0_MASK_FB);
9836 	}
9837 	if (mode & ELNABYP_IOVAR_5G0_ENABLE_MASK) {
9838 		elnabyp_ovr_en |= (RF_SW_CTRL_ELNABYP_5G0_MASK | RF_SW_CTRL_ELNABYP_5G0_MASK_FB);
9839 	}
9840 	if (mode & ELNABYP_IOVAR_2G1_ENABLE_MASK) {
9841 		elnabyp_ovr_en |= (RF_SW_CTRL_ELNABYP_2G1_MASK | RF_SW_CTRL_ELNABYP_2G1_MASK_FB);
9842 	}
9843 	if (mode & ELNABYP_IOVAR_5G1_ENABLE_MASK) {
9844 		elnabyp_ovr_en |= (RF_SW_CTRL_ELNABYP_5G1_MASK | RF_SW_CTRL_ELNABYP_5G1_MASK_FB);
9845 	}
9846 
9847 	si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_14, RF_SW_CTRL_ELNABYP_ANT_MASK,
9848 		elnabyp_ovr_val);
9849 	si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_15, RF_SW_CTRL_ELNABYP_ANT_MASK,
9850 		elnabyp_ovr_en);
9851 
9852 	sih->rffe_elnabyp_mode = mode;
9853 
9854 	return ret;
9855 }
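/* Illustrative usage sketch (not part of the driver): composing the elnabyp mode byte
 * from the ELNABYP_IOVAR_* masks above. Each enable bit makes the corresponding value
 * bit take effect; enables left at 0 are "don't care". The SI_UTILS_USAGE_EXAMPLES
 * guard and si_example_* name are hypothetical, example-only additions.
 */
#ifdef SI_UTILS_USAGE_EXAMPLES
static int
si_example_force_elnabyp_2g0(si_t *sih, bool bypass)
{
	/* enable the 2G0 override; set the value bit only when forcing bypass on */
	uint8 mode = ELNABYP_IOVAR_2G0_ENABLE_MASK;

	if (bypass) {
		mode |= ELNABYP_IOVAR_2G0_VALUE_MASK;
	}

	return si_rffe_set_elnabyp_mode(sih, mode);
}
#endif /* SI_UTILS_USAGE_EXAMPLES */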
9856 
9857 int
9858 BCMPOSTTRAPFN(si_rffe_rfem_read)(si_t *sih, uint8 dev_id, uint8 antenna, uint16 reg_addr,
9859 	uint32 *val)
9860 {
9861 	int ret = BCME_OK;
9862 	uint32 gci_rffe_ctrl_val, antenna_0_enable, antenna_1_enable;
9863 	uint32 gci_rffe_ctrl = si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_ctrl), 0, 0);
9864 	uint32 gci_chipcontrol_03 = si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_03, 0, 0);
9865 	uint32 gci_chipcontrol_02 = si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_02, 0, 0);
9866 
9867 	si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_ctrl), ALLONES_32, 0);
9868 
9869 	switch (antenna) {
9870 		case 1:
9871 			gci_rffe_ctrl_val = RFFE_CTRL_START | RFFE_CTRL_READ;
9872 			antenna_0_enable = ANTENNA_0_ENABLE;
9873 			antenna_1_enable = 0;
9874 			break;
9875 		case 2:
9876 			gci_rffe_ctrl_val = RFFE_CTRL_START | RFFE_CTRL_READ | RFFE_CTRL_RFEM_SEL;
9877 			antenna_0_enable = 0;
9878 			antenna_1_enable = ANTENNA_1_ENABLE;
9879 			break;
9880 		default:
9881 			ret = BCME_BADOPTION;
9882 	}
9883 
9884 	if (ret == BCME_OK) {
9885 		si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_config), ALLONES_32,
9886 			((uint16) dev_id) << 8);
9887 		si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_rfem_addr), ALLONES_32, reg_addr);
9888 		si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_03, ANTENNA_0_ENABLE, antenna_0_enable);
9889 		si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_02, ANTENNA_1_ENABLE, antenna_1_enable);
9890 		/* Initiate read */
9891 		si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_ctrl),
9892 			RFFE_CTRL_START | RFFE_CTRL_READ | RFFE_CTRL_RFEM_SEL, gci_rffe_ctrl_val);
9893 		/* Wait for status */
9894 		SPINWAIT(si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_ctrl), 0, 0) &
9895 			RFFE_CTRL_START, 100);
9896 		if (si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_ctrl), 0, 0) &
9897 			RFFE_CTRL_START) {
9898 			OSL_SYS_HALT();
9899 			ret = BCME_NOTREADY;
9900 		} else {
9901 			*val = si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_rfem_data0), 0, 0);
9902 			/* Clear read and rfem_sel flags */
9903 			si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_ctrl),
9904 				RFFE_CTRL_READ | RFFE_CTRL_RFEM_SEL, 0);
9905 		}
9906 	}
9907 
9908 	/* Restore the values */
9909 	si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_ctrl), ALLONES_32, gci_rffe_ctrl);
9910 	si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_03, ALLONES_32, gci_chipcontrol_03);
9911 	si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_02, ALLONES_32, gci_chipcontrol_02);
9912 	return ret;
9913 }
9914 
9915 int
9916 BCMPOSTTRAPFN(si_rffe_rfem_write)(si_t *sih, uint8 dev_id, uint8 antenna, uint16 reg_addr,
9917 	uint32 data)
9918 {
9919 	int ret = BCME_OK;
9920 	uint32 antenna_0_enable, antenna_1_enable;
9921 	uint32 gci_rffe_ctrl = si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_ctrl), 0, 0);
9922 	uint32 gci_chipcontrol_03 = si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_03, 0, 0);
9923 	uint32 gci_chipcontrol_02 = si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_02, 0, 0);
9924 	uint8 repeat = (sih->ccrev == 69) ? 2 : 1; /* WAR for 4387c0 */
9925 
9926 	si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_ctrl), ALLONES_32, 0);
9927 
9928 	switch (antenna) {
9929 		case 1:
9930 			antenna_0_enable = ANTENNA_0_ENABLE;
9931 			antenna_1_enable = 0;
9932 			break;
9933 		case 2:
9934 			antenna_0_enable = 0;
9935 			antenna_1_enable = ANTENNA_1_ENABLE;
9936 			break;
9937 		case 3:
9938 			antenna_0_enable = ANTENNA_0_ENABLE;
9939 			antenna_1_enable = ANTENNA_1_ENABLE;
9940 			break;
9941 		default:
9942 			ret = BCME_BADOPTION;
9943 	}
9944 
9945 	if (ret == BCME_OK) {
9946 		si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_config), ALLONES_32,
9947 			((uint16) dev_id) << 8);
9948 		si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_rfem_addr), ALLONES_32, reg_addr);
9949 		si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_03, ANTENNA_0_ENABLE, antenna_0_enable);
9950 		si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_02, ANTENNA_1_ENABLE, antenna_1_enable);
9951 		si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_rfem_data0), ALLONES_32, data);
9952 		while (repeat--) {
9953 			/* Initiate write */
9954 			si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_ctrl), RFFE_CTRL_START |
9955 				RFFE_CTRL_READ | RFFE_CTRL_RFEM_SEL, RFFE_CTRL_START);
9956 			/* Wait for status */
9957 			SPINWAIT(si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_ctrl), 0, 0) &
9958 				RFFE_CTRL_START, 100);
9959 			if (si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_ctrl), 0, 0) &
9960 				RFFE_CTRL_START) {
9961 				OSL_SYS_HALT();
9962 				ret = BCME_NOTREADY;
9963 			}
9964 		}
9965 	}
9966 
9967 	/* Restore the values */
9968 	si_gci_direct(sih, OFFSETOF(gciregs_t, gci_rffe_ctrl), ALLONES_32, gci_rffe_ctrl);
9969 	si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_03, ALLONES_32, gci_chipcontrol_03);
9970 	si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_02, ALLONES_32, gci_chipcontrol_02);
9971 	return ret;
9972 }
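/* Illustrative usage sketch (not part of the driver): a read-modify-write of one RFFE
 * front-end register on antenna 1 using si_rffe_rfem_read()/si_rffe_rfem_write().
 * dev_id and reg_addr are whatever the attached RF front-end defines, and the
 * SI_UTILS_USAGE_EXAMPLES guard and si_example_* name are hypothetical, example-only.
 */
#ifdef SI_UTILS_USAGE_EXAMPLES
static int
si_example_rffe_rmw(si_t *sih, uint8 dev_id, uint16 reg_addr, uint32 set_bits)
{
	uint32 val = 0;
	int ret;

	/* antenna 1 for both the read and the write back */
	ret = si_rffe_rfem_read(sih, dev_id, 1, reg_addr, &val);
	if (ret != BCME_OK) {
		return ret;
	}

	return si_rffe_rfem_write(sih, dev_id, 1, reg_addr, val | set_bits);
}
#endif /* SI_UTILS_USAGE_EXAMPLES */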
9973 #endif /* !BCMDONGLEHOST */
9974 
9975 #if defined(BCMSDIODEV_ENABLED) && defined(ATE_BUILD)
9976 bool
9977 si_chipcap_sdio_ate_only(const si_t *sih)
9978 {
9979 	bool ate_build = FALSE;
9980 	switch (CHIPID(sih->chip)) {
9981 	case BCM4369_CHIP_GRPID:
9982 		if (CST4369_CHIPMODE_SDIOD(sih->chipst) &&
9983 			CST4369_CHIPMODE_PCIE(sih->chipst)) {
9984 			ate_build = TRUE;
9985 		}
9986 		break;
9987 	case BCM4376_CHIP_GRPID:
9988 	case BCM4378_CHIP_GRPID:
9989 	case BCM4385_CHIP_GRPID:
9990 	case BCM4387_CHIP_GRPID:
9991 	case BCM4388_CHIP_GRPID:
9992 	case BCM4389_CHIP_GRPID:
9993 	case BCM4397_CHIP_GRPID:
9994 		ate_build = TRUE;
9995 		break;
9996 	case BCM4362_CHIP_GRPID:
9997 		if (CST4362_CHIPMODE_SDIOD(sih->chipst) &&
9998 			CST4362_CHIPMODE_PCIE(sih->chipst)) {
9999 			ate_build = TRUE;
10000 		}
10001 		break;
10002 	default:
10003 		break;
10004 	}
10005 	return ate_build;
10006 }
10007 #endif /* BCMSDIODEV_ENABLED && ATE_BUILD */
10008 
10009 #ifdef UART_TRAP_DBG
10010 void
10011 si_dump_APB_Bridge_registers(const si_t *sih)
10012 {
10013 	if (CHIPTYPE(sih->socitype) == SOCI_AI) {
10014 		ai_dump_APB_Bridge_registers(sih);
10015 	}
10016 }
10017 #endif /* UART_TRAP_DBG */
10018 
10019 void
10020 si_force_clocks(const si_t *sih, uint clock_state)
10021 {
10022 	if (CHIPTYPE(sih->socitype) == SOCI_AI) {
10023 		ai_force_clocks(sih, clock_state);
10024 	}
10025 }
10026 
10027 /* Indicates to siutils how the PCIe BAR0 is mapped,
10028  * so that siutils can arrange BAR0 window management
10029  * for the PCI NIC driver.
10030  *
10031  * Here is the current scheme, which are all using BAR0:
10032  *
10033  * id     enum       wrapper
10034  * ====   =========  =========
10035  *    0   0000-0FFF  1000-1FFF
10036  *    1   4000-4FFF  5000-5FFF
10037  *    2   9000-9FFF  A000-AFFF
10038  * >= 3   not supported
10039  */
10040 void
10041 si_set_slice_id(si_t *sih, uint8 slice)
10042 {
10043 	si_info_t *sii = SI_INFO(sih);
10044 
10045 	sii->slice = slice;
10046 }
10047 
10048 uint8
10049 si_get_slice_id(const si_t *sih)
10050 {
10051 	const si_info_t *sii = SI_INFO(sih);
10052 
10053 	return sii->slice;
10054 }
10055 
10056 bool
10057 BCMPOSTTRAPRAMFN(si_scan_core_present)(const si_t *sih)
10058 {
10059 	return (si_numcoreunits(sih, D11_CORE_ID) > 2);
10060 }
10061 
10062 #if !defined(BCMDONGLEHOST)
10063 bool
10064 si_btc_bt_status_in_reset(si_t *sih)
10065 {
10066 	uint32 chipst = 0;
10067 	switch (CHIPID(sih->chip)) {
10068 		case BCM4387_CHIP_GRPID:
10069 			chipst = si_corereg(sih, SI_CC_IDX,
10070 				OFFSETOF(chipcregs_t, chipstatus), 0, 0);
10071 			/* 1 = BT in reset, 0 = BT out of reset */
10072 			return (chipst & (1 << BT_IN_RESET_BIT_SHIFT)) ? TRUE : FALSE;
10073 			break;
10074 		default:
10075 			ASSERT(0);
10076 			break;
10077 	}
10078 	return FALSE;
10079 }
10080 
10081 bool
10082 si_btc_bt_status_in_pds(si_t *sih)
10083 {
10084 	return !((si_gci_chipstatus(sih, GCI_CHIPSTATUS_04) >>
10085 		BT_IN_PDS_BIT_SHIFT) & 0x1);
10086 }
10087 
10088 int
10089 si_btc_bt_pds_wakeup_force(si_t *sih, bool force)
10090 {
10091 	if (force) {
10092 		si_pmu_chipcontrol(sih, PMU_CHIPCTL0,
10093 			PMU_CC0_4387_BT_PU_WAKE_MASK, PMU_CC0_4387_BT_PU_WAKE_MASK);
10094 		SPINWAIT((si_btc_bt_status_in_pds(sih) == TRUE), PMU_MAX_TRANSITION_DLY);
10095 		if (si_btc_bt_status_in_pds(sih) == TRUE) {
10096 			SI_ERROR(("si_btc_bt_pds_wakeup_force"
10097 				" ERROR : BT still in PDS after pds_wakeup_force \n"));
10098 			return BCME_ERROR;
10099 		} else {
10100 			return BCME_OK;
10101 		}
10102 	} else {
10103 		si_pmu_chipcontrol(sih, PMU_CHIPCTL0, PMU_CC0_4387_BT_PU_WAKE_MASK, 0);
10104 		return BCME_OK;
10105 	}
10106 }
10107 
10108 #endif /* !defined(BCMDONGLEHOST) */
10109 
10110 #ifndef BCMDONGLEHOST
10111 /* query d11 core type */
10112 uint
10113 BCMATTACHFN(si_core_d11_type)(si_t *sih, uint coreunit)
10114 {
10115 #ifdef WL_SCAN_CORE_SIM
10116 	/* use the core unit WL_SCAN_CORE_SIM as the scan core */
10117 	return (coreunit == WL_SCAN_CORE_SIM) ?
10118 	        D11_CORE_TYPE_SCAN : D11_CORE_TYPE_NORM;
10119 #else
10120 	uint coreidx;
10121 	volatile void *regs;
10122 	uint coretype;
10123 
10124 	coreidx = si_coreidx(sih);
10125 	regs = si_setcore(sih, D11_CORE_ID, coreunit);
10126 	ASSERT(regs != NULL);
10127 	BCM_REFERENCE(regs);
10128 
10129 	coretype = (si_core_sflags(sih, 0, 0) & SISF_CORE_BITS_SCAN) != 0 ?
10130 	        D11_CORE_TYPE_SCAN : D11_CORE_TYPE_NORM;
10131 
10132 	si_setcoreidx(sih, coreidx);
10133 	return coretype;
10134 #endif /* WL_SCAN_CORE_SIM */
10135 }
10136 
10137 /* decide if this core is allowed by the package option or not... */
10138 bool
10139 BCMATTACHFN(si_pkgopt_d11_allowed)(si_t *sih, uint coreunit)
10140 {
10141 	uint coreidx;
10142 	volatile void *regs;
10143 	bool allowed;
10144 
10145 	coreidx = si_coreidx(sih);
10146 	regs = si_setcore(sih, D11_CORE_ID, coreunit);
10147 	ASSERT(regs != NULL);
10148 	BCM_REFERENCE(regs);
10149 
10150 	allowed = ((si_core_sflags(sih, 0, 0) & SISF_CORE_BITS_SCAN) == 0 ||
10151 	        (si_gci_chipstatus(sih, GCI_CHIPSTATUS_09) & GCI_CST9_SCAN_DIS) == 0);
10152 
10153 	si_setcoreidx(sih, coreidx);
10154 	return allowed;
10155 }
10156 
10157 void
10158 si_configure_pwrthrottle_gpio(si_t *sih, uint8 pwrthrottle_gpio_in)
10159 {
10160 	uint32 board_gpio = 0;
10161 	if (CHIPID(sih->chip) == BCM4369_CHIP_ID || CHIPID(sih->chip) == BCM4377_CHIP_ID) {
10162 		si_gci_set_functionsel(sih, pwrthrottle_gpio_in, 1);
10163 	}
10164 	board_gpio = 1 << pwrthrottle_gpio_in;
10165 	si_gpiocontrol(sih, board_gpio, 0, GPIO_DRV_PRIORITY);
10166 	si_gpioouten(sih, board_gpio, 0, GPIO_DRV_PRIORITY);
10167 }
10168 
10169 void
10170 si_configure_onbody_gpio(si_t *sih, uint8 onbody_gpio_in)
10171 {
10172 	uint32 board_gpio = 0;
10173 	if (CHIPID(sih->chip) == BCM4369_CHIP_ID || CHIPID(sih->chip) == BCM4377_CHIP_ID) {
10174 		si_gci_set_functionsel(sih, onbody_gpio_in, 1);
10175 	}
10176 	board_gpio = 1 << onbody_gpio_in;
10177 	si_gpiocontrol(sih, board_gpio, 0, GPIO_DRV_PRIORITY);
10178 	si_gpioouten(sih, board_gpio, 0, GPIO_DRV_PRIORITY);
10179 }
10180 
10181 #endif /* !BCMDONGLEHOST */
10182 
10183 void
10184 si_jtag_udr_pwrsw_main_toggle(si_t *sih, bool on)
10185 {
10186 #ifdef DONGLEBUILD
10187 	int val = on ? 0 : 1;
10188 
10189 	switch (CHIPID(sih->chip)) {
10190 	case BCM4387_CHIP_GRPID:
10191 		jtag_setbit_128(sih, 8, 99, val);
10192 		jtag_setbit_128(sih, 8, 101, val);
10193 		jtag_setbit_128(sih, 8, 105, val);
10194 		break;
10195 	default:
10196 		SI_ERROR(("si_jtag_udr_pwrsw_main_toggle: add support for this chip!\n"));
10197 		OSL_SYS_HALT();
10198 		break;
10199 	}
10200 #endif
10201 }
10202 
10203 /* return the backplane address where the sssr dumps are stored per D11 core */
10204 uint32
10205 BCMATTACHFN(si_d11_core_sssr_addr)(si_t *sih, uint unit, uint32 *sssr_size)
10206 {
10207 	uint32 sssr_dmp_src = 0;
10208 	*sssr_size = 0;
10209 	/* ideally these addresses should be grok'ed from EROM map */
10210 	switch (CHIPID(sih->chip)) {
10211 		case BCM4387_CHIP_GRPID:
10212 			if (unit == 0) {
10213 				sssr_dmp_src = BCM4387_SSSR_DUMP_AXI_MAIN;
10214 				*sssr_size = (uint32)BCM4387_SSSR_DUMP_MAIN_SIZE;
10215 			} else if (unit == 1) {
10216 				sssr_dmp_src = BCM4387_SSSR_DUMP_AXI_AUX;
10217 				*sssr_size = (uint32)BCM4387_SSSR_DUMP_AUX_SIZE;
10218 			} else if (unit == 2) {
10219 				sssr_dmp_src = BCM4387_SSSR_DUMP_AXI_SCAN;
10220 				*sssr_size = (uint32)BCM4387_SSSR_DUMP_SCAN_SIZE;
10221 			}
10222 			break;
10223 		default:
10224 			break;
10225 	}
10226 	return (sssr_dmp_src);
10227 }
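/* Illustrative usage sketch (not part of the driver): walking the D11 core units and
 * picking up the per-core SSSR dump address and size reported above. The unit count of
 * three matches the main/aux/scan layout handled for the 4387 group; unsupported chips
 * simply return 0. The SI_UTILS_USAGE_EXAMPLES guard and si_example_* name are
 * hypothetical, example-only additions.
 */
#ifdef SI_UTILS_USAGE_EXAMPLES
static void
si_example_sssr_addrs(si_t *sih)
{
	uint unit;

	for (unit = 0; unit < 3; unit++) {
		uint32 sssr_size = 0;
		uint32 sssr_addr = si_d11_core_sssr_addr(sih, unit, &sssr_size);

		if (sssr_addr != 0) {
			SI_MSG(("d11 unit %u: sssr dump at 0x%08x, size %u\n",
				unit, sssr_addr, sssr_size));
		}
	}
}
#endif /* SI_UTILS_USAGE_EXAMPLES */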
10228 
10229 #ifdef USE_LHL_TIMER
10230 /* Get current HIB time API */
10231 uint32
10232 si_cur_hib_time(si_t *sih)
10233 {
10234 	uint32 hib_time;
10235 
10236 	hib_time = LHL_REG(sih, lhl_hibtim_adr, 0, 0);
10237 
10238 	/* there is no HW sync on the read path for LPO regs,
10239 	 * so SW should read twice; if the two values match,
10240 	 * use that value, otherwise read again and use the
10241 	 * latest value
10242 	 */
10243 	if (hib_time != LHL_REG(sih, lhl_hibtim_adr, 0, 0)) {
10244 		hib_time = LHL_REG(sih, lhl_hibtim_adr, 0, 0);
10245 	}
10246 
10247 	return (hib_time);
10248 }
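/* Illustrative usage sketch (not part of the driver): measuring an elapsed interval in
 * hibernation-timer ticks from two si_cur_hib_time() samples. The unsigned subtraction
 * is modulo 2^32, so it stays correct across a single counter wrap. The
 * SI_UTILS_USAGE_EXAMPLES guard and si_example_* name are hypothetical, example-only.
 */
#ifdef SI_UTILS_USAGE_EXAMPLES
static uint32
si_example_hib_time_elapsed(si_t *sih, uint32 start)
{
	/* 'start' is an earlier si_cur_hib_time() sample */
	return si_cur_hib_time(sih) - start;
}
#endif /* SI_UTILS_USAGE_EXAMPLES */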
10249 #endif /* USE_LHL_TIMER */
10250