1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Misc utility routines for accessing chip-specific features
4  * of the SiliconBackplane-based Broadcom chips.
5  *
6  * Copyright (C) 1999-2019, Broadcom.
7  *
8  *      Unless you and Broadcom execute a separate written software license
9  * agreement governing use of this software, this software is licensed to you
10  * under the terms of the GNU General Public License version 2 (the "GPL"),
11  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
12  * following added to such license:
13  *
14  *      As a special exception, the copyright holders of this software give you
15  * permission to link this software with independent modules, and to copy and
16  * distribute the resulting executable under terms of your choice, provided that
17  * you also meet, for each linked independent module, the terms and conditions of
18  * the license of that module.  An independent module is a module which is not
19  * derived from this software.  The special exception does not apply to any
20  * modifications of the software.
21  *
22  *      Notwithstanding the above, under no circumstances may you combine this
23  * software in any way with any other Broadcom software provided under a license
24  * other than the GPL, without Broadcom's express prior written consent.
25  *
26  *
27  * <<Broadcom-WL-IPTag/Open:>>
28  *
29  * $Id: aiutils.c 823201 2019-06-03 03:49:36Z $
30  */
31 #include <bcm_cfg.h>
32 #include <typedefs.h>
33 #include <bcmdefs.h>
34 #include <osl.h>
35 #include <bcmutils.h>
36 #include <siutils.h>
37 #include <hndsoc.h>
38 #include <sbchipc.h>
39 #include <pcicfg.h>
40 
41 #include "siutils_priv.h"
42 #include <bcmdevs.h>
43 
44 #define BCM53573_DMP() (0)
45 #define BCM4707_DMP() (0)
46 #define PMU_DMP() (0)
47 #define GCI_DMP() (0)
48 
49 #if defined(BCM_BACKPLANE_TIMEOUT)
50 static bool ai_get_apb_bridge(si_t *sih, uint32 coreidx, uint32 *apb_id, uint32 *apb_coreuinit);
51 #endif /* BCM_BACKPLANE_TIMEOUT */
52 
53 #if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
54 static void ai_reset_axi_to(si_info_t *sii, aidmp_t *ai);
55 #endif	/* defined (AXI_TIMEOUTS) || defined (BCM_BACKPLANE_TIMEOUT) */
56 
57 /* EROM parsing */
58 
59 static uint32
60 get_erom_ent(si_t *sih, uint32 **eromptr, uint32 mask, uint32 match)
61 {
62 	uint32 ent;
63 	uint inv = 0, nom = 0;
64 	uint32 size = 0;
65 
66 	while (TRUE) {
67 		ent = R_REG(si_osh(sih), *eromptr);
68 		(*eromptr)++;
69 
70 		if (mask == 0)
71 			break;
72 
73 		if ((ent & ER_VALID) == 0) {
74 			inv++;
75 			continue;
76 		}
77 
78 		if (ent == (ER_END | ER_VALID))
79 			break;
80 
81 		if ((ent & mask) == match)
82 			break;
83 
84 		/* escape condition: bound the EROM scan size in case of invalid entries */
85 		size += sizeof(*eromptr);
86 		if (size >= ER_SZ_MAX) {
87 			SI_ERROR(("Failed to find end of EROM marker\n"));
88 			break;
89 		}
90 
91 		nom++;
92 	}
93 
94 	SI_VMSG(("%s: Returning ent 0x%08x\n", __FUNCTION__, ent));
95 	if (inv + nom) {
96 		SI_VMSG(("  after %d invalid and %d non-matching entries\n", inv, nom));
97 	}
98 	return ent;
99 }
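/*
 * Usage sketch (annotation, not in the original source): a mask of 0 makes
 * get_erom_ent() return the next 32-bit EROM word unconditionally; a non-zero
 * mask skips invalid entries until (ent & mask) == match, the END marker, or
 * the ER_SZ_MAX scan limit is hit. ai_scan() below pairs the two modes:
 *
 *	cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);  // find descriptor A
 *	cib = get_erom_ent(sih, &eromptr, 0, 0);           // raw read: descriptor B
 */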
100 
101 static uint32
102 get_asd(si_t *sih, uint32 **eromptr, uint sp, uint ad, uint st, uint32 *addrl, uint32 *addrh,
103 	uint32 *sizel, uint32 *sizeh)
104 {
105 	uint32 asd, sz, szd;
106 
107 	BCM_REFERENCE(ad);
108 
109 	asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
110 	if (((asd & ER_TAG1) != ER_ADD) ||
111 	    (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
112 	    ((asd & AD_ST_MASK) != st)) {
113 		/* This is not what we want, "push" it back */
114 		(*eromptr)--;
115 		return 0;
116 	}
117 	*addrl = asd & AD_ADDR_MASK;
118 	if (asd & AD_AG32)
119 		*addrh = get_erom_ent(sih, eromptr, 0, 0);
120 	else
121 		*addrh = 0;
122 	*sizeh = 0;
123 	sz = asd & AD_SZ_MASK;
124 	if (sz == AD_SZ_SZD) {
125 		szd = get_erom_ent(sih, eromptr, 0, 0);
126 		*sizel = szd & SD_SZ_MASK;
127 		if (szd & SD_SG32)
128 			*sizeh = get_erom_ent(sih, eromptr, 0, 0);
129 	} else
130 		*sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);
131 
132 	SI_VMSG(("  SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
133 	        sp, ad, st, *sizeh, *sizel, *addrh, *addrl));
134 
135 	return asd;
136 }
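/*
 * Worked example (annotation, not in the original source): for the compact
 * size encoding, the region size is AD_SZ_BASE << (sz >> AD_SZ_SHIFT).
 * Assuming AD_SZ_BASE is 4KB (the SI_CORE_SIZE checks elsewhere in this file
 * suggest as much), an encoded exponent of 0 yields a 4KB region and an
 * exponent of 3 yields 32KB. Only when the field equals AD_SZ_SZD does an
 * explicit size descriptor word (plus an optional high word, SD_SG32) follow
 * in the EROM.
 */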
137 
138 /* Parse the enumeration rom to identify all cores
139  * Erom content format can be found in:
140  * http://hwnbu-twiki.broadcom.com/twiki/pub/Mwgroup/ArmDocumentation/SystemDiscovery.pdf
141  */
142 void
143 ai_scan(si_t *sih, void *regs, uint devid)
144 {
145 	si_info_t *sii = SI_INFO(sih);
146 	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
147 	chipcregs_t *cc = (chipcregs_t *)regs;
148 	uint32 erombase, *eromptr, *eromlim;
149 	axi_wrapper_t * axi_wrapper = sii->axi_wrapper;
150 
151 	BCM_REFERENCE(devid);
152 
153 	erombase = R_REG(sii->osh, &cc->eromptr);
154 
155 	switch (BUSTYPE(sih->bustype)) {
156 	case SI_BUS:
157 		eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
158 		break;
159 
160 	case PCI_BUS:
161 		/* Set wrappers address */
162 		sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE);
163 
164 		/* Now point the window at the erom */
165 		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase);
166 		eromptr = regs;
167 		break;
168 
169 #ifdef BCMSDIO
170 	case SPI_BUS:
171 	case SDIO_BUS:
172 		eromptr = (uint32 *)(uintptr)erombase;
173 		break;
174 #endif	/* BCMSDIO */
175 
176 	case PCMCIA_BUS:
177 	default:
178 		SI_ERROR(("Don't know how to do AXI enumeration on bus %d\n", sih->bustype));
179 		ASSERT(0);
180 		return;
181 	}
182 	eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
183 	sii->axi_num_wrappers = 0;
184 
185 	SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n",
186 	         OSL_OBFUSCATE_BUF(regs), erombase,
187 		OSL_OBFUSCATE_BUF(eromptr), OSL_OBFUSCATE_BUF(eromlim)));
188 	while (eromptr < eromlim) {
189 		uint32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
190 		uint32 mpd, asd, addrl, addrh, sizel, sizeh;
191 		uint i, j, idx;
192 		bool br;
193 
194 		br = FALSE;
195 
196 		/* Grok a component */
197 		cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
198 		if (cia == (ER_END | ER_VALID)) {
199 			SI_VMSG(("Found END of erom after %d cores\n", sii->numcores));
200 			return;
201 		}
202 
203 		cib = get_erom_ent(sih, &eromptr, 0, 0);
204 
205 		if ((cib & ER_TAG) != ER_CI) {
206 			SI_ERROR(("CIA not followed by CIB\n"));
207 			goto error;
208 		}
209 
210 		cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
211 		mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
212 		crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
213 		nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
214 		nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
215 		nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
216 		nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
217 
218 #ifdef BCMDBG_SI
219 		SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, "
220 		         "nsw = %d, nmp = %d & nsp = %d\n",
221 		         mfg, cid, crev, OSL_OBFUSCATE_BUF(eromptr - 1), nmw, nsw, nmp, nsp));
222 #else
223 		BCM_REFERENCE(crev);
224 #endif // endif
225 
226 		if (BCM4347_CHIP(sih->chip)) {
227 			/* 4347 has more entries for the ARM core.
228 			 * This should apply to all chips, but it crashes on router
229 			 * platforms; this is a temporary fix pending further analysis.
230 			 */
231 			if (nsp == 0)
232 				continue;
233 		} else
234 		{
235 			/* Include Default slave wrapper for timeout monitoring */
236 			if ((nsp == 0) ||
237 #if !defined(AXI_TIMEOUTS) && !defined(BCM_BACKPLANE_TIMEOUT)
238 				((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) ||
239 #else
240 				((CHIPTYPE(sii->pub.socitype) == SOCI_NAI) &&
241 				(mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) ||
242 #endif /* !defined(AXI_TIMEOUTS) && !defined(BCM_BACKPLANE_TIMEOUT) */
243 				FALSE) {
244 				continue;
245 			}
246 		}
247 
248 		if ((nmw + nsw == 0)) {
249 			/* A component which is not a core */
250 			if (cid == OOB_ROUTER_CORE_ID) {
251 				asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
252 					&addrl, &addrh, &sizel, &sizeh);
253 				if (asd != 0) {
254 					if ((sii->oob_router != 0) && (sii->oob_router != addrl)) {
255 						sii->oob_router1 = addrl;
256 					} else {
257 						sii->oob_router = addrl;
258 					}
259 				}
260 			}
261 			if (cid != NS_CCB_CORE_ID &&
262 				cid != PMU_CORE_ID && cid != GCI_CORE_ID && cid != SR_CORE_ID &&
263 				cid != HUB_CORE_ID && cid != HND_OOBR_CORE_ID)
264 				continue;
265 		}
266 
267 		idx = sii->numcores;
268 
269 		cores_info->cia[idx] = cia;
270 		cores_info->cib[idx] = cib;
271 		cores_info->coreid[idx] = cid;
272 
273 		for (i = 0; i < nmp; i++) {
274 			mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
275 			if ((mpd & ER_TAG) != ER_MP) {
276 				SI_ERROR(("Not enough MP entries for component 0x%x\n", cid));
277 				goto error;
278 			}
279 			SI_VMSG(("  Master port %d, mp: %d id: %d\n", i,
280 			         (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT,
281 			         (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT));
282 		}
283 
284 		/* First Slave Address Descriptor should be port 0:
285 		 * the main register space for the core
286 		 */
287 		asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
288 		if (asd == 0) {
289 			do {
290 				/* Try again to see if it is a bridge */
291 				asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
292 				              &sizel, &sizeh);
293 				if (asd != 0)
294 					br = TRUE;
295 				else {
296 					if (br == TRUE) {
297 						break;
298 					}
299 					else if ((addrh != 0) || (sizeh != 0) ||
300 						(sizel != SI_CORE_SIZE)) {
301 						SI_ERROR(("addrh = 0x%x\t sizeh = 0x%x\t sizel = "
302 							"0x%x\n", addrh, sizeh, sizel));
303 						SI_ERROR(("First Slave ASD for "
304 							"core 0x%04x malformed "
305 							"(0x%08x)\n", cid, asd));
306 						goto error;
307 					}
308 				}
309 			} while (1);
310 		}
311 		cores_info->coresba[idx] = addrl;
312 		cores_info->coresba_size[idx] = sizel;
313 		/* Get any more ASDs in first port */
314 		j = 1;
315 		do {
316 			asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
317 			              &sizel, &sizeh);
318 			if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
319 				cores_info->coresba2[idx] = addrl;
320 				cores_info->coresba2_size[idx] = sizel;
321 			}
322 			j++;
323 		} while (asd != 0);
324 
325 		/* Go through the ASDs for other slave ports */
326 		for (i = 1; i < nsp; i++) {
327 			j = 0;
328 			do {
329 				asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
330 				              &sizel, &sizeh);
331 				/* To get the first base address of second slave port */
332 				if ((asd != 0) && (i == 1) && (j == 0)) {
333 					cores_info->csp2ba[idx] = addrl;
334 					cores_info->csp2ba_size[idx] = sizel;
335 				}
336 				if (asd == 0)
337 					break;
338 				j++;
339 			} while (1);
340 			if (j == 0) {
341 				SI_ERROR((" SP %d has no address descriptors\n", i));
342 				goto error;
343 			}
344 		}
345 
346 		/* Now get master wrappers */
347 		for (i = 0; i < nmw; i++) {
348 			asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl, &addrh,
349 			              &sizel, &sizeh);
350 			if (asd == 0) {
351 				SI_ERROR(("Missing descriptor for MW %d\n", i));
352 				goto error;
353 			}
354 			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
355 				SI_ERROR(("Master wrapper %d is not 4KB\n", i));
356 				goto error;
357 			}
358 			if (i == 0) {
359 				cores_info->wrapba[idx] = addrl;
360 			} else if (i == 1) {
361 				cores_info->wrapba2[idx] = addrl;
362 			} else if (i == 2) {
363 				cores_info->wrapba3[idx] = addrl;
364 			}
365 
366 			if (axi_wrapper &&
367 				(sii->axi_num_wrappers < SI_MAX_AXI_WRAPPERS)) {
368 				axi_wrapper[sii->axi_num_wrappers].mfg = mfg;
369 				axi_wrapper[sii->axi_num_wrappers].cid = cid;
370 				axi_wrapper[sii->axi_num_wrappers].rev = crev;
371 				axi_wrapper[sii->axi_num_wrappers].wrapper_type = AI_MASTER_WRAPPER;
372 				axi_wrapper[sii->axi_num_wrappers].wrapper_addr = addrl;
373 				sii->axi_num_wrappers++;
374 				SI_VMSG(("MASTER WRAPPER: %d, mfg:%x, cid:%x, "
375 					"rev:%x, addr:%x, size:%x\n",
376 					sii->axi_num_wrappers, mfg, cid, crev, addrl, sizel));
377 			}
378 		}
379 
380 		/* And finally slave wrappers */
381 		for (i = 0; i < nsw; i++) {
382 			uint fwp = (nsp == 1) ? 0 : 1;
383 			asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, &addrl, &addrh,
384 			              &sizel, &sizeh);
385 
386 			/* cache APB bridge wrapper address for set/clear timeout */
387 			if ((mfg == MFGID_ARM) && (cid == APB_BRIDGE_ID)) {
388 				ASSERT(sii->num_br < SI_MAXBR);
389 				sii->br_wrapba[sii->num_br++] = addrl;
390 			}
391 
392 			if (asd == 0) {
393 				SI_ERROR(("Missing descriptor for SW %d\n", i));
394 				goto error;
395 			}
396 			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
397 				SI_ERROR(("Slave wrapper %d is not 4KB\n", i));
398 				goto error;
399 			}
400 			if ((nmw == 0) && (i == 0)) {
401 				cores_info->wrapba[idx] = addrl;
402 			} else if ((nmw == 0) && (i == 1)) {
403 				cores_info->wrapba2[idx] = addrl;
404 			} else if ((nmw == 0) && (i == 2)) {
405 				cores_info->wrapba3[idx] = addrl;
406 			}
407 
408 			/* Include all slave wrappers to the list to
409 			 * enable and monitor watchdog timeouts
410 			 */
411 
412 			if (axi_wrapper &&
413 				(sii->axi_num_wrappers < SI_MAX_AXI_WRAPPERS)) {
414 				axi_wrapper[sii->axi_num_wrappers].mfg = mfg;
415 				axi_wrapper[sii->axi_num_wrappers].cid = cid;
416 				axi_wrapper[sii->axi_num_wrappers].rev = crev;
417 				axi_wrapper[sii->axi_num_wrappers].wrapper_type = AI_SLAVE_WRAPPER;
418 
419 				/* Software WAR, as discussed with the hardware team, to ensure the
420 				 * proper slave wrapper base address is set for the 4364 chip ID.
421 				 * The current address is 0x1810c000; correct it to 0x1810e000.
422 				 * This ensures the AXI default slave wrapper is registered along
423 				 * with the other slave wrapper cores, which is useful when
424 				 * generating trap info for writes to an invalid core/wrapper register.
425 				 */
426 
427 				if ((CHIPID(sih->chip) == BCM4364_CHIP_ID) &&
428 						(cid == DEF_AI_COMP)) {
429 					axi_wrapper[sii->axi_num_wrappers].wrapper_addr =
430 						0x1810e000;
431 				} else {
432 					axi_wrapper[sii->axi_num_wrappers].wrapper_addr = addrl;
433 				}
434 
435 				sii->axi_num_wrappers++;
436 
437 				SI_VMSG(("SLAVE WRAPPER: %d, mfg:%x, cid:%x, "
438 					"rev:%x, addr:%x, size:%x\n",
439 					sii->axi_num_wrappers,  mfg, cid, crev, addrl, sizel));
440 			}
441 		}
442 
443 #ifndef BCM_BACKPLANE_TIMEOUT
444 		/* Don't record bridges */
445 		if (br)
446 			continue;
447 #endif // endif
448 
449 		/* Done with core */
450 		sii->numcores++;
451 	}
452 
453 	SI_ERROR(("Reached end of erom without finding END\n"));
454 
455 error:
456 	sii->numcores = 0;
457 	return;
458 }
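/*
 * Consumption sketch (annotation, not in the original source): after a
 * successful scan, sii->numcores and the cores_info arrays describe every
 * discovered core. On any parse error ai_scan() resets numcores to 0, so
 * callers must treat numcores == 0 as "enumeration failed":
 *
 *	ai_scan(sih, regs, devid);
 *	for (i = 0; i < sii->numcores; i++)
 *		SI_VMSG(("core %u: id 0x%x @ 0x%08x\n",
 *		         i, cores_info->coreid[i], cores_info->coresba[i]));
 */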
459 
460 #define AI_SETCOREIDX_MAPSIZE(coreid) \
461 	(((coreid) == NS_CCB_CORE_ID) ? 15 * SI_CORE_SIZE : SI_CORE_SIZE)
462 
463 /* This function changes the logical "focus" to the indicated core.
464  * Return the current core's virtual address.
465  */
466 static volatile void *
467 _ai_setcoreidx(si_t *sih, uint coreidx, uint use_wrapn)
468 {
469 	si_info_t *sii = SI_INFO(sih);
470 	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
471 	uint32 addr, wrap, wrap2, wrap3;
472 	volatile void *regs;
473 
474 	if (coreidx >= MIN(sii->numcores, SI_MAXCORES))
475 		return (NULL);
476 
477 	addr = cores_info->coresba[coreidx];
478 	wrap = cores_info->wrapba[coreidx];
479 	wrap2 = cores_info->wrapba2[coreidx];
480 	wrap3 = cores_info->wrapba3[coreidx];
481 
482 #ifdef BCM_BACKPLANE_TIMEOUT
483 	/* No need to disable interrupts while entering/exiting APB bridge core */
484 	if ((cores_info->coreid[coreidx] != APB_BRIDGE_CORE_ID) &&
485 		(cores_info->coreid[sii->curidx] != APB_BRIDGE_CORE_ID))
486 #endif /* BCM_BACKPLANE_TIMEOUT */
487 	{
488 		/*
489 		 * If the user has provided an interrupt mask enabled function,
490 		 * then assert interrupts are disabled before switching the core.
491 		 */
492 		ASSERT((sii->intrsenabled_fn == NULL) ||
493 			!(*(sii)->intrsenabled_fn)((sii)->intr_arg));
494 	}
495 
496 	switch (BUSTYPE(sih->bustype)) {
497 	case SI_BUS:
498 		/* map new one */
499 		if (!cores_info->regs[coreidx]) {
500 			cores_info->regs[coreidx] = REG_MAP(addr,
501 				AI_SETCOREIDX_MAPSIZE(cores_info->coreid[coreidx]));
502 			ASSERT(GOODREGS(cores_info->regs[coreidx]));
503 		}
504 		sii->curmap = regs = cores_info->regs[coreidx];
505 		if (!cores_info->wrappers[coreidx] && (wrap != 0)) {
506 			cores_info->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
507 			ASSERT(GOODREGS(cores_info->wrappers[coreidx]));
508 		}
509 		if (!cores_info->wrappers2[coreidx] && (wrap2 != 0)) {
510 			cores_info->wrappers2[coreidx] = REG_MAP(wrap2, SI_CORE_SIZE);
511 			ASSERT(GOODREGS(cores_info->wrappers2[coreidx]));
512 		}
513 		if (!cores_info->wrappers3[coreidx] && (wrap3 != 0)) {
514 			cores_info->wrappers3[coreidx] = REG_MAP(wrap3, SI_CORE_SIZE);
515 			ASSERT(GOODREGS(cores_info->wrappers3[coreidx]));
516 		}
517 
518 		if (use_wrapn == 2) {
519 			sii->curwrap = cores_info->wrappers3[coreidx];
520 		} else if (use_wrapn == 1) {
521 			sii->curwrap = cores_info->wrappers2[coreidx];
522 		} else {
523 			sii->curwrap = cores_info->wrappers[coreidx];
524 		}
525 		break;
526 
527 	case PCI_BUS:
528 #ifdef BCM_BACKPLANE_TIMEOUT
529 		/* No need to set the BAR0 if core is APB Bridge.
530 		 * This avoids two extra PCI config writes while checking the errlog
531 		 */
532 		if (cores_info->coreid[coreidx] != APB_BRIDGE_CORE_ID)
533 #endif /* BCM_BACKPLANE_TIMEOUT */
534 		{
535 			/* point bar0 window */
536 			OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, addr);
537 		}
538 
539 		regs = sii->curmap;
540 		/* point bar0 2nd 4KB window to the primary wrapper */
541 		if (use_wrapn)
542 			wrap = wrap2;
543 		if (PCIE_GEN2(sii))
544 			OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_WIN2, 4, wrap);
545 		else
546 			OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN2, 4, wrap);
547 		break;
548 
549 #ifdef BCMSDIO
550 	case SPI_BUS:
551 	case SDIO_BUS:
552 		sii->curmap = regs = (void *)((uintptr)addr);
553 		if (use_wrapn)
554 			sii->curwrap = (void *)((uintptr)wrap2);
555 		else
556 			sii->curwrap = (void *)((uintptr)wrap);
557 		break;
558 #endif	/* BCMSDIO */
559 
560 	case PCMCIA_BUS:
561 	default:
562 		ASSERT(0);
563 		regs = NULL;
564 		break;
565 	}
566 
567 	sii->curmap = regs;
568 	sii->curidx = coreidx;
569 
570 	return regs;
571 }
572 
573 volatile void *
574 ai_setcoreidx(si_t *sih, uint coreidx)
575 {
576 	return _ai_setcoreidx(sih, coreidx, 0);
577 }
578 
579 volatile void *
580 ai_setcoreidx_2ndwrap(si_t *sih, uint coreidx)
581 {
582 	return _ai_setcoreidx(sih, coreidx, 1);
583 }
584 
585 volatile void *
586 ai_setcoreidx_3rdwrap(si_t *sih, uint coreidx)
587 {
588 	return _ai_setcoreidx(sih, coreidx, 2);
589 }
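/*
 * Usage sketch (annotation, not in the original source): callers typically
 * save and restore the core focus around a switch, with interrupts off (see
 * the intrsenabled_fn ASSERT in _ai_setcoreidx()):
 *
 *	uint origidx = si_coreidx(sih);
 *	volatile void *regs = ai_setcoreidx(sih, coreidx);
 *	if (regs != NULL) {
 *		... access the core via regs / sii->curwrap ...
 *	}
 *	ai_setcoreidx(sih, origidx);
 */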
590 
591 void
592 ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size)
593 {
594 	si_info_t *sii = SI_INFO(sih);
595 	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
596 	chipcregs_t *cc = NULL;
597 	uint32 erombase, *eromptr, *eromlim;
598 	uint i, j, cidx;
599 	uint32 cia, cib, nmp, nsp;
600 	uint32 asd, addrl, addrh, sizel, sizeh;
601 
602 	for (i = 0; i < sii->numcores; i++) {
603 		if (cores_info->coreid[i] == CC_CORE_ID) {
604 			cc = (chipcregs_t *)cores_info->regs[i];
605 			break;
606 		}
607 	}
608 	if (cc == NULL)
609 		goto error;
610 
611 	BCM_REFERENCE(erombase);
612 	erombase = R_REG(sii->osh, &cc->eromptr);
613 	eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
614 	eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
615 
616 	cidx = sii->curidx;
617 	cia = cores_info->cia[cidx];
618 	cib = cores_info->cib[cidx];
619 
620 	nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
621 	nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
622 
623 	/* scan for cores */
624 	while (eromptr < eromlim) {
625 		if ((get_erom_ent(sih, &eromptr, ER_TAG, ER_CI) == cia) &&
626 			(get_erom_ent(sih, &eromptr, 0, 0) == cib)) {
627 			break;
628 		}
629 	}
630 
631 	/* skip master ports */
632 	for (i = 0; i < nmp; i++)
633 		get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
634 
635 	/* Skip ASDs in port 0 */
636 	asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
637 	if (asd == 0) {
638 		/* Try again to see if it is a bridge */
639 		asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
640 		              &sizel, &sizeh);
641 	}
642 
643 	j = 1;
644 	do {
645 		asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
646 		              &sizel, &sizeh);
647 		j++;
648 	} while (asd != 0);
649 
650 	/* Go through the ASDs for other slave ports */
651 	for (i = 1; i < nsp; i++) {
652 		j = 0;
653 		do {
654 			asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
655 				&sizel, &sizeh);
656 			if (asd == 0)
657 				break;
658 
659 			if (!asidx--) {
660 				*addr = addrl;
661 				*size = sizel;
662 				return;
663 			}
664 			j++;
665 		} while (1);
666 
667 		if (j == 0) {
668 			SI_ERROR((" SP %d has no address descriptors\n", i));
669 			break;
670 		}
671 	}
672 
673 error:
674 	*size = 0;
675 	return;
676 }
677 
678 /* Return the number of address spaces in current core */
679 int
680 ai_numaddrspaces(si_t *sih)
681 {
682 
683 	BCM_REFERENCE(sih);
684 
685 	return 2;
686 }
687 
688 /* Return the address of the nth address space in the current core
689  * Arguments:
690  * sih : Pointer to struct si_t
691  * spidx : slave port index
692  * baidx : base address index
693  */
694 uint32
695 ai_addrspace(si_t *sih, uint spidx, uint baidx)
696 {
697 	si_info_t *sii = SI_INFO(sih);
698 	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
699 	uint cidx;
700 
701 	cidx = sii->curidx;
702 
703 	if (spidx == CORE_SLAVE_PORT_0) {
704 		if (baidx == CORE_BASE_ADDR_0)
705 			return cores_info->coresba[cidx];
706 		else if (baidx == CORE_BASE_ADDR_1)
707 			return cores_info->coresba2[cidx];
708 	} else if (spidx == CORE_SLAVE_PORT_1) {
709 		if (baidx == CORE_BASE_ADDR_0)
710 			return cores_info->csp2ba[cidx];
711 	}
712 
713 	SI_ERROR(("%s: Need to parse the erom again to find %d base addr in %d slave port\n",
714 	      __FUNCTION__, baidx, spidx));
715 
716 	return 0;
717 }
718 
719 /* Return the size of the nth address space in the current core
720 * Arguments:
721 * sih : Pointer to struct si_t
722 * spidx : slave port index
723 * baidx : base address index
724 */
725 uint32
726 ai_addrspacesize(si_t *sih, uint spidx, uint baidx)
727 {
728 	si_info_t *sii = SI_INFO(sih);
729 	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
730 	uint cidx;
731 
732 	cidx = sii->curidx;
733 	if (spidx == CORE_SLAVE_PORT_0) {
734 		if (baidx == CORE_BASE_ADDR_0)
735 			return cores_info->coresba_size[cidx];
736 		else if (baidx == CORE_BASE_ADDR_1)
737 			return cores_info->coresba2_size[cidx];
738 	} else if (spidx == CORE_SLAVE_PORT_1) {
739 		if (baidx == CORE_BASE_ADDR_0)
740 			return cores_info->csp2ba_size[cidx];
741 	}
742 
743 	SI_ERROR(("%s: Need to parse the erom again to find %d base addr in %d slave port\n",
744 	      __FUNCTION__, baidx, spidx));
745 
746 	return 0;
747 }
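/*
 * Usage sketch (annotation, not in the original source): the two queries are
 * usually paired, and only the port/base combinations cached by ai_scan()
 * are available; anything else returns 0 with an error log:
 *
 *	uint32 base = ai_addrspace(sih, CORE_SLAVE_PORT_0, CORE_BASE_ADDR_0);
 *	uint32 size = ai_addrspacesize(sih, CORE_SLAVE_PORT_0, CORE_BASE_ADDR_0);
 */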
748 
749 uint
750 ai_flag(si_t *sih)
751 {
752 	si_info_t *sii = SI_INFO(sih);
753 	aidmp_t *ai;
754 
755 	if (BCM4707_DMP()) {
756 		SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n",
757 			__FUNCTION__));
758 		return sii->curidx;
759 	}
760 	if (BCM53573_DMP()) {
761 		SI_ERROR(("%s: Attempting to read DMP registers on 53573\n", __FUNCTION__));
762 		return sii->curidx;
763 	}
764 	if (PMU_DMP()) {
765 		uint idx, flag;
766 		idx = sii->curidx;
767 		ai_setcoreidx(sih, SI_CC_IDX);
768 		flag = ai_flag_alt(sih);
769 		ai_setcoreidx(sih, idx);
770 		return flag;
771 	}
772 
773 	ai = sii->curwrap;
774 	ASSERT(ai != NULL);
775 
776 	return (R_REG(sii->osh, &ai->oobselouta30) & 0x1f);
777 }
778 
779 uint
780 ai_flag_alt(si_t *sih)
781 {
782 	si_info_t *sii = SI_INFO(sih);
783 	aidmp_t *ai;
784 
785 	if (BCM4707_DMP()) {
786 		SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n",
787 			__FUNCTION__));
788 		return sii->curidx;
789 	}
790 
791 	ai = sii->curwrap;
792 
793 	return ((R_REG(sii->osh, &ai->oobselouta30) >> AI_OOBSEL_1_SHIFT) & AI_OOBSEL_MASK);
794 }
795 
796 void
797 ai_setint(si_t *sih, int siflag)
798 {
799 	BCM_REFERENCE(sih);
800 	BCM_REFERENCE(siflag);
801 
802 }
803 
804 uint
805 ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val)
806 {
807 	si_info_t *sii = SI_INFO(sih);
808 	uint32 *addr = (uint32 *) ((uchar *)(sii->curwrap) + offset);
809 
810 	if (mask || val) {
811 		uint32 w = R_REG(sii->osh, addr);
812 		w &= ~mask;
813 		w |= val;
814 		W_REG(sii->osh, addr, w);
815 	}
816 	return (R_REG(sii->osh, addr));
817 }
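/*
 * Semantics sketch (annotation, not in the original source): mask == 0 and
 * val == 0 performs a pure read; otherwise the register is read, masked,
 * OR'ed with val, and written back. The post-operation value is always
 * returned. Assuming the OFFSETOF() macro from bcmutils:
 *
 *	v = ai_wrap_reg(sih, OFFSETOF(aidmp_t, ioctrl), 0, 0);           // read
 *	ai_wrap_reg(sih, OFFSETOF(aidmp_t, ioctrl), SICF_FGC, SICF_FGC); // set bit
 *	ai_wrap_reg(sih, OFFSETOF(aidmp_t, ioctrl), SICF_FGC, 0);        // clear bit
 */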
818 
819 uint
820 ai_corevendor(si_t *sih)
821 {
822 	si_info_t *sii = SI_INFO(sih);
823 	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
824 	uint32 cia;
825 
826 	cia = cores_info->cia[sii->curidx];
827 	return ((cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT);
828 }
829 
830 uint
831 ai_corerev(si_t *sih)
832 {
833 	si_info_t *sii = SI_INFO(sih);
834 	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
835 	uint32 cib;
836 
837 	cib = cores_info->cib[sii->curidx];
838 	return ((cib & CIB_REV_MASK) >> CIB_REV_SHIFT);
839 }
840 
841 uint
842 ai_corerev_minor(si_t *sih)
843 {
844 	return (ai_core_sflags(sih, 0, 0) >> SISF_MINORREV_D11_SHIFT) &
845 			SISF_MINORREV_D11_MASK;
846 }
847 
848 bool
849 ai_iscoreup(si_t *sih)
850 {
851 	si_info_t *sii = SI_INFO(sih);
852 	aidmp_t *ai;
853 
854 	ai = sii->curwrap;
855 
856 	return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) == SICF_CLOCK_EN) &&
857 	        ((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0));
858 }
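/*
 * Decoded (annotation, not in the original source): a core is "up" when
 * SICF_CLOCK_EN is set with SICF_FGC clear in ioctrl, and AIRC_RESET is
 * clear in resetctrl, i.e. the core is clocked normally and out of reset.
 * A typical post-reset check:
 *
 *	ai_core_reset(sih, 0, 0);
 *	ASSERT(ai_iscoreup(sih));
 */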
859 
860 /*
861  * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
862  * switch back to the original core, and return the new value.
863  *
864  * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
865  *
866  * Also, when using pci/pcie, we can optimize away the core switching for pci registers
867  * and (on newer pci cores) chipcommon registers.
868  */
869 uint
870 ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
871 {
872 	uint origidx = 0;
873 	volatile uint32 *r = NULL;
874 	uint w;
875 	uint intr_val = 0;
876 	bool fast = FALSE;
877 	si_info_t *sii = SI_INFO(sih);
878 	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
879 
880 	ASSERT(GOODIDX(coreidx));
881 	ASSERT(regoff < SI_CORE_SIZE);
882 	ASSERT((val & ~mask) == 0);
883 
884 	if (coreidx >= SI_MAXCORES)
885 		return 0;
886 
887 	if (BUSTYPE(sih->bustype) == SI_BUS) {
888 		/* If internal bus, we can always get at everything */
889 		fast = TRUE;
890 		/* map if does not exist */
891 		if (!cores_info->regs[coreidx]) {
892 			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
893 			                            SI_CORE_SIZE);
894 			ASSERT(GOODREGS(cores_info->regs[coreidx]));
895 		}
896 		r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
897 	} else if (BUSTYPE(sih->bustype) == PCI_BUS) {
898 		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
899 
900 		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
901 			/* Chipc registers are mapped at 12KB */
902 
903 			fast = TRUE;
904 			r = (volatile uint32 *)((volatile char *)sii->curmap +
905 			               PCI_16KB0_CCREGS_OFFSET + regoff);
906 		} else if (sii->pub.buscoreidx == coreidx) {
907 			/* pci registers are either in the last 2KB of an 8KB window
908 			 * or, in pcie and pci rev 13, at 8KB
909 			 */
910 			fast = TRUE;
911 			if (SI_FAST(sii))
912 				r = (volatile uint32 *)((volatile char *)sii->curmap +
913 				               PCI_16KB0_PCIREGS_OFFSET + regoff);
914 			else
915 				r = (volatile uint32 *)((volatile char *)sii->curmap +
916 				               ((regoff >= SBCONFIGOFF) ?
917 				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
918 				               regoff);
919 		}
920 	}
921 
922 	if (!fast) {
923 		INTR_OFF(sii, intr_val);
924 
925 		/* save current core index */
926 		origidx = si_coreidx(&sii->pub);
927 
928 		/* switch core */
929 		r = (volatile uint32*) ((volatile uchar*) ai_setcoreidx(&sii->pub, coreidx) +
930 		               regoff);
931 	}
932 	ASSERT(r != NULL);
933 
934 	/* mask and set */
935 	if (mask || val) {
936 		w = (R_REG(sii->osh, r) & ~mask) | val;
937 		W_REG(sii->osh, r, w);
938 	}
939 
940 	/* readback */
941 	w = R_REG(sii->osh, r);
942 
943 	if (!fast) {
944 		/* restore core index */
945 		if (origidx != coreidx)
946 			ai_setcoreidx(&sii->pub, origidx);
947 
948 		INTR_RESTORE(sii, intr_val);
949 	}
950 
951 	return (w);
952 }
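/*
 * Usage sketch (annotation, not in the original source): mask == 0, val == 0
 * turns this into a read. For example, fetching the chipcommon chip ID
 * (assuming OFFSETOF() from bcmutils and the chipcregs_t layout from
 * sbchipc.h):
 *
 *	w = ai_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, chipid), 0, 0);
 */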
953 
954 /*
955  * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
956  * switch back to the original core, and return the new value.
957  *
958  * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
959  *
960  * Also, when using pci/pcie, we can optimize away the core switching for pci registers
961  * and (on newer pci cores) chipcommon registers.
962  */
963 uint
964 ai_corereg_writeonly(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
965 {
966 	uint origidx = 0;
967 	volatile uint32 *r = NULL;
968 	uint w = 0;
969 	uint intr_val = 0;
970 	bool fast = FALSE;
971 	si_info_t *sii = SI_INFO(sih);
972 	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
973 
974 	ASSERT(GOODIDX(coreidx));
975 	ASSERT(regoff < SI_CORE_SIZE);
976 	ASSERT((val & ~mask) == 0);
977 
978 	if (coreidx >= SI_MAXCORES)
979 		return 0;
980 
981 	if (BUSTYPE(sih->bustype) == SI_BUS) {
982 		/* If internal bus, we can always get at everything */
983 		fast = TRUE;
984 		/* map if does not exist */
985 		if (!cores_info->regs[coreidx]) {
986 			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
987 			                            SI_CORE_SIZE);
988 			ASSERT(GOODREGS(cores_info->regs[coreidx]));
989 		}
990 		r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
991 	} else if (BUSTYPE(sih->bustype) == PCI_BUS) {
992 		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
993 
994 		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
995 			/* Chipc registers are mapped at 12KB */
996 
997 			fast = TRUE;
998 			r = (volatile uint32 *)((volatile char *)sii->curmap +
999 			               PCI_16KB0_CCREGS_OFFSET + regoff);
1000 		} else if (sii->pub.buscoreidx == coreidx) {
1001 			/* pci registers are either in the last 2KB of an 8KB window
1002 			 * or, in pcie and pci rev 13, at 8KB
1003 			 */
1004 			fast = TRUE;
1005 			if (SI_FAST(sii))
1006 				r = (volatile uint32 *)((volatile char *)sii->curmap +
1007 				               PCI_16KB0_PCIREGS_OFFSET + regoff);
1008 			else
1009 				r = (volatile uint32 *)((volatile char *)sii->curmap +
1010 				               ((regoff >= SBCONFIGOFF) ?
1011 				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
1012 				               regoff);
1013 		}
1014 	}
1015 
1016 	if (!fast) {
1017 		INTR_OFF(sii, intr_val);
1018 
1019 		/* save current core index */
1020 		origidx = si_coreidx(&sii->pub);
1021 
1022 		/* switch core */
1023 		r = (volatile uint32*) ((volatile uchar*) ai_setcoreidx(&sii->pub, coreidx) +
1024 		               regoff);
1025 	}
1026 	ASSERT(r != NULL);
1027 
1028 	/* mask and set */
1029 	if (mask || val) {
1030 		w = (R_REG(sii->osh, r) & ~mask) | val;
1031 		W_REG(sii->osh, r, w);
1032 	}
1033 
1034 	if (!fast) {
1035 		/* restore core index */
1036 		if (origidx != coreidx)
1037 			ai_setcoreidx(&sii->pub, origidx);
1038 
1039 		INTR_RESTORE(sii, intr_val);
1040 	}
1041 
1042 	return (w);
1043 }
1044 
1045 /*
1046  * If there is no need for fiddling with interrupts or core switches (typically silicon
1047  * back plane registers, pci registers and chipcommon registers), this function
1048  * returns the register offset on this core to a mapped address. This address can
1049  * be used for W_REG/R_REG directly.
1050  *
1051  * For accessing registers that would need a core switch, this function will return
1052  * NULL.
1053  */
1054 volatile uint32 *
1055 ai_corereg_addr(si_t *sih, uint coreidx, uint regoff)
1056 {
1057 	volatile uint32 *r = NULL;
1058 	bool fast = FALSE;
1059 	si_info_t *sii = SI_INFO(sih);
1060 	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
1061 
1062 	ASSERT(GOODIDX(coreidx));
1063 	ASSERT(regoff < SI_CORE_SIZE);
1064 
1065 	if (coreidx >= SI_MAXCORES)
1066 		return 0;
1067 
1068 	if (BUSTYPE(sih->bustype) == SI_BUS) {
1069 		/* If internal bus, we can always get at everything */
1070 		fast = TRUE;
1071 		/* map if does not exist */
1072 		if (!cores_info->regs[coreidx]) {
1073 			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
1074 			                            SI_CORE_SIZE);
1075 			ASSERT(GOODREGS(cores_info->regs[coreidx]));
1076 		}
1077 		r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
1078 	} else if (BUSTYPE(sih->bustype) == PCI_BUS) {
1079 		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
1080 
1081 		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
1082 			/* Chipc registers are mapped at 12KB */
1083 
1084 			fast = TRUE;
1085 			r = (volatile uint32 *)((volatile char *)sii->curmap +
1086 			               PCI_16KB0_CCREGS_OFFSET + regoff);
1087 		} else if (sii->pub.buscoreidx == coreidx) {
1088 			/* pci registers are either in the last 2KB of an 8KB window
1089 			 * or, in pcie and pci rev 13, at 8KB
1090 			 */
1091 			fast = TRUE;
1092 			if (SI_FAST(sii))
1093 				r = (volatile uint32 *)((volatile char *)sii->curmap +
1094 				               PCI_16KB0_PCIREGS_OFFSET + regoff);
1095 			else
1096 				r = (volatile uint32 *)((volatile char *)sii->curmap +
1097 				               ((regoff >= SBCONFIGOFF) ?
1098 				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
1099 				               regoff);
1100 		}
1101 	}
1102 
1103 	if (!fast) {
1104 		ASSERT(sii->curidx == coreidx);
1105 		r = (volatile uint32*) ((volatile uchar*)sii->curmap + regoff);
1106 	}
1107 
1108 	return (r);
1109 }
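/*
 * Hot-path sketch (annotation, not in the original source): resolve the
 * address once, then use W_REG/R_REG directly; fall back to ai_corereg()
 * when NULL is returned because a core switch would be needed:
 *
 *	volatile uint32 *r = ai_corereg_addr(sih, coreidx, regoff);
 *	if (r != NULL)
 *		W_REG(sii->osh, r, v);
 *	else
 *		(void)ai_corereg(sih, coreidx, regoff, ~0, v);
 */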
1110 
1111 void
1112 ai_core_disable(si_t *sih, uint32 bits)
1113 {
1114 	si_info_t *sii = SI_INFO(sih);
1115 	volatile uint32 dummy;
1116 	uint32 status;
1117 	aidmp_t *ai;
1118 
1119 	ASSERT(GOODREGS(sii->curwrap));
1120 	ai = sii->curwrap;
1121 
1122 	/* if core is already in reset, just return */
1123 	if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) {
1124 		return;
1125 	}
1126 
1127 	/* ensure there are no pending backplane operations */
1128 	SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
1129 
1130 	/* if backplane ops are still pending, try waiting longer */
1131 	if (status != 0) {
1132 		/* 300usecs was sufficient to allow backplane ops to clear for big hammer */
1133 		/* during driver load we may need more time */
1134 		SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 10000);
1135 		/* if still pending ops, continue on and try disable anyway */
1136 		/* this is in big hammer path, so don't call wl_reinit in this case... */
1137 	}
1138 
1139 	W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
1140 	dummy = R_REG(sii->osh, &ai->resetctrl);
1141 	BCM_REFERENCE(dummy);
1142 	OSL_DELAY(1);
1143 
1144 	W_REG(sii->osh, &ai->ioctrl, bits);
1145 	dummy = R_REG(sii->osh, &ai->ioctrl);
1146 	BCM_REFERENCE(dummy);
1147 	OSL_DELAY(10);
1148 }
1149 
1150 /* reset and re-enable a core
1151  * inputs:
1152  * bits - core specific bits that are set during and after reset sequence
1153  * resetbits - core specific bits that are set only during reset sequence
1154  */
1155 static void
1156 _ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
1157 {
1158 	si_info_t *sii = SI_INFO(sih);
1159 #if defined(UCM_CORRUPTION_WAR)
1160 	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
1161 #endif // endif
1162 	aidmp_t *ai;
1163 	volatile uint32 dummy;
1164 	uint loop_counter = 10;
1165 
1166 	ASSERT(GOODREGS(sii->curwrap));
1167 	ai = sii->curwrap;
1168 
1169 	/* ensure there are no pending backplane operations */
1170 	SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
1171 
1172 	/* put core into reset state */
1173 	W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
1174 	OSL_DELAY(10);
1175 
1176 	/* ensure there are no pending backplane operations */
1177 	SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
1178 
1179 	W_REG(sii->osh, &ai->ioctrl, (bits | resetbits | SICF_FGC | SICF_CLOCK_EN));
1180 	dummy = R_REG(sii->osh, &ai->ioctrl);
1181 	BCM_REFERENCE(dummy);
1182 #ifdef UCM_CORRUPTION_WAR
1183 	if (cores_info->coreid[sii->curidx] == D11_CORE_ID) {
1184 		/* Reset FGC */
1185 		OSL_DELAY(1);
1186 		W_REG(sii->osh, &ai->ioctrl, (dummy & (~SICF_FGC)));
1187 	}
1188 #endif /* UCM_CORRUPTION_WAR */
1189 	/* ensure there are no pending backplane operations */
1190 	SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
1191 
1192 	while (R_REG(sii->osh, &ai->resetctrl) != 0 && --loop_counter != 0) {
1193 		/* ensure there are no pending backplane operations */
1194 		SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
1195 
1196 		/* take core out of reset */
1197 		W_REG(sii->osh, &ai->resetctrl, 0);
1198 
1199 		/* ensure there are no pending backplane operations */
1200 		SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
1201 	}
1202 
1203 #ifdef UCM_CORRUPTION_WAR
1204 	/* Pulse FGC after lifting Reset */
1205 	W_REG(sii->osh, &ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
1206 #else
1207 	W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN));
1208 #endif /* UCM_CORRUPTION_WAR */
1209 	dummy = R_REG(sii->osh, &ai->ioctrl);
1210 	BCM_REFERENCE(dummy);
1211 #ifdef UCM_CORRUPTION_WAR
1212 	if (cores_info->coreid[sii->curidx] == D11_CORE_ID) {
1213 		/* Reset FGC */
1214 		OSL_DELAY(1);
1215 		W_REG(sii->osh, &ai->ioctrl, (dummy & (~SICF_FGC)));
1216 	}
1217 #endif /* UCM_CORRUPTION_WAR */
1218 	OSL_DELAY(1);
1219 }
1220 
1221 void
1222 ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
1223 {
1224 	si_info_t *sii = SI_INFO(sih);
1225 	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
1226 	uint idx = sii->curidx;
1227 
1228 	if (cores_info->wrapba3[idx] != 0) {
1229 		ai_setcoreidx_3rdwrap(sih, idx);
1230 		_ai_core_reset(sih, bits, resetbits);
1231 		ai_setcoreidx(sih, idx);
1232 	}
1233 
1234 	if (cores_info->wrapba2[idx] != 0) {
1235 		ai_setcoreidx_2ndwrap(sih, idx);
1236 		_ai_core_reset(sih, bits, resetbits);
1237 		ai_setcoreidx(sih, idx);
1238 	}
1239 
1240 	_ai_core_reset(sih, bits, resetbits);
1241 }
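/*
 * Bring-up sketch (annotation, not in the original source): a typical
 * disable/reset cycle for the currently selected core; ai_core_reset()
 * walks the third, second, and then primary wrapper, as the code above
 * shows:
 *
 *	ai_core_disable(sih, 0);
 *	ai_core_reset(sih, bits, resetbits);
 *	ASSERT(ai_iscoreup(sih));
 */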
1242 
1243 void
1244 ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
1245 {
1246 	si_info_t *sii = SI_INFO(sih);
1247 	aidmp_t *ai;
1248 	uint32 w;
1249 
1250 	if (BCM4707_DMP()) {
1251 		SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
1252 			__FUNCTION__));
1253 		return;
1254 	}
1255 	if (PMU_DMP()) {
1256 		SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
1257 			__FUNCTION__));
1258 		return;
1259 	}
1260 
1261 	ASSERT(GOODREGS(sii->curwrap));
1262 	ai = sii->curwrap;
1263 
1264 	ASSERT((val & ~mask) == 0);
1265 
1266 	if (mask || val) {
1267 		w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
1268 		W_REG(sii->osh, &ai->ioctrl, w);
1269 	}
1270 }
1271 
1272 uint32
1273 ai_core_cflags(si_t *sih, uint32 mask, uint32 val)
1274 {
1275 	si_info_t *sii = SI_INFO(sih);
1276 	aidmp_t *ai;
1277 	uint32 w;
1278 
1279 	if (BCM4707_DMP()) {
1280 		SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
1281 			__FUNCTION__));
1282 		return 0;
1283 	}
1284 
1285 	if (PMU_DMP()) {
1286 		SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
1287 			__FUNCTION__));
1288 		return 0;
1289 	}
1290 	ASSERT(GOODREGS(sii->curwrap));
1291 	ai = sii->curwrap;
1292 
1293 	ASSERT((val & ~mask) == 0);
1294 
1295 	if (mask || val) {
1296 		w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
1297 		W_REG(sii->osh, &ai->ioctrl, w);
1298 	}
1299 
1300 	return R_REG(sii->osh, &ai->ioctrl);
1301 }
1302 
1303 uint32
1304 ai_core_sflags(si_t *sih, uint32 mask, uint32 val)
1305 {
1306 	si_info_t *sii = SI_INFO(sih);
1307 	aidmp_t *ai;
1308 	uint32 w;
1309 
1310 	if (BCM4707_DMP()) {
1311 		SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
1312 			__FUNCTION__));
1313 		return 0;
1314 	}
1315 	if (PMU_DMP()) {
1316 		SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
1317 			__FUNCTION__));
1318 		return 0;
1319 	}
1320 
1321 	ASSERT(GOODREGS(sii->curwrap));
1322 	ai = sii->curwrap;
1323 
1324 	ASSERT((val & ~mask) == 0);
1325 	ASSERT((mask & ~SISF_CORE_BITS) == 0);
1326 
1327 	if (mask || val) {
1328 		w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val);
1329 		W_REG(sii->osh, &ai->iostatus, w);
1330 	}
1331 
1332 	return R_REG(sii->osh, &ai->iostatus);
1333 }
1334 
1335 #if defined(BCMDBG_PHYDUMP)
1336 /* print interesting aidmp registers */
1337 void
1338 ai_dumpregs(si_t *sih, struct bcmstrbuf *b)
1339 {
1340 	si_info_t *sii = SI_INFO(sih);
1341 	osl_t *osh;
1342 	aidmp_t *ai;
1343 	uint i;
1344 	uint32 prev_value = 0;
1345 	axi_wrapper_t * axi_wrapper = sii->axi_wrapper;
1346 	uint32 cfg_reg = 0;
1347 	uint bar0_win_offset = 0;
1348 
1349 	osh = sii->osh;
1350 
1351 	/* Save and restore wrapper access window */
1352 	if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
1353 		if (PCIE_GEN2(sii)) {
1354 			cfg_reg = PCIE2_BAR0_CORE2_WIN2;
1355 			bar0_win_offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
1356 		} else {
1357 			cfg_reg = PCI_BAR0_WIN2;
1358 			bar0_win_offset = PCI_BAR0_WIN2_OFFSET;
1359 		}
1360 
1361 		prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4);
1362 
1363 		if (prev_value == ID32_INVALID) {
1364 			SI_PRINT(("%s, PCI_BAR0_WIN2 - %x\n", __FUNCTION__, prev_value));
1365 			return;
1366 		}
1367 	}
1368 
1369 	bcm_bprintf(b, "ChipNum:%x, ChipRev:%x, BusType:%x, BoardType:%x, BoardVendor:%x\n\n",
1370 		sih->chip, sih->chiprev, sih->bustype, sih->boardtype, sih->boardvendor);
1371 
1372 	for (i = 0; i < sii->axi_num_wrappers; i++) {
1373 
1374 		if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
1375 			/* Set BAR0 window to the bridge wrapper base address */
1376 			OSL_PCI_WRITE_CONFIG(osh,
1377 				cfg_reg, 4, axi_wrapper[i].wrapper_addr);
1378 
1379 			ai = (aidmp_t *) ((volatile uint8*)sii->curmap + bar0_win_offset);
1380 		} else {
1381 			ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr;
1382 		}
1383 
1384 		bcm_bprintf(b, "core 0x%x: core_rev:%d, %s_WR ADDR:%x\n", axi_wrapper[i].cid,
1385 			axi_wrapper[i].rev,
1386 			axi_wrapper[i].wrapper_type == AI_SLAVE_WRAPPER ? "SLAVE" : "MASTER",
1387 			axi_wrapper[i].wrapper_addr);
1388 
1389 		/* BCM4707_DMP() */
1390 		if (BCM4707_CHIP(CHIPID(sih->chip)) &&
1391 			(axi_wrapper[i].cid == NS_CCB_CORE_ID)) {
1392 			bcm_bprintf(b, "Skipping chipcommonb in 4707\n");
1393 			continue;
1394 		}
1395 
1396 		bcm_bprintf(b, "ioctrlset 0x%x ioctrlclear 0x%x ioctrl 0x%x iostatus 0x%x "
1397 			    "ioctrlwidth 0x%x iostatuswidth 0x%x\n"
1398 			    "resetctrl 0x%x resetstatus 0x%x resetreadid 0x%x resetwriteid 0x%x\n"
1399 			    "errlogctrl 0x%x errlogdone 0x%x errlogstatus 0x%x "
1400 			    "errlogaddrlo 0x%x errlogaddrhi 0x%x\n"
1401 			    "errlogid 0x%x errloguser 0x%x errlogflags 0x%x\n"
1402 			    "intstatus 0x%x config 0x%x itcr 0x%x\n\n",
1403 			    R_REG(osh, &ai->ioctrlset),
1404 			    R_REG(osh, &ai->ioctrlclear),
1405 			    R_REG(osh, &ai->ioctrl),
1406 			    R_REG(osh, &ai->iostatus),
1407 			    R_REG(osh, &ai->ioctrlwidth),
1408 			    R_REG(osh, &ai->iostatuswidth),
1409 			    R_REG(osh, &ai->resetctrl),
1410 			    R_REG(osh, &ai->resetstatus),
1411 			    R_REG(osh, &ai->resetreadid),
1412 			    R_REG(osh, &ai->resetwriteid),
1413 			    R_REG(osh, &ai->errlogctrl),
1414 			    R_REG(osh, &ai->errlogdone),
1415 			    R_REG(osh, &ai->errlogstatus),
1416 			    R_REG(osh, &ai->errlogaddrlo),
1417 			    R_REG(osh, &ai->errlogaddrhi),
1418 			    R_REG(osh, &ai->errlogid),
1419 			    R_REG(osh, &ai->errloguser),
1420 			    R_REG(osh, &ai->errlogflags),
1421 			    R_REG(osh, &ai->intstatus),
1422 			    R_REG(osh, &ai->config),
1423 			    R_REG(osh, &ai->itcr));
1424 	}
1425 
1426 	/* Restore the initial wrapper space */
1427 	if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
1428 		if (prev_value && cfg_reg) {
1429 			OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value);
1430 		}
1431 	}
1432 }
1433 #endif // endif
1434 
1435 void
1436 ai_update_backplane_timeouts(si_t *sih, bool enable, uint32 timeout_exp, uint32 cid)
1437 {
1438 #if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
1439 	si_info_t *sii = SI_INFO(sih);
1440 	aidmp_t *ai;
1441 	uint32 i;
1442 	axi_wrapper_t * axi_wrapper = sii->axi_wrapper;
1443 	uint32 errlogctrl = (enable << AIELC_TO_ENAB_SHIFT) |
1444 		((timeout_exp << AIELC_TO_EXP_SHIFT) & AIELC_TO_EXP_MASK);
1445 
1446 #ifdef BCM_BACKPLANE_TIMEOUT
1447 	uint32 prev_value = 0;
1448 	osl_t *osh = sii->osh;
1449 	uint32 cfg_reg = 0;
1450 	uint32 offset = 0;
1451 #endif /* BCM_BACKPLANE_TIMEOUT */
1452 
1453 	if ((sii->axi_num_wrappers == 0) ||
1454 #ifdef BCM_BACKPLANE_TIMEOUT
1455 		(!PCIE(sii)) ||
1456 #endif /* BCM_BACKPLANE_TIMEOUT */
1457 		FALSE) {
1458 		SI_VMSG((" %s, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d, ID:%x\n",
1459 			__FUNCTION__, sii->axi_num_wrappers, PCIE(sii),
1460 			BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
1461 		return;
1462 	}
1463 
1464 #ifdef BCM_BACKPLANE_TIMEOUT
1465 	/* Save and restore the wrapper access window */
1466 	if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
1467 		if (PCIE_GEN1(sii)) {
1468 			cfg_reg = PCI_BAR0_WIN2;
1469 			offset = PCI_BAR0_WIN2_OFFSET;
1470 		} else if (PCIE_GEN2(sii)) {
1471 			cfg_reg = PCIE2_BAR0_CORE2_WIN2;
1472 			offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
1473 		}
1474 		else {
1475 			ASSERT(!"!PCIE_GEN1 && !PCIE_GEN2");
1476 		}
1477 
1478 		prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4);
1479 		if (prev_value == ID32_INVALID) {
1480 			SI_PRINT(("%s, PCI_BAR0_WIN2 - %x\n", __FUNCTION__, prev_value));
1481 			return;
1482 		}
1483 	}
1484 #endif /* BCM_BACKPLANE_TIMEOUT */
1485 
1486 	for (i = 0; i < sii->axi_num_wrappers; ++i) {
1487 
1488 		if (axi_wrapper[i].wrapper_type != AI_SLAVE_WRAPPER) {
1489 			SI_VMSG(("SKIP ENABLE BPT: MFG:%x, CID:%x, ADDR:%x\n",
1490 				axi_wrapper[i].mfg,
1491 				axi_wrapper[i].cid,
1492 				axi_wrapper[i].wrapper_addr));
1493 			continue;
1494 		}
1495 
1496 		/* Update only given core if requested */
1497 		if ((cid != 0) && (axi_wrapper[i].cid != cid)) {
1498 			continue;
1499 		}
1500 
1501 #ifdef BCM_BACKPLANE_TIMEOUT
1502 		if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
1503 			/* Set BAR0_CORE2_WIN2 to the bridge wrapper base address */
1504 			OSL_PCI_WRITE_CONFIG(osh,
1505 				cfg_reg, 4, axi_wrapper[i].wrapper_addr);
1506 
1507 			/* set AI to BAR0 + Offset corresponding to Gen1 or gen2 */
1508 			ai = (aidmp_t *) (DISCARD_QUAL(sii->curmap, uint8) + offset);
1509 		}
1510 		else
1511 #endif /* BCM_BACKPLANE_TIMEOUT */
1512 		{
1513 			ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr;
1514 		}
1515 
1516 		W_REG(sii->osh, &ai->errlogctrl, errlogctrl);
1517 
1518 		SI_VMSG(("ENABLED BPT: MFG:%x, CID:%x, ADDR:%x, ERR_CTRL:%x\n",
1519 			axi_wrapper[i].mfg,
1520 			axi_wrapper[i].cid,
1521 			axi_wrapper[i].wrapper_addr,
1522 			R_REG(sii->osh, &ai->errlogctrl)));
1523 	}
1524 
1525 #ifdef BCM_BACKPLANE_TIMEOUT
1526 	/* Restore the initial wrapper space */
1527 	if (prev_value) {
1528 		OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value);
1529 	}
1530 #endif /* BCM_BACKPLANE_TIMEOUT */
1531 
1532 #endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */
1533 }
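/*
 * Encoding sketch (annotation, not in the original source): errlogctrl packs
 * the enable flag and the timeout exponent, so the per-wrapper timeout is on
 * the order of 2^timeout_exp backplane cycles (assumed semantics of the
 * AIELC_TO_EXP field). With cid == 0 every slave wrapper is updated:
 *
 *	ai_update_backplane_timeouts(sih, TRUE, timeout_exp, 0);   // enable all
 *	ai_update_backplane_timeouts(sih, FALSE, 0, some_core_id); // one core only
 *
 * some_core_id is a stand-in for a real core ID here.
 */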
1534 
1535 #if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
1536 
1537 /* slave error is ignored, so account for those cases */
1538 static uint32 si_ignore_errlog_cnt = 0;
1539 
1540 static bool
1541 ai_ignore_errlog(si_info_t *sii, aidmp_t *ai,
1542 	uint32 lo_addr, uint32 hi_addr, uint32 err_axi_id, uint32 errsts)
1543 {
1544 	uint32 axi_id;
1545 #ifdef BCMPCIE_BTLOG
1546 	uint32 axi_id2 = BCM4347_UNUSED_AXI_ID;
1547 #endif	/* BCMPCIE_BTLOG */
1548 	uint32 ignore_errsts = AIELS_SLAVE_ERR;
1549 	uint32 ignore_hi = BT_CC_SPROM_BADREG_HI;
1550 	uint32 ignore_lo = BT_CC_SPROM_BADREG_LO;
1551 	uint32 ignore_size = BT_CC_SPROM_BADREG_SIZE;
1552 
1553 	/* ignore the BT slave errors if the errlog is to chipcommon addr 0x190 */
1554 	switch (CHIPID(sii->pub.chip)) {
1555 		case BCM4350_CHIP_ID:
1556 			axi_id = BCM4350_BT_AXI_ID;
1557 			break;
1558 		case BCM4345_CHIP_ID:
1559 			axi_id = BCM4345_BT_AXI_ID;
1560 			break;
1561 		case BCM4349_CHIP_GRPID:
1562 			axi_id = BCM4349_BT_AXI_ID;
1563 			break;
1564 		case BCM4364_CHIP_ID:
1565 		case BCM4373_CHIP_ID:
1566 			axi_id = BCM4364_BT_AXI_ID;
1567 			break;
1568 #ifdef BCMPCIE_BTLOG
1569 		case BCM4347_CHIP_ID:
1570 		case BCM4357_CHIP_ID:
1571 			axi_id = BCM4347_CC_AXI_ID;
1572 			axi_id2 = BCM4347_PCIE_AXI_ID;
1573 			ignore_errsts = AIELS_TIMEOUT;
1574 			ignore_hi = BCM4347_BT_ADDR_HI;
1575 			ignore_lo = BCM4347_BT_ADDR_LO;
1576 			ignore_size = BCM4347_BT_SIZE;
1577 			break;
1578 #endif	/* BCMPCIE_BTLOG */
1579 
1580 		default:
1581 			return FALSE;
1582 	}
1583 
1584 	/* AXI ID check */
1585 	err_axi_id &= AI_ERRLOGID_AXI_ID_MASK;
1586 	if (!(err_axi_id == axi_id ||
1587 #ifdef BCMPCIE_BTLOG
1588 	      (axi_id2 != BCM4347_UNUSED_AXI_ID && err_axi_id == axi_id2)))
1589 #else
1590 	      FALSE))
1591 #endif	/* BCMPCIE_BTLOG */
1592 		return FALSE;
1593 
1594 	/* slave errors */
1595 	if ((errsts & AIELS_TIMEOUT_MASK) != ignore_errsts)
1596 		return FALSE;
1597 
1598 	/* address range check */
1599 	if ((hi_addr != ignore_hi) ||
1600 	    (lo_addr < ignore_lo) || (lo_addr >= (ignore_lo + ignore_size)))
1601 		return FALSE;
1602 
1603 #ifdef BCMPCIE_BTLOG
1604 	if (ignore_errsts == AIELS_TIMEOUT) {
1605 		/* reset AXI timeout */
1606 		ai_reset_axi_to(sii, ai);
1607 	}
1608 #endif	/* BCMPCIE_BTLOG */
1609 
1610 	return TRUE;
1611 }
1612 #endif /* defined (AXI_TIMEOUTS) || defined (BCM_BACKPLANE_TIMEOUT) */
1613 
1614 #ifdef BCM_BACKPLANE_TIMEOUT
1615 
1616 /* Function to return the APB bridge details corresponding to the core */
1617 static bool
1618 ai_get_apb_bridge(si_t *sih, uint32 coreidx, uint32 *apb_id, uint32 *apb_coreuinit)
1619 {
1620 	uint i;
1621 	uint32 core_base, core_end;
1622 	si_info_t *sii = SI_INFO(sih);
1623 	static uint32 coreidx_cached = 0, apb_id_cached = 0, apb_coreunit_cached = 0;
1624 	uint32 tmp_coreunit = 0;
1625 	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
1626 
1627 	if (coreidx >= MIN(sii->numcores, SI_MAXCORES))
1628 		return FALSE;
1629 
1630 	/* Most of the time the APB bridge query is for the d11 core.
1631 	 * Cache the last lookup and return it on a hit rather than iterating the table.
1632 	 */
1633 	if (coreidx_cached == coreidx) {
1634 		*apb_id = apb_id_cached;
1635 		*apb_coreuinit = apb_coreunit_cached;
1636 		return TRUE;
1637 	}
1638 
1639 	core_base = cores_info->coresba[coreidx];
1640 	core_end = core_base + cores_info->coresba_size[coreidx];
1641 
1642 	for (i = 0; i < sii->numcores; i++) {
1643 		if (cores_info->coreid[i] == APB_BRIDGE_ID) {
1644 			uint32 apb_base;
1645 			uint32 apb_end;
1646 
1647 			apb_base = cores_info->coresba[i];
1648 			apb_end = apb_base + cores_info->coresba_size[i];
1649 
1650 			if ((core_base >= apb_base) &&
1651 				(core_end <= apb_end)) {
1652 				/* Current core is attached to this APB bridge */
1653 				*apb_id = apb_id_cached = APB_BRIDGE_ID;
1654 				*apb_coreuinit = apb_coreunit_cached = tmp_coreunit;
1655 				coreidx_cached = coreidx;
1656 				return TRUE;
1657 			}
1658 			/* Increment the coreunit */
1659 			tmp_coreunit++;
1660 		}
1661 	}
1662 
1663 	return FALSE;
1664 }
1665 
1666 uint32
1667 ai_clear_backplane_to_fast(si_t *sih, void *addr)
1668 {
1669 	si_info_t *sii = SI_INFO(sih);
1670 	volatile void *curmap = sii->curmap;
1671 	bool core_reg = FALSE;
1672 
1673 	/* Use fast path only for core register access */
1674 	if (((uintptr)addr >= (uintptr)curmap) &&
1675 		((uintptr)addr < ((uintptr)curmap + SI_CORE_SIZE))) {
1676 		/* address being accessed is within current core reg map */
1677 		core_reg = TRUE;
1678 	}
1679 
1680 	if (core_reg) {
1681 		uint32 apb_id, apb_coreuinit;
1682 
1683 		if (ai_get_apb_bridge(sih, si_coreidx(&sii->pub),
1684 			&apb_id, &apb_coreuinit) == TRUE) {
1685 			/* Found the APB bridge corresponding to current core,
1686 			 * Check for bus errors in APB wrapper
1687 			 */
1688 			return ai_clear_backplane_to_per_core(sih,
1689 				apb_id, apb_coreuinit, NULL);
1690 		}
1691 	}
1692 
1693 	/* Default is to poll for errors on all slave wrappers */
1694 	return si_clear_backplane_to(sih);
1695 }
1696 #endif /* BCM_BACKPLANE_TIMEOUT */
1697 
1698 #if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
1699 static bool g_disable_backplane_logs = FALSE;
1700 
1701 #if defined(ETD)
1702 static uint32 last_axi_error = AXI_WRAP_STS_NONE;
1703 static uint32 last_axi_error_core = 0;
1704 static uint32 last_axi_error_wrap = 0;
1705 #endif /* ETD */
1706 
1707 /*
1708  * API to clear the backplane timeout per core.
1709  * The caller may pass an optional wrapper address. If present, it is used as
1710  * the wrapper base address, and the caller must then also provide the
1711  * coreid.
1712  * If both coreid and wrapper are zero, the error status of the current
1713  * bridge is verified.
1714  */
1715 uint32
1716 ai_clear_backplane_to_per_core(si_t *sih, uint coreid, uint coreunit, void *wrap)
1717 {
1718 	int ret = AXI_WRAP_STS_NONE;
1719 	aidmp_t *ai = NULL;
1720 	uint32 errlog_status = 0;
1721 	si_info_t *sii = SI_INFO(sih);
1722 	uint32 errlog_lo = 0, errlog_hi = 0, errlog_id = 0, errlog_flags = 0;
1723 	uint32 current_coreidx = si_coreidx(sih);
1724 	uint32 target_coreidx = si_findcoreidx(sih, coreid, coreunit);
1725 
1726 #if defined(BCM_BACKPLANE_TIMEOUT)
1727 	si_axi_error_t * axi_error = sih->err_info ?
1728 		&sih->err_info->axi_error[sih->err_info->count] : NULL;
1729 #endif /* BCM_BACKPLANE_TIMEOUT */
1730 	bool restore_core = FALSE;
1731 
1732 	if ((sii->axi_num_wrappers == 0) ||
1733 #ifdef BCM_BACKPLANE_TIMEOUT
1734 		(!PCIE(sii)) ||
1735 #endif /* BCM_BACKPLANE_TIMEOUT */
1736 		FALSE) {
1737 		SI_VMSG((" %s, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d, ID:%x\n",
1738 			__FUNCTION__, sii->axi_num_wrappers, PCIE(sii),
1739 			BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
1740 		return AXI_WRAP_STS_NONE;
1741 	}
1742 
1743 	if (wrap != NULL) {
1744 		ai = (aidmp_t *)wrap;
1745 	} else if (coreid && (target_coreidx != current_coreidx)) {
1746 
1747 		if (ai_setcoreidx(sih, target_coreidx) == NULL) {
1748 			/* Unable to set the core */
1749 			SI_PRINT(("Set Core Failed: coreid:%x, unit:%d, target_coreidx:%d\n",
1750 				coreid, coreunit, target_coreidx));
1751 			errlog_lo = target_coreidx;
1752 			ret = AXI_WRAP_STS_SET_CORE_FAIL;
1753 			goto end;
1754 		}
1755 
1756 		restore_core = TRUE;
1757 		ai = (aidmp_t *)si_wrapperregs(sih);
1758 	} else {
1759 		/* Read error status of current wrapper */
1760 		ai = (aidmp_t *)si_wrapperregs(sih);
1761 
1762 		/* Update coreid to the current core's ID */
1763 		coreid = si_coreid(sih);
1764 	}
1765 
1766 	/* read error log status */
1767 	errlog_status = R_REG(sii->osh, &ai->errlogstatus);
1768 
1769 	if (errlog_status == ID32_INVALID) {
1770 		/* Do not try to peek further */
1771 		SI_PRINT(("%s, errlogstatus:%x - Slave Wrapper:%x\n",
1772 			__FUNCTION__, errlog_status, coreid));
1773 		ret = AXI_WRAP_STS_WRAP_RD_ERR;
1774 		errlog_lo = (uint32)(uintptr)&ai->errlogstatus;
1775 		goto end;
1776 	}
1777 
1778 	if ((errlog_status & AIELS_TIMEOUT_MASK) != 0) {
1779 		uint32 tmp;
1780 		uint32 count = 0;
1781 		/* set ErrDone to clear the condition */
1782 		W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK);
1783 
1784 		/* SPINWAIT on errlogstatus timeout status bits */
1785 		while ((tmp = R_REG(sii->osh, &ai->errlogstatus)) & AIELS_TIMEOUT_MASK) {
1786 
1787 			if (tmp == ID32_INVALID) {
1788 				SI_PRINT(("%s: prev errlogstatus:%x, errlogstatus:%x\n",
1789 					__FUNCTION__, errlog_status, tmp));
1790 				ret = AXI_WRAP_STS_WRAP_RD_ERR;
1791 				errlog_lo = (uint32)(uintptr)&ai->errlogstatus;
1792 				goto end;
1793 			}
1794 			/*
1795 			 * Clear again, to avoid getting stuck in the loop, if a new error
1796 			 * is logged after we cleared the first timeout
1797 			 */
1798 			W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK);
1799 
1800 			count++;
1801 			OSL_DELAY(10);
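			/* each pass above delays 10us, so the check below bounds the
			 * total wait to roughly AI_REG_READ_TIMEOUT (assumed us)
			 */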
1802 			if ((10 * count) > AI_REG_READ_TIMEOUT) {
1803 				errlog_status = tmp;
1804 				break;
1805 			}
1806 		}
1807 
1808 		errlog_lo = R_REG(sii->osh, &ai->errlogaddrlo);
1809 		errlog_hi = R_REG(sii->osh, &ai->errlogaddrhi);
1810 		errlog_id = R_REG(sii->osh, &ai->errlogid);
1811 		errlog_flags = R_REG(sii->osh, &ai->errlogflags);
1812 
1813 		/* we are already in the error path, so it is OK to check for the slave error */
1814 		if (ai_ignore_errlog(sii, ai, errlog_lo, errlog_hi, errlog_id,
1815 			errlog_status)) {
1816 			si_ignore_errlog_cnt++;
1817 			goto end;
1818 		}
1819 
1820 		/* only reset the APB bridge on timeout (not on slave or decode errors) */
1821 		switch (errlog_status & AIELS_TIMEOUT_MASK) {
1822 			case AIELS_SLAVE_ERR:
1823 				SI_PRINT(("AXI slave error\n"));
1824 				ret = AXI_WRAP_STS_SLAVE_ERR;
1825 				break;
1826 
1827 			case AIELS_TIMEOUT:
1828 				ai_reset_axi_to(sii, ai);
1829 				ret = AXI_WRAP_STS_TIMEOUT;
1830 				break;
1831 
1832 			case AIELS_DECODE:
1833 				SI_PRINT(("AXI decode error\n"));
1834 				ret = AXI_WRAP_STS_DECODE_ERR;
1835 				break;
1836 			default:
1837 				ASSERT(0);	/* should be impossible */
1838 		}
1839 
1840 		SI_PRINT(("\tCoreID: %x\n", coreid));
1841 		SI_PRINT(("\t errlog: lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x"
1842 			", status 0x%08x\n",
1843 			errlog_lo, errlog_hi, errlog_id, errlog_flags,
1844 			errlog_status));
1845 	}
1846 
1847 end:
1848 #if defined(ETD)
1849 	if (ret != AXI_WRAP_STS_NONE) {
1850 		last_axi_error = ret;
1851 		last_axi_error_core = coreid;
1852 		last_axi_error_wrap = (uint32)(uintptr)ai;
1853 	}
1854 #endif /* ETD */
1855 
1856 #if defined(BCM_BACKPLANE_TIMEOUT)
1857 	if (axi_error && (ret != AXI_WRAP_STS_NONE)) {
1858 		axi_error->error = ret;
1859 		axi_error->coreid = coreid;
1860 		axi_error->errlog_lo = errlog_lo;
1861 		axi_error->errlog_hi = errlog_hi;
1862 		axi_error->errlog_id = errlog_id;
1863 		axi_error->errlog_flags = errlog_flags;
1864 		axi_error->errlog_status = errlog_status;
1865 		sih->err_info->count++;
1866 
1867 		if (sih->err_info->count == SI_MAX_ERRLOG_SIZE) {
1868 			sih->err_info->count = SI_MAX_ERRLOG_SIZE - 1;
1869 			SI_PRINT(("AXI Error log overflow\n"));
1870 		}
1871 	}
1872 #endif /* BCM_BACKPLANE_TIMEOUT */
1873 
1874 	if (restore_core) {
1875 		if (ai_setcoreidx(sih, current_coreidx) == NULL) {
1876 			/* Unable to set the core */
1877 			return ID32_INVALID;
1878 		}
1879 	}
1880 
1881 	return ret;
1882 }
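
/*
 * Illustrative sketch (guarded out): the three calling modes described in the
 * comment preceding ai_clear_backplane_to_per_core(). PCIE2_CORE_ID is assumed
 * to be a core id from hndsoc.h; the mapped wrapper pointer is hypothetical.
 */
#if 0
static void
example_per_core_checks(si_t *sih, void *mapped_wrapper)
{
	uint32 sts;

	/* 1. check the current core's wrapper */
	sts = ai_clear_backplane_to_per_core(sih, 0, 0, NULL);

	/* 2. check a specific core by id/unit (switches core and restores it) */
	sts = ai_clear_backplane_to_per_core(sih, PCIE2_CORE_ID, 0, NULL);

	/* 3. check an already-mapped wrapper directly (coreid supplied too) */
	sts = ai_clear_backplane_to_per_core(sih, PCIE2_CORE_ID, 0, mapped_wrapper);
	BCM_REFERENCE(sts);
}
#endif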
1883 
1884 /* reset the APB bridge to clear an AXI timeout */
1885 static void
1886 ai_reset_axi_to(si_info_t *sii, aidmp_t *ai)
1887 {
1888 	/* reset APB Bridge */
1889 	OR_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
1890 	/* sync write */
1891 	(void)R_REG(sii->osh, &ai->resetctrl);
1892 	/* clear Reset bit */
1893 	AND_REG(sii->osh, &ai->resetctrl, ~(AIRC_RESET));
1894 	/* sync write */
1895 	(void)R_REG(sii->osh, &ai->resetctrl);
1896 	SI_PRINT(("AXI timeout\n"));
1897 	if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) {
1898 		SI_PRINT(("reset failed on wrapper %p\n", ai));
1899 		g_disable_backplane_logs = TRUE;
1900 	}
1901 }
1902 #endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */
1903 
1904 /*
1905  * This API polls all slave wrappers for errors and returns a bit map of
1906  * the reported errors.
1907  * return - bit map of
1908  *	AXI_WRAP_STS_NONE
1909  *	AXI_WRAP_STS_TIMEOUT
1910  *	AXI_WRAP_STS_SLAVE_ERR
1911  *	AXI_WRAP_STS_DECODE_ERR
1912  *	AXI_WRAP_STS_PCI_RD_ERR
1913  *	AXI_WRAP_STS_WRAP_RD_ERR
1914  *	AXI_WRAP_STS_SET_CORE_FAIL
1915  * On timeout detection, the corresponding bridge is reset to
1916  * unblock the bus.
1917  * Errors reported in each wrapper can be retrieved using the API
1918  * si_get_axi_errlog_info().
1919  */
1920 uint32
1921 ai_clear_backplane_to(si_t *sih)
1922 {
1923 	uint32 ret = 0;
1924 #if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
1925 
1926 	si_info_t *sii = SI_INFO(sih);
1927 	aidmp_t *ai;
1928 	uint32 i;
1929 	axi_wrapper_t * axi_wrapper = sii->axi_wrapper;
1930 
1931 #ifdef BCM_BACKPLANE_TIMEOUT
1932 	uint32 prev_value = 0;
1933 	osl_t *osh = sii->osh;
1934 	uint32 cfg_reg = 0;
1935 	uint32 offset = 0;
1936 
1937 	if ((sii->axi_num_wrappers == 0) || (!PCIE(sii)))
1938 #else
1939 	if (sii->axi_num_wrappers == 0)
1940 #endif /* BCM_BACKPLANE_TIMEOUT */
1941 	{
1942 		SI_VMSG((" %s, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d, ID:%x\n",
1943 			__FUNCTION__, sii->axi_num_wrappers, PCIE(sii),
1944 			BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
1945 		return AXI_WRAP_STS_NONE;
1946 	}
1947 
1948 #ifdef BCM_BACKPLANE_TIMEOUT
1949 	/* Save and restore wrapper access window */
1950 	if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
1951 		if (PCIE_GEN1(sii)) {
1952 			cfg_reg = PCI_BAR0_WIN2;
1953 			offset = PCI_BAR0_WIN2_OFFSET;
1954 		} else if (PCIE_GEN2(sii)) {
1955 			cfg_reg = PCIE2_BAR0_CORE2_WIN2;
1956 			offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
1957 		} else {
1959 			ASSERT(!"!PCIE_GEN1 && !PCIE_GEN2");
1960 		}
1961 
1962 		prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4);
1963 
1964 		if (prev_value == ID32_INVALID) {
1965 			si_axi_error_t * axi_error =
1966 				sih->err_info ?
1967 					&sih->err_info->axi_error[sih->err_info->count] :
1968 					NULL;
1969 
1970 			SI_PRINT(("%s, PCI_BAR0_WIN2 - %x\n", __FUNCTION__, prev_value));
1971 			if (axi_error) {
1972 				axi_error->error = ret = AXI_WRAP_STS_PCI_RD_ERR;
1973 				axi_error->errlog_lo = cfg_reg;
1974 				sih->err_info->count++;
1975 
1976 				if (sih->err_info->count == SI_MAX_ERRLOG_SIZE) {
1977 					sih->err_info->count = SI_MAX_ERRLOG_SIZE - 1;
1978 					SI_PRINT(("AXI Error log overflow\n"));
1979 				}
1980 			}
1981 
1982 			return ret;
1983 		}
1984 	}
1985 #endif /* BCM_BACKPLANE_TIMEOUT */
1986 
1987 	for (i = 0; i < sii->axi_num_wrappers; ++i) {
1988 		uint32 tmp;
1989 
1990 		if (axi_wrapper[i].wrapper_type != AI_SLAVE_WRAPPER) {
1991 			continue;
1992 		}
1993 
1994 #ifdef BCM_BACKPLANE_TIMEOUT
1995 		if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
1996 			/* Set BAR0_CORE2_WIN2 to the bridge wrapper base address */
1997 			OSL_PCI_WRITE_CONFIG(osh,
1998 				cfg_reg, 4, axi_wrapper[i].wrapper_addr);
1999 
2000 			/* point ai at BAR0 + offset corresponding to Gen1 or Gen2 */
2001 			ai = (aidmp_t *) (DISCARD_QUAL(sii->curmap, uint8) + offset);
2002 		}
2003 		else
2004 #endif /* BCM_BACKPLANE_TIMEOUT */
2005 		{
2006 			ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr;
2007 		}
2008 
2009 		tmp = ai_clear_backplane_to_per_core(sih, axi_wrapper[i].cid, 0,
2010 			DISCARD_QUAL(ai, void));
2011 
2012 		ret |= tmp;
2013 	}
2014 
2015 #ifdef BCM_BACKPLANE_TIMEOUT
2016 	/* Restore the initial wrapper space */
2017 	if (prev_value) {
2018 		OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value);
2019 	}
2020 #endif /* BCM_BACKPLANE_TIMEOUT */
2021 
2022 #endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */
2023 
2024 	return ret;
2025 }
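
/*
 * Illustrative sketch (guarded out): a caller polling every slave wrapper via
 * ai_clear_backplane_to() and inspecting the returned status bit map. The
 * reactions shown are hypothetical placeholders.
 */
#if 0
static void
example_poll_backplane(si_t *sih)
{
	uint32 sts = ai_clear_backplane_to(sih);

	if (sts & AXI_WRAP_STS_TIMEOUT) {
		/* the offending bridge was already reset to unblock the bus */
	}
	if (sts & (AXI_WRAP_STS_SLAVE_ERR | AXI_WRAP_STS_DECODE_ERR)) {
		/* addressing errors: escalate, e.g. dump per-wrapper error
		 * logs via si_get_axi_errlog_info()
		 */
	}
}
#endif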
2026 
2027 uint
2028 ai_num_slaveports(si_t *sih, uint coreidx)
2029 {
2030 	si_info_t *sii = SI_INFO(sih);
2031 	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
2032 	uint32 cib;
2033 
2034 	cib = cores_info->cib[coreidx];
2035 	return ((cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT);
2036 }
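
/*
 * Illustrative sketch (guarded out): the CIB word packs several descriptor
 * fields; the slave-port count is extracted exactly as above. The raw value
 * below is made up.
 */
#if 0
static void
example_cib_decode(void)
{
	uint32 cib = 0x12345678;	/* hypothetical raw CIB descriptor */
	uint nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;

	BCM_REFERENCE(nsp);
}
#endif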
2037 
2038 #ifdef UART_TRAP_DBG
2039 void
2040 ai_dump_APB_Bridge_registers(si_t *sih)
2041 {
2042 	aidmp_t *ai;
2043 	si_info_t *sii = SI_INFO(sih);
2044 
2045 	ai = (aidmp_t *) sii->br_wrapba[0];
2046 	printf("APB Bridge 0\n");
2047 	printf("lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x",
2048 		R_REG(sii->osh, &ai->errlogaddrlo),
2049 		R_REG(sii->osh, &ai->errlogaddrhi),
2050 		R_REG(sii->osh, &ai->errlogid),
2051 		R_REG(sii->osh, &ai->errlogflags));
2052 	printf("\n status 0x%08x\n", R_REG(sii->osh, &ai->errlogstatus));
2053 }
2054 #endif /* UART_TRAP_DBG */
2055 
2056 void
2057 ai_force_clocks(si_t *sih, uint clock_state)
2058 {
2060 	si_info_t *sii = SI_INFO(sih);
2061 	aidmp_t *ai, *ai_sec = NULL;
2062 	volatile uint32 dummy;
2063 	uint32 ioctrl;
2064 	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
2065 
2066 	ASSERT(GOODREGS(sii->curwrap));
2067 	ai = sii->curwrap;
2068 	if (cores_info->wrapba2[sii->curidx])
2069 		ai_sec = REG_MAP(cores_info->wrapba2[sii->curidx], SI_CORE_SIZE);
2070 
2071 	/* ensure there are no pending backplane operations */
2072 	SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
2073 
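	/* SICF_FGC forces the core's gated clocks on regardless of clock
	 * requests; it is set below for FORCE_CLK_ON and cleared otherwise
	 */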
2074 	if (clock_state == FORCE_CLK_ON) {
2075 		ioctrl = R_REG(sii->osh, &ai->ioctrl);
2076 		W_REG(sii->osh, &ai->ioctrl, (ioctrl | SICF_FGC));
2077 		dummy = R_REG(sii->osh, &ai->ioctrl);
2078 		BCM_REFERENCE(dummy);
2079 		if (ai_sec) {
2080 			ioctrl = R_REG(sii->osh, &ai_sec->ioctrl);
2081 			W_REG(sii->osh, &ai_sec->ioctrl, (ioctrl | SICF_FGC));
2082 			dummy = R_REG(sii->osh, &ai_sec->ioctrl);
2083 			BCM_REFERENCE(dummy);
2084 		}
2085 	} else {
2086 		ioctrl = R_REG(sii->osh, &ai->ioctrl);
2087 		W_REG(sii->osh, &ai->ioctrl, (ioctrl & (~SICF_FGC)));
2088 		dummy = R_REG(sii->osh, &ai->ioctrl);
2089 		BCM_REFERENCE(dummy);
2090 		if (ai_sec) {
2091 			ioctrl = R_REG(sii->osh, &ai_sec->ioctrl);
2092 			W_REG(sii->osh, &ai_sec->ioctrl, (ioctrl & (~SICF_FGC)));
2093 			dummy = R_REG(sii->osh, &ai_sec->ioctrl);
2094 			BCM_REFERENCE(dummy);
2095 		}
2096 	}
2097 	/* ensure there are no pending backplane operations */
2098 	SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
2099 }
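
/*
 * Illustrative usage sketch (guarded out): bracketing register accesses that
 * need the gated clocks running. FORCE_CLK_ON/FORCE_CLK_OFF are assumed to be
 * the clock_state values defined for this API in siutils.h.
 */
#if 0
static void
example_forced_clock_access(si_t *sih)
{
	ai_force_clocks(sih, FORCE_CLK_ON);
	/* ... access registers that require free-running clocks ... */
	ai_force_clocks(sih, FORCE_CLK_OFF);
}
#endif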
2100