/*
 * Misc utility routines for accessing chip-specific features
 * of the SiliconBackplane-based Broadcom chips.
 *
 * Copyright (C) 2020, Broadcom.
 *
 *      Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 *      As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module.  An independent module is a module which is not
 * derived from this software.  The special exception does not apply to any
 * modifications of the software.
 *
 *
 * <<Broadcom-WL-IPTag/Dual:>>
 */

#include <typedefs.h>
#include <bcmdefs.h>
#include <osl.h>
#include <bcmutils.h>
#include <siutils.h>
#include <hndsoc.h>
#include <sbchipc.h>
#include <pcicfg.h>
#include <pcie_core.h>

#include "siutils_priv.h"
#include <bcmdevs.h>

#if defined(ETD)
#include <etd.h>
#endif

#if !defined(BCMDONGLEHOST)
#define PMU_DMP()  (cores_info->coreid[sii->curidx] == PMU_CORE_ID)
#define GCI_DMP()  (cores_info->coreid[sii->curidx] == GCI_CORE_ID)
#else
#define PMU_DMP() (0)
#define GCI_DMP() (0)
#endif /* !defined(BCMDONGLEHOST) */

#if defined(AXI_TIMEOUTS_NIC)
static bool ai_get_apb_bridge(const si_t *sih, uint32 coreidx, uint32 *apb_id,
	uint32 *apb_coreunit);
#endif /* AXI_TIMEOUTS_NIC */

#if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC)
static void ai_reset_axi_to(const si_info_t *sii, aidmp_t *ai);
#endif	/* defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC) */

#ifdef DONGLEBUILD
static uint32 ai_get_sizeof_wrapper_offsets_to_dump(void);
static uint32 ai_get_wrapper_base_addr(uint32 **offset);
#endif /* DONGLEBUILD */

/* AXI ID to CoreID + unit mappings */
typedef struct axi_to_coreidx {
	uint coreid;
	uint coreunit;
} axi_to_coreidx_t;

static const axi_to_coreidx_t axi2coreidx_4369[] = {
	{CC_CORE_ID, 0},	/* 00 Chipcommon */
	{PCIE2_CORE_ID, 0},	/* 01 PCIe */
	{D11_CORE_ID, 0},	/* 02 D11 Main */
	{ARMCR4_CORE_ID, 0},	/* 03 ARM */
	{BT_CORE_ID, 0},	/* 04 BT AHB */
	{D11_CORE_ID, 1},	/* 05 D11 Aux */
	{D11_CORE_ID, 0},	/* 06 D11 Main l1 */
	{D11_CORE_ID, 1},	/* 07 D11 Aux  l1 */
	{D11_CORE_ID, 0},	/* 08 D11 Main l2 */
	{D11_CORE_ID, 1},	/* 09 D11 Aux  l2 */
	{NODEV_CORE_ID, 0},	/* 10 M2M DMA */
	{NODEV_CORE_ID, 0},	/* 11 unused */
	{NODEV_CORE_ID, 0},	/* 12 unused */
	{NODEV_CORE_ID, 0},	/* 13 unused */
	{NODEV_CORE_ID, 0},	/* 14 unused */
	{NODEV_CORE_ID, 0}	/* 15 unused */
};

/* EROM parsing */

static uint32
get_erom_ent(const si_t *sih, uint32 **eromptr, uint32 mask, uint32 match)
{
	uint32 ent;
	uint inv = 0, nom = 0;
	uint32 size = 0;

	while (TRUE) {
		ent = R_REG(SI_INFO(sih)->osh, *eromptr);
		(*eromptr)++;

		if (mask == 0)
			break;

		if ((ent & ER_VALID) == 0) {
			inv++;
			continue;
		}

		if (ent == (ER_END | ER_VALID))
			break;

		if ((ent & mask) == match)
			break;

		/* escape condition: bound the scan by the EROM size in case of invalid entries */
		size += sizeof(*eromptr);
		if (size >= ER_SZ_MAX) {
			SI_ERROR(("Failed to find end of EROM marker\n"));
			break;
		}

		nom++;
	}

	SI_VMSG(("get_erom_ent: Returning ent 0x%08x\n", ent));
	if (inv + nom) {
		SI_VMSG(("  after %d invalid and %d non-matching entries\n", inv, nom));
	}
	return ent;
}

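/*
 * Usage sketch (illustrative only): callers pull a tagged entry off the
 * EROM stream with a mask/match pair, then read companion words with
 * mask == 0, exactly as ai_scan() does below.
 *
 *	uint32 cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
 *	uint32 cib = get_erom_ent(sih, &eromptr, 0, 0);
 */
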
static uint32
get_asd(const si_t *sih, uint32 **eromptr, uint sp, uint ad, uint st, uint32 *addrl, uint32 *addrh,
	uint32 *sizel, uint32 *sizeh)
{
	uint32 asd, sz, szd;

	BCM_REFERENCE(ad);

	asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
	if (((asd & ER_TAG1) != ER_ADD) ||
	    (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
	    ((asd & AD_ST_MASK) != st)) {
		/* This is not what we want, "push" it back */
		(*eromptr)--;
		return 0;
	}
	*addrl = asd & AD_ADDR_MASK;
	if (asd & AD_AG32)
		*addrh = get_erom_ent(sih, eromptr, 0, 0);
	else
		*addrh = 0;
	*sizeh = 0;
	sz = asd & AD_SZ_MASK;
	if (sz == AD_SZ_SZD) {
		szd = get_erom_ent(sih, eromptr, 0, 0);
		*sizel = szd & SD_SZ_MASK;
		if (szd & SD_SG32)
			*sizeh = get_erom_ent(sih, eromptr, 0, 0);
	} else
		*sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);

	SI_VMSG(("  SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
	        sp, ad, st, *sizeh, *sizel, *addrh, *addrl));

	return asd;
}

/* Parse the enumeration rom to identify all cores */
void
BCMATTACHFN(ai_scan)(si_t *sih, void *regs, uint devid)
{
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
	chipcregs_t *cc = (chipcregs_t *)regs;
	uint32 erombase, *eromptr, *eromlim;
	axi_wrapper_t * axi_wrapper = sii->axi_wrapper;

	BCM_REFERENCE(devid);

	erombase = R_REG(sii->osh, &cc->eromptr);

	switch (BUSTYPE(sih->bustype)) {
	case SI_BUS:
		eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
		break;

	case PCI_BUS:
		/* Set wrappers address */
		sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE);

		/* Now point the window at the erom */
		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase);
		eromptr = regs;
		break;

#ifdef BCMSDIO
	case SPI_BUS:
	case SDIO_BUS:
		eromptr = (uint32 *)(uintptr)erombase;
		break;
#endif	/* BCMSDIO */

	default:
		SI_ERROR(("Don't know how to do AXI enumeration on bus %d\n", sih->bustype));
		ASSERT(0);
		return;
	}
	eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
	sii->axi_num_wrappers = 0;

	SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n",
	         OSL_OBFUSCATE_BUF(regs), erombase,
		OSL_OBFUSCATE_BUF(eromptr), OSL_OBFUSCATE_BUF(eromlim)));
	while (eromptr < eromlim) {
		uint32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
		uint32 mpd, asd, addrl, addrh, sizel, sizeh;
		uint i, j, idx;
		bool br;

		br = FALSE;

		/* Grok a component */
		cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
		if (cia == (ER_END | ER_VALID)) {
			SI_VMSG(("Found END of erom after %d cores\n", sii->numcores));
			return;
		}

		cib = get_erom_ent(sih, &eromptr, 0, 0);

		if ((cib & ER_TAG) != ER_CI) {
			SI_ERROR(("CIA not followed by CIB\n"));
			goto error;
		}

		cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
		mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
		crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
		nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
		nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
		nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
		nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;

#ifdef BCMDBG_SI
		SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, "
		         "nsw = %d, nmp = %d & nsp = %d\n",
		         mfg, cid, crev, OSL_OBFUSCATE_BUF(eromptr - 1), nmw, nsw, nmp, nsp));
#else
		BCM_REFERENCE(crev);
#endif

		/* Include Default slave wrapper for timeout monitoring */
		if ((nsp == 0 && nsw == 0) ||
#if !defined(AXI_TIMEOUTS) && !defined(AXI_TIMEOUTS_NIC)
			((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) ||
#else
			((CHIPTYPE(sii->pub.socitype) == SOCI_NAI) &&
			(mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) ||
#endif /* !defined(AXI_TIMEOUTS) && !defined(AXI_TIMEOUTS_NIC) */
			FALSE) {
			continue;
		}

		if ((nmw + nsw == 0)) {
			/* A component which is not a core */
			/* Should record some info */
			if (cid == OOB_ROUTER_CORE_ID) {
				asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
					&addrl, &addrh, &sizel, &sizeh);
				if (asd != 0) {
					if ((sii->oob_router != 0) && (sii->oob_router != addrl)) {
						sii->oob_router1 = addrl;
					} else {
						sii->oob_router = addrl;
					}
				}
			}
			if ((cid != NS_CCB_CORE_ID) && (cid != PMU_CORE_ID) &&
				(cid != GCI_CORE_ID) && (cid != SR_CORE_ID) &&
				(cid != HUB_CORE_ID) && (cid != HND_OOBR_CORE_ID) &&
				(cid != CCI400_CORE_ID) && (cid != SPMI_SLAVE_CORE_ID)) {
				continue;
			}
		}

		idx = sii->numcores;

		cores_info->cia[idx] = cia;
		cores_info->cib[idx] = cib;
		cores_info->coreid[idx] = cid;

		/* work around the fact that the variable buscoretype is used in
		 * _ai_setcoreidx() when checking PCIE_GEN2() for the PCI_BUS case before
		 * it is set up later; both the use and the setup happen in
		 * si_buscore_setup().
		 */
		if (BUSTYPE(sih->bustype) == PCI_BUS &&
		    (cid == PCI_CORE_ID || cid == PCIE_CORE_ID || cid == PCIE2_CORE_ID)) {
			sii->pub.buscoretype = (uint16)cid;
		}

		for (i = 0; i < nmp; i++) {
			mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
			if ((mpd & ER_TAG) != ER_MP) {
				SI_ERROR(("Not enough MP entries for component 0x%x\n", cid));
				goto error;
			}
			/* Record something? */
			SI_VMSG(("  Master port %d, mp: %d id: %d\n", i,
			         (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT,
			         (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT));
		}

		/* First Slave Address Descriptor should be port 0:
		 * the main register space for the core
		 */
		asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
		if (asd == 0) {
			do {
				/* Try again to see if it is a bridge */
				asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
				              &sizel, &sizeh);
				if (asd != 0) {
					br = TRUE;
				} else {
					break;
				}
			} while (1);
		} else {
			if (addrl == 0 || sizel == 0) {
				SI_ERROR((" Invalid ASD %x for slave port \n", asd));
				goto error;
			}
			cores_info->coresba[idx] = addrl;
			cores_info->coresba_size[idx] = sizel;
		}

		/* Get any more ASDs in first port */
		j = 1;
		do {
			asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
			              &sizel, &sizeh);
			/* Support ARM debug core ASD with address space > 4K */
			if ((asd != 0) && (j == 1)) {
				SI_VMSG(("Warning: sizel > 0x1000\n"));
				cores_info->coresba2[idx] = addrl;
				cores_info->coresba2_size[idx] = sizel;
			}
			j++;
		} while (asd != 0);

		/* Go through the ASDs for other slave ports */
		for (i = 1; i < nsp; i++) {
			j = 0;
			do {
				asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
				              &sizel, &sizeh);
				/* To get the first base address of second slave port */
				if ((asd != 0) && (i == 1) && (j == 0)) {
					cores_info->csp2ba[idx] = addrl;
					cores_info->csp2ba_size[idx] = sizel;
				}
				if (asd == 0)
					break;
				j++;
			} while (1);
			if (j == 0) {
				SI_ERROR((" SP %d has no address descriptors\n", i));
				goto error;
			}
		}

		/* Now get master wrappers */
		for (i = 0; i < nmw; i++) {
			asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl, &addrh,
			              &sizel, &sizeh);
			if (asd == 0) {
				SI_ERROR(("Missing descriptor for MW %d\n", i));
				goto error;
			}
			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
				SI_ERROR(("Master wrapper %d is not 4KB\n", i));
				goto error;
			}
			if (i == 0) {
				cores_info->wrapba[idx] = addrl;
			} else if (i == 1) {
				cores_info->wrapba2[idx] = addrl;
			} else if (i == 2) {
				cores_info->wrapba3[idx] = addrl;
			}

			if (axi_wrapper && (sii->axi_num_wrappers < SI_MAX_AXI_WRAPPERS)) {
				axi_wrapper[sii->axi_num_wrappers].mfg = mfg;
				axi_wrapper[sii->axi_num_wrappers].cid = cid;
				axi_wrapper[sii->axi_num_wrappers].rev = crev;
				axi_wrapper[sii->axi_num_wrappers].wrapper_type = AI_MASTER_WRAPPER;
				axi_wrapper[sii->axi_num_wrappers].wrapper_addr = addrl;
				sii->axi_num_wrappers++;
				SI_VMSG(("MASTER WRAPPER: %d, mfg:%x, cid:%x,"
					"rev:%x, addr:%x, size:%x\n",
					sii->axi_num_wrappers, mfg, cid, crev, addrl, sizel));
			}
		}

		/* And finally slave wrappers */
		for (i = 0; i < nsw; i++) {
			uint fwp = (nsp <= 1) ? 0 : 1;
			asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, &addrl, &addrh,
			              &sizel, &sizeh);
			if (asd == 0) {
				SI_ERROR(("Missing descriptor for SW %d cid %x eromp %p fwp %d \n",
					i, cid, eromptr, fwp));
				goto error;
			}

			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
				SI_ERROR(("Slave wrapper %d is not 4KB\n", i));
				goto error;
			}

			/* cache APB bridge wrapper address for set/clear timeout */
			if ((mfg == MFGID_ARM) && (cid == APB_BRIDGE_ID)) {
				ASSERT(sii->num_br < SI_MAXBR);
				sii->br_wrapba[sii->num_br++] = addrl;
			}

			if ((mfg == MFGID_ARM) && (cid == ADB_BRIDGE_ID)) {
				br = TRUE;
			}

			BCM_REFERENCE(br);

			if ((nmw == 0) && (i == 0)) {
				cores_info->wrapba[idx] = addrl;
			} else if ((nmw == 0) && (i == 1)) {
				cores_info->wrapba2[idx] = addrl;
			} else if ((nmw == 0) && (i == 2)) {
				cores_info->wrapba3[idx] = addrl;
			}

			/* Include all slave wrappers to the list to
			 * enable and monitor watchdog timeouts
			 */

			if (axi_wrapper && (sii->axi_num_wrappers < SI_MAX_AXI_WRAPPERS)) {
				axi_wrapper[sii->axi_num_wrappers].mfg = mfg;
				axi_wrapper[sii->axi_num_wrappers].cid = cid;
				axi_wrapper[sii->axi_num_wrappers].rev = crev;
				axi_wrapper[sii->axi_num_wrappers].wrapper_type = AI_SLAVE_WRAPPER;
				axi_wrapper[sii->axi_num_wrappers].wrapper_addr = addrl;

				sii->axi_num_wrappers++;

				SI_VMSG(("SLAVE WRAPPER: %d,  mfg:%x, cid:%x,"
					"rev:%x, addr:%x, size:%x\n",
					sii->axi_num_wrappers,  mfg, cid, crev, addrl, sizel));
			}
		}

#ifndef AXI_TIMEOUTS_NIC
		/* Don't record bridges and cores with 0 slave ports */
		if (br || (nsp == 0)) {
			continue;
		}
#endif

		/* Done with core */
		sii->numcores++;
	}

	SI_ERROR(("Reached end of erom without finding END\n"));

error:
	sii->numcores = 0;
	return;
}

#define AI_SETCOREIDX_MAPSIZE(coreid) \
	(((coreid) == NS_CCB_CORE_ID) ? 15 * SI_CORE_SIZE : SI_CORE_SIZE)

/* This function changes the logical "focus" to the indicated core.
 * Return the current core's virtual address.
 */
static volatile void *
BCMPOSTTRAPFN(_ai_setcoreidx)(si_t *sih, uint coreidx, uint use_wrapn)
{
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
	uint32 addr, wrap, wrap2, wrap3;
	volatile void *regs;

	if (coreidx >= MIN(sii->numcores, SI_MAXCORES))
		return (NULL);

	addr = cores_info->coresba[coreidx];
	wrap = cores_info->wrapba[coreidx];
	wrap2 = cores_info->wrapba2[coreidx];
	wrap3 = cores_info->wrapba3[coreidx];

#ifdef AXI_TIMEOUTS_NIC
	/* No need to disable interrupts while entering/exiting APB bridge core */
	if ((cores_info->coreid[coreidx] != APB_BRIDGE_CORE_ID) &&
		(cores_info->coreid[sii->curidx] != APB_BRIDGE_CORE_ID))
#endif /* AXI_TIMEOUTS_NIC */
	{
		/*
		 * If the user has provided an interrupt mask enabled function,
		 * then assert interrupts are disabled before switching the core.
		 */
		ASSERT((sii->intrsenabled_fn == NULL) ||
			!(*(sii)->intrsenabled_fn)((sii)->intr_arg));
	}

	switch (BUSTYPE(sih->bustype)) {
	case SI_BUS:
		/* map new one */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = REG_MAP(addr,
				AI_SETCOREIDX_MAPSIZE(cores_info->coreid[coreidx]));
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		sii->curmap = regs = cores_info->regs[coreidx];
		if (!cores_info->wrappers[coreidx] && (wrap != 0)) {
			cores_info->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->wrappers[coreidx]));
		}
		if (!cores_info->wrappers2[coreidx] && (wrap2 != 0)) {
			cores_info->wrappers2[coreidx] = REG_MAP(wrap2, SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->wrappers2[coreidx]));
		}
		if (!cores_info->wrappers3[coreidx] && (wrap3 != 0)) {
			cores_info->wrappers3[coreidx] = REG_MAP(wrap3, SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->wrappers3[coreidx]));
		}

		if (use_wrapn == 2) {
			sii->curwrap = cores_info->wrappers3[coreidx];
		} else if (use_wrapn == 1) {
			sii->curwrap = cores_info->wrappers2[coreidx];
		} else {
			sii->curwrap = cores_info->wrappers[coreidx];
		}
		break;

	case PCI_BUS:
		regs = sii->curmap;

		/* point bar0 2nd 4KB window to the primary wrapper */
		if (use_wrapn == 2) {
			wrap = wrap3;
		} else if (use_wrapn == 1) {
			wrap = wrap2;
		}

		/* Use BAR0 Window to support dual mac chips... */

		/* TODO: the other mac unit can't be supported by the current BAR0 window.
		 * need to find other ways to access these cores.
		 */

		switch (sii->slice) {
		case 0: /* main/first slice */
#ifdef AXI_TIMEOUTS_NIC
			/* No need to set the BAR0 if core is APB Bridge.
			 * This is to reduce 2 PCI writes while checking for errlog
			 */
			if (cores_info->coreid[coreidx] != APB_BRIDGE_CORE_ID)
#endif /* AXI_TIMEOUTS_NIC */
			{
				/* point bar0 window */
				OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, addr);
			}

			if (PCIE_GEN2(sii))
				OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_WIN2, 4, wrap);
			else
				OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN2, 4, wrap);

			break;

		case 1: /* aux/second slice */
			/* PCIE GEN2 only for other slices */
			if (!PCIE_GEN2(sii)) {
				/* other slices not supported */
				SI_ERROR(("PCI GEN not supported for slice %d\n", sii->slice));
				ASSERT(0);
				break;
			}

			/* 0x4000 - 0x4fff: enum space 0x5000 - 0x5fff: wrapper space */
			regs = (volatile uint8 *)regs + PCI_SEC_BAR0_WIN_OFFSET;
			sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE);

			/* point bar0 window */
			OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_CORE2_WIN, 4, addr);
			OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_CORE2_WIN2, 4, wrap);
			break;

		case 2: /* scan/third slice */
			/* PCIE GEN2 only for other slices */
			if (!PCIE_GEN2(sii)) {
				/* other slices not supported */
				SI_ERROR(("PCI GEN not supported for slice %d\n", sii->slice));
				ASSERT(0);
				break;
			}

			/* 0x9000 - 0x9fff: enum space 0xa000 - 0xafff: wrapper space */
			regs = (volatile uint8 *)regs + PCI_TER_BAR0_WIN_OFFSET;
			sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE);

			/* point bar0 window */
			ai_corereg(sih, sih->buscoreidx, PCIE_TER_BAR0_WIN, ~0, addr);
			ai_corereg(sih, sih->buscoreidx, PCIE_TER_BAR0_WRAPPER, ~0, wrap);
			break;

		default: /* other slices */
			SI_ERROR(("BAR0 Window not supported for slice %d\n", sii->slice));
			ASSERT(0);
			break;
		}

		break;

#ifdef BCMSDIO
	case SPI_BUS:
	case SDIO_BUS:
		sii->curmap = regs = (void *)((uintptr)addr);
		if (use_wrapn)
			sii->curwrap = (void *)((uintptr)wrap2);
		else
			sii->curwrap = (void *)((uintptr)wrap);
		break;
#endif	/* BCMSDIO */

	default:
		ASSERT(0);
		sii->curmap = regs = NULL;
		break;
	}

	sii->curidx = coreidx;

	return regs;
}

volatile void *
BCMPOSTTRAPFN(ai_setcoreidx)(si_t *sih, uint coreidx)
{
	return _ai_setcoreidx(sih, coreidx, 0);
}

volatile void *
BCMPOSTTRAPFN(ai_setcoreidx_2ndwrap)(si_t *sih, uint coreidx)
{
	return _ai_setcoreidx(sih, coreidx, 1);
}

volatile void *
BCMPOSTTRAPFN(ai_setcoreidx_3rdwrap)(si_t *sih, uint coreidx)
{
	return _ai_setcoreidx(sih, coreidx, 2);
}

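/*
 * Usage sketch (illustrative only; 'idx' is a hypothetical core index):
 * switch focus to a core, access its registers, then restore the caller's
 * core so surrounding code sees no change.
 *
 *	uint origidx = si_coreidx(sih);
 *	volatile void *regs = ai_setcoreidx(sih, idx);
 *	if (regs != NULL) {
 *		... access the core through regs / sii->curwrap ...
 *	}
 *	ai_setcoreidx(sih, origidx);
 */
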
void
ai_coreaddrspaceX(const si_t *sih, uint asidx, uint32 *addr, uint32 *size)
{
	const si_info_t *sii = SI_INFO(sih);
	const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
	chipcregs_t *cc = NULL;
	uint32 erombase, *eromptr, *eromlim;
	uint i, j, cidx;
	uint32 cia, cib, nmp, nsp;
	uint32 asd, addrl, addrh, sizel, sizeh;

	for (i = 0; i < sii->numcores; i++) {
		if (cores_info->coreid[i] == CC_CORE_ID) {
			cc = (chipcregs_t *)cores_info->regs[i];
			break;
		}
	}
	if (cc == NULL)
		goto error;

	BCM_REFERENCE(erombase);
	erombase = R_REG(sii->osh, &cc->eromptr);
	eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
	eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));

	cidx = sii->curidx;
	cia = cores_info->cia[cidx];
	cib = cores_info->cib[cidx];

	nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
	nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;

	/* scan for cores */
	while (eromptr < eromlim) {
		if ((get_erom_ent(sih, &eromptr, ER_TAG, ER_CI) == cia) &&
			(get_erom_ent(sih, &eromptr, 0, 0) == cib)) {
			break;
		}
	}

	/* skip master ports */
	for (i = 0; i < nmp; i++)
		get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);

	/* Skip ASDs in port 0 */
	asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
	if (asd == 0) {
		/* Try again to see if it is a bridge */
		asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
		              &sizel, &sizeh);
	}

	j = 1;
	do {
		asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
		              &sizel, &sizeh);
		j++;
	} while (asd != 0);

	/* Go through the ASDs for other slave ports */
	for (i = 1; i < nsp; i++) {
		j = 0;
		do {
			asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
				&sizel, &sizeh);
			if (asd == 0)
				break;

			if (!asidx--) {
				*addr = addrl;
				*size = sizel;
				return;
			}
			j++;
		} while (1);

		if (j == 0) {
			SI_ERROR((" SP %d has no address descriptors\n", i));
			break;
		}
	}

error:
	*size = 0;
	return;
}

/* Return the number of address spaces in current core */
int
ai_numaddrspaces(const si_t *sih)
{
	/* TODO: Either save it or parse the EROM on demand, currently hardcode 2 */
	BCM_REFERENCE(sih);

	return 2;
}

/* Return the address of the nth address space in the current core
 * Arguments:
 * sih : Pointer to struct si_t
 * spidx : slave port index
 * baidx : base address index
 */
uint32
ai_addrspace(const si_t *sih, uint spidx, uint baidx)
{
	const si_info_t *sii = SI_INFO(sih);
	const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
	uint cidx;

	cidx = sii->curidx;

	if (spidx == CORE_SLAVE_PORT_0) {
		if (baidx == CORE_BASE_ADDR_0)
			return cores_info->coresba[cidx];
		else if (baidx == CORE_BASE_ADDR_1)
			return cores_info->coresba2[cidx];
	}
	else if (spidx == CORE_SLAVE_PORT_1) {
		if (baidx == CORE_BASE_ADDR_0)
			return cores_info->csp2ba[cidx];
	}

	SI_ERROR(("ai_addrspace: Need to parse the erom again to find %d base addr"
		" in %d slave port\n",
		baidx, spidx));

	return 0;
}

/* Return the size of the nth address space in the current core
 * Arguments:
 * sih : Pointer to struct si_t
 * spidx : slave port index
 * baidx : base address index
 */
uint32
ai_addrspacesize(const si_t *sih, uint spidx, uint baidx)
{
	const si_info_t *sii = SI_INFO(sih);
	const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
	uint cidx;

	cidx = sii->curidx;
	if (spidx == CORE_SLAVE_PORT_0) {
		if (baidx == CORE_BASE_ADDR_0)
			return cores_info->coresba_size[cidx];
		else if (baidx == CORE_BASE_ADDR_1)
			return cores_info->coresba2_size[cidx];
	}
	else if (spidx == CORE_SLAVE_PORT_1) {
		if (baidx == CORE_BASE_ADDR_0)
			return cores_info->csp2ba_size[cidx];
	}

	SI_ERROR(("ai_addrspacesize: Need to parse the erom again to find %d"
		" base addr in %d slave port\n",
		baidx, spidx));

	return 0;
}

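/*
 * Usage sketch (illustrative only): fetch the base and size of the current
 * core's main register space (slave port 0, base address 0).
 *
 *	uint32 base = ai_addrspace(sih, CORE_SLAVE_PORT_0, CORE_BASE_ADDR_0);
 *	uint32 size = ai_addrspacesize(sih, CORE_SLAVE_PORT_0, CORE_BASE_ADDR_0);
 */
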
uint
ai_flag(si_t *sih)
{
	const si_info_t *sii = SI_INFO(sih);
#if !defined(BCMDONGLEHOST)
	const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
#endif
	aidmp_t *ai;

	if (PMU_DMP()) {
		uint idx, flag;
		idx = sii->curidx;
		ai_setcoreidx(sih, SI_CC_IDX);
		flag = ai_flag_alt(sih);
		ai_setcoreidx(sih, idx);
		return flag;
	}

	ai = sii->curwrap;
	ASSERT(ai != NULL);

	return (R_REG(sii->osh, &ai->oobselouta30) & 0x1f);
}

uint
ai_flag_alt(const si_t *sih)
{
	const si_info_t *sii = SI_INFO(sih);
	aidmp_t *ai = sii->curwrap;

	return ((R_REG(sii->osh, &ai->oobselouta30) >> AI_OOBSEL_1_SHIFT) & AI_OOBSEL_MASK);
}

void
ai_setint(const si_t *sih, int siflag)
{
	BCM_REFERENCE(sih);
	BCM_REFERENCE(siflag);

	/* TODO: Figure out how to set interrupt mask in ai */
}

uint
BCMPOSTTRAPFN(ai_wrap_reg)(const si_t *sih, uint32 offset, uint32 mask, uint32 val)
{
	const si_info_t *sii = SI_INFO(sih);
	uint32 *addr = (uint32 *) ((uchar *)(sii->curwrap) + offset);

	if (mask || val) {
		uint32 w = R_REG(sii->osh, addr);
		w &= ~mask;
		w |= val;
		W_REG(sii->osh, addr, w);
	}
	return (R_REG(sii->osh, addr));
}

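/*
 * Usage sketch (illustrative only): ai_wrap_reg() is a read-modify-write
 * with readback on the current wrapper, so a zero mask/val pair is a plain
 * read.  Forming the offset with OFFSETOF(aidmp_t, ioctrl) is an assumed
 * convention, not something this file prescribes.
 *
 *	uint32 ioctrl = ai_wrap_reg(sih, OFFSETOF(aidmp_t, ioctrl), 0, 0);
 *	(void)ai_wrap_reg(sih, OFFSETOF(aidmp_t, ioctrl), SICF_FGC, 0);
 */
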
uint
ai_corevendor(const si_t *sih)
{
	const si_info_t *sii = SI_INFO(sih);
	const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
	uint32 cia;

	cia = cores_info->cia[sii->curidx];
	return ((cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT);
}

uint
BCMPOSTTRAPFN(ai_corerev)(const si_t *sih)
{
	const si_info_t *sii = SI_INFO(sih);
	const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
	uint32 cib;

	cib = cores_info->cib[sii->curidx];
	return ((cib & CIB_REV_MASK) >> CIB_REV_SHIFT);
}

uint
ai_corerev_minor(const si_t *sih)
{
	return (ai_core_sflags(sih, 0, 0) >> SISF_MINORREV_D11_SHIFT) &
			SISF_MINORREV_D11_MASK;
}

bool
BCMPOSTTRAPFN(ai_iscoreup)(const si_t *sih)
{
	const si_info_t *sii = SI_INFO(sih);
	aidmp_t *ai = sii->curwrap;

	return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) == SICF_CLOCK_EN) &&
	        ((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0));
}

/*
 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
 * switch back to the original core, and return the new value.
 *
 * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
 *
 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
 * and (on newer pci cores) chipcommon registers.
 */
uint
BCMPOSTTRAPFN(ai_corereg)(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
{
	uint origidx = 0;
	volatile uint32 *r = NULL;
	uint w;
	bcm_int_bitmask_t intr_val;
	bool fast = FALSE;
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

	ASSERT(GOODIDX(coreidx, sii->numcores));
	ASSERT(regoff < SI_CORE_SIZE);
	ASSERT((val & ~mask) == 0);

	if (coreidx >= SI_MAXCORES)
		return 0;

	if (BUSTYPE(sih->bustype) == SI_BUS) {
		/* If internal bus, we can always get at everything */
		fast = TRUE;
		/* map if does not exist */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
			                            SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
	} else if (BUSTYPE(sih->bustype) == PCI_BUS) {
		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */

		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
			/* Chipc registers are mapped at 12KB */

			fast = TRUE;
			r = (volatile uint32 *)((volatile char *)sii->curmap +
			               PCI_16KB0_CCREGS_OFFSET + regoff);
		} else if (sii->pub.buscoreidx == coreidx) {
			/* pci registers are either in the last 2KB of an 8KB window
			 * or, in pcie and pci rev 13, at 8KB
			 */
			fast = TRUE;
			if (SI_FAST(sii))
				r = (volatile uint32 *)((volatile char *)sii->curmap +
				               PCI_16KB0_PCIREGS_OFFSET + regoff);
			else
				r = (volatile uint32 *)((volatile char *)sii->curmap +
				               ((regoff >= SBCONFIGOFF) ?
				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
				               regoff);
		}
	}

	if (!fast) {
		INTR_OFF(sii, &intr_val);

		/* save current core index */
		origidx = si_coreidx(&sii->pub);

		/* switch core */
		r = (volatile uint32*) ((volatile uchar*) ai_setcoreidx(&sii->pub, coreidx) +
		               regoff);
	}
	ASSERT(r != NULL);

	/* mask and set */
	if (mask || val) {
		w = (R_REG(sii->osh, r) & ~mask) | val;
		W_REG(sii->osh, r, w);
	}

	/* readback */
	w = R_REG(sii->osh, r);

	if (!fast) {
		/* restore core index */
		if (origidx != coreidx)
			ai_setcoreidx(&sii->pub, origidx);

		INTR_RESTORE(sii, &intr_val);
	}

	return (w);
}

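/*
 * Usage sketch (illustrative only; register offset 0 is used purely as an
 * example): read a chipcommon register, then set a bit in it, without the
 * caller having to manage the core switch itself.
 *
 *	uint32 v = ai_corereg(sih, SI_CC_IDX, 0, 0, 0);		// pure read
 *	ai_corereg(sih, SI_CC_IDX, 0, 0x1, 0x1);		// set bit 0
 */
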
/*
 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
 * switch back to the original core, and return the new value.
 *
 * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
 *
 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
 * and (on newer pci cores) chipcommon registers.
 */
uint
ai_corereg_writeonly(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
{
	uint origidx = 0;
	volatile uint32 *r = NULL;
	uint w = 0;
	bcm_int_bitmask_t intr_val;
	bool fast = FALSE;
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

	ASSERT(GOODIDX(coreidx, sii->numcores));
	ASSERT(regoff < SI_CORE_SIZE);
	ASSERT((val & ~mask) == 0);

	if (coreidx >= SI_MAXCORES)
		return 0;

	if (BUSTYPE(sih->bustype) == SI_BUS) {
		/* If internal bus, we can always get at everything */
		fast = TRUE;
		/* map if does not exist */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
			                            SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
	} else if (BUSTYPE(sih->bustype) == PCI_BUS) {
		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */

		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
			/* Chipc registers are mapped at 12KB */

			fast = TRUE;
			r = (volatile uint32 *)((volatile char *)sii->curmap +
			               PCI_16KB0_CCREGS_OFFSET + regoff);
		} else if (sii->pub.buscoreidx == coreidx) {
			/* pci registers are either in the last 2KB of an 8KB window
			 * or, in pcie and pci rev 13, at 8KB
			 */
			fast = TRUE;
			if (SI_FAST(sii))
				r = (volatile uint32 *)((volatile char *)sii->curmap +
				               PCI_16KB0_PCIREGS_OFFSET + regoff);
			else
				r = (volatile uint32 *)((volatile char *)sii->curmap +
				               ((regoff >= SBCONFIGOFF) ?
				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
				               regoff);
		}
	}

	if (!fast) {
		INTR_OFF(sii, &intr_val);

		/* save current core index */
		origidx = si_coreidx(&sii->pub);

		/* switch core */
		r = (volatile uint32*) ((volatile uchar*) ai_setcoreidx(&sii->pub, coreidx) +
		               regoff);
	}
	ASSERT(r != NULL);

	/* mask and set */
	if (mask || val) {
		w = (R_REG(sii->osh, r) & ~mask) | val;
		W_REG(sii->osh, r, w);
	}

	if (!fast) {
		/* restore core index */
		if (origidx != coreidx)
			ai_setcoreidx(&sii->pub, origidx);

		INTR_RESTORE(sii, &intr_val);
	}

	return (w);
}

/*
 * If there is no need for fiddling with interrupts or core switches (typically silicon
 * back plane registers, pci registers and chipcommon registers), this function
 * returns the register offset on this core to a mapped address. This address can
 * be used for W_REG/R_REG directly.
 *
 * For accessing registers that would need a core switch, this function will return
 * NULL.
 */
volatile uint32 *
BCMPOSTTRAPFN(ai_corereg_addr)(si_t *sih, uint coreidx, uint regoff)
{
	volatile uint32 *r = NULL;
	bool fast = FALSE;
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

	ASSERT(GOODIDX(coreidx, sii->numcores));
	ASSERT(regoff < SI_CORE_SIZE);

	if (coreidx >= SI_MAXCORES)
		return 0;

	if (BUSTYPE(sih->bustype) == SI_BUS) {
		/* If internal bus, we can always get at everything */
		fast = TRUE;
		/* map if does not exist */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
			                            SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
	} else if (BUSTYPE(sih->bustype) == PCI_BUS) {
		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */

		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
			/* Chipc registers are mapped at 12KB */

			fast = TRUE;
			r = (volatile uint32 *)((volatile char *)sii->curmap +
			               PCI_16KB0_CCREGS_OFFSET + regoff);
		} else if (sii->pub.buscoreidx == coreidx) {
			/* pci registers are either in the last 2KB of an 8KB window
			 * or, in pcie and pci rev 13, at 8KB
			 */
			fast = TRUE;
			if (SI_FAST(sii))
				r = (volatile uint32 *)((volatile char *)sii->curmap +
				               PCI_16KB0_PCIREGS_OFFSET + regoff);
			else
				r = (volatile uint32 *)((volatile char *)sii->curmap +
				               ((regoff >= SBCONFIGOFF) ?
				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
				               regoff);
		}
	}

	if (!fast) {
		ASSERT(sii->curidx == coreidx);
		r = (volatile uint32*) ((volatile uchar*)sii->curmap + regoff);
	}

	return (r);
}

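/*
 * Usage sketch (illustrative only): when a fast path exists, the returned
 * pointer can be handed straight to R_REG/W_REG.
 *
 *	volatile uint32 *r = ai_corereg_addr(sih, SI_CC_IDX, 0);
 *	if (r != NULL) {
 *		uint32 v = R_REG(SI_INFO(sih)->osh, r);
 *		...
 *	}
 */
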
void
ai_core_disable(const si_t *sih, uint32 bits)
{
	const si_info_t *sii = SI_INFO(sih);
	volatile uint32 dummy;
	uint32 status;
	aidmp_t *ai;

	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	/* if core is already in reset, just return */
	if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) {
		return;
	}

	/* ensure there are no pending backplane operations */
	SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);

	/* if pending backplane ops still, try waiting longer */
	if (status != 0) {
		/* 300usecs was sufficient to allow backplane ops to clear for big hammer */
		/* during driver load we may need more time */
		SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 10000);
		/* if still pending ops, continue on and try disable anyway */
		/* this is in big hammer path, so don't call wl_reinit in this case... */
#ifdef BCMDBG
		if (status != 0) {
			SI_ERROR(("ai_core_disable: WARN: resetstatus=%0x on core disable\n",
				status));
		}
#endif
	}

	W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
	dummy = R_REG(sii->osh, &ai->resetctrl);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);

	W_REG(sii->osh, &ai->ioctrl, bits);
	dummy = R_REG(sii->osh, &ai->ioctrl);
	BCM_REFERENCE(dummy);
	OSL_DELAY(10);
}

/* reset and re-enable a core
 * inputs:
 * bits - core specific bits that are set during and after reset sequence
 * resetbits - core specific bits that are set only during reset sequence
 */
static void
BCMPOSTTRAPFN(_ai_core_reset)(const si_t *sih, uint32 bits, uint32 resetbits)
{
	const si_info_t *sii = SI_INFO(sih);
	aidmp_t *ai;
	volatile uint32 dummy;
	uint loop_counter = 10;

	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	/* ensure there are no pending backplane operations */
	SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);

#ifdef BCMDBG_ERR
	if (dummy != 0) {
		SI_ERROR(("_ai_core_reset: WARN1: resetstatus=0x%0x\n", dummy));
	}
#endif /* BCMDBG_ERR */

	/* put core into reset state */
	W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
	OSL_DELAY(10);

	/* ensure there are no pending backplane operations */
	SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);

	W_REG(sii->osh, &ai->ioctrl, (bits | resetbits | SICF_FGC | SICF_CLOCK_EN));
	dummy = R_REG(sii->osh, &ai->ioctrl);
	BCM_REFERENCE(dummy);
#ifdef UCM_CORRUPTION_WAR
	if (si_coreid(sih) == D11_CORE_ID) {
		/* Reset FGC */
		OSL_DELAY(1);
		W_REG(sii->osh, &ai->ioctrl, (dummy & (~SICF_FGC)));
	}
#endif /* UCM_CORRUPTION_WAR */
	/* ensure there are no pending backplane operations */
	SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);

#ifdef BCMDBG_ERR
	if (dummy != 0)
		SI_ERROR(("_ai_core_reset: WARN2: resetstatus=0x%0x\n", dummy));
#endif

	while (R_REG(sii->osh, &ai->resetctrl) != 0 && --loop_counter != 0) {
		/* ensure there are no pending backplane operations */
		SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);

#ifdef BCMDBG_ERR
		if (dummy != 0)
			SI_ERROR(("_ai_core_reset: WARN3 resetstatus=0x%0x\n", dummy));
#endif

		/* take core out of reset */
		W_REG(sii->osh, &ai->resetctrl, 0);

		/* ensure there are no pending backplane operations */
		SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
	}

#ifdef BCMDBG_ERR
	if (loop_counter == 0) {
		SI_ERROR(("_ai_core_reset: Failed to take core 0x%x out of reset\n",
			si_coreid(sih)));
	}
#endif

#ifdef UCM_CORRUPTION_WAR
	/* Pulse FGC after lifting Reset */
	W_REG(sii->osh, &ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
#else
	W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN));
#endif /* UCM_CORRUPTION_WAR */
	dummy = R_REG(sii->osh, &ai->ioctrl);
	BCM_REFERENCE(dummy);
#ifdef UCM_CORRUPTION_WAR
	if (si_coreid(sih) == D11_CORE_ID) {
		/* Reset FGC */
		OSL_DELAY(1);
		W_REG(sii->osh, &ai->ioctrl, (dummy & (~SICF_FGC)));
	}
#endif /* UCM_CORRUPTION_WAR */
	OSL_DELAY(1);
}

void
BCMPOSTTRAPFN(ai_core_reset)(si_t *sih, uint32 bits, uint32 resetbits)
{
	si_info_t *sii = SI_INFO(sih);
	const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
	uint idx = sii->curidx;

	if (cores_info->wrapba3[idx] != 0) {
		ai_setcoreidx_3rdwrap(sih, idx);
		_ai_core_reset(sih, bits, resetbits);
		ai_setcoreidx(sih, idx);
	}

	if (cores_info->wrapba2[idx] != 0) {
		ai_setcoreidx_2ndwrap(sih, idx);
		_ai_core_reset(sih, bits, resetbits);
		ai_setcoreidx(sih, idx);
	}

	_ai_core_reset(sih, bits, resetbits);
}

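/*
 * Usage sketch (illustrative only): a plain reset passes no extra ioctrl
 * bits; core-specific flags may be supplied as 'bits' (kept after reset)
 * or 'resetbits' (applied only while in reset), per the comment above
 * _ai_core_reset().
 *
 *	ai_core_reset(sih, 0, 0);
 */
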
#ifdef BOOKER_NIC400_INF
void
BCMPOSTTRAPFN(ai_core_reset_ext)(const si_t *sih, uint32 bits, uint32 resetbits)
{
	_ai_core_reset(sih, bits, resetbits);
}
#endif /* BOOKER_NIC400_INF */

void
ai_core_cflags_wo(const si_t *sih, uint32 mask, uint32 val)
{
	const si_info_t *sii = SI_INFO(sih);
#if !defined(BCMDONGLEHOST)
	const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
#endif
	aidmp_t *ai;
	uint32 w;

	if (PMU_DMP()) {
		SI_ERROR(("ai_core_cflags_wo: Accessing PMU DMP register (ioctrl)\n"));
		return;
	}

	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	ASSERT((val & ~mask) == 0);

	if (mask || val) {
		w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
		W_REG(sii->osh, &ai->ioctrl, w);
	}
}

uint32
BCMPOSTTRAPFN(ai_core_cflags)(const si_t *sih, uint32 mask, uint32 val)
{
	const si_info_t *sii = SI_INFO(sih);
#if !defined(BCMDONGLEHOST)
	const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
#endif
	aidmp_t *ai;
	uint32 w;

	if (PMU_DMP()) {
		SI_ERROR(("ai_core_cflags: Accessing PMU DMP register (ioctrl)\n"));
		return 0;
	}
	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	ASSERT((val & ~mask) == 0);

	if (mask || val) {
		w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
		W_REG(sii->osh, &ai->ioctrl, w);
	}

	return R_REG(sii->osh, &ai->ioctrl);
}

uint32
ai_core_sflags(const si_t *sih, uint32 mask, uint32 val)
{
	const si_info_t *sii = SI_INFO(sih);
#if !defined(BCMDONGLEHOST)
	const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
#endif
	aidmp_t *ai;
	uint32 w;

	if (PMU_DMP()) {
		SI_ERROR(("ai_core_sflags: Accessing PMU DMP register (ioctrl)\n"));
		return 0;
	}

	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	ASSERT((val & ~mask) == 0);
	ASSERT((mask & ~SISF_CORE_BITS) == 0);

	if (mask || val) {
		w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val);
		W_REG(sii->osh, &ai->iostatus, w);
	}

	return R_REG(sii->osh, &ai->iostatus);
}

#if defined(BCMDBG) || defined(BCMDBG_DUMP) || defined(BCMDBG_PHYDUMP)
/* print interesting aidmp registers */
void
ai_dumpregs(const si_t *sih, struct bcmstrbuf *b)
{
	const si_info_t *sii = SI_INFO(sih);
	osl_t *osh;
	aidmp_t *ai;
	uint i;
	uint32 prev_value = 0;
	const axi_wrapper_t * axi_wrapper = sii->axi_wrapper;
	uint32 cfg_reg = 0;
	uint bar0_win_offset = 0;

	osh = sii->osh;

	/* Save and restore wrapper access window */
	if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
		if (PCIE_GEN2(sii)) {
			cfg_reg = PCIE2_BAR0_CORE2_WIN2;
			bar0_win_offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
		} else {
			cfg_reg = PCI_BAR0_WIN2;
			bar0_win_offset = PCI_BAR0_WIN2_OFFSET;
		}

		prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4);

		if (prev_value == ID32_INVALID) {
			SI_PRINT(("ai_dumpregs, PCI_BAR0_WIN2 - %x\n", prev_value));
			return;
		}
	}

	bcm_bprintf(b, "ChipNum:%x, ChipRev:%x, BusType:%x, BoardType:%x, BoardVendor:%x\n\n",
		sih->chip, sih->chiprev, sih->bustype, sih->boardtype, sih->boardvendor);

	for (i = 0; i < sii->axi_num_wrappers; i++) {

		if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
			/* Set BAR0 window to bridge wrapper base address */
			OSL_PCI_WRITE_CONFIG(osh,
				cfg_reg, 4, axi_wrapper[i].wrapper_addr);

			ai = (aidmp_t *) ((volatile uint8*)sii->curmap + bar0_win_offset);
		} else {
			ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr;
		}

		bcm_bprintf(b, "core 0x%x: core_rev:%d, %s_WR ADDR:%x \n", axi_wrapper[i].cid,
			axi_wrapper[i].rev,
			axi_wrapper[i].wrapper_type == AI_SLAVE_WRAPPER ? "SLAVE" : "MASTER",
			axi_wrapper[i].wrapper_addr);

		bcm_bprintf(b, "ioctrlset 0x%x ioctrlclear 0x%x ioctrl 0x%x iostatus 0x%x "
			    "ioctrlwidth 0x%x iostatuswidth 0x%x\n"
			    "resetctrl 0x%x resetstatus 0x%x resetreadid 0x%x resetwriteid 0x%x\n"
			    "errlogctrl 0x%x errlogdone 0x%x errlogstatus 0x%x "
			    "errlogaddrlo 0x%x errlogaddrhi 0x%x\n"
			    "errlogid 0x%x errloguser 0x%x errlogflags 0x%x\n"
			    "intstatus 0x%x config 0x%x itcr 0x%x\n\n",
			    R_REG(osh, &ai->ioctrlset),
			    R_REG(osh, &ai->ioctrlclear),
			    R_REG(osh, &ai->ioctrl),
			    R_REG(osh, &ai->iostatus),
			    R_REG(osh, &ai->ioctrlwidth),
			    R_REG(osh, &ai->iostatuswidth),
			    R_REG(osh, &ai->resetctrl),
			    R_REG(osh, &ai->resetstatus),
			    R_REG(osh, &ai->resetreadid),
			    R_REG(osh, &ai->resetwriteid),
			    R_REG(osh, &ai->errlogctrl),
			    R_REG(osh, &ai->errlogdone),
			    R_REG(osh, &ai->errlogstatus),
			    R_REG(osh, &ai->errlogaddrlo),
			    R_REG(osh, &ai->errlogaddrhi),
			    R_REG(osh, &ai->errlogid),
			    R_REG(osh, &ai->errloguser),
			    R_REG(osh, &ai->errlogflags),
			    R_REG(osh, &ai->intstatus),
			    R_REG(osh, &ai->config),
			    R_REG(osh, &ai->itcr));
	}

	/* Restore the initial wrapper space */
	if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
		if (prev_value && cfg_reg) {
			OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value);
		}
	}
}
#endif	/* BCMDBG || BCMDBG_DUMP || BCMDBG_PHYDUMP */

#ifdef BCMDBG
static void
_ai_view(osl_t *osh, aidmp_t *ai, uint32 cid, uint32 addr, bool verbose)
{
	uint32 config;

	config = R_REG(osh, &ai->config);
	SI_PRINT(("\nCore ID: 0x%x, addr 0x%x, config 0x%x\n", cid, addr, config));

	if (config & AICFG_RST)
		SI_PRINT(("resetctrl 0x%x, resetstatus 0x%x, resetreadid 0x%x, resetwriteid 0x%x\n",
		          R_REG(osh, &ai->resetctrl), R_REG(osh, &ai->resetstatus),
		          R_REG(osh, &ai->resetreadid), R_REG(osh, &ai->resetwriteid)));

	if (config & AICFG_IOC)
		SI_PRINT(("ioctrl 0x%x, width %d\n", R_REG(osh, &ai->ioctrl),
		          R_REG(osh, &ai->ioctrlwidth)));

	if (config & AICFG_IOS)
		SI_PRINT(("iostatus 0x%x, width %d\n", R_REG(osh, &ai->iostatus),
		          R_REG(osh, &ai->iostatuswidth)));

	if (config & AICFG_ERRL) {
		SI_PRINT(("errlogctrl 0x%x, errlogdone 0x%x, errlogstatus 0x%x, intstatus 0x%x\n",
		          R_REG(osh, &ai->errlogctrl), R_REG(osh, &ai->errlogdone),
		          R_REG(osh, &ai->errlogstatus), R_REG(osh, &ai->intstatus)));
		SI_PRINT(("errlogid 0x%x, errloguser 0x%x, errlogflags 0x%x, errlogaddr "
		          "0x%x/0x%x\n",
		          R_REG(osh, &ai->errlogid), R_REG(osh, &ai->errloguser),
		          R_REG(osh, &ai->errlogflags), R_REG(osh, &ai->errlogaddrhi),
		          R_REG(osh, &ai->errlogaddrlo)));
	}

	if (verbose && (config & AICFG_OOB)) {
		SI_PRINT(("oobselina30 0x%x, oobselina74 0x%x\n",
		          R_REG(osh, &ai->oobselina30), R_REG(osh, &ai->oobselina74)));
		SI_PRINT(("oobselinb30 0x%x, oobselinb74 0x%x\n",
		          R_REG(osh, &ai->oobselinb30), R_REG(osh, &ai->oobselinb74)));
		SI_PRINT(("oobselinc30 0x%x, oobselinc74 0x%x\n",
		          R_REG(osh, &ai->oobselinc30), R_REG(osh, &ai->oobselinc74)));
		SI_PRINT(("oobselind30 0x%x, oobselind74 0x%x\n",
		          R_REG(osh, &ai->oobselind30), R_REG(osh, &ai->oobselind74)));
		SI_PRINT(("oobselouta30 0x%x, oobselouta74 0x%x\n",
		          R_REG(osh, &ai->oobselouta30), R_REG(osh, &ai->oobselouta74)));
		SI_PRINT(("oobseloutb30 0x%x, oobseloutb74 0x%x\n",
		          R_REG(osh, &ai->oobseloutb30), R_REG(osh, &ai->oobseloutb74)));
		SI_PRINT(("oobseloutc30 0x%x, oobseloutc74 0x%x\n",
		          R_REG(osh, &ai->oobseloutc30), R_REG(osh, &ai->oobseloutc74)));
		SI_PRINT(("oobseloutd30 0x%x, oobseloutd74 0x%x\n",
		          R_REG(osh, &ai->oobseloutd30), R_REG(osh, &ai->oobseloutd74)));
		SI_PRINT(("oobsynca 0x%x, oobseloutaen 0x%x\n",
		          R_REG(osh, &ai->oobsynca), R_REG(osh, &ai->oobseloutaen)));
		SI_PRINT(("oobsyncb 0x%x, oobseloutben 0x%x\n",
		          R_REG(osh, &ai->oobsyncb), R_REG(osh, &ai->oobseloutben)));
		SI_PRINT(("oobsyncc 0x%x, oobseloutcen 0x%x\n",
		          R_REG(osh, &ai->oobsyncc), R_REG(osh, &ai->oobseloutcen)));
		SI_PRINT(("oobsyncd 0x%x, oobseloutden 0x%x\n",
		          R_REG(osh, &ai->oobsyncd), R_REG(osh, &ai->oobseloutden)));
		SI_PRINT(("oobaextwidth 0x%x, oobainwidth 0x%x, oobaoutwidth 0x%x\n",
		          R_REG(osh, &ai->oobaextwidth), R_REG(osh, &ai->oobainwidth),
		          R_REG(osh, &ai->oobaoutwidth)));
		SI_PRINT(("oobbextwidth 0x%x, oobbinwidth 0x%x, oobboutwidth 0x%x\n",
		          R_REG(osh, &ai->oobbextwidth), R_REG(osh, &ai->oobbinwidth),
		          R_REG(osh, &ai->oobboutwidth)));
		SI_PRINT(("oobcextwidth 0x%x, oobcinwidth 0x%x, oobcoutwidth 0x%x\n",
		          R_REG(osh, &ai->oobcextwidth), R_REG(osh, &ai->oobcinwidth),
		          R_REG(osh, &ai->oobcoutwidth)));
		SI_PRINT(("oobdextwidth 0x%x, oobdinwidth 0x%x, oobdoutwidth 0x%x\n",
		          R_REG(osh, &ai->oobdextwidth), R_REG(osh, &ai->oobdinwidth),
		          R_REG(osh, &ai->oobdoutwidth)));
	}
}

void
ai_view(const si_t *sih, bool verbose)
{
	const si_info_t *sii = SI_INFO(sih);
	const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
	osl_t *osh;
	aidmp_t *ai;
	uint32 cid, addr;

	ai = sii->curwrap;
	osh = sii->osh;

	if (PMU_DMP()) {
		SI_ERROR(("Cannot access pmu DMP\n"));
		return;
	}
	cid = cores_info->coreid[sii->curidx];
	addr = cores_info->wrapba[sii->curidx];
	_ai_view(osh, ai, cid, addr, verbose);
}

void
ai_viewall(si_t *sih, bool verbose)
{
	const si_info_t *sii = SI_INFO(sih);
	const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
	osl_t *osh;
	aidmp_t *ai;
	uint32 cid, addr;
	uint i;

	osh = sii->osh;
	for (i = 0; i < sii->numcores; i++) {
		si_setcoreidx(sih, i);

		if (PMU_DMP()) {
			SI_ERROR(("Skipping pmu DMP\n"));
			continue;
		}
		ai = sii->curwrap;
		cid = cores_info->coreid[sii->curidx];
		addr = cores_info->wrapba[sii->curidx];
		_ai_view(osh, ai, cid, addr, verbose);
	}
}
#endif	/* BCMDBG */

1638 void
ai_update_backplane_timeouts(const si_t * sih,bool enable,uint32 timeout_exp,uint32 cid)1639 ai_update_backplane_timeouts(const si_t *sih, bool enable, uint32 timeout_exp, uint32 cid)
1640 {
1641 #if defined(AXI_TIMEOUTS) || defined(AXI_TIMEOUTS_NIC)
1642 	const si_info_t *sii = SI_INFO(sih);
1643 	aidmp_t *ai;
1644 	uint32 i;
1645 	axi_wrapper_t * axi_wrapper = sii->axi_wrapper;
1646 	uint32 errlogctrl = (enable << AIELC_TO_ENAB_SHIFT) |
1647 		((timeout_exp << AIELC_TO_EXP_SHIFT) & AIELC_TO_EXP_MASK);
1648 
1649 #ifdef AXI_TIMEOUTS_NIC
1650 	uint32 prev_value = 0;
1651 	osl_t *osh = sii->osh;
1652 	uint32 cfg_reg = 0;
1653 	uint32 offset = 0;
1654 #endif /* AXI_TIMEOUTS_NIC */
1655 
1656 	if ((sii->axi_num_wrappers == 0) ||
1657 #ifdef AXI_TIMEOUTS_NIC
1658 		(!PCIE(sii)) ||
1659 #endif /* AXI_TIMEOUTS_NIC */
1660 		FALSE) {
1661 		SI_VMSG((" iai_update_backplane_timeouts, axi_num_wrappers:%d, Is_PCIE:%d,"
1662 			" BUS_TYPE:%d, ID:%x\n",
1663 			sii->axi_num_wrappers, PCIE(sii),
1664 			BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
1665 		return;
1666 	}
1667 
1668 #ifdef AXI_TIMEOUTS_NIC
1669 	/* Save and restore the wrapper access window */
1670 	if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
1671 		if (PCIE_GEN1(sii)) {
1672 			cfg_reg = PCI_BAR0_WIN2;
1673 			offset = PCI_BAR0_WIN2_OFFSET;
1674 		} else if (PCIE_GEN2(sii)) {
1675 			cfg_reg = PCIE2_BAR0_CORE2_WIN2;
1676 			offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
1677 		}
1678 		else {
1679 			ASSERT(!"!PCIE_GEN1 && !PCIE_GEN2");
1680 		}
1681 
1682 		prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4);
1683 		if (prev_value == ID32_INVALID) {
1684 			SI_PRINT(("ai_update_backplane_timeouts, PCI_BAR0_WIN2 - %x\n",
1685 				prev_value));
1686 			return;
1687 		}
1688 	}
1689 #endif /* AXI_TIMEOUTS_NIC */
1690 
1691 	for (i = 0; i < sii->axi_num_wrappers; ++i) {
1692 		/* WAR for wrong EROM entries w.r.t. slave and master wrappers
1693 		 * for the ADB bridge core; check the actual wrapper config to determine the type.
1694 		 * http://jira.broadcom.com/browse/HW4388-905
1695 		 */
1696 		if ((cid == 0 || cid == ADB_BRIDGE_ID) &&
1697 				(axi_wrapper[i].cid == ADB_BRIDGE_ID)) {
1698 			/* WAR is applicable only to 4389B0 and 4389C0 (ccrev 70) */
1699 			if (CCREV(sih->ccrev) == 70) {
1700 				ai = (aidmp_t *)(uintptr)axi_wrapper[i].wrapper_addr;
1701 				if (R_REG(sii->osh, &ai->config) & WRAPPER_TIMEOUT_CONFIG) {
1702 					axi_wrapper[i].wrapper_type  = AI_SLAVE_WRAPPER;
1703 				} else {
1704 					axi_wrapper[i].wrapper_type  = AI_MASTER_WRAPPER;
1705 				}
1706 			}
1707 		}
1708 		if (axi_wrapper[i].wrapper_type != AI_SLAVE_WRAPPER || ((BCM4389_CHIP(sih->chip) ||
1709 				BCM4388_CHIP(sih->chip)) &&
1710 				(axi_wrapper[i].wrapper_addr == WL_BRIDGE1_S ||
1711 				axi_wrapper[i].wrapper_addr == WL_BRIDGE2_S))) {
1712 			SI_VMSG(("SKIP ENABLE BPT: MFG:%x, CID:%x, ADDR:%x\n",
1713 				axi_wrapper[i].mfg,
1714 				axi_wrapper[i].cid,
1715 				axi_wrapper[i].wrapper_addr));
1716 			continue;
1717 		}
1718 
1719 		/* Update only given core if requested */
1720 		if ((cid != 0) && (axi_wrapper[i].cid != cid)) {
1721 			continue;
1722 		}
1723 
1724 #ifdef AXI_TIMEOUTS_NIC
1725 		if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
1726 			/* Set BAR0_CORE2_WIN2 to the bridge wrapper base address */
1727 			OSL_PCI_WRITE_CONFIG(osh,
1728 				cfg_reg, 4, axi_wrapper[i].wrapper_addr);
1729 
1730 			/* set AI to BAR0 + offset corresponding to Gen1 or Gen2 */
1731 			ai = (aidmp_t *) (DISCARD_QUAL(sii->curmap, uint8) + offset);
1732 		}
1733 		else
1734 #endif /* AXI_TIMEOUTS_NIC */
1735 		{
1736 			ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr;
1737 		}
1738 
1739 		W_REG(sii->osh, &ai->errlogctrl, errlogctrl);
1740 
1741 		SI_VMSG(("ENABLED BPT: MFG:%x, CID:%x, ADDR:%x, ERR_CTRL:%x\n",
1742 			axi_wrapper[i].mfg,
1743 			axi_wrapper[i].cid,
1744 			axi_wrapper[i].wrapper_addr,
1745 			R_REG(sii->osh, &ai->errlogctrl)));
1746 	}
1747 
1748 #ifdef AXI_TIMEOUTS_NIC
1749 	/* Restore the initial wrapper space */
1750 	if (prev_value) {
1751 		OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value);
1752 	}
1753 #endif /* AXI_TIMEOUTS_NIC */
1754 
1755 #endif /* AXI_TIMEOUTS || AXI_TIMEOUTS_NIC */
1756 }
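/*
 * Illustrative caller sketch (hypothetical, not part of this file): enable
 * backplane timeouts on all slave wrappers, then disable them again. The
 * 'sih' handle and the exponent value 9 are assumptions for the example;
 * the exponent is encoded into errlogctrl via AIELC_TO_EXP_SHIFT/MASK as
 * shown above.
 *
 *	ai_update_backplane_timeouts(sih, TRUE, 9, 0);	(cid 0 = all cores)
 *	...
 *	ai_update_backplane_timeouts(sih, FALSE, 0, 0);	(tear down)
 */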
1757 
1758 #if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC)
1759 
1760 /* some slave errors are deliberately ignored; count those occurrences */
1761 static uint32 si_ignore_errlog_cnt = 0;
1762 
1763 static bool
1764 BCMPOSTTRAPFN(ai_ignore_errlog)(const si_info_t *sii, const aidmp_t *ai,
1765 	uint32 lo_addr, uint32 hi_addr, uint32 err_axi_id, uint32 errsts)
1766 {
1767 	uint32 ignore_errsts = AIELS_SLAVE_ERR;
1768 	uint32 ignore_errsts_2 = 0;
1769 	uint32 ignore_hi = BT_CC_SPROM_BADREG_HI;
1770 	uint32 ignore_lo = BT_CC_SPROM_BADREG_LO;
1771 	uint32 ignore_size = BT_CC_SPROM_BADREG_SIZE;
1772 	bool address_check = TRUE;
1773 	uint32 axi_id = 0;
1774 	uint32 axi_id2 = 0;
1775 	bool extd_axi_id_mask = FALSE;
1776 	uint32 axi_id_mask;
1777 
1778 	SI_PRINT(("err check: core %p, error %d, axi id 0x%04x, addr(0x%08x:%08x)\n",
1779 		ai, errsts, err_axi_id, hi_addr, lo_addr));
1780 
1781 	/* ignore the BT slave errors if the errlog is to chipcommon addr 0x190 */
1782 	switch (CHIPID(sii->pub.chip)) {
1783 #if defined(BT_WLAN_REG_ON_WAR)
1784 		/*
1785 		 * 4389B0/C0 - WL and BT REG_ON WAR: ignore AXI errors originating from the
1786 		 * AHB-AXI bridge, i.e., any slave error or timeout from BT access
1787 		 */
1788 		case BCM4389_CHIP_GRPID:
1789 			axi_id = BCM4389_BT_AXI_ID;
1790 			ignore_errsts = AIELS_SLAVE_ERR;
1791 			axi_id2 = BCM4389_BT_AXI_ID;
1792 			ignore_errsts_2 = AIELS_TIMEOUT;
1793 			address_check = FALSE;
1794 			extd_axi_id_mask = TRUE;
1795 			break;
1796 #endif /* BT_WLAN_REG_ON_WAR */
1797 #ifdef BTOVERPCIE
1798 		case BCM4388_CHIP_GRPID:
1799 			axi_id = BCM4388_BT_AXI_ID;
1800 			/* For BT over PCIE, ignore any slave error from BT. */
1801 			/* No need to check any address range */
1802 			address_check = FALSE;
1803 			ignore_errsts_2 = AIELS_DECODE;
1804 			break;
1805 		case BCM4369_CHIP_GRPID:
1806 			axi_id = BCM4369_BT_AXI_ID;
1807 			/* For BT over PCIE, ignore any slave error from BT. */
1808 			/* No need to check any address range */
1809 			address_check = FALSE;
1810 			ignore_errsts_2 = AIELS_DECODE;
1811 			break;
1812 #endif /* BTOVERPCIE */
1813 		case BCM4376_CHIP_GRPID:
1814 		case BCM4378_CHIP_GRPID:
1815 		case BCM4385_CHIP_GRPID:
1816 		case BCM4387_CHIP_GRPID:
1817 #ifdef BTOVERPCIE
1818 			axi_id = BCM4378_BT_AXI_ID;
1819 			/* For BT over PCIE, ignore any slave error from BT. */
1820 			/* No need to check any address range */
1821 			address_check = FALSE;
1822 #endif /* BTOVERPCIE */
1823 			axi_id2 = BCM4378_ARM_PREFETCH_AXI_ID;
1824 			extd_axi_id_mask = TRUE;
1825 			ignore_errsts_2 = AIELS_DECODE;
1826 			break;
1827 #ifdef USE_HOSTMEM
1828 		case BCM43602_CHIP_ID:
1829 			axi_id = BCM43602_BT_AXI_ID;
1830 			address_check = FALSE;
1831 			break;
1832 #endif /* USE_HOSTMEM */
1833 		default:
1834 			return FALSE;
1835 	}
1836 
1837 	axi_id_mask = extd_axi_id_mask ? AI_ERRLOGID_AXI_ID_MASK_EXTD : AI_ERRLOGID_AXI_ID_MASK;
1838 
1839 	/* AXI ID check */
1840 	err_axi_id &= axi_id_mask;
1841 	errsts &= AIELS_ERROR_MASK;
1842 
1843 	/* check the two ignore-error cases */
1844 	if (!(((err_axi_id == axi_id) && (errsts == ignore_errsts)) ||
1845 		((err_axi_id == axi_id2) && (errsts == ignore_errsts_2)))) {
1846 		/* not the error ignore cases */
1847 		return FALSE;
1848 
1849 	}
1850 
1851 	/* now apply the specific address checks, if requested */
1852 	if (address_check) {
1853 		/* address range check */
1854 		if ((hi_addr != ignore_hi) ||
1855 		    (lo_addr < ignore_lo) || (lo_addr >= (ignore_lo + ignore_size))) {
1856 			return FALSE;
1857 		}
1858 	}
1859 
1860 	SI_PRINT(("err check: ignored\n"));
1861 	return TRUE;
1862 }
1863 #endif /* defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC) */
1864 
1865 #ifdef AXI_TIMEOUTS_NIC
1866 
1867 /* Function to return the APB bridge details corresponding to the core */
1868 static bool
1869 ai_get_apb_bridge(const si_t *sih, uint32 coreidx, uint32 *apb_id, uint32 *apb_coreunit)
1870 {
1871 	uint i;
1872 	uint32 core_base, core_end;
1873 	const si_info_t *sii = SI_INFO(sih);
1874 	static uint32 coreidx_cached = 0, apb_id_cached = 0, apb_coreunit_cached = 0;
1875 	uint32 tmp_coreunit = 0;
1876 	const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
1877 
1878 	if (coreidx >= MIN(sii->numcores, SI_MAXCORES))
1879 		return FALSE;
1880 
1881 	/* Most of the time the APB bridge query will be for the D11 core.
1882 	 * Cache the last result and return it on a hit rather than iterating the table.
1883 	 */
1884 	if (coreidx_cached == coreidx) {
1885 		*apb_id = apb_id_cached;
1886 		*apb_coreunit = apb_coreunit_cached;
1887 		return TRUE;
1888 	}
1889 
1890 	core_base = cores_info->coresba[coreidx];
1891 	core_end = core_base + cores_info->coresba_size[coreidx];
1892 
1893 	for (i = 0; i < sii->numcores; i++) {
1894 		if (cores_info->coreid[i] == APB_BRIDGE_ID) {
1895 			uint32 apb_base;
1896 			uint32 apb_end;
1897 
1898 			apb_base = cores_info->coresba[i];
1899 			apb_end = apb_base + cores_info->coresba_size[i];
1900 
1901 			if ((core_base >= apb_base) &&
1902 				(core_end <= apb_end)) {
1903 				/* Current core is attached to this APB bridge */
1904 				*apb_id = apb_id_cached = APB_BRIDGE_ID;
1905 				*apb_coreunit = apb_coreunit_cached = tmp_coreunit;
1906 				coreidx_cached = coreidx;
1907 				return TRUE;
1908 			}
1909 			/* Increment the coreunit */
1910 			tmp_coreunit++;
1911 		}
1912 	}
1913 
1914 	return FALSE;
1915 }
1916 
1917 uint32
1918 ai_clear_backplane_to_fast(si_t *sih, void *addr)
1919 {
1920 	const si_info_t *sii = SI_INFO(sih);
1921 	volatile const void *curmap = sii->curmap;
1922 	bool core_reg = FALSE;
1923 
1924 	/* Use fast path only for core register access */
1925 	if (((uintptr)addr >= (uintptr)curmap) &&
1926 		((uintptr)addr < ((uintptr)curmap + SI_CORE_SIZE))) {
1927 		/* address being accessed is within current core reg map */
1928 		core_reg = TRUE;
1929 	}
1930 
1931 	if (core_reg) {
1932 		uint32 apb_id, apb_coreunit;
1933 
1934 		if (ai_get_apb_bridge(sih, si_coreidx(&sii->pub),
1935 			&apb_id, &apb_coreunit) == TRUE) {
1936 			/* Found the APB bridge corresponding to current core,
1937 			 * Check for bus errors in APB wrapper
1938 			 */
1939 			return ai_clear_backplane_to_per_core(sih,
1940 				apb_id, apb_coreunit, NULL);
1941 		}
1942 	}
1943 
1944 	/* Default is to poll for errors on all slave wrappers */
1945 	return si_clear_backplane_to(sih);
1946 }
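/*
 * Illustrative sketch (hypothetical): fast-path check after a suspicious
 * register read. 'regs' is assumed to point into the current core's
 * register window, so only the APB bridge serving that core is polled.
 *
 *	if (R_REG(osh, regs) == ID32_INVALID)
 *		(void)ai_clear_backplane_to_fast(sih, (void *)regs);
 */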
1947 #endif /* AXI_TIMEOUTS_NIC */
1948 
1949 #if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC)
1950 static bool g_disable_backplane_logs = FALSE;
1951 
1952 static uint32 last_axi_error = AXI_WRAP_STS_NONE;
1953 static uint32 last_axi_error_log_status = 0;
1954 static uint32 last_axi_error_core = 0;
1955 static uint32 last_axi_error_wrap = 0;
1956 static uint32 last_axi_errlog_lo = 0;
1957 static uint32 last_axi_errlog_hi = 0;
1958 static uint32 last_axi_errlog_id = 0;
1959 
1960 /*
1961  * API to clear the backplane timeout per core.
1962  * The caller may pass an optional wrapper address; if present, it is used as
1963  * the wrapper base address, and the caller must then also provide the
1964  * coreid.
1965  * If both coreid and wrapper are zero, the error status of the current
1966  * bridge is checked.
1967  */
1968 uint32
1969 BCMPOSTTRAPFN(ai_clear_backplane_to_per_core)(si_t *sih, uint coreid, uint coreunit, void *wrap)
1970 {
1971 	int ret = AXI_WRAP_STS_NONE;
1972 	aidmp_t *ai = NULL;
1973 	uint32 errlog_status = 0;
1974 	const si_info_t *sii = SI_INFO(sih);
1975 	uint32 errlog_lo = 0, errlog_hi = 0, errlog_id = 0, errlog_flags = 0;
1976 	uint32 current_coreidx = si_coreidx(sih);
1977 	uint32 target_coreidx = si_findcoreidx(sih, coreid, coreunit);
1978 
1979 #if defined(AXI_TIMEOUTS_NIC)
1980 	si_axi_error_t * axi_error = sih->err_info ?
1981 		&sih->err_info->axi_error[sih->err_info->count] : NULL;
1982 #endif /* AXI_TIMEOUTS_NIC */
1983 	bool restore_core = FALSE;
1984 
1985 	if ((sii->axi_num_wrappers == 0) ||
1986 #ifdef AXI_TIMEOUTS_NIC
1987 		(!PCIE(sii)) ||
1988 #endif /* AXI_TIMEOUTS_NIC */
1989 		FALSE) {
1990 		SI_VMSG(("ai_clear_backplane_to_per_core, axi_num_wrappers:%d, Is_PCIE:%d,"
1991 			" BUS_TYPE:%d, ID:%x\n",
1992 			sii->axi_num_wrappers, PCIE(sii),
1993 			BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
1994 		return AXI_WRAP_STS_NONE;
1995 	}
1996 
1997 	if (wrap != NULL) {
1998 		ai = (aidmp_t *)wrap;
1999 	} else if (coreid && (target_coreidx != current_coreidx)) {
2000 
2001 		if (ai_setcoreidx(sih, target_coreidx) == NULL) {
2002 			/* Unable to set the core */
2003 			SI_PRINT(("Set Core Failed: coreid:%x, unit:%d, target_coreidx:%d\n",
2004 				coreid, coreunit, target_coreidx));
2005 			errlog_lo = target_coreidx;
2006 			ret = AXI_WRAP_STS_SET_CORE_FAIL;
2007 			goto end;
2008 		}
2009 
2010 		restore_core = TRUE;
2011 		ai = (aidmp_t *)si_wrapperregs(sih);
2012 	} else {
2013 		/* Read error status of current wrapper */
2014 		ai = (aidmp_t *)si_wrapperregs(sih);
2015 
2016 		/* Update coreid to the current core's ID */
2017 		coreid = si_coreid(sih);
2018 	}
2019 
2020 	/* read error log status */
2021 	errlog_status = R_REG(sii->osh, &ai->errlogstatus);
2022 
2023 	if (errlog_status == ID32_INVALID) {
2024 		/* Do not try to peek further */
2025 		SI_PRINT(("ai_clear_backplane_to_per_core, errlogstatus:%x - Slave Wrapper:%x\n",
2026 			errlog_status, coreid));
2027 		ret = AXI_WRAP_STS_WRAP_RD_ERR;
2028 		errlog_lo = (uint32)(uintptr)&ai->errlogstatus;
2029 		goto end;
2030 	}
2031 
2032 	if ((errlog_status & AIELS_ERROR_MASK) != 0) {
2033 		uint32 tmp;
2034 		uint32 count = 0;
2035 		/* set ErrDone to clear the condition */
2036 		W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK);
2037 
2038 		/* SPINWAIT on errlogstatus timeout status bits */
2039 		while ((tmp = R_REG(sii->osh, &ai->errlogstatus)) & AIELS_ERROR_MASK) {
2040 
2041 			if (tmp == ID32_INVALID) {
2042 				SI_PRINT(("ai_clear_backplane_to_per_core: prev errlogstatus:%x,"
2043 					" errlogstatus:%x\n",
2044 					errlog_status, tmp));
2045 				ret = AXI_WRAP_STS_WRAP_RD_ERR;
2046 				errlog_lo = (uint32)(uintptr)&ai->errlogstatus;
2047 				goto end;
2048 			}
2049 			/*
2050 			 * Clear again, to avoid getting stuck in the loop, if a new error
2051 			 * is logged after we cleared the first timeout
2052 			 */
2053 			W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK);
2054 
2055 			count++;
2056 			OSL_DELAY(10);
2057 			if ((10 * count) > AI_REG_READ_TIMEOUT) {
2058 				errlog_status = tmp;
2059 				break;
2060 			}
2061 		}
2062 
2063 		errlog_lo = R_REG(sii->osh, &ai->errlogaddrlo);
2064 		errlog_hi = R_REG(sii->osh, &ai->errlogaddrhi);
2065 		errlog_id = R_REG(sii->osh, &ai->errlogid);
2066 		errlog_flags = R_REG(sii->osh, &ai->errlogflags);
2067 
2068 		/* we are already in the error path, so it is OK to check for the slave error */
2069 		if (ai_ignore_errlog(sii, ai, errlog_lo, errlog_hi, errlog_id,
2070 			errlog_status)) {
2071 			si_ignore_errlog_cnt++;
2072 			goto end;
2073 		}
2074 
2075 		/* only reset the APB bridge on timeout (not on slave or decode errors) */
2076 		switch (errlog_status & AIELS_ERROR_MASK) {
2077 			case AIELS_SLAVE_ERR:
2078 				SI_PRINT(("AXI slave error\n"));
2079 				ret |= AXI_WRAP_STS_SLAVE_ERR;
2080 				break;
2081 
2082 			case AIELS_TIMEOUT:
2083 				ai_reset_axi_to(sii, ai);
2084 				ret |= AXI_WRAP_STS_TIMEOUT;
2085 				break;
2086 
2087 			case AIELS_DECODE:
2088 				SI_PRINT(("AXI decode error\n"));
2089 #ifdef USE_HOSTMEM
2090 				/* Ignore known cases of CR4 prefetch abort bugs */
2091 				if ((errlog_id & (BCM_AXI_ID_MASK | BCM_AXI_ACCESS_TYPE_MASK)) !=
2092 					(BCM43xx_AXI_ACCESS_TYPE_PREFETCH | BCM43xx_CR4_AXI_ID))
2093 #endif
2094 				{
2095 					ret |= AXI_WRAP_STS_DECODE_ERR;
2096 				}
2097 				break;
2098 			default:
2099 				ASSERT(0);	/* should be impossible */
2100 		}
2101 
2102 		if (errlog_status & AIELS_MULTIPLE_ERRORS) {
2103 			SI_PRINT(("Multiple AXI Errors\n"));
2104 			/* Set multiple errors bit only if actual error is not ignored */
2105 			if (ret) {
2106 				ret |= AXI_WRAP_STS_MULTIPLE_ERRORS;
2107 			}
2108 		}
2109 
2110 		SI_PRINT(("\tCoreID: %x\n", coreid));
2111 		SI_PRINT(("\t errlog: lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x"
2112 			", status 0x%08x\n",
2113 			errlog_lo, errlog_hi, errlog_id, errlog_flags,
2114 			errlog_status));
2115 	}
2116 
2117 end:
2118 	if (ret != AXI_WRAP_STS_NONE) {
2119 		last_axi_error = ret;
2120 		last_axi_error_log_status = errlog_status;
2121 		last_axi_error_core = coreid;
2122 		last_axi_error_wrap = (uint32)(uintptr)ai;
2123 		last_axi_errlog_lo = errlog_lo;
2124 		last_axi_errlog_hi = errlog_hi;
2125 		last_axi_errlog_id = errlog_id;
2126 	}
2127 
2128 #if defined(AXI_TIMEOUTS_NIC)
2129 	if (axi_error && (ret != AXI_WRAP_STS_NONE)) {
2130 		axi_error->error = ret;
2131 		axi_error->coreid = coreid;
2132 		axi_error->errlog_lo = errlog_lo;
2133 		axi_error->errlog_hi = errlog_hi;
2134 		axi_error->errlog_id = errlog_id;
2135 		axi_error->errlog_flags = errlog_flags;
2136 		axi_error->errlog_status = errlog_status;
2137 		sih->err_info->count++;
2138 
2139 		if (sih->err_info->count == SI_MAX_ERRLOG_SIZE) {
2140 			sih->err_info->count = SI_MAX_ERRLOG_SIZE - 1;
2141 			SI_PRINT(("AXI Error log overflow\n"));
2142 		}
2143 	}
2144 #endif /* AXI_TIMEOUTS_NIC */
2145 
2146 	if (restore_core) {
2147 		if (ai_setcoreidx(sih, current_coreidx) == NULL) {
2148 			/* Unable to set the core */
2149 			return ID32_INVALID;
2150 		}
2151 	}
2152 
2153 	return ret;
2154 }
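/*
 * Illustrative sketch (hypothetical): query the error status of the
 * current core's wrapper. A timeout result means the call has already
 * reset the APB bridge (see ai_reset_axi_to() below).
 *
 *	uint32 sts = ai_clear_backplane_to_per_core(sih, 0, 0, NULL);
 *	if (sts & AXI_WRAP_STS_TIMEOUT)
 *		SI_PRINT(("backplane timeout cleared\n"));
 */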
2155 
2156 /* reset AXI timeout */
2157 static void
2158 BCMPOSTTRAPFN(ai_reset_axi_to)(const si_info_t *sii, aidmp_t *ai)
2159 {
2160 	/* reset APB Bridge */
2161 	OR_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
2162 	/* sync write */
2163 	(void)R_REG(sii->osh, &ai->resetctrl);
2164 	/* clear Reset bit */
2165 	AND_REG(sii->osh, &ai->resetctrl, ~(AIRC_RESET));
2166 	/* sync write */
2167 	(void)R_REG(sii->osh, &ai->resetctrl);
2168 	SI_PRINT(("AXI timeout\n"));
2169 	if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) {
2170 		SI_PRINT(("reset failed on wrapper %p\n", ai));
2171 		g_disable_backplane_logs = TRUE;
2172 	}
2173 }
2174 
2175 void
2176 BCMPOSTTRAPFN(ai_wrapper_get_last_error)(const si_t *sih, uint32 *error_status, uint32 *core,
2177 	uint32 *lo, uint32 *hi, uint32 *id)
2178 {
2179 	*error_status = last_axi_error_log_status;
2180 	*core = last_axi_error_core;
2181 	*lo = last_axi_errlog_lo;
2182 	*hi = last_axi_errlog_hi;
2183 	*id = last_axi_errlog_id;
2184 }
2185 
2186 /* Function to check whether an AXI timeout has been recorded on a core */
2187 uint32
2188 ai_get_axi_timeout_reg(void)
2189 {
2190 	return (GOODREGS(last_axi_errlog_lo) ? last_axi_errlog_lo : 0);
2191 }
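/*
 * Illustrative sketch (hypothetical): after a trap, fetch the last
 * recorded AXI error for logging. All output pointers must be valid;
 * the values are meaningful only if an error was recorded earlier.
 *
 *	uint32 sts, core, lo, hi, id;
 *
 *	if (ai_get_axi_timeout_reg() != 0) {
 *		ai_wrapper_get_last_error(sih, &sts, &core, &lo, &hi, &id);
 *		SI_PRINT(("AXI err: core %x addr %x:%x id %x sts %x\n",
 *			core, hi, lo, id, sts));
 *	}
 */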
2192 #endif /* AXI_TIMEOUTS || AXI_TIMEOUTS_NIC */
2193 
2194 uint32
2195 BCMPOSTTRAPFN(ai_findcoreidx_by_axiid)(const si_t *sih, uint32 axiid)
2196 {
2197 	uint coreid = 0;
2198 	uint coreunit = 0;
2199 	const axi_to_coreidx_t *axi2coreidx = NULL;
2200 	switch (CHIPID(sih->chip)) {
2201 		case BCM4369_CHIP_GRPID:
2202 			axi2coreidx = axi2coreidx_4369;
2203 			break;
2204 		default:
2205 			SI_PRINT(("Chipid mapping not found\n"));
2206 			break;
2207 	}
2208 
2209 	if (!axi2coreidx)
2210 		return (BADIDX);
2211 
2212 	coreid = axi2coreidx[axiid].coreid;
2213 	coreunit = axi2coreidx[axiid].coreunit;
2214 
2215 	return si_findcoreidx(sih, coreid, coreunit);
2216 
2217 }
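/*
 * Illustrative sketch (hypothetical): map the AXI id captured in an
 * errlogid register to a core index (4369 family only, per the
 * axi2coreidx_4369 table above). The 0xF mask is an assumption based on
 * the 16-entry table.
 *
 *	uint32 idx = ai_findcoreidx_by_axiid(sih, errlog_id & 0xFu);
 *	if (idx != BADIDX)
 *		si_setcoreidx(sih, idx);
 */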
2218 
2219 /*
2220  * This API polls all slave wrappers for errors and returns a bitmap of
2221  * all reported errors.
2222  * return - bitmap of
2223  *	AXI_WRAP_STS_NONE
2224  *	AXI_WRAP_STS_TIMEOUT
2225  *	AXI_WRAP_STS_SLAVE_ERR
2226  *	AXI_WRAP_STS_DECODE_ERR
2227  *	AXI_WRAP_STS_PCI_RD_ERR
2228  *	AXI_WRAP_STS_WRAP_RD_ERR
2229  *	AXI_WRAP_STS_SET_CORE_FAIL
2230  * On timeout detection, the corresponding bridge is reset to
2231  * unblock the bus.
2232  * The error reported by each wrapper can be retrieved using the API
2233  * si_get_axi_errlog_info()
2234  */
2235 uint32
2236 BCMPOSTTRAPFN(ai_clear_backplane_to)(si_t *sih)
2237 {
2238 	uint32 ret = 0;
2239 #if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC)
2240 	const si_info_t *sii = SI_INFO(sih);
2241 	aidmp_t *ai;
2242 	uint32 i;
2243 	axi_wrapper_t * axi_wrapper = sii->axi_wrapper;
2244 
2245 #ifdef AXI_TIMEOUTS_NIC
2246 	uint32 prev_value = 0;
2247 	osl_t *osh = sii->osh;
2248 	uint32 cfg_reg = 0;
2249 	uint32 offset = 0;
2250 
2251 	if ((sii->axi_num_wrappers == 0) || (!PCIE(sii)))
2252 #else
2253 	if (sii->axi_num_wrappers == 0)
2254 #endif
2255 	{
2256 		SI_VMSG(("ai_clear_backplane_to, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d,"
2257 			" ID:%x\n",
2258 			sii->axi_num_wrappers, PCIE(sii),
2259 			BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
2260 		return AXI_WRAP_STS_NONE;
2261 	}
2262 
2263 #ifdef AXI_TIMEOUTS_NIC
2264 	/* Save and restore wrapper access window */
2265 	if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
2266 		if (PCIE_GEN1(sii)) {
2267 			cfg_reg = PCI_BAR0_WIN2;
2268 			offset = PCI_BAR0_WIN2_OFFSET;
2269 		} else if (PCIE_GEN2(sii)) {
2270 			cfg_reg = PCIE2_BAR0_CORE2_WIN2;
2271 			offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
2272 		}
2273 		else {
2274 			ASSERT(!"!PCIE_GEN1 && !PCIE_GEN2");
2275 		}
2276 
2277 		prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4);
2278 
2279 		if (prev_value == ID32_INVALID) {
2280 			si_axi_error_t * axi_error =
2281 				sih->err_info ?
2282 					&sih->err_info->axi_error[sih->err_info->count] :
2283 					NULL;
2284 
2285 			SI_PRINT(("ai_clear_backplane_to, PCI_BAR0_WIN2 - %x\n", prev_value));
2286 			if (axi_error) {
2287 				axi_error->error = ret = AXI_WRAP_STS_PCI_RD_ERR;
2288 				axi_error->errlog_lo = cfg_reg;
2289 				sih->err_info->count++;
2290 
2291 				if (sih->err_info->count == SI_MAX_ERRLOG_SIZE) {
2292 					sih->err_info->count = SI_MAX_ERRLOG_SIZE - 1;
2293 					SI_PRINT(("AXI Error log overflow\n"));
2294 				}
2295 			}
2296 
2297 			return ret;
2298 		}
2299 	}
2300 #endif /* AXI_TIMEOUTS_NIC */
2301 
2302 	for (i = 0; i < sii->axi_num_wrappers; ++i) {
2303 		uint32 tmp;
2304 
2305 		if (axi_wrapper[i].wrapper_type != AI_SLAVE_WRAPPER) {
2306 			continue;
2307 		}
2308 
2309 #ifdef AXI_TIMEOUTS_NIC
2310 		if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
2311 			/* Set BAR0_CORE2_WIN2 to the bridge wrapper base address */
2312 			OSL_PCI_WRITE_CONFIG(osh,
2313 				cfg_reg, 4, axi_wrapper[i].wrapper_addr);
2314 
2315 			/* set AI to BAR0 + offset corresponding to Gen1 or Gen2 */
2316 			ai = (aidmp_t *) (DISCARD_QUAL(sii->curmap, uint8) + offset);
2317 		}
2318 		else
2319 #endif /* AXI_TIMEOUTS_NIC */
2320 		{
2321 			ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr;
2322 		}
2323 
2324 		tmp = ai_clear_backplane_to_per_core(sih, axi_wrapper[i].cid, 0,
2325 			DISCARD_QUAL(ai, void));
2326 
2327 		ret |= tmp;
2328 	}
2329 
2330 #ifdef AXI_TIMEOUTS_NIC
2331 	/* Restore the initial wrapper space */
2332 	if (prev_value) {
2333 		OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value);
2334 	}
2335 #endif /* AXI_TIMEOUTS_NIC */
2336 
2337 #endif /* AXI_TIMEOUTS || AXI_TIMEOUTS_NIC */
2338 
2339 	return ret;
2340 }
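/*
 * Illustrative sketch (hypothetical): periodic health check that polls
 * every slave wrapper and reports the aggregated status bitmap.
 *
 *	uint32 axi_sts = ai_clear_backplane_to(sih);
 *	if (axi_sts != AXI_WRAP_STS_NONE)
 *		SI_PRINT(("backplane errors: 0x%x\n", axi_sts));
 */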
2341 
2342 uint
2343 ai_num_slaveports(const si_t *sih, uint coreidx)
2344 {
2345 	const si_info_t *sii = SI_INFO(sih);
2346 	const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
2347 	uint32 cib;
2348 
2349 	cib = cores_info->cib[coreidx];
2350 	return ((cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT);
2351 }
2352 
2353 #ifdef UART_TRAP_DBG
2354 void
2355 ai_dump_APB_Bridge_registers(const si_t *sih)
2356 {
2357 	aidmp_t *ai;
2358 	const si_info_t *sii = SI_INFO(sih);
2359 
2360 	ai = (aidmp_t *)(uintptr)sii->br_wrapba[0];
2361 	printf("APB Bridge 0\n");
2362 	printf("lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x",
2363 		R_REG(sii->osh, &ai->errlogaddrlo),
2364 		R_REG(sii->osh, &ai->errlogaddrhi),
2365 		R_REG(sii->osh, &ai->errlogid),
2366 		R_REG(sii->osh, &ai->errlogflags));
2367 	printf("\n status 0x%08x\n", R_REG(sii->osh, &ai->errlogstatus));
2368 }
2369 #endif /* UART_TRAP_DBG */
2370 
2371 void
2372 ai_force_clocks(const si_t *sih, uint clock_state)
2373 {
2374 	const si_info_t *sii = SI_INFO(sih);
2375 	aidmp_t *ai, *ai_sec = NULL;
2376 	volatile uint32 dummy;
2377 	uint32 ioctrl;
2378 	const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
2379 
2380 	ASSERT(GOODREGS(sii->curwrap));
2381 	ai = sii->curwrap;
2382 	if (cores_info->wrapba2[sii->curidx])
2383 		ai_sec = REG_MAP(cores_info->wrapba2[sii->curidx], SI_CORE_SIZE);
2384 
2385 	/* ensure there are no pending backplane operations */
2386 	SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
2387 
2388 	if (clock_state == FORCE_CLK_ON) {
2389 		ioctrl = R_REG(sii->osh, &ai->ioctrl);
2390 		W_REG(sii->osh, &ai->ioctrl, (ioctrl | SICF_FGC));
2391 		dummy = R_REG(sii->osh, &ai->ioctrl);
2392 		BCM_REFERENCE(dummy);
2393 		if (ai_sec) {
2394 			ioctrl = R_REG(sii->osh, &ai_sec->ioctrl);
2395 			W_REG(sii->osh, &ai_sec->ioctrl, (ioctrl | SICF_FGC));
2396 			dummy = R_REG(sii->osh, &ai_sec->ioctrl);
2397 			BCM_REFERENCE(dummy);
2398 		}
2399 	} else {
2400 		ioctrl = R_REG(sii->osh, &ai->ioctrl);
2401 		W_REG(sii->osh, &ai->ioctrl, (ioctrl & (~SICF_FGC)));
2402 		dummy = R_REG(sii->osh, &ai->ioctrl);
2403 		BCM_REFERENCE(dummy);
2404 		if (ai_sec) {
2405 			ioctrl = R_REG(sii->osh, &ai_sec->ioctrl);
2406 			W_REG(sii->osh, &ai_sec->ioctrl, (ioctrl & (~SICF_FGC)));
2407 			dummy = R_REG(sii->osh, &ai_sec->ioctrl);
2408 			BCM_REFERENCE(dummy);
2409 		}
2410 	}
2411 	/* ensure there are no pending backplane operations */
2412 	SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
2413 }
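/*
 * Illustrative sketch (hypothetical): bracket register accesses to a
 * possibly clock-gated core with forced gated clocks. FORCE_CLK_ON and
 * FORCE_CLK_OFF are assumed to be the two states defined for this API.
 *
 *	ai_force_clocks(sih, FORCE_CLK_ON);
 *	... access core registers ...
 *	ai_force_clocks(sih, FORCE_CLK_OFF);
 */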
2414 
2415 #ifdef DONGLEBUILD
2416 /*
2417  * This is not declared static const, although that would be the right thing
2418  * to do: if it were, the compile/link process would place it in a read-only
2419  * section.
2420  * This array identifies the registers that are dumped during trap
2421  * processing, and since the trap buffer usually reuses .rodata, it is left
2422  * as plain static for now.
2423  */
2424 static uint32 BCMPOST_TRAP_RODATA(wrapper_offsets_to_dump)[] = {
2425 	OFFSETOF(aidmp_t, ioctrlset),
2426 	OFFSETOF(aidmp_t, ioctrlclear),
2427 	OFFSETOF(aidmp_t, ioctrl),
2428 	OFFSETOF(aidmp_t, iostatus),
2429 	OFFSETOF(aidmp_t, ioctrlwidth),
2430 	OFFSETOF(aidmp_t, iostatuswidth),
2431 	OFFSETOF(aidmp_t, resetctrl),
2432 	OFFSETOF(aidmp_t, resetstatus),
2433 	OFFSETOF(aidmp_t, resetreadid),
2434 	OFFSETOF(aidmp_t, resetwriteid),
2435 	OFFSETOF(aidmp_t, errlogctrl),
2436 	OFFSETOF(aidmp_t, errlogdone),
2437 	OFFSETOF(aidmp_t, errlogstatus),
2438 	OFFSETOF(aidmp_t, errlogaddrlo),
2439 	OFFSETOF(aidmp_t, errlogaddrhi),
2440 	OFFSETOF(aidmp_t, errlogid),
2441 	OFFSETOF(aidmp_t, errloguser),
2442 	OFFSETOF(aidmp_t, errlogflags),
2443 	OFFSETOF(aidmp_t, intstatus),
2444 	OFFSETOF(aidmp_t, config),
2445 	OFFSETOF(aidmp_t, itipoobaout),
2446 	OFFSETOF(aidmp_t, itipoobbout),
2447 	OFFSETOF(aidmp_t, itipoobcout),
2448 	OFFSETOF(aidmp_t, itipoobdout)};
2449 
2450 #ifdef ETD
2451 
2452 /* Used to dump the wrapper registers for ETD when an AXI error happens.
2453  * This must match the layout of the structure hnd_ext_trap_bp_err_t.
2454  */
2455 static uint32 BCMPOST_TRAP_RODATA(etd_wrapper_offsets_axierr)[] = {
2456 	OFFSETOF(aidmp_t, ioctrl),
2457 	OFFSETOF(aidmp_t, iostatus),
2458 	OFFSETOF(aidmp_t, resetctrl),
2459 	OFFSETOF(aidmp_t, resetstatus),
2460 	OFFSETOF(aidmp_t, resetreadid),
2461 	OFFSETOF(aidmp_t, resetwriteid),
2462 	OFFSETOF(aidmp_t, errlogctrl),
2463 	OFFSETOF(aidmp_t, errlogdone),
2464 	OFFSETOF(aidmp_t, errlogstatus),
2465 	OFFSETOF(aidmp_t, errlogaddrlo),
2466 	OFFSETOF(aidmp_t, errlogaddrhi),
2467 	OFFSETOF(aidmp_t, errlogid),
2468 	OFFSETOF(aidmp_t, errloguser),
2469 	OFFSETOF(aidmp_t, errlogflags),
2470 	OFFSETOF(aidmp_t, itipoobaout),
2471 	OFFSETOF(aidmp_t, itipoobbout),
2472 	OFFSETOF(aidmp_t, itipoobcout),
2473 	OFFSETOF(aidmp_t, itipoobdout)};
2474 #endif /* ETD */
2475 
2476 /* wrapper function to access the global array wrapper_offsets_to_dump */
2477 static uint32
2478 BCMRAMFN(ai_get_sizeof_wrapper_offsets_to_dump)(void)
2479 {
2480 	return (sizeof(wrapper_offsets_to_dump));
2481 }
2482 
2483 static uint32
2484 BCMPOSTTRAPRAMFN(ai_get_wrapper_base_addr)(uint32 **offset)
2485 {
2486 	uint32 arr_size = ARRAYSIZE(wrapper_offsets_to_dump);
2487 
2488 	*offset = &wrapper_offsets_to_dump[0];
2489 	return arr_size;
2490 }
2491 
2492 uint32
2493 BCMATTACHFN(ai_wrapper_dump_buf_size)(const si_t *sih)
2494 {
2495 	uint32 buf_size = 0;
2496 	uint32 wrapper_count = 0;
2497 	const si_info_t *sii = SI_INFO(sih);
2498 
2499 	wrapper_count = sii->axi_num_wrappers;
2500 	if (wrapper_count == 0)
2501 		return 0;
2502 
2503 	/* cnt indicates how many registers; tag_id 0 indicates these are */
2504 	/* address/value pairs */
2505 	buf_size += 2 * (ai_get_sizeof_wrapper_offsets_to_dump() * wrapper_count);
2506 
2507 	return buf_size;
2508 }
2509 
2510 static uint32*
2511 BCMPOSTTRAPFN(ai_wrapper_dump_binary_one)(const si_info_t *sii, uint32 *p32, uint32 wrap_ba)
2512 {
2513 	uint i;
2514 	uint32 *addr;
2515 	uint32 arr_size;
2516 	uint32 *offset_base;
2517 
2518 	arr_size = ai_get_wrapper_base_addr(&offset_base);
2519 
2520 	for (i = 0; i < arr_size; i++) {
2521 		addr = (uint32 *)(wrap_ba + *(offset_base + i));
2522 		*p32++ = (uint32)addr;
2523 		*p32++ = R_REG(sii->osh, addr);
2524 	}
2525 	return p32;
2526 }
2527 
2528 #if defined(ETD)
2529 static uint32
2530 BCMPOSTTRAPRAMFN(ai_get_wrapper_base_addr_etd_axierr)(uint32 **offset)
2531 {
2532 	uint32 arr_size = ARRAYSIZE(etd_wrapper_offsets_axierr);
2533 
2534 	*offset = &etd_wrapper_offsets_axierr[0];
2535 	return arr_size;
2536 }
2537 
2538 uint32
2539 BCMPOSTTRAPFN(ai_wrapper_dump_last_timeout)(const si_t *sih, uint32 *error, uint32 *core,
2540 	uint32 *ba, uchar *p)
2541 {
2542 #if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC)
2543 	uint32 *p32;
2544 	uint32 wrap_ba = last_axi_error_wrap;
2545 	uint i;
2546 	uint32 *addr;
2547 
2548 	const si_info_t *sii = SI_INFO(sih);
2549 
2550 	if (last_axi_error != AXI_WRAP_STS_NONE)
2551 	{
2552 		if (wrap_ba)
2553 		{
2554 			uint32 arr_size;
2555 			uint32 *offset_base;
2556 			p32 = (uint32 *)p;
2557 
2558 			arr_size = ai_get_wrapper_base_addr_etd_axierr(&offset_base);
2559 			for (i = 0; i < arr_size; i++) {
2560 				addr = (uint32 *)(wrap_ba + *(offset_base + i));
2561 				*p32++ = R_REG(sii->osh, addr);
2562 			}
2563 		}
2564 		*error = last_axi_error;
2565 		*core = last_axi_error_core;
2566 		*ba = wrap_ba;
2567 	}
2568 #else
2569 	*error = 0;
2570 	*core = 0;
2571 	*ba = 0;
2572 #endif /* AXI_TIMEOUTS || AXI_TIMEOUTS_NIC */
2573 	return 0;
2574 }
2575 #endif /* ETD */
2576 
2577 uint32
2578 BCMPOSTTRAPFN(ai_wrapper_dump_binary)(const si_t *sih, uchar *p)
2579 {
2580 	uint32 *p32 = (uint32 *)p;
2581 	uint32 i;
2582 	const si_info_t *sii = SI_INFO(sih);
2583 
2584 	for (i = 0; i < sii->axi_num_wrappers; i++) {
2585 		p32 = ai_wrapper_dump_binary_one(sii, p32, sii->axi_wrapper[i].wrapper_addr);
2586 	}
2587 	return 0;
2588 }
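/*
 * Illustrative sketch (hypothetical): size and fill a wrapper register
 * dump; each register contributes an address/value pair of uint32s.
 * MALLOC and 'osh' follow this driver's OSL conventions; error handling
 * and the buffer consumer are elided.
 *
 *	uint32 sz = ai_wrapper_dump_buf_size(sih);
 *	uchar *buf = sz ? MALLOC(osh, sz) : NULL;
 *	if (buf != NULL)
 *		(void)ai_wrapper_dump_binary(sih, buf);
 */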
2589 
2590 bool
2591 BCMPOSTTRAPFN(ai_check_enable_backplane_log)(const si_t *sih)
2592 {
2593 #if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC)
2594 	if (g_disable_backplane_logs) {
2595 		return FALSE;
2596 	}
2597 	else {
2598 		return TRUE;
2599 	}
2600 #else /* AXI_TIMEOUTS || AXI_TIMEOUTS_NIC */
2601 	return FALSE;
2602 #endif /* AXI_TIMEOUTS || AXI_TIMEOUTS_NIC */
2603 }
2604 #endif /* DONGLEBUILD */
2605