1 /*
2 * Misc utility routines for accessing chip-specific features
3 * of the SiliconBackplane-based Broadcom chips.
4 *
5 * Copyright (C) 1999-2019, Broadcom.
6 *
7 * Unless you and Broadcom execute a separate written software license
8 * agreement governing use of this software, this software is licensed to you
9 * under the terms of the GNU General Public License version 2 (the "GPL"),
10 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11 * following added to such license:
12 *
13 * As a special exception, the copyright holders of this software give you
14 * permission to link this software with independent modules, and to copy and
15 * distribute the resulting executable under terms of your choice, provided that
16 * you also meet, for each linked independent module, the terms and conditions
17 * of the license of that module. An independent module is a module which is
18 * not derived from this software. The special exception does not apply to any
19 * modifications of the software.
20 *
21 * Notwithstanding the above, under no circumstances may you combine this
22 * software in any way with any other Broadcom software provided under a license
23 * other than the GPL, without Broadcom's express prior written consent.
24 *
25 *
26 * <<Broadcom-WL-IPTag/Open:>>
27 *
28 * $Id: aiutils.c 823201 2019-06-03 03:49:36Z $
29 */
30 #include <bcm_cfg.h>
31 #include <typedefs.h>
32 #include <bcmdefs.h>
33 #include <osl.h>
34 #include <bcmutils.h>
35 #include <siutils.h>
36 #include <hndsoc.h>
37 #include <sbchipc.h>
38 #include <pcicfg.h>
39
40 #include "siutils_priv.h"
41 #include <bcmdevs.h>
42
43 #define BCM53573_DMP() (0)
44 #define BCM4707_DMP() (0)
45 #define PMU_DMP() (0)
46 #define GCI_DMP() (0)
47
48 #if defined(BCM_BACKPLANE_TIMEOUT)
49 static bool ai_get_apb_bridge(si_t *sih, uint32 coreidx, uint32 *apb_id,
50 uint32 *apb_coreuinit);
51 #endif /* BCM_BACKPLANE_TIMEOUT */
52
53 #if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
54 static void ai_reset_axi_to(si_info_t *sii, aidmp_t *ai);
55 #endif /* defined (AXI_TIMEOUTS) || defined (BCM_BACKPLANE_TIMEOUT) */
56
57 /* EROM parsing */
58
59 static uint32 get_erom_ent(si_t *sih, uint32 **eromptr, uint32 mask,
60 uint32 match)
61 {
62 uint32 ent;
63 uint inv = 0, nom = 0;
64 uint32 size = 0;
65
66 while (TRUE) {
67 ent = R_REG(si_osh(sih), *eromptr);
68 (*eromptr)++;
69
70 if (mask == 0) {
71 break;
72 }
73
74 if ((ent & ER_VALID) == 0) {
75 inv++;
76 continue;
77 }
78
79 if (ent == (ER_END | ER_VALID)) {
80 break;
81 }
82
83 if ((ent & mask) == match) {
84 break;
85 }
86
87 /* escape condition: stop if the scan exceeds the max EROM size (invalid/corrupt EROM) */
88 size += sizeof(*eromptr);
89 if (size >= ER_SZ_MAX) {
90 SI_ERROR(("Failed to find end of EROM marker\n"));
91 break;
92 }
93
94 nom++;
95 }
96
97 SI_VMSG(("%s: Returning ent 0x%08x\n", __FUNCTION__, ent));
98 if (inv + nom) {
99 SI_VMSG((" after %d invalid and %d non-matching entries\n", inv, nom));
100 }
101 return ent;
102 }
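
/* Usage note (illustrative, derived from the callers in this file):
 * get_erom_ent() is driven by a (mask, match) pair --
 *
 *   get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);      scan to the next
 *                                                    component descriptor
 *   get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID); next valid entry
 *   get_erom_ent(sih, &eromptr, 0, 0);               read one entry,
 *                                                    no matching
 */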
103
104 static uint32 get_asd(si_t *sih, uint32 **eromptr, uint sp, uint ad, uint st,
105 uint32 *addrl, uint32 *addrh, uint32 *sizel,
106 uint32 *sizeh)
107 {
108 uint32 asd, sz, szd;
109
110 BCM_REFERENCE(ad);
111
112 asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
113 if (((asd & ER_TAG1) != ER_ADD) ||
114 (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
115 ((asd & AD_ST_MASK) != st)) {
116 /* This is not what we want, "push" it back */
117 (*eromptr)--;
118 return 0;
119 }
120 *addrl = asd & AD_ADDR_MASK;
121 if (asd & AD_AG32) {
122 *addrh = get_erom_ent(sih, eromptr, 0, 0);
123 } else {
124 *addrh = 0;
125 }
126 *sizeh = 0;
127 sz = asd & AD_SZ_MASK;
128 if (sz == AD_SZ_SZD) {
129 szd = get_erom_ent(sih, eromptr, 0, 0);
130 *sizel = szd & SD_SZ_MASK;
131 if (szd & SD_SG32) {
132 *sizeh = get_erom_ent(sih, eromptr, 0, 0);
133 }
134 } else {
135 *sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);
136 }
137
138 SI_VMSG((" SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n", sp, ad,
139 st, *sizeh, *sizel, *addrh, *addrl));
140
141 return asd;
142 }
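
/* Size decoding sketch (assuming AD_SZ_BASE is the 4KB minimum region):
 * when the descriptor carries no explicit size word (sz != AD_SZ_SZD),
 * the region size is a power-of-two multiple of the base, e.g.
 *
 *   sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);   encoded 1 -> 8KB
 */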
143
144 /* Parse the enumeration rom to identify all cores
145 * Erom content format can be found in:
146 * http://hwnbu-twiki.broadcom.com/twiki/pub/Mwgroup/ArmDocumentation/SystemDiscovery.pdf
147 */
148 void ai_scan(si_t *sih, void *regs, uint devid)
149 {
150 si_info_t *sii = SI_INFO(sih);
151 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
152 chipcregs_t *cc = (chipcregs_t *)regs;
153 uint32 erombase, *eromptr, *eromlim;
154 axi_wrapper_t *axi_wrapper = sii->axi_wrapper;
155
156 BCM_REFERENCE(devid);
157
158 erombase = R_REG(sii->osh, &cc->eromptr);
159
160 switch (BUSTYPE(sih->bustype)) {
161 case SI_BUS:
162 eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
163 break;
164
165 case PCI_BUS:
166 /* Set wrappers address */
167 sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE);
168
169 /* Now point the window at the erom */
170 OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 0x4, erombase);
171 eromptr = regs;
172 break;
173
174 #ifdef BCMSDIO
175 case SPI_BUS:
176 case SDIO_BUS:
177 eromptr = (uint32 *)(uintptr)erombase;
178 break;
179 #endif /* BCMSDIO */
180
181 case PCMCIA_BUS:
182 default:
183 SI_ERROR(("Don't know how to do AXI enumeration on bus %d\n",
184 sih->bustype));
185 ASSERT(0);
186 return;
187 }
188 eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
189 sii->axi_num_wrappers = 0;
190
191 SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim "
192 "= 0x%p\n",
193 OSL_OBFUSCATE_BUF(regs), erombase, OSL_OBFUSCATE_BUF(eromptr),
194 OSL_OBFUSCATE_BUF(eromlim)));
195 while (eromptr < eromlim) {
196 uint32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
197 uint32 mpd, asd, addrl, addrh, sizel, sizeh;
198 uint i, j, idx;
199 bool br;
200
201 br = FALSE;
202
203 /* Grok a component */
204 cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
205 if (cia == (ER_END | ER_VALID)) {
206 SI_VMSG(("Found END of erom after %d cores\n", sii->numcores));
207 return;
208 }
209
210 cib = get_erom_ent(sih, &eromptr, 0, 0);
211 if ((cib & ER_TAG) != ER_CI) {
212 SI_ERROR(("CIA not followed by CIB\n"));
213 goto error;
214 }
215
216 cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
217 mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
218 crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
219 nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
220 nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
221 nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
222 nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
223
224 #ifdef BCMDBG_SI
225 SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with "
226 "nmw = %d, "
227 "nsw = %d, nmp = %d & nsp = %d\n",
228 mfg, cid, crev, OSL_OBFUSCATE_BUF(eromptr - 1), nmw, nsw, nmp,
229 nsp));
230 #else
231 BCM_REFERENCE(crev);
232 #endif // endif
233
234 if (BCM4347_CHIP(sih->chip)) {
235 /* 4347 has more entries for the ARM core.
236 * This should apply to all chips, but it crashes on router
237 * platforms; temporary fix pending further analysis.
238 */
239 if (nsp == 0) {
240 continue;
241 }
242 } else {
243 /* Include Default slave wrapper for timeout monitoring */
244 if ((nsp == 0) ||
245 #if !defined(AXI_TIMEOUTS) && !defined(BCM_BACKPLANE_TIMEOUT)
246 ((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) ||
247 #else
248 ((CHIPTYPE(sii->pub.socitype) == SOCI_NAI) &&
249 (mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) ||
250 #endif /* !defined(AXI_TIMEOUTS) && !defined(BCM_BACKPLANE_TIMEOUT) */
251 FALSE) {
252 continue;
253 }
254 }
255
256 if ((nmw + nsw == 0)) {
257 /* A component which is not a core */
258 if (cid == OOB_ROUTER_CORE_ID) {
259 asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh,
260 &sizel, &sizeh);
261 if (asd != 0) {
262 if ((sii->oob_router != 0) && (sii->oob_router != addrl)) {
263 sii->oob_router1 = addrl;
264 } else {
265 sii->oob_router = addrl;
266 }
267 }
268 }
269 if (cid != NS_CCB_CORE_ID && cid != PMU_CORE_ID &&
270 cid != GCI_CORE_ID && cid != SR_CORE_ID && cid != HUB_CORE_ID &&
271 cid != HND_OOBR_CORE_ID) {
272 continue;
273 }
274 }
275
276 idx = sii->numcores;
277
278 cores_info->cia[idx] = cia;
279 cores_info->cib[idx] = cib;
280 cores_info->coreid[idx] = cid;
281
282 for (i = 0; i < nmp; i++) {
283 mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
284 if ((mpd & ER_TAG) != ER_MP) {
285 SI_ERROR(("Not enough MP entries for component 0x%x\n", cid));
286 goto error;
287 }
288 SI_VMSG((" Master port %d, mp: %d id: %d\n", i,
289 (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT,
290 (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT));
291 }
292
293 /* First Slave Address Descriptor should be port 0:
294 * the main register space for the core
295 */
296 asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel,
297 &sizeh);
298 if (asd == 0) {
299 do {
300 /* Try again to see if it is a bridge */
301 asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
302 &sizel, &sizeh);
303 if (asd != 0) {
304 br = TRUE;
305 } else {
306 if (br == TRUE) {
307 break;
308 } else if ((addrh != 0) || (sizeh != 0) ||
309 (sizel != SI_CORE_SIZE)) {
310 SI_ERROR(("addrh = 0x%x\t sizeh = 0x%x\t size1 ="
311 "0x%x\n",
312 addrh, sizeh, sizel));
313 SI_ERROR(("First Slave ASD for"
314 "core 0x%04x malformed "
315 "(0x%08x)\n",
316 cid, asd));
317 goto error;
318 }
319 }
320 } while (1);
321 }
322 cores_info->coresba[idx] = addrl;
323 cores_info->coresba_size[idx] = sizel;
324 /* Get any more ASDs in first port */
325 j = 1;
326 do {
327 asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
328 &sizel, &sizeh);
329 if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
330 cores_info->coresba2[idx] = addrl;
331 cores_info->coresba2_size[idx] = sizel;
332 }
333 j++;
334 } while (asd != 0);
335
336 /* Go through the ASDs for other slave ports */
337 for (i = 1; i < nsp; i++) {
338 j = 0;
339 do {
340 asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
341 &sizel, &sizeh);
342 /* To get the first base address of second slave port */
343 if ((asd != 0) && (i == 1) && (j == 0)) {
344 cores_info->csp2ba[idx] = addrl;
345 cores_info->csp2ba_size[idx] = sizel;
346 }
347 if (asd == 0) {
348 break;
349 }
350 j++;
351 } while (1);
352 if (j == 0) {
353 SI_ERROR((" SP %d has no address descriptors\n", i));
354 goto error;
355 }
356 }
357
358 /* Now get master wrappers */
359 for (i = 0; i < nmw; i++) {
360 asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl, &addrh,
361 &sizel, &sizeh);
362 if (asd == 0) {
363 SI_ERROR(("Missing descriptor for MW %d\n", i));
364 goto error;
365 }
366 if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
367 SI_ERROR(("Master wrapper %d is not 4KB\n", i));
368 goto error;
369 }
370 if (i == 0) {
371 cores_info->wrapba[idx] = addrl;
372 } else if (i == 1) {
373 cores_info->wrapba2[idx] = addrl;
374 } else if (i == 0x2) {
375 cores_info->wrapba3[idx] = addrl;
376 }
377
378 if (axi_wrapper && (sii->axi_num_wrappers < SI_MAX_AXI_WRAPPERS)) {
379 axi_wrapper[sii->axi_num_wrappers].mfg = mfg;
380 axi_wrapper[sii->axi_num_wrappers].cid = cid;
381 axi_wrapper[sii->axi_num_wrappers].rev = crev;
382 axi_wrapper[sii->axi_num_wrappers].wrapper_type =
383 AI_MASTER_WRAPPER;
384 axi_wrapper[sii->axi_num_wrappers].wrapper_addr = addrl;
385 sii->axi_num_wrappers++;
386 SI_VMSG(("MASTER WRAPPER: %d, mfg:%x, cid:%x,"
387 "rev:%x, addr:%x, size:%x\n",
388 sii->axi_num_wrappers, mfg, cid, crev, addrl, sizel));
389 }
390 }
391
392 /* And finally slave wrappers */
393 for (i = 0; i < nsw; i++) {
394 uint fwp = (nsp == 1) ? 0 : 1;
395 asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, &addrl,
396 &addrh, &sizel, &sizeh);
397
398 /* cache APB bridge wrapper address for set/clear timeout */
399 if ((mfg == MFGID_ARM) && (cid == APB_BRIDGE_ID)) {
400 ASSERT(sii->num_br < SI_MAXBR);
401 sii->br_wrapba[sii->num_br++] = addrl;
402 }
403
404 if (asd == 0) {
405 SI_ERROR(("Missing descriptor for SW %d\n", i));
406 goto error;
407 }
408 if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
409 SI_ERROR(("Slave wrapper %d is not 4KB\n", i));
410 goto error;
411 }
412 if ((nmw == 0) && (i == 0)) {
413 cores_info->wrapba[idx] = addrl;
414 } else if ((nmw == 0) && (i == 1)) {
415 cores_info->wrapba2[idx] = addrl;
416 } else if ((nmw == 0) && (i == 0x2)) {
417 cores_info->wrapba3[idx] = addrl;
418 }
419
420 /* Include all slave wrappers to the list to
421 * enable and monitor watchdog timeouts
422 */
423
424 if (axi_wrapper && (sii->axi_num_wrappers < SI_MAX_AXI_WRAPPERS)) {
425 axi_wrapper[sii->axi_num_wrappers].mfg = mfg;
426 axi_wrapper[sii->axi_num_wrappers].cid = cid;
427 axi_wrapper[sii->axi_num_wrappers].rev = crev;
428 axi_wrapper[sii->axi_num_wrappers].wrapper_type =
429 AI_SLAVE_WRAPPER;
430
431 /* Software WAR, as discussed with the hardware team, to ensure the
432 * proper slave wrapper base address is set for the 4364 chip ID.
433 * The enumerated address is 0x1810c000; correct it to
434 * 0x1810e000. This ensures the AXI default slave wrapper is
435 * registered along with the other slave wrapper cores, and is
436 * useful when generating trap info for a write to an invalid
437 * core / wrapper register.
438 */
439
440 if ((CHIPID(sih->chip) == BCM4364_CHIP_ID) &&
441 (cid == DEF_AI_COMP)) {
442 axi_wrapper[sii->axi_num_wrappers].wrapper_addr =
443 0x1810e000;
444 } else {
445 axi_wrapper[sii->axi_num_wrappers].wrapper_addr = addrl;
446 }
447
448 sii->axi_num_wrappers++;
449
450 SI_VMSG(("SLAVE WRAPPER: %d, mfg:%x, cid:%x,"
451 "rev:%x, addr:%x, size:%x\n",
452 sii->axi_num_wrappers, mfg, cid, crev, addrl, sizel));
453 }
454 }
455
456 #ifndef BCM_BACKPLANE_TIMEOUT
457 /* Don't record bridges */
458 if (br) {
459 continue;
460 }
461 #endif // endif
462
463 /* Done with core */
464 sii->numcores++;
465 }
466
467 SI_ERROR(("Reached end of erom without finding END\n"));
468
469 error:
470 sii->numcores = 0;
471 return;
472 }
473
474 #define AI_SETCOREIDX_MAPSIZE(coreid) \
475 (((coreid) == NS_CCB_CORE_ID) ? 15 * SI_CORE_SIZE : SI_CORE_SIZE)
476
477 /* This function changes the logical "focus" to the indicated core.
478 * Return the current core's virtual address.
479 */
480 static volatile void *_ai_setcoreidx(si_t *sih, uint coreidx, uint use_wrapn)
481 {
482 si_info_t *sii = SI_INFO(sih);
483 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
484 uint32 addr, wrap, wrap2, wrap3;
485 volatile void *regs;
486
487 if (coreidx >= MIN(sii->numcores, SI_MAXCORES)) {
488 return (NULL);
489 }
490
491 addr = cores_info->coresba[coreidx];
492 wrap = cores_info->wrapba[coreidx];
493 wrap2 = cores_info->wrapba2[coreidx];
494 wrap3 = cores_info->wrapba3[coreidx];
495
496 #ifdef BCM_BACKPLANE_TIMEOUT
497 /* No need to disable interrupts while entering/exiting APB bridge core */
498 if ((cores_info->coreid[coreidx] != APB_BRIDGE_CORE_ID) &&
499 (cores_info->coreid[sii->curidx] != APB_BRIDGE_CORE_ID))
500 #endif /* BCM_BACKPLANE_TIMEOUT */
501 {
502 /*
503 * If the user has provided an interrupt mask enabled function,
504 * then assert interrupts are disabled before switching the core.
505 */
506 ASSERT((sii->intrsenabled_fn == NULL) ||
507 !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
508 }
509
510 switch (BUSTYPE(sih->bustype)) {
511 case SI_BUS:
512 /* map new one */
513 if (!cores_info->regs[coreidx]) {
514 cores_info->regs[coreidx] = REG_MAP(
515 addr, AI_SETCOREIDX_MAPSIZE(cores_info->coreid[coreidx]));
516 ASSERT(GOODREGS(cores_info->regs[coreidx]));
517 }
518 sii->curmap = regs = cores_info->regs[coreidx];
519 if (!cores_info->wrappers[coreidx] && (wrap != 0)) {
520 cores_info->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
521 ASSERT(GOODREGS(cores_info->wrappers[coreidx]));
522 }
523 if (!cores_info->wrappers2[coreidx] && (wrap2 != 0)) {
524 cores_info->wrappers2[coreidx] = REG_MAP(wrap2, SI_CORE_SIZE);
525 ASSERT(GOODREGS(cores_info->wrappers2[coreidx]));
526 }
527 if (!cores_info->wrappers3[coreidx] && (wrap3 != 0)) {
528 cores_info->wrappers3[coreidx] = REG_MAP(wrap3, SI_CORE_SIZE);
529 ASSERT(GOODREGS(cores_info->wrappers3[coreidx]));
530 }
531
532 if (use_wrapn == 0x2) {
533 sii->curwrap = cores_info->wrappers3[coreidx];
534 } else if (use_wrapn == 1) {
535 sii->curwrap = cores_info->wrappers2[coreidx];
536 } else {
537 sii->curwrap = cores_info->wrappers[coreidx];
538 }
539 break;
540
541 case PCI_BUS:
542 #ifdef BCM_BACKPLANE_TIMEOUT
543 /* No need to set the BAR0 if core is APB Bridge.
544 * This is to reduce 2 PCI writes while checking for errlog
545 */
546 if (cores_info->coreid[coreidx] != APB_BRIDGE_CORE_ID)
547 #endif /* BCM_BACKPLANE_TIMEOUT */
548 {
549 /* point bar0 window */
550 OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 0x4, addr);
551 }
552
553 regs = sii->curmap;
554 /* point bar0 2nd 4KB window to the primary wrapper */
555 if (use_wrapn) {
556 wrap = wrap2;
557 }
558 if (PCIE_GEN2(sii)) {
559 OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_WIN2, 0x4, wrap);
560 } else {
561 OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN2, 0x4, wrap);
562 }
563 break;
564
565 #ifdef BCMSDIO
566 case SPI_BUS:
567 case SDIO_BUS:
568 sii->curmap = regs = (void *)((uintptr)addr);
569 if (use_wrapn) {
570 sii->curwrap = (void *)((uintptr)wrap2);
571 } else {
572 sii->curwrap = (void *)((uintptr)wrap);
573 }
574 break;
575 #endif /* BCMSDIO */
576
577 case PCMCIA_BUS:
578 default:
579 ASSERT(0);
580 regs = NULL;
581 break;
582 }
583
584 sii->curmap = regs;
585 sii->curidx = coreidx;
586
587 return regs;
588 }
589
590 volatile void *ai_setcoreidx(si_t *sih, uint coreidx)
591 {
592 return _ai_setcoreidx(sih, coreidx, 0);
593 }
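
/* Typical caller pattern (sketch; mirrors the PMU_DMP() path in
 * ai_flag() below): save the focus, switch, access, then restore --
 *
 *   uint origidx = si_coreidx(sih);
 *   ai_setcoreidx(sih, SI_CC_IDX);
 *   ... access the chipcommon core ...
 *   ai_setcoreidx(sih, origidx);
 */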
594
595 volatile void *ai_setcoreidx_2ndwrap(si_t *sih, uint coreidx)
596 {
597 return _ai_setcoreidx(sih, coreidx, 1);
598 }
599
600 volatile void *ai_setcoreidx_3rdwrap(si_t *sih, uint coreidx)
601 {
602 return _ai_setcoreidx(sih, coreidx, 0x2);
603 }
604
605 void ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size)
606 {
607 si_info_t *sii = SI_INFO(sih);
608 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
609 chipcregs_t *cc = NULL;
610 uint32 erombase, *eromptr, *eromlim;
611 uint i, j, cidx;
612 uint32 cia, cib, nmp, nsp;
613 uint32 asd, addrl, addrh, sizel, sizeh;
614
615 for (i = 0; i < sii->numcores; i++) {
616 if (cores_info->coreid[i] == CC_CORE_ID) {
617 cc = (chipcregs_t *)cores_info->regs[i];
618 break;
619 }
620 }
621 if (cc == NULL) {
622 goto error;
623 }
624
625 BCM_REFERENCE(erombase);
626 erombase = R_REG(sii->osh, &cc->eromptr);
627 eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
628 eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
629
630 cidx = sii->curidx;
631 cia = cores_info->cia[cidx];
632 cib = cores_info->cib[cidx];
633
634 nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
635 nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
636
637 /* scan for cores */
638 while (eromptr < eromlim) {
639 if ((get_erom_ent(sih, &eromptr, ER_TAG, ER_CI) == cia) &&
640 (get_erom_ent(sih, &eromptr, 0, 0) == cib)) {
641 break;
642 }
643 }
644
645 /* skip master ports */
646 for (i = 0; i < nmp; i++) {
647 get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
648 }
649
650 /* Skip ASDs in port 0 */
651 asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel,
652 &sizeh);
653 if (asd == 0) {
654 /* Try again to see if it is a bridge */
655 asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh, &sizel,
656 &sizeh);
657 }
658
659 j = 1;
660 do {
661 asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh, &sizel,
662 &sizeh);
663 j++;
664 } while (asd != 0);
665
666 /* Go through the ASDs for other slave ports */
667 for (i = 1; i < nsp; i++) {
668 j = 0;
669 do {
670 asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
671 &sizel, &sizeh);
672 if (asd == 0) {
673 break;
674 }
675
676 if (!asidx--) {
677 *addr = addrl;
678 *size = sizel;
679 return;
680 }
681 j++;
682 } while (1);
683
684 if (j == 0) {
685 SI_ERROR((" SP %d has no address descriptors\n", i));
686 break;
687 }
688 }
689
690 error:
691 *size = 0;
692 return;
693 }
694
695 /* Return the number of address spaces in current core */
696 int ai_numaddrspaces(si_t *sih)
697 {
698 BCM_REFERENCE(sih);
699 return 0x2;
700 }
701
702 /* Return the address of the nth address space in the current core
703 * Arguments
704 * sih : Pointer to struct si_t
705 * spidx : slave port index
706 * baidx : base address index
707 */
708 uint32 ai_addrspace(si_t *sih, uint spidx, uint baidx)
709 {
710 si_info_t *sii = SI_INFO(sih);
711 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
712 uint cidx;
713
714 cidx = sii->curidx;
715
716 if (spidx == CORE_SLAVE_PORT_0) {
717 if (baidx == CORE_BASE_ADDR_0) {
718 return cores_info->coresba[cidx];
719 } else if (baidx == CORE_BASE_ADDR_1) {
720 return cores_info->coresba2[cidx];
721 }
722 } else if (spidx == CORE_SLAVE_PORT_1) {
723 if (baidx == CORE_BASE_ADDR_0) {
724 return cores_info->csp2ba[cidx];
725 }
726 }
727
728 SI_ERROR(("%s: Need to parse the erom again to find %d base addr in %d "
729 "slave port\n",
730 __FUNCTION__, baidx, spidx));
731
732 return 0;
733 }
734
735 /* Return the size of the nth address space in the current core
736 * Arguments
737 * sih : Pointer to struct si_t
738 * spidx : slave port index
739 * baidx : base address index
740 */
741 uint32 ai_addrspacesize(si_t *sih, uint spidx, uint baidx)
742 {
743 si_info_t *sii = SI_INFO(sih);
744 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
745 uint cidx;
746
747 cidx = sii->curidx;
748 if (spidx == CORE_SLAVE_PORT_0) {
749 if (baidx == CORE_BASE_ADDR_0) {
750 return cores_info->coresba_size[cidx];
751 } else if (baidx == CORE_BASE_ADDR_1) {
752 return cores_info->coresba2_size[cidx];
753 }
754 } else if (spidx == CORE_SLAVE_PORT_1) {
755 if (baidx == CORE_BASE_ADDR_0) {
756 return cores_info->csp2ba_size[cidx];
757 }
758 }
759
760 SI_ERROR(("%s: Need to parse the erom again to find %d base addr in %d "
761 "slave port\n",
762 __FUNCTION__, baidx, spidx));
763
764 return 0;
765 }
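
/* Example (illustrative): fetch the base and size of the current core's
 * main register space, i.e. base address 0 on slave port 0 --
 *
 *   uint32 base = ai_addrspace(sih, CORE_SLAVE_PORT_0, CORE_BASE_ADDR_0);
 *   uint32 size = ai_addrspacesize(sih, CORE_SLAVE_PORT_0, CORE_BASE_ADDR_0);
 */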
766
767 uint ai_flag(si_t *sih)
768 {
769 si_info_t *sii = SI_INFO(sih);
770 aidmp_t *ai;
771
772 if (BCM4707_DMP()) {
773 SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n",
774 __FUNCTION__));
775 return sii->curidx;
776 }
777 if (BCM53573_DMP()) {
778 SI_ERROR(
779 ("%s: Attempting to read DMP registers on 53573\n", __FUNCTION__));
780 return sii->curidx;
781 }
782 if (PMU_DMP()) {
783 uint idx, flag;
784 idx = sii->curidx;
785 ai_setcoreidx(sih, SI_CC_IDX);
786 flag = ai_flag_alt(sih);
787 ai_setcoreidx(sih, idx);
788 return flag;
789 }
790
791 ai = sii->curwrap;
792 ASSERT(ai != NULL);
793
794 return (R_REG(sii->osh, &ai->oobselouta30) & 0x1f);
795 }
796
797 uint ai_flag_alt(si_t *sih)
798 {
799 si_info_t *sii = SI_INFO(sih);
800 aidmp_t *ai;
801
802 if (BCM4707_DMP()) {
803 SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n",
804 __FUNCTION__));
805 return sii->curidx;
806 }
807
808 ai = sii->curwrap;
809
810 return ((R_REG(sii->osh, &ai->oobselouta30) >> AI_OOBSEL_1_SHIFT) &
811 AI_OOBSEL_MASK);
812 }
813
814 void ai_setint(si_t *sih, int siflag)
815 {
816 BCM_REFERENCE(sih);
817 BCM_REFERENCE(siflag);
818 }
819
820 uint ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val)
821 {
822 si_info_t *sii = SI_INFO(sih);
823 uint32 *addr = (uint32 *)((uchar *)(sii->curwrap) + offset);
824
825 if (mask || val) {
826 uint32 w = R_REG(sii->osh, addr);
827 w &= ~mask;
828 w |= val;
829 W_REG(sii->osh, addr, w);
830 }
831 return (R_REG(sii->osh, addr));
832 }
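
/* Sketch (assumes the OFFSETOF macro from this codebase): read-modify-write
 * a field of the current wrapper, e.g. clear SICF_FGC in ioctrl --
 *
 *   ai_wrap_reg(sih, OFFSETOF(aidmp_t, ioctrl), SICF_FGC, 0);
 *
 * With mask == 0 and val == 0 this degenerates to a plain read.
 */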
833
834 uint ai_corevendor(si_t *sih)
835 {
836 si_info_t *sii = SI_INFO(sih);
837 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
838 uint32 cia;
839
840 cia = cores_info->cia[sii->curidx];
841 return ((cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT);
842 }
843
844 uint ai_corerev(si_t *sih)
845 {
846 si_info_t *sii = SI_INFO(sih);
847 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
848 uint32 cib;
849
850 cib = cores_info->cib[sii->curidx];
851 return ((cib & CIB_REV_MASK) >> CIB_REV_SHIFT);
852 }
853
854 uint ai_corerev_minor(si_t *sih)
855 {
856 return (ai_core_sflags(sih, 0, 0) >> SISF_MINORREV_D11_SHIFT) &
857 SISF_MINORREV_D11_MASK;
858 }
859
860 bool ai_iscoreup(si_t *sih)
861 {
862 si_info_t *sii = SI_INFO(sih);
863 aidmp_t *ai;
864
865 ai = sii->curwrap;
866
867 return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) ==
868 SICF_CLOCK_EN) &&
869 ((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0));
870 }
871
872 /*
873 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set
874 * operation, switch back to the original core, and return the new value.
875 *
876 * When using the silicon backplane, no fiddling with interrupts or core
877 * switches is needed.
878 *
879 * Also, when using pci/pcie, we can optimize away the core switching for pci
880 * registers and (on newer pci cores) chipcommon registers.
881 */
882 uint ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
883 {
884 uint origidx = 0;
885 volatile uint32 *r = NULL;
886 uint w;
887 uint intr_val = 0;
888 bool fast = FALSE;
889 si_info_t *sii = SI_INFO(sih);
890 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
891
892 ASSERT(GOODIDX(coreidx));
893 ASSERT(regoff < SI_CORE_SIZE);
894 ASSERT((val & ~mask) == 0);
895
896 if (coreidx >= SI_MAXCORES) {
897 return 0;
898 }
899
900 if (BUSTYPE(sih->bustype) == SI_BUS) {
901 /* If internal bus, we can always get at everything */
902 fast = TRUE;
903 /* map if does not exist */
904 if (!cores_info->regs[coreidx]) {
905 cores_info->regs[coreidx] =
906 REG_MAP(cores_info->coresba[coreidx], SI_CORE_SIZE);
907 ASSERT(GOODREGS(cores_info->regs[coreidx]));
908 }
909 r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] +
910 regoff);
911 } else if (BUSTYPE(sih->bustype) == PCI_BUS) {
912 /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc
913 */
914
915 if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
916 /* Chipc registers are mapped at 12KB */
917
918 fast = TRUE;
919 r = (volatile uint32 *)((volatile char *)sii->curmap +
920 PCI_16KB0_CCREGS_OFFSET + regoff);
921 } else if (sii->pub.buscoreidx == coreidx) {
922 /* pci registers are at either in the last 2KB of an 8KB window
923 * or, in pcie and pci rev 13 at 8KB
924 */
925 fast = TRUE;
926 if (SI_FAST(sii)) {
927 r = (volatile uint32 *)((volatile char *)sii->curmap +
928 PCI_16KB0_PCIREGS_OFFSET + regoff);
929 } else {
930 r = (volatile uint32 *)((volatile char *)sii->curmap +
931 ((regoff >= SBCONFIGOFF)
932 ? PCI_BAR0_PCISBR_OFFSET
933 : PCI_BAR0_PCIREGS_OFFSET) +
934 regoff);
935 }
936 }
937 }
938
939 if (!fast) {
940 INTR_OFF(sii, intr_val);
941
942 /* save current core index */
943 origidx = si_coreidx(&sii->pub);
944
945 /* switch core */
946 r = (volatile uint32 *)((volatile uchar *)ai_setcoreidx(&sii->pub,
947 coreidx) +
948 regoff);
949 }
950 ASSERT(r != NULL);
951
952 /* mask and set */
953 if (mask || val) {
954 w = (R_REG(sii->osh, r) & ~mask) | val;
955 W_REG(sii->osh, r, w);
956 }
957
958 /* readback */
959 w = R_REG(sii->osh, r);
960
961 if (!fast) {
962 /* restore core index */
963 if (origidx != coreidx) {
964 ai_setcoreidx(&sii->pub, origidx);
965 }
966
967 INTR_RESTORE(sii, intr_val);
968 }
969
970 return (w);
971 }
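
/* Usage sketch (hypothetical 'ticks' value, not defined here): strobe the
 * chipcommon watchdog through the safe core-switching path --
 *
 *   ai_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, ticks);
 *
 * On SI_BUS, or for chipcommon/pci cores on PCI_BUS, this takes the
 * 'fast' path above and no core switch occurs.
 */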
972
973 /*
974 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set
975 * operation, switch back to the original core, and return the new value.
976 *
977 * When using the silicon backplane, no fiddling with interrupts or core
978 * switches is needed.
979 *
980 * Also, when using pci/pcie, we can optimize away the core switching for pci
981 * registers and (on newer pci cores) chipcommon registers.
982 */
983 uint ai_corereg_writeonly(si_t *sih, uint coreidx, uint regoff, uint mask,
984 uint val)
985 {
986 uint origidx = 0;
987 volatile uint32 *r = NULL;
988 uint w = 0;
989 uint intr_val = 0;
990 bool fast = FALSE;
991 si_info_t *sii = SI_INFO(sih);
992 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
993
994 ASSERT(GOODIDX(coreidx));
995 ASSERT(regoff < SI_CORE_SIZE);
996 ASSERT((val & ~mask) == 0);
997
998 if (coreidx >= SI_MAXCORES) {
999 return 0;
1000 }
1001
1002 if (BUSTYPE(sih->bustype) == SI_BUS) {
1003 /* If internal bus, we can always get at everything */
1004 fast = TRUE;
1005 /* map if does not exist */
1006 if (!cores_info->regs[coreidx]) {
1007 cores_info->regs[coreidx] =
1008 REG_MAP(cores_info->coresba[coreidx], SI_CORE_SIZE);
1009 ASSERT(GOODREGS(cores_info->regs[coreidx]));
1010 }
1011 r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] +
1012 regoff);
1013 } else if (BUSTYPE(sih->bustype) == PCI_BUS) {
1014 /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc
1015 */
1016
1017 if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
1018 /* Chipc registers are mapped at 12KB */
1019
1020 fast = TRUE;
1021 r = (volatile uint32 *)((volatile char *)sii->curmap +
1022 PCI_16KB0_CCREGS_OFFSET + regoff);
1023 } else if (sii->pub.buscoreidx == coreidx) {
1024 /* pci registers are at either in the last 2KB of an 8KB window
1025 * or, in pcie and pci rev 13 at 8KB
1026 */
1027 fast = TRUE;
1028 if (SI_FAST(sii)) {
1029 r = (volatile uint32 *)((volatile char *)sii->curmap +
1030 PCI_16KB0_PCIREGS_OFFSET + regoff);
1031 } else {
1032 r = (volatile uint32 *)((volatile char *)sii->curmap +
1033 ((regoff >= SBCONFIGOFF)
1034 ? PCI_BAR0_PCISBR_OFFSET
1035 : PCI_BAR0_PCIREGS_OFFSET) +
1036 regoff);
1037 }
1038 }
1039 }
1040
1041 if (!fast) {
1042 INTR_OFF(sii, intr_val);
1043
1044 /* save current core index */
1045 origidx = si_coreidx(&sii->pub);
1046
1047 /* switch core */
1048 r = (volatile uint32 *)((volatile uchar *)ai_setcoreidx(&sii->pub,
1049 coreidx) +
1050 regoff);
1051 }
1052 ASSERT(r != NULL);
1053
1054 /* mask and set */
1055 if (mask || val) {
1056 w = (R_REG(sii->osh, r) & ~mask) | val;
1057 W_REG(sii->osh, r, w);
1058 }
1059
1060 if (!fast) {
1061 /* restore core index */
1062 if (origidx != coreidx) {
1063 ai_setcoreidx(&sii->pub, origidx);
1064 }
1065
1066 INTR_RESTORE(sii, intr_val);
1067 }
1068
1069 return (w);
1070 }
1071
1072 /*
1073 * If there is no need for fiddling with interrupts or core switches (typically
1074 * silicon backplane registers, pci registers and chipcommon registers), this
1075 * function maps the register offset on this core to an address that can
1076 * be used with W_REG/R_REG directly.
1077 *
1078 * For accessing registers that would need a core switch, this function will
1079 * return NULL.
1080 */
1081 volatile uint32 *ai_corereg_addr(si_t *sih, uint coreidx, uint regoff)
1082 {
1083 volatile uint32 *r = NULL;
1084 bool fast = FALSE;
1085 si_info_t *sii = SI_INFO(sih);
1086 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
1087
1088 ASSERT(GOODIDX(coreidx));
1089 ASSERT(regoff < SI_CORE_SIZE);
1090
1091 if (coreidx >= SI_MAXCORES) {
1092 return 0;
1093 }
1094
1095 if (BUSTYPE(sih->bustype) == SI_BUS) {
1096 /* If internal bus, we can always get at everything */
1097 fast = TRUE;
1098 /* map if does not exist */
1099 if (!cores_info->regs[coreidx]) {
1100 cores_info->regs[coreidx] =
1101 REG_MAP(cores_info->coresba[coreidx], SI_CORE_SIZE);
1102 ASSERT(GOODREGS(cores_info->regs[coreidx]));
1103 }
1104 r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] +
1105 regoff);
1106 } else if (BUSTYPE(sih->bustype) == PCI_BUS) {
1107 /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc
1108 */
1109
1110 if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
1111 /* Chipc registers are mapped at 12KB */
1112
1113 fast = TRUE;
1114 r = (volatile uint32 *)((volatile char *)sii->curmap +
1115 PCI_16KB0_CCREGS_OFFSET + regoff);
1116 } else if (sii->pub.buscoreidx == coreidx) {
1117 /* pci registers are at either in the last 2KB of an 8KB window
1118 * or, in pcie and pci rev 13 at 8KB
1119 */
1120 fast = TRUE;
1121 if (SI_FAST(sii)) {
1122 r = (volatile uint32 *)((volatile char *)sii->curmap +
1123 PCI_16KB0_PCIREGS_OFFSET + regoff);
1124 } else {
1125 r = (volatile uint32 *)((volatile char *)sii->curmap +
1126 ((regoff >= SBCONFIGOFF)
1127 ? PCI_BAR0_PCISBR_OFFSET
1128 : PCI_BAR0_PCIREGS_OFFSET) +
1129 regoff);
1130 }
1131 }
1132 }
1133
1134 if (!fast) {
1135 ASSERT(sii->curidx == coreidx);
1136 r = (volatile uint32 *)((volatile uchar *)sii->curmap + regoff);
1137 }
1138
1139 return (r);
1140 }
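
/* Usage sketch: resolve the address once, then use W_REG/R_REG directly
 * (illustrative; valid only for registers that need no core switch) --
 *
 *   volatile uint32 *r = ai_corereg_addr(sih, SI_CC_IDX, regoff);
 *   if (r != NULL)
 *       W_REG(sii->osh, r, val);
 */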
1141
1142 void ai_core_disable(si_t *sih, uint32 bits)
1143 {
1144 si_info_t *sii = SI_INFO(sih);
1145 volatile uint32 dummy;
1146 uint32 status;
1147 aidmp_t *ai;
1148
1149 ASSERT(GOODREGS(sii->curwrap));
1150 ai = sii->curwrap;
1151
1152 /* if core is already in reset, just return */
1153 if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) {
1154 return;
1155 }
1156
1157 /* ensure there are no pending backplane operations */
1158 SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 0x12C);
1159
1160 /* if pending backplane ops still, try waiting longer */
1161 if (status != 0) {
1162 /* 300usecs was sufficient to allow backplane ops to clear for big
1163 * hammer */
1164 /* during driver load we may need more time */
1165 SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 0x2710);
1166 /* if still pending ops, continue on and try disable anyway */
1167 /* this is in big hammer path, so don't call wl_reinit in this case...
1168 */
1169 }
1170
1171 W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
1172 dummy = R_REG(sii->osh, &ai->resetctrl);
1173 BCM_REFERENCE(dummy);
1174 OSL_DELAY(1);
1175
1176 W_REG(sii->osh, &ai->ioctrl, bits);
1177 dummy = R_REG(sii->osh, &ai->ioctrl);
1178 BCM_REFERENCE(dummy);
1179 OSL_DELAY(0xA);
1180 }
1181
1182 /* reset and re-enable a core
1183 * inputs:
1184 * bits - core specific bits that are set during and after reset sequence
1185 * resetbits - core specific bits that are set only during reset sequence
1186 */
1187 static void _ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
1188 {
1189 si_info_t *sii = SI_INFO(sih);
1190 #if defined(UCM_CORRUPTION_WAR)
1191 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
1192 #endif // endif
1193 aidmp_t *ai;
1194 volatile uint32 dummy;
1195 uint loop_counter = 10;
1196
1197 ASSERT(GOODREGS(sii->curwrap));
1198 ai = sii->curwrap;
1199
1200 /* ensure there are no pending backplane operations */
1201 SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 0x12C);
1202
1203 /* put core into reset state */
1204 W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
1205 OSL_DELAY(0xA);
1206
1207 /* ensure there are no pending backplane operations */
1208 SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 0x12C);
1209
1210 W_REG(sii->osh, &ai->ioctrl, (bits | resetbits | SICF_FGC | SICF_CLOCK_EN));
1211 dummy = R_REG(sii->osh, &ai->ioctrl);
1212 BCM_REFERENCE(dummy);
1213 #ifdef UCM_CORRUPTION_WAR
1214 if (cores_info->coreid[sii->curidx] == D11_CORE_ID) {
1215 /* Reset FGC */
1216 OSL_DELAY(1);
1217 W_REG(sii->osh, &ai->ioctrl, (dummy & (~SICF_FGC)));
1218 }
1219 #endif /* UCM_CORRUPTION_WAR */
1220 /* ensure there are no pending backplane operations */
1221 SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 0x12C);
1222
1223 while (R_REG(sii->osh, &ai->resetctrl) != 0 && --loop_counter != 0) {
1224 /* ensure there are no pending backplane operations */
1225 SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 0x12C);
1226
1227 /* take core out of reset */
1228 W_REG(sii->osh, &ai->resetctrl, 0);
1229
1230 /* ensure there are no pending backplane operations */
1231 SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 0x12C);
1232 }
1233
1234 #ifdef UCM_CORRUPTION_WAR
1235 /* Pulse FGC after lifting Reset */
1236 W_REG(sii->osh, &ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
1237 #else
1238 W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN));
1239 #endif /* UCM_CORRUPTION_WAR */
1240 dummy = R_REG(sii->osh, &ai->ioctrl);
1241 BCM_REFERENCE(dummy);
1242 #ifdef UCM_CORRUPTION_WAR
1243 if (cores_info->coreid[sii->curidx] == D11_CORE_ID) {
1244 /* Reset FGC */
1245 OSL_DELAY(1);
1246 W_REG(sii->osh, &ai->ioctrl, (dummy & (~SICF_FGC)));
1247 }
1248 #endif /* UCM_CORRUPTION_WAR */
1249 OSL_DELAY(1);
1250 }
1251
1252 void ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
1253 {
1254 si_info_t *sii = SI_INFO(sih);
1255 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
1256 uint idx = sii->curidx;
1257
1258 if (cores_info->wrapba3[idx] != 0) {
1259 ai_setcoreidx_3rdwrap(sih, idx);
1260 _ai_core_reset(sih, bits, resetbits);
1261 ai_setcoreidx(sih, idx);
1262 }
1263
1264 if (cores_info->wrapba2[idx] != 0) {
1265 ai_setcoreidx_2ndwrap(sih, idx);
1266 _ai_core_reset(sih, bits, resetbits);
1267 ai_setcoreidx(sih, idx);
1268 }
1269
1270 _ai_core_reset(sih, bits, resetbits);
1271 }
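
/* Sketch of the common down/up sequence for a core (0/0 is the typical
 * "no core-specific flags" case; actual bits vary per core) --
 *
 *   ai_core_disable(sih, 0);
 *   ai_core_reset(sih, 0, 0);
 *   ASSERT(ai_iscoreup(sih));
 */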
1272
1273 void ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
1274 {
1275 si_info_t *sii = SI_INFO(sih);
1276 aidmp_t *ai;
1277 uint32 w;
1278
1279 if (BCM4707_DMP()) {
1280 SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
1281 __FUNCTION__));
1282 return;
1283 }
1284 if (PMU_DMP()) {
1285 SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n", __FUNCTION__));
1286 return;
1287 }
1288
1289 ASSERT(GOODREGS(sii->curwrap));
1290 ai = sii->curwrap;
1291
1292 ASSERT((val & ~mask) == 0);
1293
1294 if (mask || val) {
1295 w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
1296 W_REG(sii->osh, &ai->ioctrl, w);
1297 }
1298 }
1299
1300 uint32 ai_core_cflags(si_t *sih, uint32 mask, uint32 val)
1301 {
1302 si_info_t *sii = SI_INFO(sih);
1303 aidmp_t *ai;
1304 uint32 w;
1305
1306 if (BCM4707_DMP()) {
1307 SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
1308 __FUNCTION__));
1309 return 0;
1310 }
1311
1312 if (PMU_DMP()) {
1313 SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n", __FUNCTION__));
1314 return 0;
1315 }
1316 ASSERT(GOODREGS(sii->curwrap));
1317 ai = sii->curwrap;
1318
1319 ASSERT((val & ~mask) == 0);
1320
1321 if (mask || val) {
1322 w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
1323 W_REG(sii->osh, &ai->ioctrl, w);
1324 }
1325
1326 return R_REG(sii->osh, &ai->ioctrl);
1327 }
1328
1329 uint32 ai_core_sflags(si_t *sih, uint32 mask, uint32 val)
1330 {
1331 si_info_t *sii = SI_INFO(sih);
1332 aidmp_t *ai;
1333 uint32 w;
1334
1335 if (BCM4707_DMP()) {
1336 SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
1337 __FUNCTION__));
1338 return 0;
1339 }
1340 if (PMU_DMP()) {
1341 SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n", __FUNCTION__));
1342 return 0;
1343 }
1344
1345 ASSERT(GOODREGS(sii->curwrap));
1346 ai = sii->curwrap;
1347
1348 ASSERT((val & ~mask) == 0);
1349 ASSERT((mask & ~SISF_CORE_BITS) == 0);
1350
1351 if (mask || val) {
1352 w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val);
1353 W_REG(sii->osh, &ai->iostatus, w);
1354 }
1355
1356 return R_REG(sii->osh, &ai->iostatus);
1357 }
1358
1359 #if defined(BCMDBG_PHYDUMP)
1360 /* print interesting aidmp registers */
1361 void ai_dumpregs(si_t *sih, struct bcmstrbuf *b)
1362 {
1363 si_info_t *sii = SI_INFO(sih);
1364 osl_t *osh;
1365 aidmp_t *ai;
1366 uint i;
1367 uint32 prev_value = 0;
1368 axi_wrapper_t *axi_wrapper = sii->axi_wrapper;
1369 uint32 cfg_reg = 0;
1370 uint bar0_win_offset = 0;
1371
1372 osh = sii->osh;
1373
1374 /* Save and restore wrapper access window */
1375 if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
1376 if (PCIE_GEN2(sii)) {
1377 cfg_reg = PCIE2_BAR0_CORE2_WIN2;
1378 bar0_win_offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
1379 } else {
1380 cfg_reg = PCI_BAR0_WIN2;
1381 bar0_win_offset = PCI_BAR0_WIN2_OFFSET;
1382 }
1383
1384 prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 0x4);
1385 if (prev_value == ID32_INVALID) {
1386 SI_PRINT(("%s, PCI_BAR0_WIN2 - %x\n", __FUNCTION__, prev_value));
1387 return;
1388 }
1389 }
1390
1391 bcm_bprintf(
1392 b,
1393 "ChipNum:%x, ChipRev;%x, BusType:%x, BoardType:%x, BoardVendor:%x\n\n",
1394 sih->chip, sih->chiprev, sih->bustype, sih->boardtype,
1395 sih->boardvendor);
1396
1397 for (i = 0; i < sii->axi_num_wrappers; i++) {
1398 if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
1399 /* Set BAR0 window to bridge wrapper base address */
1400 OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 0x4, axi_wrapper[i].wrapper_addr);
1401
1402 ai = (aidmp_t *)((volatile uint8 *)sii->curmap + bar0_win_offset);
1403 } else {
1404 ai = (aidmp_t *)(uintptr)axi_wrapper[i].wrapper_addr;
1405 }
1406
1407 bcm_bprintf(b, "core 0x%x: core_rev:%d, %s_WR ADDR:%x \n",
1408 axi_wrapper[i].cid, axi_wrapper[i].rev,
1409 axi_wrapper[i].wrapper_type == AI_SLAVE_WRAPPER ? "SLAVE"
1410 : "MASTER",
1411 axi_wrapper[i].wrapper_addr);
1412
1413 /* BCM4707_DMP() */
1414 if (BCM4707_CHIP(CHIPID(sih->chip)) &&
1415 (axi_wrapper[i].cid == NS_CCB_CORE_ID)) {
1416 bcm_bprintf(b, "Skipping chipcommonb in 4707\n");
1417 continue;
1418 }
1419
1420 bcm_bprintf(b,
1421 "ioctrlset 0x%x ioctrlclear 0x%x ioctrl 0x%x iostatus 0x%x "
1422 "ioctrlwidth 0x%x iostatuswidth 0x%x\n"
1423 "resetctrl 0x%x resetstatus 0x%x resetreadid 0x%x "
1424 "resetwriteid 0x%x\n"
1425 "errlogctrl 0x%x errlogdone 0x%x errlogstatus 0x%x "
1426 "errlogaddrlo 0x%x errlogaddrhi 0x%x\n"
1427 "errlogid 0x%x errloguser 0x%x errlogflags 0x%x\n"
1428 "intstatus 0x%x config 0x%x itcr 0x%x\n\n",
1429 R_REG(osh, &ai->ioctrlset), R_REG(osh, &ai->ioctrlclear),
1430 R_REG(osh, &ai->ioctrl), R_REG(osh, &ai->iostatus),
1431 R_REG(osh, &ai->ioctrlwidth),
1432 R_REG(osh, &ai->iostatuswidth), R_REG(osh, &ai->resetctrl),
1433 R_REG(osh, &ai->resetstatus), R_REG(osh, &ai->resetreadid),
1434 R_REG(osh, &ai->resetwriteid), R_REG(osh, &ai->errlogctrl),
1435 R_REG(osh, &ai->errlogdone), R_REG(osh, &ai->errlogstatus),
1436 R_REG(osh, &ai->errlogaddrlo),
1437 R_REG(osh, &ai->errlogaddrhi), R_REG(osh, &ai->errlogid),
1438 R_REG(osh, &ai->errloguser), R_REG(osh, &ai->errlogflags),
1439 R_REG(osh, &ai->intstatus), R_REG(osh, &ai->config),
1440 R_REG(osh, &ai->itcr));
1441 }
1442
1443 /* Restore the initial wrapper space */
1444 if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
1445 if (prev_value && cfg_reg) {
1446 OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 0x4, prev_value);
1447 }
1448 }
1449 }
1450 #endif // endif
1451
1452 void ai_update_backplane_timeouts(si_t *sih, bool enable, uint32 timeout_exp,
1453 uint32 cid)
1454 {
1455 #if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
1456 si_info_t *sii = SI_INFO(sih);
1457 aidmp_t *ai;
1458 uint32 i;
1459 axi_wrapper_t *axi_wrapper = sii->axi_wrapper;
1460 uint32 errlogctrl =
1461 (enable << AIELC_TO_ENAB_SHIFT) |
1462 ((timeout_exp << AIELC_TO_EXP_SHIFT) & AIELC_TO_EXP_MASK);
1463
1464 #ifdef BCM_BACKPLANE_TIMEOUT
1465 uint32 prev_value = 0;
1466 osl_t *osh = sii->osh;
1467 uint32 cfg_reg = 0;
1468 uint32 offset = 0;
1469 #endif /* BCM_BACKPLANE_TIMEOUT */
1470
1471 if ((sii->axi_num_wrappers == 0) ||
1472 #ifdef BCM_BACKPLANE_TIMEOUT
1473 (!PCIE(sii)) ||
1474 #endif /* BCM_BACKPLANE_TIMEOUT */
1475 FALSE) {
1476 SI_VMSG((" %s, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d, ID:%x\n",
1477 __FUNCTION__, sii->axi_num_wrappers, PCIE(sii),
1478 BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
1479 return;
1480 }
1481
1482 #ifdef BCM_BACKPLANE_TIMEOUT
1483 /* Save and restore the wrapper access window */
1484 if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
1485 if (PCIE_GEN1(sii)) {
1486 cfg_reg = PCI_BAR0_WIN2;
1487 offset = PCI_BAR0_WIN2_OFFSET;
1488 } else if (PCIE_GEN2(sii)) {
1489 cfg_reg = PCIE2_BAR0_CORE2_WIN2;
1490 offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
1491 } else {
1492 ASSERT(!"!PCIE_GEN1 && !PCIE_GEN2");
1493 }
1494
1495 prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 0x4);
1496 if (prev_value == ID32_INVALID) {
1497 SI_PRINT(("%s, PCI_BAR0_WIN2 - %x\n", __FUNCTION__, prev_value));
1498 return;
1499 }
1500 }
1501 #endif /* BCM_BACKPLANE_TIMEOUT */
1502
1503 for (i = 0; i < sii->axi_num_wrappers; ++i) {
1504 if (axi_wrapper[i].wrapper_type != AI_SLAVE_WRAPPER) {
1505 SI_VMSG(("SKIP ENABLE BPT: MFG:%x, CID:%x, ADDR:%x\n",
1506 axi_wrapper[i].mfg, axi_wrapper[i].cid,
1507 axi_wrapper[i].wrapper_addr));
1508 continue;
1509 }
1510
1511 /* Update only given core if requested */
1512 if ((cid != 0) && (axi_wrapper[i].cid != cid)) {
1513 continue;
1514 }
1515
1516 #ifdef BCM_BACKPLANE_TIMEOUT
1517 if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
1518 /* Set BAR0_CORE2_WIN2 to bridge wrapper base address */
1519 OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 0x4, axi_wrapper[i].wrapper_addr);
1520
1521 /* set AI to BAR0 + Offset corresponding to Gen1 or gen2 */
1522 ai = (aidmp_t *)(DISCARD_QUAL(sii->curmap, uint8) + offset);
1523 } else
1524 #endif /* BCM_BACKPLANE_TIMEOUT */
1525 {
1526 ai = (aidmp_t *)(uintptr)axi_wrapper[i].wrapper_addr;
1527 }
1528
1529 W_REG(sii->osh, &ai->errlogctrl, errlogctrl);
1530
1531 SI_VMSG(("ENABLED BPT: MFG:%x, CID:%x, ADDR:%x, ERR_CTRL:%x\n",
1532 axi_wrapper[i].mfg, axi_wrapper[i].cid,
1533 axi_wrapper[i].wrapper_addr,
1534 R_REG(sii->osh, &ai->errlogctrl)));
1535 }
1536
1537 #ifdef BCM_BACKPLANE_TIMEOUT
1538 /* Restore the initial wrapper space */
1539 if (prev_value) {
1540 OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 0x4, prev_value);
1541 }
1542 #endif /* BCM_BACKPLANE_TIMEOUT */
1543
1544 #endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */
1545 }
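
/* Note: timeout_exp is an exponent, not a cycle count -- per the
 * errlogctrl encoding above, the hardware timeout is on the order of
 * 2^timeout_exp backplane cycles.  Hypothetical calls --
 *
 *   ai_update_backplane_timeouts(sih, TRUE, 9, 0);   enable, ~2^9 cycles
 *   ai_update_backplane_timeouts(sih, FALSE, 0, 0);  disable on all cores
 */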
1546
1547 #if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
1548
1549 /* slave error is ignored, so account for those cases */
1550 static uint32 si_ignore_errlog_cnt = 0;
1551
1552 static bool ai_ignore_errlog(si_info_t *sii, aidmp_t *ai, uint32 lo_addr,
1553 uint32 hi_addr, uint32 err_axi_id, uint32 errsts)
1554 {
1555 uint32 axi_id;
1556 #ifdef BCMPCIE_BTLOG
1557 uint32 axi_id2 = BCM4347_UNUSED_AXI_ID;
1558 #endif /* BCMPCIE_BTLOG */
1559 uint32 ignore_errsts = AIELS_SLAVE_ERR;
1560 uint32 ignore_hi = BT_CC_SPROM_BADREG_HI;
1561 uint32 ignore_lo = BT_CC_SPROM_BADREG_LO;
1562 uint32 ignore_size = BT_CC_SPROM_BADREG_SIZE;
1563
1564 /* ignore the BT slave errors if the errlog is to chipcommon addr 0x190 */
1565 switch (CHIPID(sii->pub.chip)) {
1566 case BCM4350_CHIP_ID:
1567 axi_id = BCM4350_BT_AXI_ID;
1568 break;
1569 case BCM4345_CHIP_ID:
1570 axi_id = BCM4345_BT_AXI_ID;
1571 break;
1572 case BCM4349_CHIP_GRPID:
1573 axi_id = BCM4349_BT_AXI_ID;
1574 break;
1575 case BCM4364_CHIP_ID:
1576 case BCM4373_CHIP_ID:
1577 axi_id = BCM4364_BT_AXI_ID;
1578 break;
1579 #ifdef BCMPCIE_BTLOG
1580 case BCM4347_CHIP_ID:
1581 case BCM4357_CHIP_ID:
1582 axi_id = BCM4347_CC_AXI_ID;
1583 axi_id2 = BCM4347_PCIE_AXI_ID;
1584 ignore_errsts = AIELS_TIMEOUT;
1585 ignore_hi = BCM4347_BT_ADDR_HI;
1586 ignore_lo = BCM4347_BT_ADDR_LO;
1587 ignore_size = BCM4347_BT_SIZE;
1588 break;
1589 #endif /* BCMPCIE_BTLOG */
1590
1591 default:
1592 return FALSE;
1593 }
1594
1595 /* AXI ID check */
1596 err_axi_id &= AI_ERRLOGID_AXI_ID_MASK;
1597 if (!(err_axi_id == axi_id ||
1598 #ifdef BCMPCIE_BTLOG
1599 (axi_id2 != BCM4347_UNUSED_AXI_ID && err_axi_id == axi_id2)))
1600 #else
1601 FALSE))
1602 #endif /* BCMPCIE_BTLOG */
1603 return FALSE;
1604
1605 /* slave errors */
1606 if ((errsts & AIELS_TIMEOUT_MASK) != ignore_errsts) {
1607 return FALSE;
1608 }
1609
1610 /* address range check */
1611 if ((hi_addr != ignore_hi) || (lo_addr < ignore_lo) ||
1612 (lo_addr >= (ignore_lo + ignore_size))) {
1613 return FALSE;
1614 }
1615
1616 #ifdef BCMPCIE_BTLOG
1617 if (ignore_errsts == AIELS_TIMEOUT) {
1618 /* reset AXI timeout */
1619 ai_reset_axi_to(sii, ai);
1620 }
1621 #endif /* BCMPCIE_BTLOG */
1622
1623 return TRUE;
1624 }
1625 #endif /* defined (AXI_TIMEOUTS) || defined (BCM_BACKPLANE_TIMEOUT) */
1626
1627 #ifdef BCM_BACKPLANE_TIMEOUT
1628
1629 /* Function to return the APB bridge details corresponding to the core */
1630 static bool ai_get_apb_bridge(si_t *sih, uint32 coreidx, uint32 *apb_id,
1631 uint32 *apb_coreuinit)
1632 {
1633 uint i;
1634 uint32 core_base, core_end;
1635 si_info_t *sii = SI_INFO(sih);
1636 static uint32 coreidx_cached = 0, apb_id_cached = 0,
1637 apb_coreunit_cached = 0;
1638 uint32 tmp_coreunit = 0;
1639 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
1640
1641 if (coreidx >= MIN(sii->numcores, SI_MAXCORES)) {
1642 return FALSE;
1643 }
1644
1645 /* Most of the time the APB bridge query is for the d11 core.
1646 * Cache the last lookup and return the cached result on a hit
1647 * rather than iterating the table.
1648 */
1649 if (coreidx_cached == coreidx) {
1650 *apb_id = apb_id_cached;
1651 *apb_coreuinit = apb_coreunit_cached;
1652 return TRUE;
1653 }
1654
1655 core_base = cores_info->coresba[coreidx];
1656 core_end = core_base + cores_info->coresba_size[coreidx];
1657
1658 for (i = 0; i < sii->numcores; i++) {
1659 if (cores_info->coreid[i] == APB_BRIDGE_ID) {
1660 uint32 apb_base;
1661 uint32 apb_end;
1662
1663 apb_base = cores_info->coresba[i];
1664 apb_end = apb_base + cores_info->coresba_size[i];
1665
1666 if ((core_base >= apb_base) && (core_end <= apb_end)) {
1667 /* Current core is attached to this APB bridge */
1668 *apb_id = apb_id_cached = APB_BRIDGE_ID;
1669 *apb_coreuinit = apb_coreunit_cached = tmp_coreunit;
1670 coreidx_cached = coreidx;
1671 return TRUE;
1672 }
1673 /* Increment the coreunit */
1674 tmp_coreunit++;
1675 }
1676 }
1677
1678 return FALSE;
1679 }
1680
1681 uint32 ai_clear_backplane_to_fast(si_t *sih, void *addr)
1682 {
1683 si_info_t *sii = SI_INFO(sih);
1684 volatile void *curmap = sii->curmap;
1685 bool core_reg = FALSE;
1686
1687 /* Use fast path only for core register access */
1688 if (((uintptr)addr >= (uintptr)curmap) &&
1689 ((uintptr)addr < ((uintptr)curmap + SI_CORE_SIZE))) {
1690 /* address being accessed is within current core reg map */
1691 core_reg = TRUE;
1692 }
1693
1694 if (core_reg) {
1695 uint32 apb_id, apb_coreuinit;
1696
1697 if (ai_get_apb_bridge(sih, si_coreidx(&sii->pub), &apb_id,
1698 &apb_coreuinit) == TRUE) {
1699 /* Found the APB bridge corresponding to current core,
1700 * Check for bus errors in APB wrapper
1701 */
1702 return ai_clear_backplane_to_per_core(sih, apb_id, apb_coreuinit,
1703 NULL);
1704 }
1705 }
1706
1707 /* Default is to poll for errors on all slave wrappers */
1708 return si_clear_backplane_to(sih);
1709 }
1710 #endif /* BCM_BACKPLANE_TIMEOUT */
1711
1712 #if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
1713 static bool g_disable_backplane_logs = FALSE;
1714
1715 #if defined(ETD)
1716 static uint32 last_axi_error = AXI_WRAP_STS_NONE;
1717 static uint32 last_axi_error_core = 0;
1718 static uint32 last_axi_error_wrap = 0;
1719 #endif /* ETD */
1720
1721 /*
1722 * API to clear the back plane timeout per core.
1723 * Caller may pass an optional wrapper address. If present this will be used as
1724 * the wrapper base address. If wrapper base address is provided then caller
1725 * must provide the coreid also.
1726 * If both coreid and wrapper is zero, then err status of current bridge
1727 * will be verified.
1728 */
1729 uint32 ai_clear_backplane_to_per_core(si_t *sih, uint coreid, uint coreunit,
1730 void *wrap)
1731 {
1732 int ret = AXI_WRAP_STS_NONE;
1733 aidmp_t *ai = NULL;
1734 uint32 errlog_status = 0;
1735 si_info_t *sii = SI_INFO(sih);
1736 uint32 errlog_lo = 0, errlog_hi = 0, errlog_id = 0, errlog_flags = 0;
1737 uint32 current_coreidx = si_coreidx(sih);
1738 uint32 target_coreidx = si_findcoreidx(sih, coreid, coreunit);
1739
1740 #if defined(BCM_BACKPLANE_TIMEOUT)
1741 si_axi_error_t *axi_error =
1742 sih->err_info ? &sih->err_info->axi_error[sih->err_info->count] : NULL;
1743 #endif /* BCM_BACKPLANE_TIMEOUT */
1744 bool restore_core = FALSE;
1745
1746 if ((sii->axi_num_wrappers == 0) ||
1747 #ifdef BCM_BACKPLANE_TIMEOUT
1748 (!PCIE(sii)) ||
1749 #endif /* BCM_BACKPLANE_TIMEOUT */
1750 FALSE) {
1751 SI_VMSG((" %s, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d, ID:%x\n",
1752 __FUNCTION__, sii->axi_num_wrappers, PCIE(sii),
1753 BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
1754 return AXI_WRAP_STS_NONE;
1755 }
1756
1757 if (wrap != NULL) {
1758 ai = (aidmp_t *)wrap;
1759 } else if (coreid && (target_coreidx != current_coreidx)) {
1760 if (ai_setcoreidx(sih, target_coreidx) == NULL) {
1761 /* Unable to set the core */
1762 SI_PRINT(
1763 ("Set Code Failed: coreid:%x, unit:%d, target_coreidx:%d\n",
1764 coreid, coreunit, target_coreidx));
1765 errlog_lo = target_coreidx;
1766 ret = AXI_WRAP_STS_SET_CORE_FAIL;
1767 goto end;
1768 }
1769
1770 restore_core = TRUE;
1771 ai = (aidmp_t *)si_wrapperregs(sih);
1772 } else {
1773 /* Read error status of current wrapper */
1774 ai = (aidmp_t *)si_wrapperregs(sih);
1775
1776 /* Update CoreID to the current core's ID */
1777 coreid = si_coreid(sih);
1778 }
1779
1780 /* read error log status */
1781 errlog_status = R_REG(sii->osh, &ai->errlogstatus);
1782 if (errlog_status == ID32_INVALID) {
1783 /* Do not try to peek further */
1784 SI_PRINT(("%s, errlogstatus:%x - Slave Wrapper:%x\n", __FUNCTION__,
1785 errlog_status, coreid));
1786 ret = AXI_WRAP_STS_WRAP_RD_ERR;
1787 errlog_lo = (uint32)(uintptr)&ai->errlogstatus;
1788 goto end;
1789 }
1790
1791 if ((errlog_status & AIELS_TIMEOUT_MASK) != 0) {
1792 uint32 tmp;
1793 uint32 count = 0;
1794 /* set ErrDone to clear the condition */
1795 W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK);
1796
1797 /* SPINWAIT on errlogstatus timeout status bits */
1798 while ((tmp = R_REG(sii->osh, &ai->errlogstatus)) &
1799 AIELS_TIMEOUT_MASK) {
1800 if (tmp == ID32_INVALID) {
1801 SI_PRINT(("%s: prev errlogstatus:%x, errlogstatus:%x\n",
1802 __FUNCTION__, errlog_status, tmp));
1803 ret = AXI_WRAP_STS_WRAP_RD_ERR;
1804 errlog_lo = (uint32)(uintptr)&ai->errlogstatus;
1805 goto end;
1806 }
1807 /*
1808 * Clear again, to avoid getting stuck in the loop, if a new error
1809 * is logged after we cleared the first timeout
1810 */
1811 W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK);
1812
1813 count++;
1814 OSL_DELAY(0xA);
1815 if ((0xA * count) > AI_REG_READ_TIMEOUT) {
1816 errlog_status = tmp;
1817 break;
1818 }
1819 }
1820
1821 errlog_lo = R_REG(sii->osh, &ai->errlogaddrlo);
1822 errlog_hi = R_REG(sii->osh, &ai->errlogaddrhi);
1823 errlog_id = R_REG(sii->osh, &ai->errlogid);
1824 errlog_flags = R_REG(sii->osh, &ai->errlogflags);
1825
1826 /* we are already in the error path, so OK to check for the slave error
1827 */
1828 if (ai_ignore_errlog(sii, ai, errlog_lo, errlog_hi, errlog_id,
1829 errlog_status)) {
1830 si_ignore_errlog_cnt++;
1831 goto end;
1832 }
1833
1834 /* only reset APB Bridge on timeout (not slave error, or dec error) */
1835 switch (errlog_status & AIELS_TIMEOUT_MASK) {
1836 case AIELS_SLAVE_ERR:
1837 SI_PRINT(("AXI slave error\n"));
1838 ret = AXI_WRAP_STS_SLAVE_ERR;
1839 break;
1840
1841 case AIELS_TIMEOUT:
1842 ai_reset_axi_to(sii, ai);
1843 ret = AXI_WRAP_STS_TIMEOUT;
1844 break;
1845
1846 case AIELS_DECODE:
1847 SI_PRINT(("AXI decode error\n"));
1848 ret = AXI_WRAP_STS_DECODE_ERR;
1849 break;
1850 default:
1851 ASSERT(0); /* should be impossible */
1852 }

        SI_PRINT(("\tCoreID: %x\n", coreid));
        SI_PRINT(("\t errlog: lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x"
                  ", status 0x%08x\n",
                  errlog_lo, errlog_hi, errlog_id, errlog_flags,
                  errlog_status));
    }

end:
#if defined(ETD)
    if (ret != AXI_WRAP_STS_NONE) {
        last_axi_error = ret;
        last_axi_error_core = coreid;
        last_axi_error_wrap = (uint32)ai;
    }
#endif /* ETD */

#if defined(BCM_BACKPLANE_TIMEOUT)
    if (axi_error && (ret != AXI_WRAP_STS_NONE)) {
        axi_error->error = ret;
        axi_error->coreid = coreid;
        axi_error->errlog_lo = errlog_lo;
        axi_error->errlog_hi = errlog_hi;
        axi_error->errlog_id = errlog_id;
        axi_error->errlog_flags = errlog_flags;
        axi_error->errlog_status = errlog_status;
        sih->err_info->count++;

        if (sih->err_info->count == SI_MAX_ERRLOG_SIZE) {
            sih->err_info->count = SI_MAX_ERRLOG_SIZE - 1;
            SI_PRINT(("AXI Error log overflow\n"));
        }
    }
#endif /* BCM_BACKPLANE_TIMEOUT */

    if (restore_core) {
        if (ai_setcoreidx(sih, current_coreidx) == NULL) {
            /* Unable to restore the original core */
            return ID32_INVALID;
        }
    }

    return ret;
}
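
/*
 * Usage sketch (illustrative only, not driver code): checking a single
 * core's slave wrapper for a logged AXI error. The coreid value is a
 * placeholder; passing wrap == NULL makes the routine look up the
 * wrapper itself via ai_setcoreidx()/si_wrapperregs().
 *
 *     uint32 sts = ai_clear_backplane_to_per_core(sih, coreid, 0, NULL);
 *     if (sts & AXI_WRAP_STS_TIMEOUT) {
 *         // the APB bridge has already been reset; the access can be retried
 *     }
 */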

/* reset AXI timeout */
static void ai_reset_axi_to(si_info_t *sii, aidmp_t *ai)
{
    /* reset APB Bridge */
    OR_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
    /* sync write */
    (void)R_REG(sii->osh, &ai->resetctrl);
    /* clear Reset bit */
    AND_REG(sii->osh, &ai->resetctrl, ~(AIRC_RESET));
    /* sync write */
    (void)R_REG(sii->osh, &ai->resetctrl);
    SI_PRINT(("AXI timeout\n"));
    if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) {
        SI_PRINT(("reset failed on wrapper %p\n", ai));
        g_disable_backplane_logs = TRUE;
    }
}
#endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */

/*
 * This API polls all slave wrappers for errors and returns a bit map of
 * all reported errors.
 * return - bit map of
 *     AXI_WRAP_STS_NONE
 *     AXI_WRAP_STS_TIMEOUT
 *     AXI_WRAP_STS_SLAVE_ERR
 *     AXI_WRAP_STS_DECODE_ERR
 *     AXI_WRAP_STS_PCI_RD_ERR
 *     AXI_WRAP_STS_WRAP_RD_ERR
 *     AXI_WRAP_STS_SET_CORE_FAIL
 * On timeout detection, the corresponding bridge is reset to unblock
 * the bus.
 * The error reported by each wrapper can be retrieved with
 * si_get_axi_errlog_info(); see the usage sketch after this function.
 */
uint32 ai_clear_backplane_to(si_t *sih)
{
    uint32 ret = 0;
#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
    si_info_t *sii = SI_INFO(sih);
    aidmp_t *ai;
    uint32 i;
    axi_wrapper_t *axi_wrapper = sii->axi_wrapper;

#ifdef BCM_BACKPLANE_TIMEOUT
    uint32 prev_value = 0;
    osl_t *osh = sii->osh;
    uint32 cfg_reg = 0;
    uint32 offset = 0;

    if ((sii->axi_num_wrappers == 0) || (!PCIE(sii)))
#else
    if (sii->axi_num_wrappers == 0)
#endif /* BCM_BACKPLANE_TIMEOUT */
    {
        SI_VMSG((" %s, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d, ID:%x\n",
                 __FUNCTION__, sii->axi_num_wrappers, PCIE(sii),
                 BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
        return AXI_WRAP_STS_NONE;
    }

#ifdef BCM_BACKPLANE_TIMEOUT
    /* Save and restore the wrapper access window */
    if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
        if (PCIE_GEN1(sii)) {
            cfg_reg = PCI_BAR0_WIN2;
            offset = PCI_BAR0_WIN2_OFFSET;
        } else if (PCIE_GEN2(sii)) {
            cfg_reg = PCIE2_BAR0_CORE2_WIN2;
            offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
        } else {
            ASSERT(!"!PCIE_GEN1 && !PCIE_GEN2");
        }

        prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 0x4);
        if (prev_value == ID32_INVALID) {
            si_axi_error_t *axi_error = sih->err_info ?
                &sih->err_info->axi_error[sih->err_info->count] : NULL;

            SI_PRINT(("%s, PCI_BAR0_WIN2 - %x\n", __FUNCTION__, prev_value));
            if (axi_error) {
                axi_error->error = ret = AXI_WRAP_STS_PCI_RD_ERR;
                axi_error->errlog_lo = cfg_reg;
                sih->err_info->count++;

                if (sih->err_info->count == SI_MAX_ERRLOG_SIZE) {
                    sih->err_info->count = SI_MAX_ERRLOG_SIZE - 1;
                    SI_PRINT(("AXI Error log overflow\n"));
                }
            }

            return ret;
        }
    }
#endif /* BCM_BACKPLANE_TIMEOUT */

    for (i = 0; i < sii->axi_num_wrappers; ++i) {
        uint32 tmp;

        if (axi_wrapper[i].wrapper_type != AI_SLAVE_WRAPPER) {
            continue;
        }

#ifdef BCM_BACKPLANE_TIMEOUT
        if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
            /* Set BAR0_CORE2_WIN2 to the bridge wrapper base address */
            OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 0x4, axi_wrapper[i].wrapper_addr);

            /* set ai to BAR0 + the offset corresponding to Gen1 or Gen2 */
            ai = (aidmp_t *)(DISCARD_QUAL(sii->curmap, uint8) + offset);
        } else
#endif /* BCM_BACKPLANE_TIMEOUT */
        {
            ai = (aidmp_t *)(uintptr)axi_wrapper[i].wrapper_addr;
        }

        tmp = ai_clear_backplane_to_per_core(sih, axi_wrapper[i].cid, 0,
                                             DISCARD_QUAL(ai, void));

        ret |= tmp;
    }

#ifdef BCM_BACKPLANE_TIMEOUT
    /* Restore the initial wrapper space */
    if (prev_value) {
        OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 0x4, prev_value);
    }
#endif /* BCM_BACKPLANE_TIMEOUT */

#endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */

    return ret;
}
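
/*
 * Usage sketch (illustrative only, not driver code): decoding the bit
 * map returned by ai_clear_backplane_to(). The recovery policy shown
 * here is an assumption, not something this file mandates: a timeout is
 * treated as recoverable (the bridge has already been reset above),
 * anything else as fatal.
 *
 *     uint32 axi_sts = ai_clear_backplane_to(sih);
 *
 *     if (axi_sts & AXI_WRAP_STS_TIMEOUT) {
 *         // the bridge was reset by ai_reset_axi_to(); retry the access
 *     }
 *     if (axi_sts & (AXI_WRAP_STS_SLAVE_ERR | AXI_WRAP_STS_DECODE_ERR)) {
 *         // fetch details with si_get_axi_errlog_info() and escalate
 *     }
 */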

uint ai_num_slaveports(si_t *sih, uint coreidx)
{
    si_info_t *sii = SI_INFO(sih);
    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
    uint32 cib;

    cib = cores_info->cib[coreidx];
    return ((cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT);
}

#ifdef UART_TRAP_DBG
void ai_dump_APB_Bridge_registers(si_t *sih)
{
    aidmp_t *ai;
    si_info_t *sii = SI_INFO(sih);

    ai = (aidmp_t *)sii->br_wrapba[0];
    printf("APB Bridge 0\n");
    printf("lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x",
           R_REG(sii->osh, &ai->errlogaddrlo),
           R_REG(sii->osh, &ai->errlogaddrhi),
           R_REG(sii->osh, &ai->errlogid),
           R_REG(sii->osh, &ai->errlogflags));
    printf("\n status 0x%08x\n", R_REG(sii->osh, &ai->errlogstatus));
}
#endif /* UART_TRAP_DBG */

void ai_force_clocks(si_t *sih, uint clock_state)
{
    si_info_t *sii = SI_INFO(sih);
    aidmp_t *ai, *ai_sec = NULL;
    volatile uint32 dummy;
    uint32 ioctrl;
    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

    ASSERT(GOODREGS(sii->curwrap));
    ai = sii->curwrap;
    if (cores_info->wrapba2[sii->curidx]) {
        ai_sec = REG_MAP(cores_info->wrapba2[sii->curidx], SI_CORE_SIZE);
    }

    /* ensure there are no pending backplane operations */
    SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 0x12C);

    if (clock_state == FORCE_CLK_ON) {
        ioctrl = R_REG(sii->osh, &ai->ioctrl);
        W_REG(sii->osh, &ai->ioctrl, (ioctrl | SICF_FGC));
        dummy = R_REG(sii->osh, &ai->ioctrl);
        BCM_REFERENCE(dummy);
        if (ai_sec) {
            ioctrl = R_REG(sii->osh, &ai_sec->ioctrl);
            W_REG(sii->osh, &ai_sec->ioctrl, (ioctrl | SICF_FGC));
            dummy = R_REG(sii->osh, &ai_sec->ioctrl);
            BCM_REFERENCE(dummy);
        }
    } else {
        ioctrl = R_REG(sii->osh, &ai->ioctrl);
        W_REG(sii->osh, &ai->ioctrl, (ioctrl & (~SICF_FGC)));
        dummy = R_REG(sii->osh, &ai->ioctrl);
        BCM_REFERENCE(dummy);
        if (ai_sec) {
            ioctrl = R_REG(sii->osh, &ai_sec->ioctrl);
            W_REG(sii->osh, &ai_sec->ioctrl, (ioctrl & (~SICF_FGC)));
            dummy = R_REG(sii->osh, &ai_sec->ioctrl);
            BCM_REFERENCE(dummy);
        }
    }

    /* ensure there are no pending backplane operations */
    SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 0x12C);
}
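
/*
 * Usage sketch (illustrative only, not driver code): bracketing register
 * accesses that need a running clock with forced-gated-clock on/off.
 * FORCE_CLK_ON is used above; FORCE_CLK_OFF is assumed here to be its
 * complementary siutils constant.
 *
 *     ai_force_clocks(sih, FORCE_CLK_ON);
 *     // ... access core registers that require a forced clock ...
 *     ai_force_clocks(sih, FORCE_CLK_OFF);
 */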