/*
 * Misc utility routines for accessing chip-specific features
 * of the SiliconBackplane-based Broadcom chips.
 *
 * Copyright (C) 1999-2017, Broadcom Corporation
 *
 *      Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 *      As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module.  An independent module is a module which is not
 * derived from this software.  The special exception does not apply to any
 * modifications of the software.
 *
 *      Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: aiutils.c 625027 2016-03-15 08:20:18Z $
 */
#include <bcm_cfg.h>
#include <typedefs.h>
#include <bcmdefs.h>
#include <osl.h>
#include <bcmutils.h>
#include <siutils.h>
#include <hndsoc.h>
#include <sbchipc.h>
#include <pcicfg.h>

#include "siutils_priv.h"
#include <bcmdevs.h>

#define BCM5357_DMP() (0)
#define BCM53573_DMP() (0)
#define BCM4707_DMP() (0)
#define PMU_DMP() (0)
#define GCI_DMP() (0)
#define remap_coreid(sih, coreid)    (coreid)
#define remap_corerev(sih, corerev)    (corerev)

/* EROM parsing */

static uint32
get_erom_ent(si_t *sih, uint32 **eromptr, uint32 mask, uint32 match)
{
    uint32 ent;
    uint inv = 0, nom = 0;
    uint32 size = 0;

    while (TRUE) {
        ent = R_REG(si_osh(sih), *eromptr);
        (*eromptr)++;

        if (mask == 0)
            break;

        if ((ent & ER_VALID) == 0) {
            inv++;
            continue;
        }

        if (ent == (ER_END | ER_VALID))
            break;

        if ((ent & mask) == match)
            break;

        /* escape hatch: give up if we scan past the maximum EROM size
         * without finding a match or an end marker
         */
        size += sizeof(*eromptr);
        if (size >= ER_SZ_MAX) {
            SI_ERROR(("Failed to find end of EROM marker\n"));
            break;
        }

        nom++;
    }

    SI_VMSG(("%s: Returning ent 0x%08x\n", __FUNCTION__, ent));
    if (inv + nom) {
        SI_VMSG(("  after %d invalid and %d non-matching entries\n", inv, nom));
    }
    return ent;
}
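
/*
 * A minimal usage sketch (not part of the driver): this is the pattern
 * ai_scan() below uses to walk the EROM, assuming `p` already points at a
 * mapped enumeration ROM.
 *
 *     uint32 cia = get_erom_ent(sih, &p, ER_TAG, ER_CI);  // next component A
 *     if (cia == (ER_END | ER_VALID))
 *         return;                                          // end of EROM
 *     uint32 cib = get_erom_ent(sih, &p, 0, 0);            // raw read: CIB follows
 *
 * With mask == 0 the function returns the very next word unconditionally;
 * otherwise it skips invalid and non-matching entries until
 * (ent & mask) == match or the end marker is hit.
 */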

static uint32
get_asd(si_t *sih, uint32 **eromptr, uint sp, uint ad, uint st, uint32 *addrl, uint32 *addrh,
    uint32 *sizel, uint32 *sizeh)
{
    uint32 asd, sz, szd;

    BCM_REFERENCE(ad);

    asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
    if (((asd & ER_TAG1) != ER_ADD) ||
        (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
        ((asd & AD_ST_MASK) != st)) {
        /* This is not what we want, "push" it back */
        (*eromptr)--;
        return 0;
    }
    *addrl = asd & AD_ADDR_MASK;
    if (asd & AD_AG32)
        *addrh = get_erom_ent(sih, eromptr, 0, 0);
    else
        *addrh = 0;
    *sizeh = 0;
    sz = asd & AD_SZ_MASK;
    if (sz == AD_SZ_SZD) {
        szd = get_erom_ent(sih, eromptr, 0, 0);
        *sizel = szd & SD_SZ_MASK;
        if (szd & SD_SG32)
            *sizeh = get_erom_ent(sih, eromptr, 0, 0);
    } else
        *sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);

    SI_VMSG(("  SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
            sp, ad, st, *sizeh, *sizel, *addrh, *addrl));

    return asd;
}
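
/*
 * Usage sketch (hypothetical caller): fetch the primary register space of
 * the current component, i.e. slave port 0, address descriptor 0.
 *
 *     uint32 addrl, addrh, sizel, sizeh;
 *     uint32 asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
 *                          &addrl, &addrh, &sizel, &sizeh);
 *     if (asd == 0) {
 *         // no such descriptor at the cursor; eromptr was "pushed back"
 *     }
 *
 * A zero return means the next EROM entry was not an address descriptor of
 * the requested port/type, and the read is undone so the caller can retry
 * with a different type (e.g. AD_ST_BRIDGE).
 */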


/* parse the enumeration rom to identify all cores */
void
ai_scan(si_t *sih, void *regs, uint devid)
{
    si_info_t *sii = SI_INFO(sih);
    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
    chipcregs_t *cc = (chipcregs_t *)regs;
    uint32 erombase, *eromptr, *eromlim;
    axi_wrapper_t *axi_wrapper = sii->axi_wrapper;

    BCM_REFERENCE(devid);

    erombase = R_REG(sii->osh, &cc->eromptr);

    switch (BUSTYPE(sih->bustype)) {
    case SI_BUS:
        eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
        break;

    case PCI_BUS:
        /* Set wrappers address */
        sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE);

        /* Now point the window at the erom */
        OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase);
        eromptr = regs;
        break;

#ifdef BCMSDIO
    case SPI_BUS:
    case SDIO_BUS:
        eromptr = (uint32 *)(uintptr)erombase;
        break;
#endif    /* BCMSDIO */

    case PCMCIA_BUS:
    default:
        SI_ERROR(("Don't know how to do AXI enumeration on bus %d\n", sih->bustype));
        ASSERT(0);
        return;
    }
    eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
    sii->axi_num_wrappers = 0;

    SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n",
             OSL_OBFUSCATE_BUF(regs), erombase,
        OSL_OBFUSCATE_BUF(eromptr), OSL_OBFUSCATE_BUF(eromlim)));
    while (eromptr < eromlim) {
        uint32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
        uint32 mpd, asd, addrl, addrh, sizel, sizeh;
        uint i, j, idx;
        bool br;

        br = FALSE;

        /* Grok a component */
        cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
        if (cia == (ER_END | ER_VALID)) {
            SI_VMSG(("Found END of erom after %d cores\n", sii->numcores));
            return;
        }

        cib = get_erom_ent(sih, &eromptr, 0, 0);

        if ((cib & ER_TAG) != ER_CI) {
            SI_ERROR(("CIA not followed by CIB\n"));
            goto error;
        }

        cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
        mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
        crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
        nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
        nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
        nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
        nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;

#ifdef BCMDBG_SI
        SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, "
                 "nsw = %d, nmp = %d & nsp = %d\n",
                 mfg, cid, crev, OSL_OBFUSCATE_BUF(eromptr - 1), nmw, nsw, nmp, nsp));
#else
        BCM_REFERENCE(crev);
#endif

        if (CHIPID(sih->chip) == BCM4347_CHIP_ID) {
            /* 4347 has more entries for the ARM core.
             * This should apply to all chips, but it crashes on router
             * platforms; restricting it to 4347 is a temporary fix
             * pending further analysis.
             */
            if (nsp == 0)
                continue;
        } else {
            /* Include Default slave wrapper for timeout monitoring */
            if ((nsp == 0) ||
#if !defined(AXI_TIMEOUTS) && !defined(BCM_BACKPLANE_TIMEOUT)
                ((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) ||
#endif /* !defined(AXI_TIMEOUTS) && !defined(BCM_BACKPLANE_TIMEOUT) */
                FALSE) {
                continue;
            }
        }

        if ((nmw + nsw) == 0) {
            /* A component which is not a core */
            if (cid == OOB_ROUTER_CORE_ID) {
                asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
                    &addrl, &addrh, &sizel, &sizeh);
                if (asd != 0) {
                    sii->oob_router = addrl;
                }
            }
            if (cid != GMAC_COMMON_4706_CORE_ID && cid != NS_CCB_CORE_ID &&
                cid != PMU_CORE_ID && cid != GCI_CORE_ID)
                continue;
        }

        idx = sii->numcores;

        cores_info->cia[idx] = cia;
        cores_info->cib[idx] = cib;
        cores_info->coreid[idx] = remap_coreid(sih, cid);

        for (i = 0; i < nmp; i++) {
            mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
            if ((mpd & ER_TAG) != ER_MP) {
                SI_ERROR(("Not enough MP entries for component 0x%x\n", cid));
                goto error;
            }
            SI_VMSG(("  Master port %d, mp: %d id: %d\n", i,
                     (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT,
                     (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT));
        }

        /* First Slave Address Descriptor should be port 0:
         * the main register space for the core
         */
        asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
        if (asd == 0) {
            do {
                /* Try again to see if it is a bridge */
                asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
                              &sizel, &sizeh);
                if (asd != 0)
                    br = TRUE;
                else {
                    if (br == TRUE) {
                        break;
                    } else if ((addrh != 0) || (sizeh != 0) ||
                        (sizel != SI_CORE_SIZE)) {
                        SI_ERROR(("addrh = 0x%x\t sizeh = 0x%x\t sizel = "
                            "0x%x\n", addrh, sizeh, sizel));
                        SI_ERROR(("First Slave ASD for "
                            "core 0x%04x malformed "
                            "(0x%08x)\n", cid, asd));
                        goto error;
                    }
                }
            } while (1);
        }
        cores_info->coresba[idx] = addrl;
        cores_info->coresba_size[idx] = sizel;
        /* Get any more ASDs in port 0 */
        j = 1;
        do {
            asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
                          &sizel, &sizeh);
            if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
                cores_info->coresba2[idx] = addrl;
                cores_info->coresba2_size[idx] = sizel;
            }
            j++;
        } while (asd != 0);

        /* Go through the ASDs for other slave ports */
        for (i = 1; i < nsp; i++) {
            j = 0;
            do {
                asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
                              &sizel, &sizeh);

                if (asd == 0)
                    break;
                j++;
            } while (1);
            if (j == 0) {
                SI_ERROR((" SP %d has no address descriptors\n", i));
                goto error;
            }
        }

        /* Now get master wrappers */
        for (i = 0; i < nmw; i++) {
            asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl, &addrh,
                          &sizel, &sizeh);
            if (asd == 0) {
                SI_ERROR(("Missing descriptor for MW %d\n", i));
                goto error;
            }
            if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
                SI_ERROR(("Master wrapper %d is not 4KB\n", i));
                goto error;
            }
            if (i == 0)
                cores_info->wrapba[idx] = addrl;
            else if (i == 1)
                cores_info->wrapba2[idx] = addrl;

            ASSERT(sii->axi_num_wrappers < SI_MAX_AXI_WRAPPERS);
            axi_wrapper[sii->axi_num_wrappers].mfg = mfg;
            axi_wrapper[sii->axi_num_wrappers].cid = cid;
            axi_wrapper[sii->axi_num_wrappers].rev = crev;
            axi_wrapper[sii->axi_num_wrappers].wrapper_type = AI_MASTER_WRAPPER;
            axi_wrapper[sii->axi_num_wrappers].wrapper_addr = addrl;
            sii->axi_num_wrappers++;
            SI_VMSG(("MASTER WRAPPER: %d, mfg:%x, cid:%x, rev:%x, addr:%x, size:%x\n",
                    sii->axi_num_wrappers, mfg, cid, crev, addrl, sizel));
        }

        /* And finally slave wrappers */
        for (i = 0; i < nsw; i++) {
            uint fwp = (nsp == 1) ? 0 : 1;
            asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, &addrl, &addrh,
                          &sizel, &sizeh);

            /* cache APB bridge wrapper address for set/clear timeout */
            if ((mfg == MFGID_ARM) && (cid == APB_BRIDGE_ID)) {
                ASSERT(sii->num_br < SI_MAXBR);
                sii->br_wrapba[sii->num_br++] = addrl;
            }

            if (asd == 0) {
                SI_ERROR(("Missing descriptor for SW %d\n", i));
                goto error;
            }
            if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
                SI_ERROR(("Slave wrapper %d is not 4KB\n", i));
                goto error;
            }
            if ((nmw == 0) && (i == 0))
                cores_info->wrapba[idx] = addrl;
            else if ((nmw == 0) && (i == 1))
                cores_info->wrapba2[idx] = addrl;

            /* Include all slave wrappers in the list so watchdog
             * timeouts can be enabled and monitored
             */
            ASSERT(sii->axi_num_wrappers < SI_MAX_AXI_WRAPPERS);
            axi_wrapper[sii->axi_num_wrappers].mfg = mfg;
            axi_wrapper[sii->axi_num_wrappers].cid = cid;
            axi_wrapper[sii->axi_num_wrappers].rev = crev;
            axi_wrapper[sii->axi_num_wrappers].wrapper_type = AI_SLAVE_WRAPPER;
            axi_wrapper[sii->axi_num_wrappers].wrapper_addr = addrl;
            sii->axi_num_wrappers++;

            SI_VMSG(("SLAVE WRAPPER: %d,  mfg:%x, cid:%x, rev:%x, addr:%x, size:%x\n",
                sii->axi_num_wrappers,  mfg, cid, crev, addrl, sizel));
        }

#ifndef BCM_BACKPLANE_TIMEOUT
        /* Don't record bridges */
        if (br)
            continue;
#endif

        /* Done with core */
        sii->numcores++;
    }

    SI_ERROR(("Reached end of erom without finding END\n"));

error:
    sii->numcores = 0;
    return;
}
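
/*
 * Usage sketch (hypothetical, not part of this file): ai_scan() is intended
 * to be called once during attach, with `regs` mapping the chipcommon core
 * so the EROM base can be read from cc->eromptr.
 *
 *     ai_scan(sih, (void *)cc_regs, devid);
 *     if (sii->numcores == 0) {
 *         // enumeration failed; treat the attach as fatal
 *     }
 *
 * On success, cores_info->coresba[]/wrapba[] and sii->axi_wrapper[] describe
 * every discovered core and wrapper; on any EROM inconsistency numcores is
 * reset to 0.
 */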

#define AI_SETCOREIDX_MAPSIZE(coreid) \
    (((coreid) == NS_CCB_CORE_ID) ? 15 * SI_CORE_SIZE : SI_CORE_SIZE)
/* This function changes the logical "focus" to the indicated core.
 * Return the current core's virtual address.
 */
static volatile void *
_ai_setcoreidx(si_t *sih, uint coreidx, uint use_wrap2)
{
    si_info_t *sii = SI_INFO(sih);
    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
    uint32 addr, wrap, wrap2;
    volatile void *regs;

    if (coreidx >= MIN(sii->numcores, SI_MAXCORES))
        return (NULL);

    addr = cores_info->coresba[coreidx];
    wrap = cores_info->wrapba[coreidx];
    wrap2 = cores_info->wrapba2[coreidx];

#ifdef BCM_BACKPLANE_TIMEOUT
    /* No need to disable interrupts while entering/exiting APB bridge core */
    if ((cores_info->coreid[coreidx] != APB_BRIDGE_CORE_ID) &&
        (cores_info->coreid[sii->curidx] != APB_BRIDGE_CORE_ID))
#endif /* BCM_BACKPLANE_TIMEOUT */
    {
        /*
         * If the user has provided an interrupt mask enabled function,
         * then assert interrupts are disabled before switching the core.
         */
        ASSERT((sii->intrsenabled_fn == NULL) ||
            !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
    }

    switch (BUSTYPE(sih->bustype)) {
    case SI_BUS:
        /* map new one */
        if (!cores_info->regs[coreidx]) {
            cores_info->regs[coreidx] = REG_MAP(addr,
                AI_SETCOREIDX_MAPSIZE(cores_info->coreid[coreidx]));
            ASSERT(GOODREGS(cores_info->regs[coreidx]));
        }
        sii->curmap = regs = cores_info->regs[coreidx];
        if (!cores_info->wrappers[coreidx] && (wrap != 0)) {
            cores_info->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
            ASSERT(GOODREGS(cores_info->wrappers[coreidx]));
        }
        if (!cores_info->wrappers2[coreidx] && (wrap2 != 0)) {
            cores_info->wrappers2[coreidx] = REG_MAP(wrap2, SI_CORE_SIZE);
            ASSERT(GOODREGS(cores_info->wrappers2[coreidx]));
        }
        if (use_wrap2)
            sii->curwrap = cores_info->wrappers2[coreidx];
        else
            sii->curwrap = cores_info->wrappers[coreidx];
        break;

    case PCI_BUS:
#ifdef BCM_BACKPLANE_TIMEOUT
        /* No need to set the BAR0 window if the core is the APB Bridge.
         * This saves 2 PCI config writes while checking for errlog
         */
        if (cores_info->coreid[coreidx] != APB_BRIDGE_CORE_ID)
#endif /* BCM_BACKPLANE_TIMEOUT */
        {
            /* point bar0 window */
            OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, addr);
        }

        regs = sii->curmap;
        /* point bar0 2nd 4KB window to the primary wrapper */
        if (use_wrap2)
            wrap = wrap2;
        if (PCIE_GEN2(sii))
            OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_WIN2, 4, wrap);
        else
            OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN2, 4, wrap);
        break;

#ifdef BCMSDIO
    case SPI_BUS:
    case SDIO_BUS:
        sii->curmap = regs = (void *)((uintptr)addr);
        if (use_wrap2)
            sii->curwrap = (void *)((uintptr)wrap2);
        else
            sii->curwrap = (void *)((uintptr)wrap);
        break;
#endif    /* BCMSDIO */

    case PCMCIA_BUS:
    default:
        ASSERT(0);
        regs = NULL;
        break;
    }

    sii->curmap = regs;
    sii->curidx = coreidx;

    return regs;
}

volatile void *
ai_setcoreidx(si_t *sih, uint coreidx)
{
    return _ai_setcoreidx(sih, coreidx, 0);
}

volatile void *
ai_setcoreidx_2ndwrap(si_t *sih, uint coreidx)
{
    return _ai_setcoreidx(sih, coreidx, 1);
}
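
/*
 * Usage sketch: callers that temporarily switch focus should save and
 * restore the current index, as ai_flag() below does for the PMU case.
 *
 *     uint origidx = sii->curidx;
 *     volatile void *regs = ai_setcoreidx(sih, SI_CC_IDX);
 *     if (regs != NULL) {
 *         // ... access the chipcommon core via regs / sii->curwrap ...
 *     }
 *     ai_setcoreidx(sih, origidx);    // restore the original focus
 *
 * On PCI the switch rewrites the BAR0 windows, so interrupts that touch the
 * backplane must be off across the switch (see the ASSERT above).
 */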

void
ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size)
{
    si_info_t *sii = SI_INFO(sih);
    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
    chipcregs_t *cc = NULL;
    uint32 erombase, *eromptr, *eromlim;
    uint i, j, cidx;
    uint32 cia, cib, nmp, nsp;
    uint32 asd, addrl, addrh, sizel, sizeh;

    for (i = 0; i < sii->numcores; i++) {
        if (cores_info->coreid[i] == CC_CORE_ID) {
            cc = (chipcregs_t *)cores_info->regs[i];
            break;
        }
    }
    if (cc == NULL)
        goto error;

    erombase = R_REG(sii->osh, &cc->eromptr);
    eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
    eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));

    cidx = sii->curidx;
    cia = cores_info->cia[cidx];
    cib = cores_info->cib[cidx];

    nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
    nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;

    /* scan for cores */
    while (eromptr < eromlim) {
        if ((get_erom_ent(sih, &eromptr, ER_TAG, ER_CI) == cia) &&
            (get_erom_ent(sih, &eromptr, 0, 0) == cib)) {
            break;
        }
    }

    /* skip master ports */
    for (i = 0; i < nmp; i++)
        get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);

    /* Skip ASDs in port 0 */
    asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
    if (asd == 0) {
        /* Try again to see if it is a bridge */
        asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
                      &sizel, &sizeh);
    }

    j = 1;
    do {
        asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
                      &sizel, &sizeh);
        j++;
    } while (asd != 0);

    /* Go through the ASDs for other slave ports */
    for (i = 1; i < nsp; i++) {
        j = 0;
        do {
            asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
                &sizel, &sizeh);
            if (asd == 0)
                break;

            if (!asidx--) {
                *addr = addrl;
                *size = sizel;
                return;
            }
            j++;
        } while (1);

        if (j == 0) {
            SI_ERROR((" SP %d has no address descriptors\n", i));
            break;
        }
    }

error:
    *size = 0;
    return;
}

/* Return the number of address spaces in current core */
int
ai_numaddrspaces(si_t *sih)
{
    BCM_REFERENCE(sih);

    return 2;
}

/* Return the address of the nth address space in the current core */
uint32
ai_addrspace(si_t *sih, uint asidx)
{
    si_info_t *sii = SI_INFO(sih);
    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
    uint cidx;

    cidx = sii->curidx;

    if (asidx == 0)
        return cores_info->coresba[cidx];
    else if (asidx == 1)
        return cores_info->coresba2[cidx];
    else {
        SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
                  __FUNCTION__, asidx));
        return 0;
    }
}

/* Return the size of the nth address space in the current core */
uint32
ai_addrspacesize(si_t *sih, uint asidx)
{
    si_info_t *sii = SI_INFO(sih);
    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
    uint cidx;

    cidx = sii->curidx;

    if (asidx == 0)
        return cores_info->coresba_size[cidx];
    else if (asidx == 1)
        return cores_info->coresba2_size[cidx];
    else {
        SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
                  __FUNCTION__, asidx));
        return 0;
    }
}
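
/*
 * Usage sketch: enumerate the address spaces of the current core with the
 * three accessors above (hypothetical caller).
 *
 *     uint i;
 *     for (i = 0; i < (uint)ai_numaddrspaces(sih); i++) {
 *         uint32 base = ai_addrspace(sih, i);
 *         uint32 size = ai_addrspacesize(sih, i);
 *         // base/size of 0 for asidx 1 just means no second space was
 *         // recorded during ai_scan()
 *     }
 *
 * Only asidx 0 and 1 are backed by the cached EROM data; anything larger
 * would require re-parsing the EROM, hence the SI_ERROR above.
 */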

uint
ai_flag(si_t *sih)
{
    si_info_t *sii = SI_INFO(sih);
    aidmp_t *ai;

    if (BCM5357_DMP()) {
        SI_ERROR(("%s: Attempting to read USB20H DMP registers on 5357b0\n", __FUNCTION__));
        return sii->curidx;
    }
    if (BCM4707_DMP()) {
        SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n",
            __FUNCTION__));
        return sii->curidx;
    }
    if (BCM53573_DMP()) {
        SI_ERROR(("%s: Attempting to read DMP registers on 53573\n", __FUNCTION__));
        return sii->curidx;
    }
#ifdef REROUTE_OOBINT
    if (PMU_DMP()) {
        SI_ERROR(("%s: Attempting to read PMU DMP registers\n",
            __FUNCTION__));
        return PMU_OOB_BIT;
    }
#else
    if (PMU_DMP()) {
        uint idx, flag;
        idx = sii->curidx;
        ai_setcoreidx(sih, SI_CC_IDX);
        flag = ai_flag_alt(sih);
        ai_setcoreidx(sih, idx);
        return flag;
    }
#endif /* REROUTE_OOBINT */

    ai = sii->curwrap;
    ASSERT(ai != NULL);

    return (R_REG(sii->osh, &ai->oobselouta30) & 0x1f);
}

uint
ai_flag_alt(si_t *sih)
{
    si_info_t *sii = SI_INFO(sih);
    aidmp_t *ai;

    if (BCM5357_DMP()) {
        SI_ERROR(("%s: Attempting to read USB20H DMP registers on 5357b0\n", __FUNCTION__));
        return sii->curidx;
    }
    if (BCM4707_DMP()) {
        SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n",
            __FUNCTION__));
        return sii->curidx;
    }
#ifdef REROUTE_OOBINT
    if (PMU_DMP()) {
        SI_ERROR(("%s: Attempting to read PMU DMP registers\n",
            __FUNCTION__));
        return PMU_OOB_BIT;
    }
#endif /* REROUTE_OOBINT */

    ai = sii->curwrap;

    return ((R_REG(sii->osh, &ai->oobselouta30) >> AI_OOBSEL_1_SHIFT) & AI_OOBSEL_MASK);
}

void
ai_setint(si_t *sih, int siflag)
{
    BCM_REFERENCE(sih);
    BCM_REFERENCE(siflag);
}

/* Read-modify-write (when mask or val is nonzero) and return a register in
 * the current core's wrapper space, at byte offset 'offset'.
 */
uint
ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val)
{
    si_info_t *sii = SI_INFO(sih);
    uint32 *map = (uint32 *) sii->curwrap;

    if (mask || val) {
        uint32 w = R_REG(sii->osh, map+(offset/4));
        w &= ~mask;
        w |= val;
        W_REG(sii->osh, map+(offset/4), w);
    }

    return (R_REG(sii->osh, map+(offset/4)));
}

uint
ai_corevendor(si_t *sih)
{
    si_info_t *sii = SI_INFO(sih);
    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
    uint32 cia;

    cia = cores_info->cia[sii->curidx];
    return ((cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT);
}

uint
ai_corerev(si_t *sih)
{
    si_info_t *sii = SI_INFO(sih);
    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
    uint32 cib;

    cib = cores_info->cib[sii->curidx];
    return remap_corerev(sih, (cib & CIB_REV_MASK) >> CIB_REV_SHIFT);
}

bool
ai_iscoreup(si_t *sih)
{
    si_info_t *sii = SI_INFO(sih);
    aidmp_t *ai;

    ai = sii->curwrap;

    return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) == SICF_CLOCK_EN) &&
            ((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0));
}

/*
 * Switch to 'coreidx', issue a single arbitrary 32-bit register mask&set operation,
 * switch back to the original core, and return the new value.
 *
 * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
 *
 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
 * and (on newer pci cores) chipcommon registers.
 */
uint
ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
{
    uint origidx = 0;
    volatile uint32 *r = NULL;
    uint w;
    uint intr_val = 0;
    bool fast = FALSE;
    si_info_t *sii = SI_INFO(sih);
    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

    ASSERT(GOODIDX(coreidx));
    ASSERT(regoff < SI_CORE_SIZE);
    ASSERT((val & ~mask) == 0);

    if (coreidx >= SI_MAXCORES)
        return 0;

    if (BUSTYPE(sih->bustype) == SI_BUS) {
        /* If internal bus, we can always get at everything */
        fast = TRUE;
        /* map if does not exist */
        if (!cores_info->regs[coreidx]) {
            cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
                                        SI_CORE_SIZE);
            ASSERT(GOODREGS(cores_info->regs[coreidx]));
        }
        r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
    } else if (BUSTYPE(sih->bustype) == PCI_BUS) {
        /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */

        if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
            /* Chipc registers are mapped at 12KB */

            fast = TRUE;
            r = (volatile uint32 *)((volatile char *)sii->curmap +
                           PCI_16KB0_CCREGS_OFFSET + regoff);
        } else if (sii->pub.buscoreidx == coreidx) {
            /* pci registers are either in the last 2KB of an 8KB window
             * or, in pcie and pci rev 13, at 8KB
             */
            fast = TRUE;
            if (SI_FAST(sii))
                r = (volatile uint32 *)((volatile char *)sii->curmap +
                               PCI_16KB0_PCIREGS_OFFSET + regoff);
            else
                r = (volatile uint32 *)((volatile char *)sii->curmap +
                               ((regoff >= SBCONFIGOFF) ?
                                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
                               regoff);
        }
    }

    if (!fast) {
        INTR_OFF(sii, intr_val);

        /* save current core index */
        origidx = si_coreidx(&sii->pub);

        /* switch core */
        r = (volatile uint32*) ((volatile uchar*) ai_setcoreidx(&sii->pub, coreidx) +
                       regoff);
    }
    ASSERT(r != NULL);

    /* mask and set */
    if (mask || val) {
        w = (R_REG(sii->osh, r) & ~mask) | val;
        W_REG(sii->osh, r, w);
    }

    /* readback */
    w = R_REG(sii->osh, r);

    if (!fast) {
        /* restore core index */
        if (origidx != coreidx)
            ai_setcoreidx(&sii->pub, origidx);

        INTR_RESTORE(sii, intr_val);
    }

    return (w);
}
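
/*
 * Usage sketch (hypothetical offsets, assuming the chipcregs_t layout from
 * sbchipc.h): drive the mask-and-set primitive above. mask == 0 && val == 0
 * performs a pure read.
 *
 *     // write: set the chipcommon watchdog counter
 *     ai_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, ticks);
 *
 *     // read-only: fetch chipcommon capabilities
 *     uint32 cap = ai_corereg(sih, SI_CC_IDX,
 *                             OFFSETOF(chipcregs_t, capabilities), 0, 0);
 */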

/*
 * If there is no need for fiddling with interrupts or core switches (typically silicon
 * backplane registers, pci registers and chipcommon registers), this function
 * returns the register offset on this core as a mapped address. This address can
 * be used for W_REG/R_REG directly.
 *
 * For accessing registers that would need a core switch, this function will return
 * NULL.
 */
volatile uint32 *
ai_corereg_addr(si_t *sih, uint coreidx, uint regoff)
{
    volatile uint32 *r = NULL;
    bool fast = FALSE;
    si_info_t *sii = SI_INFO(sih);
    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

    ASSERT(GOODIDX(coreidx));
    ASSERT(regoff < SI_CORE_SIZE);

    if (coreidx >= SI_MAXCORES)
        return 0;

    if (BUSTYPE(sih->bustype) == SI_BUS) {
        /* If internal bus, we can always get at everything */
        fast = TRUE;
        /* map if does not exist */
        if (!cores_info->regs[coreidx]) {
            cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
                                        SI_CORE_SIZE);
            ASSERT(GOODREGS(cores_info->regs[coreidx]));
        }
        r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
    } else if (BUSTYPE(sih->bustype) == PCI_BUS) {
        /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */

        if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
            /* Chipc registers are mapped at 12KB */

            fast = TRUE;
            r = (volatile uint32 *)((volatile char *)sii->curmap +
                           PCI_16KB0_CCREGS_OFFSET + regoff);
        } else if (sii->pub.buscoreidx == coreidx) {
            /* pci registers are either in the last 2KB of an 8KB window
             * or, in pcie and pci rev 13, at 8KB
             */
            fast = TRUE;
            if (SI_FAST(sii))
                r = (volatile uint32 *)((volatile char *)sii->curmap +
                               PCI_16KB0_PCIREGS_OFFSET + regoff);
            else
                r = (volatile uint32 *)((volatile char *)sii->curmap +
                               ((regoff >= SBCONFIGOFF) ?
                                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
                               regoff);
        }
    }

    if (!fast) {
        ASSERT(sii->curidx == coreidx);
        r = (volatile uint32*) ((volatile uchar*)sii->curmap + regoff);
    }

    return (r);
}
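
/*
 * Usage sketch: when the fast path applies, the returned pointer can be used
 * with R_REG/W_REG directly, avoiding a core switch per access.
 *
 *     volatile uint32 *r = ai_corereg_addr(sih, SI_CC_IDX,
 *                                          OFFSETOF(chipcregs_t, capabilities));
 *     if (r != NULL)
 *         cap = R_REG(si_osh(sih), r);
 *
 * Per the contract above, a NULL return means the access would require a
 * core switch, so the caller must fall back to ai_corereg().
 */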

void
ai_core_disable(si_t *sih, uint32 bits)
{
    si_info_t *sii = SI_INFO(sih);
    volatile uint32 dummy;
    uint32 status;
    aidmp_t *ai;

    ASSERT(GOODREGS(sii->curwrap));
    ai = sii->curwrap;

    /* if core is already in reset, just return */
    if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) {
        return;
    }

    /* ensure there are no pending backplane operations */
    SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);

    /* if pending backplane ops still, try waiting longer */
    if (status != 0) {
        /* 300usecs was sufficient to allow backplane ops to clear for big hammer */
        /* during driver load we may need more time */
        SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 10000);
        /* if still pending ops, continue on and try disable anyway */
        /* this is in big hammer path, so don't call wl_reinit in this case... */
    }

    W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
    dummy = R_REG(sii->osh, &ai->resetctrl);
    BCM_REFERENCE(dummy);
    OSL_DELAY(1);

    W_REG(sii->osh, &ai->ioctrl, bits);
    dummy = R_REG(sii->osh, &ai->ioctrl);
    BCM_REFERENCE(dummy);
    OSL_DELAY(10);
}

/* reset and re-enable a core
 * inputs:
 * bits - core specific bits that are set during and after reset sequence
 * resetbits - core specific bits that are set only during reset sequence
 */
static void
_ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
{
    si_info_t *sii = SI_INFO(sih);
    aidmp_t *ai;
    volatile uint32 dummy;
    uint loop_counter = 10;

    ASSERT(GOODREGS(sii->curwrap));
    ai = sii->curwrap;

    /* ensure there are no pending backplane operations */
    SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);

    /* put core into reset state */
    W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
    OSL_DELAY(10);

    /* ensure there are no pending backplane operations */
    SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);

    W_REG(sii->osh, &ai->ioctrl, (bits | resetbits | SICF_FGC | SICF_CLOCK_EN));
    dummy = R_REG(sii->osh, &ai->ioctrl);
    BCM_REFERENCE(dummy);

    /* ensure there are no pending backplane operations */
    SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);

    while (R_REG(sii->osh, &ai->resetctrl) != 0 && --loop_counter != 0) {
        /* ensure there are no pending backplane operations */
        SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);

        /* take core out of reset */
        W_REG(sii->osh, &ai->resetctrl, 0);

        /* ensure there are no pending backplane operations */
        SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
    }

    W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN));
    dummy = R_REG(sii->osh, &ai->ioctrl);
    BCM_REFERENCE(dummy);
    OSL_DELAY(1);
}

void
ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
{
    si_info_t *sii = SI_INFO(sih);
    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
    uint idx = sii->curidx;

    if (cores_info->wrapba2[idx] != 0) {
        ai_setcoreidx_2ndwrap(sih, idx);
        _ai_core_reset(sih, bits, resetbits);
        ai_setcoreidx(sih, idx);
    }

    _ai_core_reset(sih, bits, resetbits);
}
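
/*
 * Usage sketch: a typical core (re)initialization first focuses the core,
 * then pulses reset through its wrapper.
 *
 *     ai_setcoreidx(sih, coreidx);    // focus the target core
 *     ai_core_disable(sih, 0);        // put it cleanly into reset
 *     ai_core_reset(sih, 0, 0);       // take it out of reset, clock enabled
 *     ASSERT(ai_iscoreup(sih));
 *
 * `bits` persist in ioctrl after the sequence; `resetbits` are applied only
 * while reset is asserted (see the function comment above).
 */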

void
ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
{
    si_info_t *sii = SI_INFO(sih);
    aidmp_t *ai;
    uint32 w;

    if (BCM5357_DMP()) {
        SI_ERROR(("%s: Accessing USB20H DMP register (ioctrl) on 5357\n",
                  __FUNCTION__));
        return;
    }
    if (BCM4707_DMP()) {
        SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
            __FUNCTION__));
        return;
    }
    if (PMU_DMP()) {
        SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
            __FUNCTION__));
        return;
    }

    ASSERT(GOODREGS(sii->curwrap));
    ai = sii->curwrap;

    ASSERT((val & ~mask) == 0);

    if (mask || val) {
        w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
        W_REG(sii->osh, &ai->ioctrl, w);
    }
}

uint32
ai_core_cflags(si_t *sih, uint32 mask, uint32 val)
{
    si_info_t *sii = SI_INFO(sih);
    aidmp_t *ai;
    uint32 w;

    if (BCM5357_DMP()) {
        SI_ERROR(("%s: Accessing USB20H DMP register (ioctrl) on 5357\n",
                  __FUNCTION__));
        return 0;
    }
    if (BCM4707_DMP()) {
        SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
            __FUNCTION__));
        return 0;
    }

    if (PMU_DMP()) {
        SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
            __FUNCTION__));
        return 0;
    }
    ASSERT(GOODREGS(sii->curwrap));
    ai = sii->curwrap;

    ASSERT((val & ~mask) == 0);

    if (mask || val) {
        w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
        W_REG(sii->osh, &ai->ioctrl, w);
    }

    return R_REG(sii->osh, &ai->ioctrl);
}

uint32
ai_core_sflags(si_t *sih, uint32 mask, uint32 val)
{
    si_info_t *sii = SI_INFO(sih);
    aidmp_t *ai;
    uint32 w;

    if (BCM5357_DMP()) {
        SI_ERROR(("%s: Accessing USB20H DMP register (iostatus) on 5357\n",
                  __FUNCTION__));
        return 0;
    }
    if (BCM4707_DMP()) {
        SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
            __FUNCTION__));
        return 0;
    }
    if (PMU_DMP()) {
        SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
            __FUNCTION__));
        return 0;
    }

    ASSERT(GOODREGS(sii->curwrap));
    ai = sii->curwrap;

    ASSERT((val & ~mask) == 0);
    ASSERT((mask & ~SISF_CORE_BITS) == 0);

    if (mask || val) {
        w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val);
        W_REG(sii->osh, &ai->iostatus, w);
    }

    return R_REG(sii->osh, &ai->iostatus);
}

#if defined(BCMDBG_PHYDUMP)
/* print interesting aidmp registers */
void
ai_dumpregs(si_t *sih, struct bcmstrbuf *b)
{
    si_info_t *sii = SI_INFO(sih);
    osl_t *osh;
    aidmp_t *ai;
    uint i;
    uint32 prev_value = 0;
    axi_wrapper_t *axi_wrapper = sii->axi_wrapper;
    uint32 cfg_reg = 0;
    uint bar0_win_offset = 0;

    osh = sii->osh;

    /* Save and restore wrapper access window */
    if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
        if (PCIE_GEN2(sii)) {
            cfg_reg = PCIE2_BAR0_CORE2_WIN2;
            bar0_win_offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
        } else {
            cfg_reg = PCI_BAR0_WIN2;
            bar0_win_offset = PCI_BAR0_WIN2_OFFSET;
        }

        prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4);

        if (prev_value == ID32_INVALID) {
            SI_PRINT(("%s, PCI_BAR0_WIN2 - %x\n", __FUNCTION__, prev_value));
            return;
        }
    }

    bcm_bprintf(b, "ChipNum:%x, ChipRev:%x, BusType:%x, BoardType:%x, BoardVendor:%x\n\n",
        sih->chip, sih->chiprev, sih->bustype, sih->boardtype, sih->boardvendor);

    for (i = 0; i < sii->axi_num_wrappers; i++) {
        if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
            /* Set BAR0 window to bridge wrapper base address */
            OSL_PCI_WRITE_CONFIG(osh,
                cfg_reg, 4, axi_wrapper[i].wrapper_addr);

            ai = (aidmp_t *) ((volatile uint8*)sii->curmap + bar0_win_offset);
        } else {
            ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr;
        }

        bcm_bprintf(b, "core 0x%x: core_rev:%d, %s_WR ADDR:%x \n", axi_wrapper[i].cid,
            axi_wrapper[i].rev,
            axi_wrapper[i].wrapper_type == AI_SLAVE_WRAPPER ? "SLAVE" : "MASTER",
            axi_wrapper[i].wrapper_addr);

        /* BCM5357_DMP() */
        if (((CHIPID(sih->chip) == BCM5357_CHIP_ID) ||
            (CHIPID(sih->chip) == BCM4749_CHIP_ID)) &&
            (sih->chippkg == BCM5357_PKG_ID) &&
            (axi_wrapper[i].cid == USB20H_CORE_ID)) {
            bcm_bprintf(b, "Skipping usb20h in 5357\n");
            continue;
        }

        /* BCM4707_DMP() */
        if (BCM4707_CHIP(CHIPID(sih->chip)) &&
            (axi_wrapper[i].cid == NS_CCB_CORE_ID)) {
            bcm_bprintf(b, "Skipping chipcommonb in 4707\n");
            continue;
        }

        bcm_bprintf(b, "ioctrlset 0x%x ioctrlclear 0x%x ioctrl 0x%x iostatus 0x%x "
                "ioctrlwidth 0x%x iostatuswidth 0x%x\n"
                "resetctrl 0x%x resetstatus 0x%x resetreadid 0x%x resetwriteid 0x%x\n"
                "errlogctrl 0x%x errlogdone 0x%x errlogstatus 0x%x "
                "errlogaddrlo 0x%x errlogaddrhi 0x%x\n"
                "errlogid 0x%x errloguser 0x%x errlogflags 0x%x\n"
                "intstatus 0x%x config 0x%x itcr 0x%x\n\n",
                R_REG(osh, &ai->ioctrlset),
                R_REG(osh, &ai->ioctrlclear),
                R_REG(osh, &ai->ioctrl),
                R_REG(osh, &ai->iostatus),
                R_REG(osh, &ai->ioctrlwidth),
                R_REG(osh, &ai->iostatuswidth),
                R_REG(osh, &ai->resetctrl),
                R_REG(osh, &ai->resetstatus),
                R_REG(osh, &ai->resetreadid),
                R_REG(osh, &ai->resetwriteid),
                R_REG(osh, &ai->errlogctrl),
                R_REG(osh, &ai->errlogdone),
                R_REG(osh, &ai->errlogstatus),
                R_REG(osh, &ai->errlogaddrlo),
                R_REG(osh, &ai->errlogaddrhi),
                R_REG(osh, &ai->errlogid),
                R_REG(osh, &ai->errloguser),
                R_REG(osh, &ai->errlogflags),
                R_REG(osh, &ai->intstatus),
                R_REG(osh, &ai->config),
                R_REG(osh, &ai->itcr));
    }

    /* Restore the initial wrapper space */
    if (prev_value && cfg_reg) {
        OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value);
    }
}
#endif

void
ai_enable_backplane_timeouts(si_t *sih)
{
#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
    si_info_t *sii = SI_INFO(sih);
    aidmp_t *ai;
    uint32 i;
    axi_wrapper_t *axi_wrapper = sii->axi_wrapper;

#ifdef BCM_BACKPLANE_TIMEOUT
    uint32 prev_value = 0;
    osl_t *osh = sii->osh;
    uint32 cfg_reg = 0;
    uint32 offset = 0;
#endif /* BCM_BACKPLANE_TIMEOUT */

    if ((sii->axi_num_wrappers == 0) ||
#ifdef BCM_BACKPLANE_TIMEOUT
        (!PCIE(sii)) ||
#endif /* BCM_BACKPLANE_TIMEOUT */
        FALSE) {
        SI_VMSG((" %s, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d, ID:%x\n",
            __FUNCTION__, sii->axi_num_wrappers, PCIE(sii),
            BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
        return;
    }

#ifdef BCM_BACKPLANE_TIMEOUT
    /* Save and restore the wrapper access window */
    if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
        if (PCIE_GEN1(sii)) {
            cfg_reg = PCI_BAR0_WIN2;
            offset = PCI_BAR0_WIN2_OFFSET;
        } else if (PCIE_GEN2(sii)) {
            cfg_reg = PCIE2_BAR0_CORE2_WIN2;
            offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
        } else {
            osl_panic("!PCIE_GEN1 && !PCIE_GEN2\n");
        }

        prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4);
        if (prev_value == ID32_INVALID) {
            SI_PRINT(("%s, PCI_BAR0_WIN2 - %x\n", __FUNCTION__, prev_value));
            return;
        }
    }
#endif /* BCM_BACKPLANE_TIMEOUT */

    for (i = 0; i < sii->axi_num_wrappers; ++i) {
        if (axi_wrapper[i].wrapper_type != AI_SLAVE_WRAPPER) {
            SI_VMSG(("SKIP ENABLE BPT: MFG:%x, CID:%x, ADDR:%x\n",
                axi_wrapper[i].mfg,
                axi_wrapper[i].cid,
                axi_wrapper[i].wrapper_addr));
            continue;
        }

#ifdef BCM_BACKPLANE_TIMEOUT
        if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
            /* Set BAR0_CORE2_WIN2 to the wrapper base address */
            OSL_PCI_WRITE_CONFIG(osh,
                cfg_reg, 4, axi_wrapper[i].wrapper_addr);

            /* set AI to BAR0 + Offset corresponding to Gen1 or gen2 */
            ai = (aidmp_t *) ((uint8*)sii->curmap + offset);
        }
        else
#endif /* BCM_BACKPLANE_TIMEOUT */
        {
            ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr;
        }

        W_REG(sii->osh, &ai->errlogctrl, (1 << AIELC_TO_ENAB_SHIFT) |
              ((AXI_TO_VAL << AIELC_TO_EXP_SHIFT) & AIELC_TO_EXP_MASK));

        SI_VMSG(("ENABLED BPT: MFG:%x, CID:%x, ADDR:%x, ERR_CTRL:%x\n",
            axi_wrapper[i].mfg,
            axi_wrapper[i].cid,
            axi_wrapper[i].wrapper_addr,
            R_REG(sii->osh, &ai->errlogctrl)));
    }

#ifdef BCM_BACKPLANE_TIMEOUT
    /* Restore the initial wrapper space */
    if (prev_value) {
        OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value);
    }
#endif /* BCM_BACKPLANE_TIMEOUT */

#endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */
}

#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)

/* slave error is ignored, so account for those cases */
static uint32 si_ignore_errlog_cnt = 0;

static bool
ai_ignore_errlog(si_info_t *sii, uint32 lo_addr, uint32 hi_addr, uint32 err_axi_id, uint32 errsts)
{
    uint32 axi_id;

    /* ignore the BT slave errors if the errlog is to chipcommon addr 0x190 */
    switch (CHIPID(sii->pub.chip)) {
        case BCM4350_CHIP_ID:
            axi_id = BCM4350_BT_AXI_ID;
            break;
        case BCM4345_CHIP_ID:
            axi_id = BCM4345_BT_AXI_ID;
            break;
        default:
            return FALSE;
    }

    /* AXI ID check */
    if ((err_axi_id & AI_ERRLOGID_AXI_ID_MASK) != axi_id)
        return FALSE;

    /* slave errors */
    if ((errsts & AIELS_TIMEOUT_MASK) != AIELS_SLAVE_ERR)
        return FALSE;

    /* chipc reg 0x190 */
    if ((hi_addr != BT_CC_SPROM_BADREG_HI) || (lo_addr != BT_CC_SPROM_BADREG_LO))
        return FALSE;

    return TRUE;
}
#endif /* defined (AXI_TIMEOUTS) || defined (BCM_BACKPLANE_TIMEOUT) */

#ifdef BCM_BACKPLANE_TIMEOUT

/* Function to return the APB bridge details corresponding to the core */
bool
ai_get_apb_bridge(si_t *sih, uint32 coreidx, uint32 *apb_id, uint32 *apb_coreunit)
{
    uint i;
    uint32 core_base, core_end;
    si_info_t *sii = SI_INFO(sih);
    static uint32 coreidx_cached = 0, apb_id_cached = 0, apb_coreunit_cached = 0;
    uint32 tmp_coreunit = 0;
    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

    if (coreidx >= MIN(sii->numcores, SI_MAXCORES))
        return FALSE;

    /* Most of the time the APB bridge query will be for the d11 core.
     * Cache the last result and return it on a match rather than
     * iterating the table every time.
     */
    if (coreidx_cached == coreidx) {
        *apb_id = apb_id_cached;
        *apb_coreunit = apb_coreunit_cached;
        return TRUE;
    }

    core_base = cores_info->coresba[coreidx];
    core_end = core_base + cores_info->coresba_size[coreidx];

    for (i = 0; i < sii->numcores; i++) {
        if (cores_info->coreid[i] == APB_BRIDGE_ID) {
            uint32 apb_base;
            uint32 apb_end;

            apb_base = cores_info->coresba[i];
            apb_end = apb_base + cores_info->coresba_size[i];

            if ((core_base >= apb_base) &&
                (core_end <= apb_end)) {
                /* Current core is attached to this APB bridge */
                *apb_id = apb_id_cached = APB_BRIDGE_ID;
                *apb_coreunit = apb_coreunit_cached = tmp_coreunit;
                coreidx_cached = coreidx;
                return TRUE;
            }
            /* Increment the coreunit */
            tmp_coreunit++;
        }
    }

    return FALSE;
}

uint32
ai_clear_backplane_to_fast(si_t *sih, void *addr)
{
    si_info_t *sii = SI_INFO(sih);
    void *curmap = sii->curmap;
    bool core_reg = FALSE;

    /* Use fast path only for core register access */
    if ((addr >= curmap) && (addr < (curmap + SI_CORE_SIZE))) {
        /* address being accessed is within current core reg map */
        core_reg = TRUE;
    }

    if (core_reg) {
        uint32 apb_id, apb_coreunit;

        if (ai_get_apb_bridge(sih, si_coreidx(&sii->pub),
            &apb_id, &apb_coreunit) == TRUE) {
            /* Found the APB bridge corresponding to current core,
             * Check for bus errors in APB wrapper
             */
            return ai_clear_backplane_to_per_core(sih,
                apb_id, apb_coreunit, NULL);
        }
    }

    /* Default is to poll for errors on all slave wrappers */
    return si_clear_backplane_to(sih);
}
#endif /* BCM_BACKPLANE_TIMEOUT */
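
/*
 * Usage sketch (hypothetical caller): after a register read returns
 * ID32_INVALID, a bus layer built with BCM_BACKPLANE_TIMEOUT can use the
 * fast path to check only the APB bridge in front of the current core
 * instead of polling every slave wrapper.
 *
 *     if (R_REG(osh, regaddr) == ID32_INVALID)
 *         (void)ai_clear_backplane_to_fast(sih, (void *)regaddr);
 */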
1497 
1498 #if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
1499 /*
1500  * API to clear the back plane timeout per core.
1501  * Caller may passs optional wrapper address. If present this will be used as
1502  * the wrapper base address. If wrapper base address is provided then caller
1503  * must provide the coreid also.
1504  * If both coreid and wrapper is zero, then err status of current bridge
1505  * will be verified.
1506  */
1507 uint32
ai_clear_backplane_to_per_core(si_t * sih,uint coreid,uint coreunit,void * wrap)1508 ai_clear_backplane_to_per_core(si_t *sih, uint coreid, uint coreunit, void * wrap)
1509 {
1510     int ret = AXI_WRAP_STS_NONE;
1511     aidmp_t *ai = NULL;
1512     uint32 errlog_status = 0;
1513     si_info_t *sii = SI_INFO(sih);
1514     uint32 errlog_lo = 0, errlog_hi = 0, errlog_id = 0, errlog_flags = 0;
1515     uint32 current_coreidx = si_coreidx(sih);
1516     uint32 target_coreidx = si_findcoreidx(sih, coreid, coreunit);
1517 
1518 #if defined(BCM_BACKPLANE_TIMEOUT)
1519     si_axi_error_t *axi_error = &sih->err_info->axi_error[sih->err_info->count];
1520 #endif /* BCM_BACKPLANE_TIMEOUT */
1521     bool restore_core = FALSE;
1522 
1523     if ((sii->axi_num_wrappers == 0) ||
1524 #ifdef BCM_BACKPLANE_TIMEOUT
1525         (!PCIE(sii)) ||
1526 #endif /* BCM_BACKPLANE_TIMEOUT */
1527         FALSE) {
1528         SI_VMSG((" %s, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d, ID:%x\n",
1529             __FUNCTION__, sii->axi_num_wrappers, PCIE(sii),
1530             BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
1531         return AXI_WRAP_STS_NONE;
1532     }
1533 
1534     if (wrap != NULL) {
1535         ai = (aidmp_t *)wrap;
1536     } else if (coreid && (target_coreidx != current_coreidx)) {
1537         if (ai_setcoreidx(sih, target_coreidx) == NULL) {
1538             /* Unable to set the core */
1539             SI_PRINT(("Set Code Failed: coreid:%x, unit:%d, target_coreidx:%d\n",
1540                 coreid, coreunit, target_coreidx));
1541             errlog_lo = target_coreidx;
1542             ret = AXI_WRAP_STS_SET_CORE_FAIL;
1543             goto end;
1544         }
1545 
1546         restore_core = TRUE;
1547         ai = (aidmp_t *)si_wrapperregs(sih);
1548     } else {
1549         /* Read error status of current wrapper */
1550         ai = (aidmp_t *)si_wrapperregs(sih);
1551 
1552         /* Update CoreID to current Code ID */
1553         coreid = si_coreid(sih);
1554     }
1555 
1556     /* read error log status */
1557     errlog_status = R_REG(sii->osh, &ai->errlogstatus);
1558 
1559     if (errlog_status == ID32_INVALID) {
1560         /* Do not try to peek further */
1561         SI_PRINT(("%s, errlogstatus:%x - Slave Wrapper:%x\n",
1562             __FUNCTION__, errlog_status, coreid));
1563         ret = AXI_WRAP_STS_WRAP_RD_ERR;
1564         errlog_lo = (uint32)&ai->errlogstatus;
1565         goto end;
1566     }
1567 
    if ((errlog_status & AIELS_TIMEOUT_MASK) != 0) {
        uint32 tmp;
        uint32 count = 0;
        /* set ErrDone to clear the condition */
        W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK);

        /* SPINWAIT on errlogstatus timeout status bits */
        while ((tmp = R_REG(sii->osh, &ai->errlogstatus)) & AIELS_TIMEOUT_MASK) {
            if (tmp == ID32_INVALID) {
                SI_PRINT(("%s: prev errlogstatus:%x, errlogstatus:%x\n",
                    __FUNCTION__, errlog_status, tmp));
                ret = AXI_WRAP_STS_WRAP_RD_ERR;
                errlog_lo = (uint32)(uintptr)&ai->errlogstatus;
                goto end;
            }
            /*
             * Clear again, to avoid getting stuck in the loop, if a new error
             * is logged after we cleared the first timeout
             */
            W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK);

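            /* Each pass waits 10 us below, so (10 * count) tracks the
             * elapsed wait in microseconds; give up once it exceeds
             * AI_REG_READ_TIMEOUT (assumed here to be in microseconds).
             */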
            count++;
            OSL_DELAY(10);
            if ((10 * count) > AI_REG_READ_TIMEOUT) {
                errlog_status = tmp;
                break;
            }
        }

        errlog_lo = R_REG(sii->osh, &ai->errlogaddrlo);
        errlog_hi = R_REG(sii->osh, &ai->errlogaddrhi);
        errlog_id = R_REG(sii->osh, &ai->errlogid);
        errlog_flags = R_REG(sii->osh, &ai->errlogflags);

        /* we are already in the error path, so it is OK to check for the slave error */
        if (ai_ignore_errlog(sii, errlog_lo, errlog_hi, errlog_id,
            errlog_status)) {
            si_ignore_errlog_cnt++;
            goto end;
        }

        /* only reset the APB bridge on a timeout (not on slave or decode errors) */
        switch (errlog_status & AIELS_TIMEOUT_MASK) {
            case AIELS_SLAVE_ERR:
                SI_PRINT(("AXI slave error"));
                ret = AXI_WRAP_STS_SLAVE_ERR;
                break;

            case AIELS_TIMEOUT:
                /* reset APB Bridge */
                OR_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
                /* sync write */
                (void)R_REG(sii->osh, &ai->resetctrl);
                /* clear Reset bit */
                AND_REG(sii->osh, &ai->resetctrl, ~(AIRC_RESET));
                /* sync write */
                (void)R_REG(sii->osh, &ai->resetctrl);
                SI_PRINT(("AXI timeout"));
                ret = AXI_WRAP_STS_TIMEOUT;
                break;

            case AIELS_DECODE:
                SI_PRINT(("AXI decode error"));
                ret = AXI_WRAP_STS_DECODE_ERR;
                break;

            default:
                ASSERT(0);    /* should be impossible */
        }

        SI_PRINT(("\tCoreID: %x\n", coreid));
        SI_PRINT(("\t errlog: lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x"
            ", status 0x%08x\n",
            errlog_lo, errlog_hi, errlog_id, errlog_flags,
            errlog_status));
    }

end:

#if defined(BCM_BACKPLANE_TIMEOUT)
    if (axi_error && (ret != AXI_WRAP_STS_NONE)) {
        axi_error->error = ret;
        axi_error->coreid = coreid;
        axi_error->errlog_lo = errlog_lo;
        axi_error->errlog_hi = errlog_hi;
        axi_error->errlog_id = errlog_id;
        axi_error->errlog_flags = errlog_flags;
        axi_error->errlog_status = errlog_status;
        sih->err_info->count++;

        if (sih->err_info->count == SI_MAX_ERRLOG_SIZE) {
            sih->err_info->count = SI_MAX_ERRLOG_SIZE - 1;
            SI_PRINT(("AXI Error log overflow\n"));
        }
    }
#endif /* BCM_BACKPLANE_TIMEOUT */

    if (restore_core) {
        if (ai_setcoreidx(sih, current_coreidx) == NULL) {
            /* Unable to restore the core */
            return ID32_INVALID;
        }
    }

    return ret;
}
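
/*
 * Illustrative sketch (not part of the original file): one way a caller
 * might use ai_clear_backplane_to_per_core() to check and clear a timeout
 * on a single core. CC_CORE_ID comes from hndsoc.h; the wrapper pointer is
 * left NULL so the routine resolves the core by coreid/coreunit. The
 * handler function itself is hypothetical.
 */
#if 0
static void
example_check_cc_wrapper(si_t *sih)
{
    uint32 sts;

    /* Check chipcommon (unit 0); a timeout, if found, is cleared and the
     * APB bridge is reset by the call itself.
     */
    sts = ai_clear_backplane_to_per_core(sih, CC_CORE_ID, 0, NULL);

    if (sts & AXI_WRAP_STS_TIMEOUT)
        SI_PRINT(("example: cleared an AXI timeout on chipcommon\n"));
}
#endif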
#endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */

/*
 * This API polls all slave wrappers for errors and returns a bitmap of
 * all reported errors.
 * return - bitmap of
 *    AXI_WRAP_STS_NONE
 *    AXI_WRAP_STS_TIMEOUT
 *    AXI_WRAP_STS_SLAVE_ERR
 *    AXI_WRAP_STS_DECODE_ERR
 *    AXI_WRAP_STS_PCI_RD_ERR
 *    AXI_WRAP_STS_WRAP_RD_ERR
 *    AXI_WRAP_STS_SET_CORE_FAIL
 * On timeout detection, the corresponding bridge is reset to
 * unblock the bus.
 * The error reported by each wrapper can be retrieved using the API
 * si_get_axi_errlog_info().
 */
uint32
ai_clear_backplane_to(si_t *sih)
{
    uint32 ret = 0;
#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)

    si_info_t *sii = SI_INFO(sih);
    aidmp_t *ai;
    uint32 i;
    axi_wrapper_t *axi_wrapper = sii->axi_wrapper;

#ifdef BCM_BACKPLANE_TIMEOUT
    uint32 prev_value = 0;
    osl_t *osh = sii->osh;
    uint32 cfg_reg = 0;
    uint32 offset = 0;

    if ((sii->axi_num_wrappers == 0) || (!PCIE(sii)))
#else
    if (sii->axi_num_wrappers == 0)
#endif
    {
        SI_VMSG((" %s, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d, ID:%x\n",
            __FUNCTION__, sii->axi_num_wrappers, PCIE(sii),
            BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
        return AXI_WRAP_STS_NONE;
    }

#ifdef BCM_BACKPLANE_TIMEOUT
    /* Save and restore wrapper access window */
    if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
        if (PCIE_GEN1(sii)) {
            cfg_reg = PCI_BAR0_WIN2;
            offset = PCI_BAR0_WIN2_OFFSET;
        } else if (PCIE_GEN2(sii)) {
            cfg_reg = PCIE2_BAR0_CORE2_WIN2;
            offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
        } else {
            osl_panic("!PCIE_GEN1 && !PCIE_GEN2\n");
        }

        prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4);

        if (prev_value == ID32_INVALID) {
            si_axi_error_t *axi_error =
                &sih->err_info->axi_error[sih->err_info->count];
            SI_PRINT(("%s, PCI_BAR0_WIN2 - %x\n", __FUNCTION__, prev_value));

            axi_error->error = ret = AXI_WRAP_STS_PCI_RD_ERR;
            axi_error->errlog_lo = cfg_reg;
            sih->err_info->count++;

            if (sih->err_info->count == SI_MAX_ERRLOG_SIZE) {
                sih->err_info->count = SI_MAX_ERRLOG_SIZE - 1;
                SI_PRINT(("AXI Error log overflow\n"));
            }

            return ret;
        }
    }
#endif /* BCM_BACKPLANE_TIMEOUT */

    for (i = 0; i < sii->axi_num_wrappers; ++i) {
        uint32 tmp;

        if (axi_wrapper[i].wrapper_type != AI_SLAVE_WRAPPER) {
            continue;
        }

#ifdef BCM_BACKPLANE_TIMEOUT

        if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
            /* Point BAR0_CORE2_WIN2 at the bridge wrapper base address */
            OSL_PCI_WRITE_CONFIG(osh,
                cfg_reg, 4, axi_wrapper[i].wrapper_addr);

            /* set ai to BAR0 + the offset for Gen1 or Gen2 */
            ai = (aidmp_t *) ((uint8 *)sii->curmap + offset);
        }
        else
#endif /* BCM_BACKPLANE_TIMEOUT */
        {
            ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr;
        }

        tmp = ai_clear_backplane_to_per_core(sih, axi_wrapper[i].cid, 0, (void *)ai);

        ret |= tmp;
    }

#ifdef BCM_BACKPLANE_TIMEOUT
    /* Restore the initial wrapper space */
    if (prev_value) {
        OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value);
    }
#endif /* BCM_BACKPLANE_TIMEOUT */

#endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */

    return ret;
}
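
/*
 * Illustrative sketch (not part of the original file): decoding the bitmap
 * returned by ai_clear_backplane_to(). The caller below is hypothetical;
 * it assumes the AXI_WRAP_STS_* values are distinct bit flags, which is how
 * this file combines them ('ret |= tmp' above).
 */
#if 0
static void
example_poll_all_wrappers(si_t *sih)
{
    uint32 sts = ai_clear_backplane_to(sih);

    if (sts == AXI_WRAP_STS_NONE)
        return;    /* no wrapper reported an error */

    if (sts & AXI_WRAP_STS_TIMEOUT)
        SI_PRINT(("example: AXI timeout seen; bridge was reset\n"));
    if (sts & AXI_WRAP_STS_SLAVE_ERR)
        SI_PRINT(("example: AXI slave error reported\n"));
    if (sts & AXI_WRAP_STS_DECODE_ERR)
        SI_PRINT(("example: AXI decode error reported\n"));
}
#endif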
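/*
 * Return the number of slave ports implemented by the core at 'coreidx',
 * decoded from the cached CIB (core information) EROM word for that core.
 */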
uint
ai_num_slaveports(si_t *sih, uint coreidx)
{
    si_info_t *sii = SI_INFO(sih);
    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
    uint32 cib;

    cib = cores_info->cib[coreidx];
    return ((cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT);
}