1 /*
2 * Misc utility routines for accessing chip-specific features
3 * of the SiliconBackplane-based Broadcom chips.
4 *
5 * Copyright (C) 1999-2019, Broadcom.
6 *
7 * Unless you and Broadcom execute a separate written software license
8 * agreement governing use of this software, this software is licensed to you
9 * under the terms of the GNU General Public License version 2 (the "GPL"),
10 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11 * following added to such license:
12 *
13 * As a special exception, the copyright holders of this software give you
14 * permission to link this software with independent modules, and to copy and
15 * distribute the resulting executable under terms of your choice, provided that
16 * you also meet, for each linked independent module, the terms and conditions
17 * of the license of that module. An independent module is a module which is
18 * not derived from this software. The special exception does not apply to any
19 * modifications of the software.
20 *
21 * Notwithstanding the above, under no circumstances may you combine this
22 * software in any way with any other Broadcom software provided under a license
23 * other than the GPL, without Broadcom's express prior written consent.
24 *
25 *
26 * <<Broadcom-WL-IPTag/Open:>>
27 *
28 * $Id: sbutils.c 700323 2017-05-18 16:12:11Z $
29 */
30
31 #include <bcm_cfg.h>
32 #include <typedefs.h>
33 #include <bcmdefs.h>
34 #include <osl.h>
35 #include <bcmutils.h>
36 #include <siutils.h>
37 #include <bcmdevs.h>
38 #include <hndsoc.h>
39 #include <sbchipc.h>
40 #include <pcicfg.h>
41 #include <sbpcmcia.h>
42
43 #include "siutils_priv.h"
44
/* local prototypes */
static uint _sb_coreidx(si_info_t *sii, uint32 sba);
static uint _sb_scan(si_info_t *sii, uint32 sba, volatile void *regs, uint bus,
	uint32 sbba, uint ncores, uint devid);
static uint32 _sb_coresba(si_info_t *sii);
static volatile void *_sb_setcoreidx(si_info_t *sii, uint coreidx);
/* read-modify-write an sbconfig register: clear 'mask' bits, then OR in 'val' */
#define SET_SBREG(sii, r, mask, val) \
	W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val)))
/* map a core's register base to its sbconfig area at offset SBCONFIGOFF
 * within the core's register window
 */
#define REGS2SB(va) (sbconfig_t *)((volatile int8 *)(va) + SBCONFIGOFF)

/* sonicsrev */
#define SONICS_2_2 (SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
#define SONICS_2_3 (SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)

/* sbconfig register accessors; these go through helper functions because
 * PCMCIA needs special address handling (see sb_read_sbreg/sb_write_sbreg)
 */
#define R_SBREG(sii, sbr) sb_read_sbreg((sii), (sbr))
#define W_SBREG(sii, sbr, v) sb_write_sbreg((sii), (sbr), (v))
#define AND_SBREG(sii, sbr, v) \
	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v)))
#define OR_SBREG(sii, sbr, v) \
	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v)))
65
/*
 * Read a 32-bit Sonics backplane (sbconfig) register.
 * On PCMCIA, the attribute-space MEM_SEG bit supplies the 12th address bit
 * (compact flash only decodes 11), so MEM_SEG is set around the access and
 * bit 11 is masked out of the register address.
 */
static uint32 sb_read_sbreg(si_info_t *sii, volatile uint32 *sbr)
{
	uint8 tmp;
	uint32 val, intr_val = 0;

	/*
	 * compact flash only has 11 bits address, while we needs 12 bits address.
	 * MEM_SEG will be OR'd with other 11 bits address in hardware,
	 * so we program MEM_SEG with 12th bit when necessary(access sb regsiters).
	 * For normal PCMCIA bus(CFTable_regwinsz > 2k), do nothing special
	 */
	if (PCMCIA(sii)) {
		INTR_OFF(sii, intr_val);	/* keep ISRs from racing the window change */
		tmp = 1;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);	/* select upper segment */
		sbr = (volatile uint32 *)((uintptr)sbr &
			~(1 << 11)); /* mask out bit 11 */
	}

	val = R_REG(sii->osh, sbr);

	if (PCMCIA(sii)) {
		tmp = 0;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);	/* restore segment */
		INTR_RESTORE(sii, intr_val);
	}

	return (val);
}
95
/*
 * Write a 32-bit Sonics backplane (sbconfig) register.
 * On PCMCIA, MEM_SEG supplies the 12th address bit (see sb_read_sbreg) and
 * the 32-bit value is written as two 16-bit halves, each preceded by a
 * dummy read.
 */
static void sb_write_sbreg(si_info_t *sii, volatile uint32 *sbr, uint32 v)
{
	uint8 tmp;
	volatile uint32 dummy;
	uint32 intr_val = 0;

	/*
	 * compact flash only has 11 bits address, while we needs 12 bits address.
	 * MEM_SEG will be OR'd with other 11 bits address in hardware,
	 * so we program MEM_SEG with 12th bit when necessary(access sb regsiters).
	 * For normal PCMCIA bus(CFTable_regwinsz > 2k), do nothing special
	 */
	if (PCMCIA(sii)) {
		INTR_OFF(sii, intr_val);
		tmp = 1;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
		sbr = (volatile uint32 *)((uintptr)sbr &
			~(1 << 11)); /* mask out bit 11 */
	}

	if (BUSTYPE(sii->pub.bustype) == PCMCIA_BUS) {
		/* split the write into low/high 16-bit halves; each half is
		 * preceded by a dummy read (reason not recorded here --
		 * presumably a bus ordering/flush requirement; verify against
		 * the PCMCIA core errata before changing)
		 */
		dummy = R_REG(sii->osh, sbr);
		BCM_REFERENCE(dummy);
		W_REG(sii->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
		dummy = R_REG(sii->osh, sbr);
		BCM_REFERENCE(dummy);
		W_REG(sii->osh, ((volatile uint16 *)sbr + 1),
			(uint16)((v >> 0x10) & 0xffff));
	} else {
		W_REG(sii->osh, sbr, v);
	}

	if (PCMCIA(sii)) {
		tmp = 0;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
		INTR_RESTORE(sii, intr_val);
	}
}
134
sb_coreid(si_t * sih)135 uint sb_coreid(si_t *sih)
136 {
137 si_info_t *sii;
138 sbconfig_t *sb;
139
140 sii = SI_INFO(sih);
141 sb = REGS2SB(sii->curmap);
142
143 return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT);
144 }
145
sb_intflag(si_t * sih)146 uint sb_intflag(si_t *sih)
147 {
148 si_info_t *sii = SI_INFO(sih);
149 volatile void *corereg;
150 sbconfig_t *sb;
151 uint origidx, intflag, intr_val = 0;
152
153 INTR_OFF(sii, intr_val);
154 origidx = si_coreidx(sih);
155 corereg = si_setcore(sih, CC_CORE_ID, 0);
156 ASSERT(corereg != NULL);
157 sb = REGS2SB(corereg);
158 intflag = R_SBREG(sii, &sb->sbflagst);
159 sb_setcoreidx(sih, origidx);
160 INTR_RESTORE(sii, intr_val);
161
162 return intflag;
163 }
164
sb_flag(si_t * sih)165 uint sb_flag(si_t *sih)
166 {
167 si_info_t *sii;
168 sbconfig_t *sb;
169
170 sii = SI_INFO(sih);
171 sb = REGS2SB(sii->curmap);
172
173 return R_SBREG(sii, &sb->sbtpsflag) & SBTPS_NUM0_MASK;
174 }
175
/* Program the interrupt vector register of the current core.
 * siflag == -1 clears the vector; otherwise bit 'siflag' is set.
 */
void sb_setint(si_t *sih, int siflag)
{
	si_info_t *sii = SI_INFO(sih);
	sbconfig_t *sb = REGS2SB(sii->curmap);
	uint32 vec = (siflag == -1) ? 0 : (1 << siflag);

	W_SBREG(sii, &sb->sbintvec, vec);
}
192
193 /* return core index of the core with address 'sba' */
_sb_coreidx(si_info_t * sii,uint32 sba)194 static uint _sb_coreidx(si_info_t *sii, uint32 sba)
195 {
196 uint i;
197 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
198
199 for (i = 0; i < sii->numcores; i++) {
200 if (sba == cores_info->coresba[i]) {
201 return i;
202 }
203 }
204 return BADIDX;
205 }
206
/* return core address of the current core */
static uint32 _sb_coresba(si_info_t *sii)
{
	uint32 sbaddr;

	switch (BUSTYPE(sii->pub.bustype)) {
	case SI_BUS: {
		/* directly mapped: decode the base from the core's admatch0 */
		sbconfig_t *sb = REGS2SB(sii->curmap);
		sbaddr = sb_base(R_SBREG(sii, &sb->sbadmatch0));
		break;
	}

	case PCI_BUS:
		/* the BAR0 window register holds the current core's address */
		sbaddr =
			OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
		break;

	case PCMCIA_BUS: {
		/* reassemble the address from the three attribute-space
		 * registers: ADDR0 << 12, ADDR1 << 16, ADDR2 << 24
		 */
		uint8 tmp = 0;
		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
		sbaddr = (uint32)tmp << 0xC;
		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
		sbaddr |= (uint32)tmp << 0x10;
		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
		sbaddr |= (uint32)tmp << 0x18;
		break;
	}

#ifdef BCMSDIO
	case SPI_BUS:
	case SDIO_BUS:
		/* the mapped virtual address doubles as the core address here */
		sbaddr = (uint32)(uintptr)sii->curmap;
		break;
#endif // endif

	default:
		sbaddr = BADCOREADDR;
		break;
	}

	return sbaddr;
}
249
sb_corevendor(si_t * sih)250 uint sb_corevendor(si_t *sih)
251 {
252 si_info_t *sii;
253 sbconfig_t *sb;
254
255 sii = SI_INFO(sih);
256 sb = REGS2SB(sii->curmap);
257
258 return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT);
259 }
260
sb_corerev(si_t * sih)261 uint sb_corerev(si_t *sih)
262 {
263 si_info_t *sii;
264 sbconfig_t *sb;
265 uint sbidh;
266
267 sii = SI_INFO(sih);
268 sb = REGS2SB(sii->curmap);
269 sbidh = R_SBREG(sii, &sb->sbidhigh);
270
271 return (SBCOREREV(sbidh));
272 }
273
274 /* set core-specific control flags */
sb_core_cflags_wo(si_t * sih,uint32 mask,uint32 val)275 void sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
276 {
277 si_info_t *sii;
278 sbconfig_t *sb;
279 uint32 w;
280
281 sii = SI_INFO(sih);
282 sb = REGS2SB(sii->curmap);
283
284 ASSERT((val & ~mask) == 0);
285
286 /* mask and set */
287 w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
288 (val << SBTML_SICF_SHIFT);
289 W_SBREG(sii, &sb->sbtmstatelow, w);
290 }
291
292 /* set/clear core-specific control flags */
sb_core_cflags(si_t * sih,uint32 mask,uint32 val)293 uint32 sb_core_cflags(si_t *sih, uint32 mask, uint32 val)
294 {
295 si_info_t *sii;
296 sbconfig_t *sb;
297 uint32 w;
298
299 sii = SI_INFO(sih);
300 sb = REGS2SB(sii->curmap);
301
302 ASSERT((val & ~mask) == 0);
303
304 /* mask and set */
305 if (mask || val) {
306 w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
307 (val << SBTML_SICF_SHIFT);
308 W_SBREG(sii, &sb->sbtmstatelow, w);
309 }
310
311 /* return the new value
312 * for write operation, the following readback ensures the completion of
313 * write opration.
314 */
315 return (R_SBREG(sii, &sb->sbtmstatelow) >> SBTML_SICF_SHIFT);
316 }
317
318 /* set/clear core-specific status flags */
sb_core_sflags(si_t * sih,uint32 mask,uint32 val)319 uint32 sb_core_sflags(si_t *sih, uint32 mask, uint32 val)
320 {
321 si_info_t *sii;
322 sbconfig_t *sb;
323 uint32 w;
324
325 sii = SI_INFO(sih);
326 sb = REGS2SB(sii->curmap);
327
328 ASSERT((val & ~mask) == 0);
329 ASSERT((mask & ~SISF_CORE_BITS) == 0);
330
331 /* mask and set */
332 if (mask || val) {
333 w = (R_SBREG(sii, &sb->sbtmstatehigh) & ~(mask << SBTMH_SISF_SHIFT)) |
334 (val << SBTMH_SISF_SHIFT);
335 W_SBREG(sii, &sb->sbtmstatehigh, w);
336 }
337
338 /* return the new value */
339 return (R_SBREG(sii, &sb->sbtmstatehigh) >> SBTMH_SISF_SHIFT);
340 }
341
sb_iscoreup(si_t * sih)342 bool sb_iscoreup(si_t *sih)
343 {
344 si_info_t *sii;
345 sbconfig_t *sb;
346
347 sii = SI_INFO(sih);
348 sb = REGS2SB(sii->curmap);
349
350 return ((R_SBREG(sii, &sb->sbtmstatelow) &
351 (SBTML_RESET | SBTML_REJ_MASK |
352 (SICF_CLOCK_EN << SBTML_SICF_SHIFT))) ==
353 (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
354 }
355
356 /*
357 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set
358 * operation, switch back to the original core, and return the new value.
359 *
360 * When using the silicon backplane, no fidleing with interrupts or core
361 * switches are needed.
362 *
363 * Also, when using pci/pcie, we can optimize away the core switching for pci
364 * registers and (on newer pci cores) chipcommon registers.
365 */
uint sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
{
	uint origidx = 0;
	volatile uint32 *r = NULL;
	uint w;
	uint intr_val = 0;
	bool fast = FALSE;	/* TRUE when no core switch (and no INTR_OFF) is needed */
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

	ASSERT(GOODIDX(coreidx));
	ASSERT(regoff < SI_CORE_SIZE);
	ASSERT((val & ~mask) == 0);

	if (coreidx >= SI_MAXCORES) {
		return 0;
	}

	if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
		/* If internal bus, we can always get at everything */
		fast = TRUE;
		/* map if does not exist */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] =
				REG_MAP(cores_info->coresba[coreidx], SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] +
			regoff);
	} else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc
		 */

		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
			/* Chipc registers are mapped at 12KB */

			fast = TRUE;
			r = (volatile uint32 *)((volatile char *)sii->curmap +
				PCI_16KB0_CCREGS_OFFSET + regoff);
		} else if (sii->pub.buscoreidx == coreidx) {
			/* pci registers are at either in the last 2KB of an 8KB window
			 * or, in pcie and pci rev 13 at 8KB
			 */
			fast = TRUE;
			if (SI_FAST(sii)) {
				r = (volatile uint32 *)((volatile char *)sii->curmap +
					PCI_16KB0_PCIREGS_OFFSET + regoff);
			} else {
				r = (volatile uint32 *)((volatile char *)sii->curmap +
					((regoff >= SBCONFIGOFF)
					 ? PCI_BAR0_PCISBR_OFFSET
					 : PCI_BAR0_PCIREGS_OFFSET) +
					regoff);
			}
		}
	}

	if (!fast) {
		/* slow path: block interrupts and temporarily switch focus */
		INTR_OFF(sii, intr_val);

		/* save current core index */
		origidx = si_coreidx(&sii->pub);

		/* switch core */
		r = (volatile uint32 *)((volatile uchar *)sb_setcoreidx(&sii->pub,
			coreidx) +
			regoff);
	}
	ASSERT(r != NULL);

	/* mask and set */
	if (mask || val) {
		if (regoff >= SBCONFIGOFF) {
			/* sbconfig registers must go through the sbreg accessors */
			w = (R_SBREG(sii, r) & ~mask) | val;
			W_SBREG(sii, r, w);
		} else {
			w = (R_REG(sii->osh, r) & ~mask) | val;
			W_REG(sii->osh, r, w);
		}
	}

	/* readback */
	if (regoff >= SBCONFIGOFF) {
		w = R_SBREG(sii, r);
	} else {
		w = R_REG(sii->osh, r);
	}

	if (!fast) {
		/* restore core index */
		if (origidx != coreidx) {
			sb_setcoreidx(&sii->pub, origidx);
		}

		INTR_RESTORE(sii, intr_val);
	}

	return (w);
}
465
466 /*
467 * If there is no need for fiddling with interrupts or core switches (typically
468 * silicon back plane registers, pci registers and chipcommon registers), this
469 * function returns the register offset on this core to a mapped address. This
470 * address can be used for W_REG/R_REG directly.
471 *
472 * For accessing registers that would need a core switch, this function will
473 * return NULL.
474 */
sb_corereg_addr(si_t * sih,uint coreidx,uint regoff)475 volatile uint32 *sb_corereg_addr(si_t *sih, uint coreidx, uint regoff)
476 {
477 volatile uint32 *r = NULL;
478 bool fast = FALSE;
479 si_info_t *sii = SI_INFO(sih);
480 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
481
482 ASSERT(GOODIDX(coreidx));
483 ASSERT(regoff < SI_CORE_SIZE);
484
485 if (coreidx >= SI_MAXCORES) {
486 return 0;
487 }
488
489 if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
490 /* If internal bus, we can always get at everything */
491 fast = TRUE;
492 /* map if does not exist */
493 if (!cores_info->regs[coreidx]) {
494 cores_info->regs[coreidx] =
495 REG_MAP(cores_info->coresba[coreidx], SI_CORE_SIZE);
496 ASSERT(GOODREGS(cores_info->regs[coreidx]));
497 }
498 r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] +
499 regoff);
500 } else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
501 /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc
502 */
503
504 if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
505 /* Chipc registers are mapped at 12KB */
506
507 fast = TRUE;
508 r = (volatile uint32 *)((volatile char *)sii->curmap +
509 PCI_16KB0_CCREGS_OFFSET + regoff);
510 } else if (sii->pub.buscoreidx == coreidx) {
511 /* pci registers are at either in the last 2KB of an 8KB window
512 * or, in pcie and pci rev 13 at 8KB
513 */
514 fast = TRUE;
515 if (SI_FAST(sii)) {
516 r = (volatile uint32 *)((volatile char *)sii->curmap +
517 PCI_16KB0_PCIREGS_OFFSET + regoff);
518 } else {
519 r = (volatile uint32 *)((volatile char *)sii->curmap +
520 ((regoff >= SBCONFIGOFF)
521 ? PCI_BAR0_PCISBR_OFFSET
522 : PCI_BAR0_PCIREGS_OFFSET) +
523 regoff);
524 }
525 }
526 }
527
528 if (!fast) {
529 return 0;
530 }
531
532 return (r);
533 }
534
535 /* Scan the enumeration space to find all cores starting from the given
536 * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba'
537 * is the default core address at chip POR time and 'regs' is the virtual
538 * address that the default core is mapped at. 'ncores' is the number of
539 * cores expected on bus 'sbba'. It returns the total number of cores
540 * starting from bus 'sbba', inclusive.
541 */
/* maximum bridged-bus depth to descend into (root bus + one bridge level) */
#define SB_MAXBUSES 2
static uint _sb_scan(si_info_t *sii, uint32 sba, volatile void *regs, uint bus,
	uint32 sbba, uint numcores, uint devid)
{
	uint next;
	uint ncc = 0;	/* cores found on bridged (child) buses */
	uint i;
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

	if (bus >= SB_MAXBUSES) {
		SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n",
			sbba, bus));
		return 0;
	}
	SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba, numcores));

	/* Scan all cores on the bus starting from core 0.
	 * Core addresses must be contiguous on each bus.
	 */
	for (i = 0, next = sii->numcores; i < numcores && next < SB_BUS_MAXCORES;
	     i++, next++) {
		cores_info->coresba[next] = sbba + (i * SI_CORE_SIZE);

		/* keep and reuse the initial register mapping */
		if ((BUSTYPE(sii->pub.bustype) == SI_BUS) &&
		    (cores_info->coresba[next] == sba)) {
			SI_VMSG(
				("_sb_scan: reuse mapped regs %p for core %u\n", regs, next));
			cores_info->regs[next] = regs;
		}

		/* change core to 'next' and read its coreid */
		sii->curmap = _sb_setcoreidx(sii, next);
		sii->curidx = next;

		cores_info->coreid[next] = sb_coreid(&sii->pub);

		/* core specific processing... */
		/* chipc provides # cores */
		if (cores_info->coreid[next] == CC_CORE_ID) {
			chipcregs_t *cc = (chipcregs_t *)sii->curmap;
			uint32 ccrev = sb_corerev(&sii->pub);
			/* determine numcores - this is the total # cores in the chip */
			if (((ccrev == 0x4) || (ccrev >= 0x6))) {
				/* chipc rev 4 and >= 6 report the core count in chipid */
				ASSERT(cc);
				numcores = (R_REG(sii->osh, &cc->chipid) & CID_CC_MASK) >>
					CID_CC_SHIFT;
			} else {
				/* Older chips: core count is hard-coded per chip id */
				uint chip = CHIPID(sii->pub.chip);
				if (chip == BCM4704_CHIP_ID) {
					numcores = 0x9;
				} else if (chip == BCM5365_CHIP_ID) {
					numcores = 0x7;
				} else {
					SI_ERROR(
						("sb_chip2numcores: unsupported chip 0x%x\n", chip));
					ASSERT(0);
					numcores = 1;
				}
			}
			SI_VMSG(("_sb_scan: there are %u cores in the chip %s\n", numcores,
				sii->pub.issim ? "QT" : ""));
		} else if (cores_info->coreid[next] == OCP_CORE_ID) {
			/* scan bridged SB(s) and add results to the end of the list */
			sbconfig_t *sb = REGS2SB(sii->curmap);
			uint32 nsbba = R_SBREG(sii, &sb->sbadmatch1);
			uint nsbcc;

			/* record what we have so far so the recursion appends after it */
			sii->numcores = next + 1;

			/* only descend into buses inside the chip's enumeration space */
			if ((nsbba & 0xfff00000) != si_enum_base(devid)) {
				continue;
			}
			nsbba &= 0xfffff000;
			/* skip buses we have already discovered */
			if (_sb_coreidx(sii, nsbba) != BADIDX) {
				continue;
			}

			/* child bus core count comes from sbtmstatehigh bits 16..19 */
			nsbcc = (R_SBREG(sii, &sb->sbtmstatehigh) & 0x000f0000) >> 0x10;
			nsbcc = _sb_scan(sii, sba, regs, bus + 1, nsbba, nsbcc, devid);
			if (sbba == si_enum_base(devid)) {
				/* bridged cores were counted in the chip total */
				numcores -= nsbcc;
			}
			ncc += nsbcc;
		}
	}

	SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i, sbba));

	sii->numcores = i + ncc;
	return sii->numcores;
}
635
636 /* scan the sb enumerated space to identify all cores */
sb_scan(si_t * sih,volatile void * regs,uint devid)637 void sb_scan(si_t *sih, volatile void *regs, uint devid)
638 {
639 uint32 origsba;
640 sbconfig_t *sb;
641 si_info_t *sii = SI_INFO(sih);
642 BCM_REFERENCE(devid);
643
644 sb = REGS2SB(sii->curmap);
645
646 sii->pub.socirev =
647 (R_SBREG(sii, &sb->sbidlow) & SBIDL_RV_MASK) >> SBIDL_RV_SHIFT;
648
649 /* Save the current core info and validate it later till we know
650 * for sure what is good and what is bad.
651 */
652 origsba = _sb_coresba(sii);
653
654 /* scan all SB(s) starting from SI_ENUM_BASE_DEFAULT */
655 sii->numcores =
656 _sb_scan(sii, origsba, regs, 0, si_enum_base(devid), 1, devid);
657 }
658
659 /*
660 * This function changes logical "focus" to the indicated core;
661 * must be called with interrupts off.
662 * Moreover, callers should keep interrupts off during switching out of and back
663 * to d11 core
664 */
/* Change the logical "focus" to core 'coreidx'; interrupts must be off.
 * Returns the new core's mapped registers, or NULL for a bad index.
 */
volatile void *sb_setcoreidx(si_t *sih, uint coreidx)
{
	si_info_t *sii = SI_INFO(sih);

	if (coreidx >= sii->numcores) {
		return NULL;
	}

	/*
	 * If the user has provided an interrupt mask enabled function,
	 * then assert interrupts are disabled before switching the core.
	 */
	ASSERT((sii->intrsenabled_fn == NULL) ||
	       !(*(sii)->intrsenabled_fn)((sii)->intr_arg));

	sii->curidx = coreidx;
	sii->curmap = _sb_setcoreidx(sii, coreidx);

	return sii->curmap;
}
685
/* This function changes the logical "focus" to the indicated core.
 * Return the current core's virtual address.
 */
static volatile void *_sb_setcoreidx(si_info_t *sii, uint coreidx)
{
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
	uint32 sbaddr = cores_info->coresba[coreidx];
	volatile void *regs;

	switch (BUSTYPE(sii->pub.bustype)) {
	case SI_BUS:
		/* map new one */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = REG_MAP(sbaddr, SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		regs = cores_info->regs[coreidx];
		break;

	case PCI_BUS:
		/* point bar0 window */
		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 0x4, sbaddr);
		regs = sii->curmap;
		break;

	case PCMCIA_BUS: {
		/* program the core address into the three PCMCIA_ADDR
		 * attribute registers (>>12 & 0x0f, >>16 & 0xff, >>24 & 0xff)
		 */
		uint8 tmp = (sbaddr >> 12) & 0x0f;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
		tmp = (sbaddr >> 0x10) & 0xff;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
		tmp = (sbaddr >> 0x18) & 0xff;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
		regs = sii->curmap;
		break;
	}
#ifdef BCMSDIO
	case SPI_BUS:
	case SDIO_BUS:
		/* map new one */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = (void *)(uintptr)sbaddr;
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		regs = cores_info->regs[coreidx];
		break;
#endif /* BCMSDIO */

	default:
		/* unsupported bus type */
		ASSERT(0);
		regs = NULL;
		break;
	}

	return regs;
}
741
742 /* Return the address of sbadmatch0/1/2/3 register */
sb_admatch(si_info_t * sii,uint asidx)743 static volatile uint32 *sb_admatch(si_info_t *sii, uint asidx)
744 {
745 sbconfig_t *sb;
746 volatile uint32 *addrm;
747
748 sb = REGS2SB(sii->curmap);
749
750 switch (asidx) {
751 case 0:
752 addrm = &sb->sbadmatch0;
753 break;
754
755 case 1:
756 addrm = &sb->sbadmatch1;
757 break;
758
759 case 0x2:
760 addrm = &sb->sbadmatch2;
761 break;
762
763 case 0x3:
764 addrm = &sb->sbadmatch3;
765 break;
766
767 default:
768 SI_ERROR(("%s: Address space index (%d) out of range\n",
769 __FUNCTION__, asidx));
770 return 0;
771 }
772
773 return (addrm);
774 }
775
776 /* Return the number of address spaces in current core */
sb_numaddrspaces(si_t * sih)777 int sb_numaddrspaces(si_t *sih)
778 {
779 si_info_t *sii;
780 sbconfig_t *sb;
781
782 sii = SI_INFO(sih);
783 sb = REGS2SB(sii->curmap);
784
785 /* + 1 because of enumeration space */
786 return ((R_SBREG(sii, &sb->sbidlow) & SBIDL_AR_MASK) >> SBIDL_AR_SHIFT) + 1;
787 }
788
789 /* Return the address of the nth address space in the current core */
sb_addrspace(si_t * sih,uint asidx)790 uint32 sb_addrspace(si_t *sih, uint asidx)
791 {
792 si_info_t *sii;
793
794 sii = SI_INFO(sih);
795
796 return (sb_base(R_SBREG(sii, sb_admatch(sii, asidx))));
797 }
798
799 /* Return the size of the nth address space in the current core */
sb_addrspacesize(si_t * sih,uint asidx)800 uint32 sb_addrspacesize(si_t *sih, uint asidx)
801 {
802 si_info_t *sii;
803
804 sii = SI_INFO(sih);
805
806 return (sb_size(R_SBREG(sii, sb_admatch(sii, asidx))));
807 }
808
/* do buffered registers update */
void sb_commit(si_t *sih)
{
	si_info_t *sii = SI_INFO(sih);
	uint origidx;
	uint intr_val = 0;

	/* remember the caller's core focus so it can be restored */
	origidx = sii->curidx;
	ASSERT(GOODIDX(origidx));

	INTR_OFF(sii, intr_val);

	/* switch over to chipcommon core if there is one, else use pci */
	if (sii->pub.ccrev != NOREV) {
		chipcregs_t *ccregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
		ASSERT(ccregs != NULL);

		/* do the buffer registers update */
		W_REG(sii->osh, &ccregs->broadcastaddress, SB_COMMIT);
		W_REG(sii->osh, &ccregs->broadcastdata, 0x0);
	} else {
		/* no chipcommon core: nothing to commit through */
		ASSERT(0);
	}

	/* restore core index */
	sb_setcoreidx(sih, origidx);
	INTR_RESTORE(sii, intr_val);
}
837
/*
 * Put the current core into reset.
 * 'bits' are core-specific control flags left asserted while the core is
 * held in reset. Sequence: assert target reject, wait for busy to clear,
 * optionally assert/clear initiator reject, then leave reset and reject
 * asserted.
 */
void sb_core_disable(si_t *sih, uint32 bits)
{
	si_info_t *sii;
	volatile uint32 dummy;
	sbconfig_t *sb;

	sii = SI_INFO(sih);

	ASSERT(GOODREGS(sii->curmap));
	sb = REGS2SB(sii->curmap);

	/* if core is already in reset, just return */
	if (R_SBREG(sii, &sb->sbtmstatelow) & SBTML_RESET) {
		return;
	}

	/* if clocks are not enabled, put into reset and return */
	if ((R_SBREG(sii, &sb->sbtmstatelow) &
	     (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) == 0) {
		goto disable;
	}

	/* set target reject and spin until busy is clear (preserve core-specific
	 * bits) */
	OR_SBREG(sii, &sb->sbtmstatelow, SBTML_REJ);
	dummy = R_SBREG(sii, &sb->sbtmstatelow);	/* readback */
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);
	SPINWAIT((R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY), 0x186A0);	/* 100000 */
	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY) {
		SI_ERROR(("%s: target state still busy\n", __FUNCTION__));
	}

	/* if the core is an initiator, also assert initiator reject and
	 * wait for it to go idle
	 */
	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) {
		OR_SBREG(sii, &sb->sbimstate, SBIM_RJ);
		dummy = R_SBREG(sii, &sb->sbimstate);
		BCM_REFERENCE(dummy);
		OSL_DELAY(1);
		SPINWAIT((R_SBREG(sii, &sb->sbimstate) & SBIM_BY), 0x186A0);	/* 100000 */
	}

	/* set reset and reject while enabling the clocks */
	W_SBREG(sii, &sb->sbtmstatelow,
		(((bits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
		 SBTML_REJ | SBTML_RESET));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(0xA);

	/* don't forget to clear the initiator reject bit */
	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) {
		AND_SBREG(sii, &sb->sbimstate, ~SBIM_RJ);
	}

disable:
	/* leave reset and reject asserted */
	W_SBREG(sii, &sb->sbtmstatelow,
		((bits << SBTML_SICF_SHIFT) | SBTML_REJ | SBTML_RESET));
	OSL_DELAY(1);
}
898
899 /* reset and re-enable a core
900 * inputs:
901 * bits - core specific bits that are set during and after reset sequence
902 * resetbits - core specific bits that are set only during reset sequence
903 */
void sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
{
	si_info_t *sii;
	sbconfig_t *sb;
	volatile uint32 dummy;

	sii = SI_INFO(sih);
	ASSERT(GOODREGS(sii->curmap));
	sb = REGS2SB(sii->curmap);

	/*
	 * Must do the disable sequence first to work for arbitrary current core
	 * state.
	 */
	sb_core_disable(sih, (bits | resetbits));

	/*
	 * Now do the initialization sequence.
	 */

	/* set reset while enabling the clock and forcing them on throughout the
	 * core */
	W_SBREG(
		sii, &sb->sbtmstatelow,
		(((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
		 SBTML_RESET));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);	/* readback before delaying */
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);

	/* clear any latched serror / inband-error / timeout state */
	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_SERR) {
		W_SBREG(sii, &sb->sbtmstatehigh, 0);
	}
	if ((dummy = R_SBREG(sii, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) {
		AND_SBREG(sii, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
	}

	/* clear reset and allow it to propagate throughout the core */
	W_SBREG(
		sii, &sb->sbtmstatelow,
		((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);

	/* leave clock enabled */
	W_SBREG(sii, &sb->sbtmstatelow,
		((bits | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);
}
956
957 /*
958 * Set the initiator timeout for the "master core".
959 * The master core is defined to be the core in control
960 * of the chip and so it issues accesses to non-memory
961 * locations (Because of dma *any* core can access memeory).
962 *
963 * The routine uses the bus to decide who is the master:
964 * SI_BUS => mips
965 * JTAG_BUS => chipc
966 * PCI_BUS => pci or pcie
967 * PCMCIA_BUS => pcmcia
968 * SDIO_BUS => pcmcia
969 *
970 * This routine exists so callers can disable initiator
971 * timeouts so accesses to very slow devices like otp
972 * won't cause an abort. The routine allows arbitrary
973 * settings of the service and request timeouts, though.
974 *
975 * Returns the timeout state before changing it or -1
976 * on error.
977 */
978
#define TO_MASK (SBIMCL_RTO_MASK | SBIMCL_STO_MASK)	/* request + service timeout fields */

uint32 sb_set_initiator_to(si_t *sih, uint32 to, uint idx)
{
	si_info_t *sii = SI_INFO(sih);
	uint origidx;
	uint intr_val = 0;
	uint32 tmp, ret = 0xffffffff;	/* -1: error / unchanged */
	sbconfig_t *sb;

	/* reject a value with bits outside the timeout fields */
	if ((to & ~TO_MASK) != 0) {
		return ret;
	}

	/* Figure out the master core */
	if (idx == BADIDX) {
		switch (BUSTYPE(sii->pub.bustype)) {
		case PCI_BUS:
			idx = sii->pub.buscoreidx;
			break;
		case JTAG_BUS:
			idx = SI_CC_IDX;
			break;
		case PCMCIA_BUS:
#ifdef BCMSDIO
		case SDIO_BUS:
#endif // endif
			idx = si_findcoreidx(sih, PCMCIA_CORE_ID, 0);
			break;
		case SI_BUS:
			idx = si_findcoreidx(sih, MIPS33_CORE_ID, 0);
			break;
		default:
			ASSERT(0);
		}
		if (idx == BADIDX) {
			return ret;
		}
	}

	INTR_OFF(sii, intr_val);
	origidx = si_coreidx(sih);

	sb = REGS2SB(sb_setcoreidx(sih, idx));

	/* read-modify-write the timeout fields; return the previous setting */
	tmp = R_SBREG(sii, &sb->sbimconfiglow);
	ret = tmp & TO_MASK;
	W_SBREG(sii, &sb->sbimconfiglow, (tmp & ~TO_MASK) | to);

	sb_commit(sih);
	sb_setcoreidx(sih, origidx);
	INTR_RESTORE(sii, intr_val);
	return ret;
}
1033
/* Decode the base address from an address-match register value. */
uint32 sb_base(uint32 admatch)
{
	uint type = admatch & SBAM_TYPE_MASK;
	uint32 base = 0;

	ASSERT(type < 0x3);

	switch (type) {
	case 0:
		base = admatch & SBAM_BASE0_MASK;
		break;
	case 1:
		ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
		base = admatch & SBAM_BASE1_MASK;
		break;
	case 0x2:
		ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
		base = admatch & SBAM_BASE2_MASK;
		break;
	default:
		break;
	}

	return base;
}
1056
/* Decode the region size from an address-match register value. */
uint32 sb_size(uint32 admatch)
{
	uint type = admatch & SBAM_TYPE_MASK;
	uint32 size = 0;

	ASSERT(type < 0x3);

	switch (type) {
	case 0:
		size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1);
		break;
	case 1:
		ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
		size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1);
		break;
	case 0x2:
		ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
		size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1);
		break;
	default:
		break;
	}

	return size;
}
1079
1080 #if defined(BCMDBG_PHYDUMP)
1081 /* print interesting sbconfig registers */
void sb_dumpregs(si_t *sih, struct bcmstrbuf *b)
{
	sbconfig_t *sb;
	uint origidx, i, intr_val = 0;
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

	/* remember the caller's core focus */
	origidx = sii->curidx;

	INTR_OFF(sii, intr_val);

	/* walk every discovered core and dump its sbconfig state */
	for (i = 0; i < sii->numcores; i++) {
		sb = REGS2SB(sb_setcoreidx(sih, i));

		bcm_bprintf(b, "core 0x%x: \n", cores_info->coreid[i]);

		/* imerrlog registers are only read for Sonics revs > 2.2 */
		if (sii->pub.socirev > SONICS_2_2) {
			bcm_bprintf(
				b, "sbimerrlog 0x%x sbimerrloga 0x%x\n",
				sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOG, 0, 0),
				sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOGA, 0, 0));
		}

		bcm_bprintf(b,
			"sbtmstatelow 0x%x sbtmstatehigh 0x%x sbidhigh 0x%x "
			"sbimstate 0x%x\n sbimconfiglow 0x%x sbimconfighigh 0x%x\n",
			R_SBREG(sii, &sb->sbtmstatelow),
			R_SBREG(sii, &sb->sbtmstatehigh),
			R_SBREG(sii, &sb->sbidhigh), R_SBREG(sii, &sb->sbimstate),
			R_SBREG(sii, &sb->sbimconfiglow),
			R_SBREG(sii, &sb->sbimconfighigh));
	}

	/* restore the caller's core focus */
	sb_setcoreidx(sih, origidx);
	INTR_RESTORE(sii, intr_val);
}
1118 #endif // endif
1119