1 /*
2 * Misc utility routines for accessing chip-specific features
3 * of the SiliconBackplane-based Broadcom chips.
4 *
5 * Copyright (C) 1999-2017, Broadcom Corporation
6 *
7 * Unless you and Broadcom execute a separate written software license
8 * agreement governing use of this software, this software is licensed to you
9 * under the terms of the GNU General Public License version 2 (the "GPL"),
10 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11 * following added to such license:
12 *
13 * As a special exception, the copyright holders of this software give you
14 * permission to link this software with independent modules, and to copy and
15 * distribute the resulting executable under terms of your choice, provided that
16 * you also meet, for each linked independent module, the terms and conditions of
17 * the license of that module. An independent module is a module which is not
18 * derived from this software. The special exception does not apply to any
19 * modifications of the software.
20 *
21 * Notwithstanding the above, under no circumstances may you combine this
22 * software in any way with any other Broadcom software provided under a license
23 * other than the GPL, without Broadcom's express prior written consent.
24 *
25 *
26 * <<Broadcom-WL-IPTag/Open:>>
27 *
28 * $Id: sbutils.c 599296 2015-11-13 06:36:13Z $
29 */
30
31 #include <bcm_cfg.h>
32 #include <typedefs.h>
33 #include <bcmdefs.h>
34 #include <osl.h>
35 #include <bcmutils.h>
36 #include <siutils.h>
37 #include <bcmdevs.h>
38 #include <hndsoc.h>
39 #include <sbchipc.h>
40 #include <pcicfg.h>
41 #include <sbpcmcia.h>
42
43 #include "siutils_priv.h"
44
45
/* local prototypes */
static uint _sb_coreidx(si_info_t *sii, uint32 sba);
static uint _sb_scan(si_info_t *sii, uint32 sba, volatile void *regs, uint bus, uint32 sbba,
	uint ncores);
static uint32 _sb_coresba(si_info_t *sii);
static volatile void *_sb_setcoreidx(si_info_t *sii, uint coreidx);

/* Read-modify-write an sbconfig register: clear 'mask' bits, then OR in 'val' */
#define	SET_SBREG(sii, r, mask, val)	\
		W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val)))

/* Map a core's register base virtual address to its sbconfig area at SBCONFIGOFF */
#define	REGS2SB(va)	(sbconfig_t*) ((volatile int8*)(va) + SBCONFIGOFF)

/* sonicsrev */
#define	SONICS_2_2	(SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
#define	SONICS_2_3	(SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)

/* sbconfig register accessors; indirect through the PCMCIA-aware helpers below */
#define	R_SBREG(sii, sbr)	sb_read_sbreg((sii), (sbr))
#define	W_SBREG(sii, sbr, v)	sb_write_sbreg((sii), (sbr), (v))
#define	AND_SBREG(sii, sbr, v)	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v)))
#define	OR_SBREG(sii, sbr, v)	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v)))
64
/*
 * Read a 32-bit sbconfig register.
 * On PCMCIA the access is bracketed by setting/clearing the MEM_SEG attribute
 * bit (the 12th address bit) with interrupts off; on other buses it is a
 * plain register read.
 */
static uint32
sb_read_sbreg(si_info_t *sii, volatile uint32 *sbr)
{
	uint8 tmp;
	uint32 val, intr_val = 0;

	/*
	 * Compact flash only has an 11-bit address, while we need a 12-bit address.
	 * MEM_SEG will be OR'd with the other 11 address bits in hardware,
	 * so we program MEM_SEG with the 12th bit when necessary (access to sb registers).
	 * For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special.
	 */
	if (PCMCIA(sii)) {
		INTR_OFF(sii, intr_val);
		tmp = 1;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
		sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
	}

	val = R_REG(sii->osh, sbr);

	if (PCMCIA(sii)) {
		/* restore MEM_SEG and re-enable interrupts */
		tmp = 0;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
		INTR_RESTORE(sii, intr_val);
	}

	return (val);
}
95
/*
 * Write a 32-bit sbconfig register.
 * On PCMCIA the MEM_SEG attribute bit is toggled around the access (see
 * sb_read_sbreg) and the 32-bit value is written as two 16-bit halves,
 * each preceded by a dummy read.
 */
static void
sb_write_sbreg(si_info_t *sii, volatile uint32 *sbr, uint32 v)
{
	uint8 tmp;
	volatile uint32 dummy;
	uint32 intr_val = 0;

	/*
	 * Compact flash only has an 11-bit address, while we need a 12-bit address.
	 * MEM_SEG will be OR'd with the other 11 address bits in hardware,
	 * so we program MEM_SEG with the 12th bit when necessary (access to sb registers).
	 * For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special.
	 */
	if (PCMCIA(sii)) {
		INTR_OFF(sii, intr_val);
		tmp = 1;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
		sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
	}

	if (BUSTYPE(sii->pub.bustype) == PCMCIA_BUS) {
		/* 16-bit bus: write low half then high half, flushing with dummy reads */
		dummy = R_REG(sii->osh, sbr);
		BCM_REFERENCE(dummy);
		W_REG(sii->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
		dummy = R_REG(sii->osh, sbr);
		BCM_REFERENCE(dummy);
		W_REG(sii->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
	} else
		W_REG(sii->osh, sbr, v);

	if (PCMCIA(sii)) {
		/* restore MEM_SEG and re-enable interrupts */
		tmp = 0;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
		INTR_RESTORE(sii, intr_val);
	}
}
133
134 uint
sb_coreid(si_t * sih)135 sb_coreid(si_t *sih)
136 {
137 si_info_t *sii;
138 sbconfig_t *sb;
139
140 sii = SI_INFO(sih);
141 sb = REGS2SB(sii->curmap);
142
143 return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT);
144 }
145
146 uint
sb_intflag(si_t * sih)147 sb_intflag(si_t *sih)
148 {
149 si_info_t *sii = SI_INFO(sih);
150 volatile void *corereg;
151 sbconfig_t *sb;
152 uint origidx, intflag, intr_val = 0;
153
154 INTR_OFF(sii, intr_val);
155 origidx = si_coreidx(sih);
156 corereg = si_setcore(sih, CC_CORE_ID, 0);
157 ASSERT(corereg != NULL);
158 sb = REGS2SB(corereg);
159 intflag = R_SBREG(sii, &sb->sbflagst);
160 sb_setcoreidx(sih, origidx);
161 INTR_RESTORE(sii, intr_val);
162
163 return intflag;
164 }
165
166 uint
sb_flag(si_t * sih)167 sb_flag(si_t *sih)
168 {
169 si_info_t *sii;
170 sbconfig_t *sb;
171
172 sii = SI_INFO(sih);
173 sb = REGS2SB(sii->curmap);
174
175 return R_SBREG(sii, &sb->sbtpsflag) & SBTPS_NUM0_MASK;
176 }
177
178 void
sb_setint(si_t * sih,int siflag)179 sb_setint(si_t *sih, int siflag)
180 {
181 si_info_t *sii;
182 sbconfig_t *sb;
183 uint32 vec;
184
185 sii = SI_INFO(sih);
186 sb = REGS2SB(sii->curmap);
187
188 if (siflag == -1)
189 vec = 0;
190 else
191 vec = 1 << siflag;
192 W_SBREG(sii, &sb->sbintvec, vec);
193 }
194
195 /* return core index of the core with address 'sba' */
196 static uint
_sb_coreidx(si_info_t * sii,uint32 sba)197 _sb_coreidx(si_info_t *sii, uint32 sba)
198 {
199 uint i;
200 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
201
202 for (i = 0; i < sii->numcores; i ++)
203 if (sba == cores_info->coresba[i])
204 return i;
205 return BADIDX;
206 }
207
/* return core address of the current core */
static uint32
_sb_coresba(si_info_t *sii)
{
	uint32 sbaddr;

	switch (BUSTYPE(sii->pub.bustype)) {
	case SI_BUS: {
		/* read the backplane address straight out of admatch0 */
		sbconfig_t *sb = REGS2SB(sii->curmap);
		sbaddr = sb_base(R_SBREG(sii, &sb->sbadmatch0));
		break;
	}

	case PCI_BUS:
		/* BAR0 window register holds the current core's backplane address */
		sbaddr = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
		break;

	case PCMCIA_BUS: {
		/* assemble address bits [12..31] from the three PCMCIA address
		 * attribute registers
		 */
		uint8 tmp = 0;
		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
		sbaddr  = (uint32)tmp << 12;
		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
		sbaddr |= (uint32)tmp << 16;
		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
		sbaddr |= (uint32)tmp << 24;
		break;
	}

#ifdef BCMSDIO
	case SPI_BUS:
	case SDIO_BUS:
		/* the mapped register address doubles as the core address here */
		sbaddr = (uint32)(uintptr)sii->curmap;
		break;
#endif

	default:
		sbaddr = BADCOREADDR;
		break;
	}

	return sbaddr;
}
252
253 uint
sb_corevendor(si_t * sih)254 sb_corevendor(si_t *sih)
255 {
256 si_info_t *sii;
257 sbconfig_t *sb;
258
259 sii = SI_INFO(sih);
260 sb = REGS2SB(sii->curmap);
261
262 return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT);
263 }
264
265 uint
sb_corerev(si_t * sih)266 sb_corerev(si_t *sih)
267 {
268 si_info_t *sii;
269 sbconfig_t *sb;
270 uint sbidh;
271
272 sii = SI_INFO(sih);
273 sb = REGS2SB(sii->curmap);
274 sbidh = R_SBREG(sii, &sb->sbidhigh);
275
276 return (SBCOREREV(sbidh));
277 }
278
279 /* set core-specific control flags */
280 void
sb_core_cflags_wo(si_t * sih,uint32 mask,uint32 val)281 sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
282 {
283 si_info_t *sii;
284 sbconfig_t *sb;
285 uint32 w;
286
287 sii = SI_INFO(sih);
288 sb = REGS2SB(sii->curmap);
289
290 ASSERT((val & ~mask) == 0);
291
292 /* mask and set */
293 w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
294 (val << SBTML_SICF_SHIFT);
295 W_SBREG(sii, &sb->sbtmstatelow, w);
296 }
297
298 /* set/clear core-specific control flags */
299 uint32
sb_core_cflags(si_t * sih,uint32 mask,uint32 val)300 sb_core_cflags(si_t *sih, uint32 mask, uint32 val)
301 {
302 si_info_t *sii;
303 sbconfig_t *sb;
304 uint32 w;
305
306 sii = SI_INFO(sih);
307 sb = REGS2SB(sii->curmap);
308
309 ASSERT((val & ~mask) == 0);
310
311 /* mask and set */
312 if (mask || val) {
313 w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
314 (val << SBTML_SICF_SHIFT);
315 W_SBREG(sii, &sb->sbtmstatelow, w);
316 }
317
318 /* return the new value
319 * for write operation, the following readback ensures the completion of write opration.
320 */
321 return (R_SBREG(sii, &sb->sbtmstatelow) >> SBTML_SICF_SHIFT);
322 }
323
324 /* set/clear core-specific status flags */
325 uint32
sb_core_sflags(si_t * sih,uint32 mask,uint32 val)326 sb_core_sflags(si_t *sih, uint32 mask, uint32 val)
327 {
328 si_info_t *sii;
329 sbconfig_t *sb;
330 uint32 w;
331
332 sii = SI_INFO(sih);
333 sb = REGS2SB(sii->curmap);
334
335 ASSERT((val & ~mask) == 0);
336 ASSERT((mask & ~SISF_CORE_BITS) == 0);
337
338 /* mask and set */
339 if (mask || val) {
340 w = (R_SBREG(sii, &sb->sbtmstatehigh) & ~(mask << SBTMH_SISF_SHIFT)) |
341 (val << SBTMH_SISF_SHIFT);
342 W_SBREG(sii, &sb->sbtmstatehigh, w);
343 }
344
345 /* return the new value */
346 return (R_SBREG(sii, &sb->sbtmstatehigh) >> SBTMH_SISF_SHIFT);
347 }
348
349 bool
sb_iscoreup(si_t * sih)350 sb_iscoreup(si_t *sih)
351 {
352 si_info_t *sii;
353 sbconfig_t *sb;
354
355 sii = SI_INFO(sih);
356 sb = REGS2SB(sii->curmap);
357
358 return ((R_SBREG(sii, &sb->sbtmstatelow) &
359 (SBTML_RESET | SBTML_REJ_MASK | (SICF_CLOCK_EN << SBTML_SICF_SHIFT))) ==
360 (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
361 }
362
/*
 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
 * switch back to the original core, and return the new value.
 *
 * When using the silicon backplane, no fiddling with interrupts or core switches are needed.
 *
 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
 * and (on newer pci cores) chipcommon registers.
 */
uint
sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
{
	uint origidx = 0;
	volatile uint32 *r = NULL;
	uint w;
	uint intr_val = 0;
	bool fast = FALSE;	/* TRUE when the register is reachable without a core switch */
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

	ASSERT(GOODIDX(coreidx));
	ASSERT(regoff < SI_CORE_SIZE);
	ASSERT((val & ~mask) == 0);

	if (coreidx >= SI_MAXCORES)
		return 0;

	if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
		/* If internal bus, we can always get at everything */
		fast = TRUE;
		/* map if does not exist */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
				SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
	} else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */

		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
			/* Chipc registers are mapped at 12KB */

			fast = TRUE;
			r = (volatile uint32 *)((volatile char *)sii->curmap +
				PCI_16KB0_CCREGS_OFFSET + regoff);
		} else if (sii->pub.buscoreidx == coreidx) {
			/* pci registers are at either in the last 2KB of an 8KB window
			 * or, in pcie and pci rev 13 at 8KB
			 */
			fast = TRUE;
			if (SI_FAST(sii))
				r = (volatile uint32 *)((volatile char *)sii->curmap +
					PCI_16KB0_PCIREGS_OFFSET + regoff);
			else
				r = (volatile uint32 *)((volatile char *)sii->curmap +
					((regoff >= SBCONFIGOFF) ?
					PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
					regoff);
		}
	}

	if (!fast) {
		/* slow path: block interrupts and temporarily switch the core focus */
		INTR_OFF(sii, intr_val);

		/* save current core index */
		origidx = si_coreidx(&sii->pub);

		/* switch core */
		r = (volatile uint32 *)((volatile uchar *)sb_setcoreidx(&sii->pub, coreidx) +
			regoff);
	}
	ASSERT(r != NULL);

	/* mask and set */
	if (mask || val) {
		/* sbconfig-space offsets must go through the PCMCIA-aware accessors */
		if (regoff >= SBCONFIGOFF) {
			w = (R_SBREG(sii, r) & ~mask) | val;
			W_SBREG(sii, r, w);
		} else {
			w = (R_REG(sii->osh, r) & ~mask) | val;
			W_REG(sii->osh, r, w);
		}
	}

	/* readback */
	if (regoff >= SBCONFIGOFF)
		w = R_SBREG(sii, r);
	else {
		w = R_REG(sii->osh, r);
	}

	if (!fast) {
		/* restore core index */
		if (origidx != coreidx)
			sb_setcoreidx(&sii->pub, origidx);

		INTR_RESTORE(sii, intr_val);
	}

	return (w);
}
465
/*
 * If there is no need for fiddling with interrupts or core switches (typically silicon
 * back plane registers, pci registers and chipcommon registers), this function
 * returns the register offset on this core to a mapped address. This address can
 * be used for W_REG/R_REG directly.
 *
 * For accessing registers that would need a core switch, this function will return
 * NULL.
 *
 * The fast-path address computation mirrors sb_corereg() above; keep the two
 * in sync.
 */
volatile uint32 *
sb_corereg_addr(si_t *sih, uint coreidx, uint regoff)
{
	volatile uint32 *r = NULL;
	bool fast = FALSE;	/* TRUE when the register is reachable without a core switch */
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

	ASSERT(GOODIDX(coreidx));
	ASSERT(regoff < SI_CORE_SIZE);

	if (coreidx >= SI_MAXCORES)
		return 0;

	if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
		/* If internal bus, we can always get at everything */
		fast = TRUE;
		/* map if does not exist */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
				SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
	} else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */

		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
			/* Chipc registers are mapped at 12KB */

			fast = TRUE;
			r = (volatile uint32 *)((volatile char *)sii->curmap +
				PCI_16KB0_CCREGS_OFFSET + regoff);
		} else if (sii->pub.buscoreidx == coreidx) {
			/* pci registers are at either in the last 2KB of an 8KB window
			 * or, in pcie and pci rev 13 at 8KB
			 */
			fast = TRUE;
			if (SI_FAST(sii))
				r = (volatile uint32 *)((volatile char *)sii->curmap +
					PCI_16KB0_PCIREGS_OFFSET + regoff);
			else
				r = (volatile uint32 *)((volatile char *)sii->curmap +
					((regoff >= SBCONFIGOFF) ?
					PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
					regoff);
		}
	}

	/* a core switch would be required -> report "not directly addressable" */
	if (!fast)
		return 0;

	return (r);
}
529
/* Scan the enumeration space to find all cores starting from the given
 * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba'
 * is the default core address at chip POR time and 'regs' is the virtual
 * address that the default core is mapped at. 'ncores' is the number of
 * cores expected on bus 'sbba'. It returns the total number of cores
 * starting from bus 'sbba', inclusive.
 */
#define SB_MAXBUSES	2
static uint
_sb_scan(si_info_t *sii, uint32 sba, volatile void *regs, uint bus,
	uint32 sbba, uint numcores)
{
	uint next;
	uint ncc = 0;	/* cores found on bridged (child) buses */
	uint i;
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

	/* recursion depth guard: only the root bus plus one bridged bus level */
	if (bus >= SB_MAXBUSES) {
		SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba, bus));
		return 0;
	}
	SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba, numcores));

	/* Scan all cores on the bus starting from core 0.
	 * Core addresses must be contiguous on each bus.
	 */
	for (i = 0, next = sii->numcores; i < numcores && next < SB_BUS_MAXCORES; i++, next++) {
		cores_info->coresba[next] = sbba + (i * SI_CORE_SIZE);

		/* keep and reuse the initial register mapping */
		if ((BUSTYPE(sii->pub.bustype) == SI_BUS) && (cores_info->coresba[next] == sba)) {
			SI_VMSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs, next));
			cores_info->regs[next] = regs;
		}

		/* change core to 'next' and read its coreid */
		sii->curmap = _sb_setcoreidx(sii, next);
		sii->curidx = next;

		cores_info->coreid[next] = sb_coreid(&sii->pub);

		/* core specific processing... */
		/* chipc provides # cores */
		if (cores_info->coreid[next] == CC_CORE_ID) {
			chipcregs_t *cc = (chipcregs_t *)sii->curmap;
			uint32 ccrev = sb_corerev(&sii->pub);

			/* determine numcores - this is the total # cores in the chip */
			if (((ccrev == 4) || (ccrev >= 6))) {
				ASSERT(cc);
				numcores = (R_REG(sii->osh, &cc->chipid) & CID_CC_MASK) >>
					CID_CC_SHIFT;
			} else {
				/* Older chips */
				uint chip = CHIPID(sii->pub.chip);

				if (chip == BCM4704_CHIP_ID)
					numcores = 9;
				else if (chip == BCM5365_CHIP_ID)
					numcores = 7;
				else {
					SI_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n",
						chip));
					ASSERT(0);
					numcores = 1;
				}
			}
			SI_VMSG(("_sb_scan: there are %u cores in the chip %s\n", numcores,
				sii->pub.issim ? "QT" : ""));
		}
		/* scan bridged SB(s) and add results to the end of the list */
		else if (cores_info->coreid[next] == OCP_CORE_ID) {
			sbconfig_t *sb = REGS2SB(sii->curmap);
			uint32 nsbba = R_SBREG(sii, &sb->sbadmatch1);
			uint nsbcc;

			sii->numcores = next + 1;

			/* only recurse into buses inside the enumeration space,
			 * and skip buses we have already scanned
			 */
			if ((nsbba & 0xfff00000) != SI_ENUM_BASE)
				continue;
			nsbba &= 0xfffff000;
			if (_sb_coreidx(sii, nsbba) != BADIDX)
				continue;

			/* expected child-core count comes from sbtmstatehigh[19:16] */
			nsbcc = (R_SBREG(sii, &sb->sbtmstatehigh) & 0x000f0000) >> 16;
			nsbcc = _sb_scan(sii, sba, regs, bus + 1, nsbba, nsbcc);
			if (sbba == SI_ENUM_BASE)
				numcores -= nsbcc;
			ncc += nsbcc;
		}
	}

	SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i, sbba));

	sii->numcores = i + ncc;
	return sii->numcores;
}
627
628 /* scan the sb enumerated space to identify all cores */
629 void
sb_scan(si_t * sih,volatile void * regs,uint devid)630 sb_scan(si_t *sih, volatile void *regs, uint devid)
631 {
632 uint32 origsba;
633 sbconfig_t *sb;
634 si_info_t *sii = SI_INFO(sih);
635 BCM_REFERENCE(devid);
636
637 sb = REGS2SB(sii->curmap);
638
639 sii->pub.socirev = (R_SBREG(sii, &sb->sbidlow) & SBIDL_RV_MASK) >> SBIDL_RV_SHIFT;
640
641 /* Save the current core info and validate it later till we know
642 * for sure what is good and what is bad.
643 */
644 origsba = _sb_coresba(sii);
645
646 /* scan all SB(s) starting from SI_ENUM_BASE */
647 sii->numcores = _sb_scan(sii, origsba, regs, 0, SI_ENUM_BASE, 1);
648 }
649
/*
 * This function changes logical "focus" to the indicated core;
 * must be called with interrupts off.
 * Moreover, callers should keep interrupts off during switching out of and back to d11 core
 *
 * Returns the new core's register map, or NULL if coreidx is out of range.
 */
volatile void *
sb_setcoreidx(si_t *sih, uint coreidx)
{
	si_info_t *sii = SI_INFO(sih);

	if (coreidx >= sii->numcores)
		return (NULL);

	/*
	 * If the user has provided an interrupt mask enabled function,
	 * then assert interrupts are disabled before switching the core.
	 */
	ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));

	/* update cached focus state to match the new mapping */
	sii->curmap = _sb_setcoreidx(sii, coreidx);
	sii->curidx = coreidx;

	return (sii->curmap);
}
674
/* This function changes the logical "focus" to the indicated core.
 * Return the current core's virtual address.
 * The mechanism is bus-specific: direct mapping on SI_BUS, BAR0 window on
 * PCI, attribute address registers on PCMCIA.
 */
static volatile void *
_sb_setcoreidx(si_info_t *sii, uint coreidx)
{
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
	uint32 sbaddr = cores_info->coresba[coreidx];
	volatile void *regs;

	switch (BUSTYPE(sii->pub.bustype)) {
	case SI_BUS:
		/* map new one */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = REG_MAP(sbaddr, SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		regs = cores_info->regs[coreidx];
		break;

	case PCI_BUS:
		/* point bar0 window */
		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, sbaddr);
		regs = sii->curmap;
		break;

	case PCMCIA_BUS: {
		/* program address bits [12..31] into the three attribute registers */
		uint8 tmp = (sbaddr >> 12) & 0x0f;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
		tmp = (sbaddr >> 16) & 0xff;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
		tmp = (sbaddr >> 24) & 0xff;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
		regs = sii->curmap;
		break;
	}
#ifdef BCMSDIO
	case SPI_BUS:
	case SDIO_BUS:
		/* map new one */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = (void *)(uintptr)sbaddr;
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		regs = cores_info->regs[coreidx];
		break;
#endif /* BCMSDIO */

	default:
		ASSERT(0);
		regs = NULL;
		break;
	}

	return regs;
}
732
733 /* Return the address of sbadmatch0/1/2/3 register */
734 static volatile uint32 *
sb_admatch(si_info_t * sii,uint asidx)735 sb_admatch(si_info_t *sii, uint asidx)
736 {
737 sbconfig_t *sb;
738 volatile uint32 *addrm;
739
740 sb = REGS2SB(sii->curmap);
741
742 switch (asidx) {
743 case 0:
744 addrm = &sb->sbadmatch0;
745 break;
746
747 case 1:
748 addrm = &sb->sbadmatch1;
749 break;
750
751 case 2:
752 addrm = &sb->sbadmatch2;
753 break;
754
755 case 3:
756 addrm = &sb->sbadmatch3;
757 break;
758
759 default:
760 SI_ERROR(("%s: Address space index (%d) out of range\n", __FUNCTION__, asidx));
761 return 0;
762 }
763
764 return (addrm);
765 }
766
767 /* Return the number of address spaces in current core */
768 int
sb_numaddrspaces(si_t * sih)769 sb_numaddrspaces(si_t *sih)
770 {
771 si_info_t *sii;
772 sbconfig_t *sb;
773
774 sii = SI_INFO(sih);
775 sb = REGS2SB(sii->curmap);
776
777 /* + 1 because of enumeration space */
778 return ((R_SBREG(sii, &sb->sbidlow) & SBIDL_AR_MASK) >> SBIDL_AR_SHIFT) + 1;
779 }
780
781 /* Return the address of the nth address space in the current core */
782 uint32
sb_addrspace(si_t * sih,uint asidx)783 sb_addrspace(si_t *sih, uint asidx)
784 {
785 si_info_t *sii;
786
787 sii = SI_INFO(sih);
788
789 return (sb_base(R_SBREG(sii, sb_admatch(sii, asidx))));
790 }
791
792 /* Return the size of the nth address space in the current core */
793 uint32
sb_addrspacesize(si_t * sih,uint asidx)794 sb_addrspacesize(si_t *sih, uint asidx)
795 {
796 si_info_t *sii;
797
798 sii = SI_INFO(sih);
799
800 return (sb_size(R_SBREG(sii, sb_admatch(sii, asidx))));
801 }
802
803
/* do buffered registers update: broadcast the commit via chipcommon */
void
sb_commit(si_t *sih)
{
	si_info_t *sii = SI_INFO(sih);
	uint origidx;
	uint intr_val = 0;

	origidx = sii->curidx;
	ASSERT(GOODIDX(origidx));

	INTR_OFF(sii, intr_val);

	/* switch over to chipcommon core if there is one, else use pci */
	/* NOTE(review): despite the comment above, there is no pci fallback in
	 * this code path - a chip without chipcommon hits ASSERT(0); confirm
	 * that is intended on all supported chips.
	 */
	if (sii->pub.ccrev != NOREV) {
		chipcregs_t *ccregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
		ASSERT(ccregs != NULL);

		/* do the buffer registers update */
		W_REG(sii->osh, &ccregs->broadcastaddress, SB_COMMIT);
		W_REG(sii->osh, &ccregs->broadcastdata, 0x0);
	} else
		ASSERT(0);

	/* restore core index */
	sb_setcoreidx(sih, origidx);
	INTR_RESTORE(sii, intr_val);
}
832
/*
 * Put the current core into reset, following the SB disable handshake:
 * assert target reject, wait for busy to clear, optionally assert initiator
 * reject, then assert reset. 'bits' are core-specific SICF flags left set
 * in the disabled state.
 */
void
sb_core_disable(si_t *sih, uint32 bits)
{
	si_info_t *sii;
	volatile uint32 dummy;
	sbconfig_t *sb;

	sii = SI_INFO(sih);

	ASSERT(GOODREGS(sii->curmap));
	sb = REGS2SB(sii->curmap);

	/* if core is already in reset, just return */
	if (R_SBREG(sii, &sb->sbtmstatelow) & SBTML_RESET)
		return;

	/* if clocks are not enabled, put into reset and return */
	if ((R_SBREG(sii, &sb->sbtmstatelow) & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) == 0)
		goto disable;

	/* set target reject and spin until busy is clear (preserve core-specific bits) */
	OR_SBREG(sii, &sb->sbtmstatelow, SBTML_REJ);
	dummy = R_SBREG(sii, &sb->sbtmstatelow);	/* readback flushes the write */
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);
	SPINWAIT((R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000);
	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY)
		SI_ERROR(("%s: target state still busy\n", __FUNCTION__));

	/* if the core is an initiator, also reject and drain its outstanding transactions */
	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) {
		OR_SBREG(sii, &sb->sbimstate, SBIM_RJ);
		dummy = R_SBREG(sii, &sb->sbimstate);
		BCM_REFERENCE(dummy);
		OSL_DELAY(1);
		SPINWAIT((R_SBREG(sii, &sb->sbimstate) & SBIM_BY), 100000);
	}

	/* set reset and reject while enabling the clocks */
	W_SBREG(sii, &sb->sbtmstatelow,
	        (((bits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
	         SBTML_REJ | SBTML_RESET));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(10);

	/* don't forget to clear the initiator reject bit */
	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT)
		AND_SBREG(sii, &sb->sbimstate, ~SBIM_RJ);

disable:
	/* leave reset and reject asserted */
	W_SBREG(sii, &sb->sbtmstatelow, ((bits << SBTML_SICF_SHIFT) | SBTML_REJ | SBTML_RESET));
	OSL_DELAY(1);
}
887
/* reset and re-enable a core
 * inputs:
 *	bits - core specific bits that are set during and after reset sequence
 *	resetbits - core specific bits that are set only during reset sequence
 */
void
sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
{
	si_info_t *sii;
	sbconfig_t *sb;
	volatile uint32 dummy;

	sii = SI_INFO(sih);
	ASSERT(GOODREGS(sii->curmap));
	sb = REGS2SB(sii->curmap);

	/*
	 * Must do the disable sequence first to work for arbitrary current core state.
	 */
	sb_core_disable(sih, (bits | resetbits));

	/*
	 * Now do the initialization sequence.
	 */

	/* set reset while enabling the clock and forcing them on throughout the core */
	W_SBREG(sii, &sb->sbtmstatelow,
	        (((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
	         SBTML_RESET));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);	/* readback flushes the write */
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);

	/* clear any pending target-state serror */
	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_SERR) {
		W_SBREG(sii, &sb->sbtmstatehigh, 0);
	}
	/* clear any pending initiator bus error / timeout */
	if ((dummy = R_SBREG(sii, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) {
		AND_SBREG(sii, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
	}

	/* clear reset and allow it to propagate throughout the core */
	W_SBREG(sii, &sb->sbtmstatelow,
	        ((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);

	/* leave clock enabled */
	W_SBREG(sii, &sb->sbtmstatelow, ((bits | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);
}
941
/*
 * Set the initiator timeout for the "master core".
 * The master core is defined to be the core in control
 * of the chip and so it issues accesses to non-memory
 * locations (Because of dma *any* core can access memory).
 *
 * The routine uses the bus to decide who is the master:
 *	SI_BUS => mips
 *	JTAG_BUS => chipc
 *	PCI_BUS => pci or pcie
 *	PCMCIA_BUS => pcmcia
 *	SDIO_BUS => pcmcia
 *
 * This routine exists so callers can disable initiator
 * timeouts so accesses to very slow devices like otp
 * won't cause an abort. The routine allows arbitrary
 * settings of the service and request timeouts, though.
 *
 * Returns the timeout state before changing it or -1
 * on error.
 */

#define	TO_MASK	(SBIMCL_RTO_MASK | SBIMCL_STO_MASK)

uint32
sb_set_initiator_to(si_t *sih, uint32 to, uint idx)
{
	si_info_t *sii = SI_INFO(sih);
	uint origidx;
	uint intr_val = 0;
	uint32 tmp, ret = 0xffffffff;	/* 0xffffffff == -1, the error return */
	sbconfig_t *sb;

	/* reject values outside the timeout fields */
	if ((to & ~TO_MASK) != 0)
		return ret;

	/* Figure out the master core */
	if (idx == BADIDX) {
		switch (BUSTYPE(sii->pub.bustype)) {
		case PCI_BUS:
			idx = sii->pub.buscoreidx;
			break;
		case JTAG_BUS:
			idx = SI_CC_IDX;
			break;
		case PCMCIA_BUS:
#ifdef BCMSDIO
		case SDIO_BUS:
#endif
			idx = si_findcoreidx(sih, PCMCIA_CORE_ID, 0);
			break;
		case SI_BUS:
			idx = si_findcoreidx(sih, MIPS33_CORE_ID, 0);
			break;
		default:
			ASSERT(0);
		}
		if (idx == BADIDX)
			return ret;
	}

	INTR_OFF(sii, intr_val);
	origidx = si_coreidx(sih);

	/* focus on the master core and swap the timeout fields */
	sb = REGS2SB(sb_setcoreidx(sih, idx));

	tmp = R_SBREG(sii, &sb->sbimconfiglow);
	ret = tmp & TO_MASK;	/* previous timeout setting is the return value */
	W_SBREG(sii, &sb->sbimconfiglow, (tmp & ~TO_MASK) | to);

	/* sbimconfiglow is a buffered register; commit makes it take effect */
	sb_commit(sih);
	sb_setcoreidx(sih, origidx);
	INTR_RESTORE(sii, intr_val);
	return ret;
}
1018
1019 uint32
sb_base(uint32 admatch)1020 sb_base(uint32 admatch)
1021 {
1022 uint32 base;
1023 uint type;
1024
1025 type = admatch & SBAM_TYPE_MASK;
1026 ASSERT(type < 3);
1027
1028 base = 0;
1029
1030 if (type == 0) {
1031 base = admatch & SBAM_BASE0_MASK;
1032 } else if (type == 1) {
1033 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
1034 base = admatch & SBAM_BASE1_MASK;
1035 } else if (type == 2) {
1036 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
1037 base = admatch & SBAM_BASE2_MASK;
1038 }
1039
1040 return (base);
1041 }
1042
1043 uint32
sb_size(uint32 admatch)1044 sb_size(uint32 admatch)
1045 {
1046 uint32 size;
1047 uint type;
1048
1049 type = admatch & SBAM_TYPE_MASK;
1050 ASSERT(type < 3);
1051
1052 size = 0;
1053
1054 if (type == 0) {
1055 size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1);
1056 } else if (type == 1) {
1057 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
1058 size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1);
1059 } else if (type == 2) {
1060 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
1061 size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1);
1062 }
1063
1064 return (size);
1065 }
1066
#if defined(BCMDBG_PHYDUMP)
/* print interesting sbconfig registers of every core into strbuf 'b' */
void
sb_dumpregs(si_t *sih, struct bcmstrbuf *b)
{
	sbconfig_t *sb;
	uint origidx, i, intr_val = 0;
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

	origidx = sii->curidx;

	INTR_OFF(sii, intr_val);

	/* walk every core, switching focus to each in turn */
	for (i = 0; i < sii->numcores; i++) {
		sb = REGS2SB(sb_setcoreidx(sih, i));

		bcm_bprintf(b, "core 0x%x: \n", cores_info->coreid[i]);

		/* the imerrlog registers only exist on newer SonicsRevs */
		if (sii->pub.socirev > SONICS_2_2)
			bcm_bprintf(b, "sbimerrlog 0x%x sbimerrloga 0x%x\n",
			            sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOG, 0, 0),
			            sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOGA, 0, 0));

		bcm_bprintf(b, "sbtmstatelow 0x%x sbtmstatehigh 0x%x sbidhigh 0x%x "
		            "sbimstate 0x%x\n sbimconfiglow 0x%x sbimconfighigh 0x%x\n",
		            R_SBREG(sii, &sb->sbtmstatelow), R_SBREG(sii, &sb->sbtmstatehigh),
		            R_SBREG(sii, &sb->sbidhigh), R_SBREG(sii, &sb->sbimstate),
		            R_SBREG(sii, &sb->sbimconfiglow), R_SBREG(sii, &sb->sbimconfighigh));
	}

	/* restore original focus and interrupts */
	sb_setcoreidx(sih, origidx);
	INTR_RESTORE(sii, intr_val);
}
#endif
1102