1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * Misc utility routines for accessing chip-specific features
4 * of the SiliconBackplane-based Broadcom chips.
5 *
6 * Copyright (C) 1999-2019, Broadcom.
7 *
8 * Unless you and Broadcom execute a separate written software license
9 * agreement governing use of this software, this software is licensed to you
10 * under the terms of the GNU General Public License version 2 (the "GPL"),
11 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
12 * following added to such license:
13 *
14 * As a special exception, the copyright holders of this software give you
15 * permission to link this software with independent modules, and to copy and
16 * distribute the resulting executable under terms of your choice, provided that
17 * you also meet, for each linked independent module, the terms and conditions of
18 * the license of that module. An independent module is a module which is not
19 * derived from this software. The special exception does not apply to any
20 * modifications of the software.
21 *
22 * Notwithstanding the above, under no circumstances may you combine this
23 * software in any way with any other Broadcom software provided under a license
24 * other than the GPL, without Broadcom's express prior written consent.
25 *
26 *
27 * <<Broadcom-WL-IPTag/Open:>>
28 *
29 * $Id: sbutils.c 700323 2017-05-18 16:12:11Z $
30 */
31
32 #include <bcm_cfg.h>
33 #include <typedefs.h>
34 #include <bcmdefs.h>
35 #include <osl.h>
36 #include <bcmutils.h>
37 #include <siutils.h>
38 #include <bcmdevs.h>
39 #include <hndsoc.h>
40 #include <sbchipc.h>
41 #include <pcicfg.h>
42 #include <sbpcmcia.h>
43
44 #include "siutils_priv.h"
45
/* local prototypes */
static uint _sb_coreidx(si_info_t *sii, uint32 sba);
static uint _sb_scan(si_info_t *sii, uint32 sba, volatile void *regs, uint bus, uint32 sbba,
	uint ncores, uint devid);
static uint32 _sb_coresba(si_info_t *sii);
static volatile void *_sb_setcoreidx(si_info_t *sii, uint coreidx);
/* read-modify-write an sbconfig register: clear 'mask' bits, then set 'val' bits */
#define	SET_SBREG(sii, r, mask, val)	\
		W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val)))
/* map a core's register base 'va' to its sbconfig block at offset SBCONFIGOFF */
#define	REGS2SB(va)	(sbconfig_t*) ((volatile int8*)(va) + SBCONFIGOFF)

/* sonicsrev: SonicsRev values extracted from the sbidlow SBIDL_RV field */
#define	SONICS_2_2	(SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
#define	SONICS_2_3	(SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)

/* sbconfig register accessors; PCMCIA needs special handling (see sb_read_sbreg) */
#define	R_SBREG(sii, sbr)	sb_read_sbreg((sii), (sbr))
#define	W_SBREG(sii, sbr, v)	sb_write_sbreg((sii), (sbr), (v))
#define	AND_SBREG(sii, sbr, v)	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v)))
#define	OR_SBREG(sii, sbr, v)	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v)))
64
/* Read a 32-bit sbconfig register over the active bus.
 * On PCMCIA the attribute-space MEM_SEG bit supplies the 12th address bit,
 * so it is set around the access (with interrupts blocked) and bit 11 is
 * masked out of the register address itself.
 */
static uint32
sb_read_sbreg(si_info_t *sii, volatile uint32 *sbr)
{
	uint8 tmp;
	uint32 val, intr_val = 0;

	/*
	 * compact flash only has an 11-bit address, while we need a 12-bit address.
	 * MEM_SEG will be OR'd with the other 11 address bits in hardware,
	 * so we program MEM_SEG with the 12th bit when necessary (access to sb registers).
	 * For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special.
	 */
	if (PCMCIA(sii)) {
		INTR_OFF(sii, intr_val);	/* keep the MEM_SEG window change atomic */
		tmp = 1;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
		sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
	}

	val = R_REG(sii->osh, sbr);

	if (PCMCIA(sii)) {
		tmp = 0;	/* restore MEM_SEG before re-enabling interrupts */
		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
		INTR_RESTORE(sii, intr_val);
	}

	return (val);
}
94
/* Write a 32-bit sbconfig register over the active bus.
 * Same MEM_SEG/bit-11 handling as sb_read_sbreg; additionally, on PCMCIA
 * the 32-bit value is written as two 16-bit halves, each preceded by a
 * dummy read of the register.
 */
static void
sb_write_sbreg(si_info_t *sii, volatile uint32 *sbr, uint32 v)
{
	uint8 tmp;
	volatile uint32 dummy;
	uint32 intr_val = 0;

	/*
	 * compact flash only has an 11-bit address, while we need a 12-bit address.
	 * MEM_SEG will be OR'd with the other 11 address bits in hardware,
	 * so we program MEM_SEG with the 12th bit when necessary (access to sb registers).
	 * For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special.
	 */
	if (PCMCIA(sii)) {
		INTR_OFF(sii, intr_val);	/* keep the MEM_SEG window change atomic */
		tmp = 1;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
		sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
	}

	if (BUSTYPE(sii->pub.bustype) == PCMCIA_BUS) {
		/* low half first, then high half, each preceded by a dummy read */
		dummy = R_REG(sii->osh, sbr);
		BCM_REFERENCE(dummy);
		W_REG(sii->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
		dummy = R_REG(sii->osh, sbr);
		BCM_REFERENCE(dummy);
		W_REG(sii->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
	} else
		W_REG(sii->osh, sbr, v);

	if (PCMCIA(sii)) {
		tmp = 0;	/* restore MEM_SEG before re-enabling interrupts */
		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
		INTR_RESTORE(sii, intr_val);
	}
}
131
132 uint
sb_coreid(si_t * sih)133 sb_coreid(si_t *sih)
134 {
135 si_info_t *sii;
136 sbconfig_t *sb;
137
138 sii = SI_INFO(sih);
139 sb = REGS2SB(sii->curmap);
140
141 return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT);
142 }
143
/* Return the backplane interrupt flag status (sbflagst) read via the
 * chipcommon core. Temporarily switches focus to chipcommon with
 * interrupts blocked, then restores the original core.
 */
uint
sb_intflag(si_t *sih)
{
	si_info_t *sii = SI_INFO(sih);
	volatile void *corereg;
	sbconfig_t *sb;
	uint origidx, intflag, intr_val = 0;

	INTR_OFF(sii, intr_val);
	origidx = si_coreidx(sih);	/* remember current focus */
	corereg = si_setcore(sih, CC_CORE_ID, 0);	/* switch to chipcommon */
	ASSERT(corereg != NULL);
	sb = REGS2SB(corereg);
	intflag = R_SBREG(sii, &sb->sbflagst);
	sb_setcoreidx(sih, origidx);	/* restore previous focus */
	INTR_RESTORE(sii, intr_val);

	return intflag;
}
163
164 uint
sb_flag(si_t * sih)165 sb_flag(si_t *sih)
166 {
167 si_info_t *sii;
168 sbconfig_t *sb;
169
170 sii = SI_INFO(sih);
171 sb = REGS2SB(sii->curmap);
172
173 return R_SBREG(sii, &sb->sbtpsflag) & SBTPS_NUM0_MASK;
174 }
175
176 void
sb_setint(si_t * sih,int siflag)177 sb_setint(si_t *sih, int siflag)
178 {
179 si_info_t *sii;
180 sbconfig_t *sb;
181 uint32 vec;
182
183 sii = SI_INFO(sih);
184 sb = REGS2SB(sii->curmap);
185
186 if (siflag == -1)
187 vec = 0;
188 else
189 vec = 1 << siflag;
190 W_SBREG(sii, &sb->sbintvec, vec);
191 }
192
193 /* return core index of the core with address 'sba' */
194 static uint
_sb_coreidx(si_info_t * sii,uint32 sba)195 _sb_coreidx(si_info_t *sii, uint32 sba)
196 {
197 uint i;
198 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
199
200 for (i = 0; i < sii->numcores; i ++)
201 if (sba == cores_info->coresba[i])
202 return i;
203 return BADIDX;
204 }
205
/* Return the backplane address of the current core, derived in a
 * bus-specific way: sbadmatch0 on SI_BUS, the PCI BAR0 window register
 * on PCI, the PCMCIA_ADDR0/1/2 attribute bytes on PCMCIA, or the
 * current mapping itself on SDIO/SPI. Returns BADCOREADDR for an
 * unrecognized bus type.
 */
static uint32
_sb_coresba(si_info_t *sii)
{
	uint32 sbaddr;

	switch (BUSTYPE(sii->pub.bustype)) {
	case SI_BUS: {
		sbconfig_t *sb = REGS2SB(sii->curmap);
		sbaddr = sb_base(R_SBREG(sii, &sb->sbadmatch0));
		break;
	}

	case PCI_BUS:
		/* the BAR0 window points at the current core */
		sbaddr = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
		break;

	case PCMCIA_BUS: {
		/* address is assembled from three attribute-space bytes */
		uint8 tmp = 0;
		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
		sbaddr = (uint32)tmp << 12;
		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
		sbaddr |= (uint32)tmp << 16;
		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
		sbaddr |= (uint32)tmp << 24;
		break;
	}

#ifdef BCMSDIO
	case SPI_BUS:
	case SDIO_BUS:
		sbaddr = (uint32)(uintptr)sii->curmap;
		break;
#endif // endif

	default:
		sbaddr = BADCOREADDR;
		break;
	}

	return sbaddr;
}
248
249 uint
sb_corevendor(si_t * sih)250 sb_corevendor(si_t *sih)
251 {
252 si_info_t *sii;
253 sbconfig_t *sb;
254
255 sii = SI_INFO(sih);
256 sb = REGS2SB(sii->curmap);
257
258 return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT);
259 }
260
261 uint
sb_corerev(si_t * sih)262 sb_corerev(si_t *sih)
263 {
264 si_info_t *sii;
265 sbconfig_t *sb;
266 uint sbidh;
267
268 sii = SI_INFO(sih);
269 sb = REGS2SB(sii->curmap);
270 sbidh = R_SBREG(sii, &sb->sbidhigh);
271
272 return (SBCOREREV(sbidh));
273 }
274
275 /* set core-specific control flags */
276 void
sb_core_cflags_wo(si_t * sih,uint32 mask,uint32 val)277 sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
278 {
279 si_info_t *sii;
280 sbconfig_t *sb;
281 uint32 w;
282
283 sii = SI_INFO(sih);
284 sb = REGS2SB(sii->curmap);
285
286 ASSERT((val & ~mask) == 0);
287
288 /* mask and set */
289 w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
290 (val << SBTML_SICF_SHIFT);
291 W_SBREG(sii, &sb->sbtmstatelow, w);
292 }
293
294 /* set/clear core-specific control flags */
295 uint32
sb_core_cflags(si_t * sih,uint32 mask,uint32 val)296 sb_core_cflags(si_t *sih, uint32 mask, uint32 val)
297 {
298 si_info_t *sii;
299 sbconfig_t *sb;
300 uint32 w;
301
302 sii = SI_INFO(sih);
303 sb = REGS2SB(sii->curmap);
304
305 ASSERT((val & ~mask) == 0);
306
307 /* mask and set */
308 if (mask || val) {
309 w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
310 (val << SBTML_SICF_SHIFT);
311 W_SBREG(sii, &sb->sbtmstatelow, w);
312 }
313
314 /* return the new value
315 * for write operation, the following readback ensures the completion of write opration.
316 */
317 return (R_SBREG(sii, &sb->sbtmstatelow) >> SBTML_SICF_SHIFT);
318 }
319
320 /* set/clear core-specific status flags */
321 uint32
sb_core_sflags(si_t * sih,uint32 mask,uint32 val)322 sb_core_sflags(si_t *sih, uint32 mask, uint32 val)
323 {
324 si_info_t *sii;
325 sbconfig_t *sb;
326 uint32 w;
327
328 sii = SI_INFO(sih);
329 sb = REGS2SB(sii->curmap);
330
331 ASSERT((val & ~mask) == 0);
332 ASSERT((mask & ~SISF_CORE_BITS) == 0);
333
334 /* mask and set */
335 if (mask || val) {
336 w = (R_SBREG(sii, &sb->sbtmstatehigh) & ~(mask << SBTMH_SISF_SHIFT)) |
337 (val << SBTMH_SISF_SHIFT);
338 W_SBREG(sii, &sb->sbtmstatehigh, w);
339 }
340
341 /* return the new value */
342 return (R_SBREG(sii, &sb->sbtmstatehigh) >> SBTMH_SISF_SHIFT);
343 }
344
345 bool
sb_iscoreup(si_t * sih)346 sb_iscoreup(si_t *sih)
347 {
348 si_info_t *sii;
349 sbconfig_t *sb;
350
351 sii = SI_INFO(sih);
352 sb = REGS2SB(sii->curmap);
353
354 return ((R_SBREG(sii, &sb->sbtmstatelow) &
355 (SBTML_RESET | SBTML_REJ_MASK | (SICF_CLOCK_EN << SBTML_SICF_SHIFT))) ==
356 (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
357 }
358
359 /*
360 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
361 * switch back to the original core, and return the new value.
362 *
 * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
364 *
365 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
366 * and (on newer pci cores) chipcommon registers.
367 */
uint
sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
{
	uint origidx = 0;
	volatile uint32 *r = NULL;
	uint w;
	uint intr_val = 0;
	bool fast = FALSE;	/* TRUE when the register is reachable without a core switch */
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

	ASSERT(GOODIDX(coreidx));
	ASSERT(regoff < SI_CORE_SIZE);
	ASSERT((val & ~mask) == 0);	/* val must be a subset of mask */

	if (coreidx >= SI_MAXCORES)
		return 0;

	if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
		/* If internal bus, we can always get at everything */
		fast = TRUE;
		/* map if does not exist */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
				SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
	} else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */

		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
			/* Chipc registers are mapped at 12KB */

			fast = TRUE;
			r = (volatile uint32 *)((volatile char *)sii->curmap +
				PCI_16KB0_CCREGS_OFFSET + regoff);
		} else if (sii->pub.buscoreidx == coreidx) {
			/* pci registers are at either in the last 2KB of an 8KB window
			 * or, in pcie and pci rev 13 at 8KB
			 */
			fast = TRUE;
			if (SI_FAST(sii))
				r = (volatile uint32 *)((volatile char *)sii->curmap +
					PCI_16KB0_PCIREGS_OFFSET + regoff);
			else
				r = (volatile uint32 *)((volatile char *)sii->curmap +
					((regoff >= SBCONFIGOFF) ?
					PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
					regoff);
		}
	}

	if (!fast) {
		/* slow path: block interrupts and actually switch cores */
		INTR_OFF(sii, intr_val);

		/* save current core index */
		origidx = si_coreidx(&sii->pub);

		/* switch core */
		r = (volatile uint32*) ((volatile uchar*)sb_setcoreidx(&sii->pub, coreidx) +
			regoff);
	}
	ASSERT(r != NULL);

	/* mask and set */
	if (mask || val) {
		/* sbconfig registers need the special sbreg accessors (PCMCIA) */
		if (regoff >= SBCONFIGOFF) {
			w = (R_SBREG(sii, r) & ~mask) | val;
			W_SBREG(sii, r, w);
		} else {
			w = (R_REG(sii->osh, r) & ~mask) | val;
			W_REG(sii->osh, r, w);
		}
	}

	/* readback */
	if (regoff >= SBCONFIGOFF)
		w = R_SBREG(sii, r);
	else {
		w = R_REG(sii->osh, r);
	}

	if (!fast) {
		/* restore core index */
		if (origidx != coreidx)
			sb_setcoreidx(&sii->pub, origidx);

		INTR_RESTORE(sii, intr_val);
	}

	return (w);
}
461
462 /*
463 * If there is no need for fiddling with interrupts or core switches (typically silicon
464 * back plane registers, pci registers and chipcommon registers), this function
465 * returns the register offset on this core to a mapped address. This address can
466 * be used for W_REG/R_REG directly.
467 *
468 * For accessing registers that would need a core switch, this function will return
469 * NULL.
470 */
volatile uint32 *
sb_corereg_addr(si_t *sih, uint coreidx, uint regoff)
{
	volatile uint32 *r = NULL;
	bool fast = FALSE;	/* TRUE when the register is reachable without a core switch */
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

	ASSERT(GOODIDX(coreidx));
	ASSERT(regoff < SI_CORE_SIZE);

	if (coreidx >= SI_MAXCORES)
		return 0;

	/* NOTE: the fast-path selection below mirrors sb_corereg() */
	if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
		/* If internal bus, we can always get at everything */
		fast = TRUE;
		/* map if does not exist */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
				SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
	} else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */

		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
			/* Chipc registers are mapped at 12KB */

			fast = TRUE;
			r = (volatile uint32 *)((volatile char *)sii->curmap +
				PCI_16KB0_CCREGS_OFFSET + regoff);
		} else if (sii->pub.buscoreidx == coreidx) {
			/* pci registers are at either in the last 2KB of an 8KB window
			 * or, in pcie and pci rev 13 at 8KB
			 */
			fast = TRUE;
			if (SI_FAST(sii))
				r = (volatile uint32 *)((volatile char *)sii->curmap +
					PCI_16KB0_PCIREGS_OFFSET + regoff);
			else
				r = (volatile uint32 *)((volatile char *)sii->curmap +
					((regoff >= SBCONFIGOFF) ?
					PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
					regoff);
		}
	}

	/* register would need a core switch: not directly addressable */
	if (!fast)
		return 0;

	return (r);
}
525
526 /* Scan the enumeration space to find all cores starting from the given
527 * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba'
528 * is the default core address at chip POR time and 'regs' is the virtual
529 * address that the default core is mapped at. 'ncores' is the number of
530 * cores expected on bus 'sbba'. It returns the total number of cores
531 * starting from bus 'sbba', inclusive.
532 */
/* limit recursion depth when scanning bridged buses */
#define SB_MAXBUSES	2
static uint
_sb_scan(si_info_t *sii, uint32 sba, volatile void *regs, uint bus,
	uint32 sbba, uint numcores, uint devid)
{
	uint next;	/* next free slot in the cores_info arrays */
	uint ncc = 0;	/* cores found on bridged (child) buses */
	uint i;
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

	if (bus >= SB_MAXBUSES) {
		SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba, bus));
		return 0;
	}
	SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba, numcores));

	/* Scan all cores on the bus starting from core 0.
	 * Core addresses must be contiguous on each bus.
	 */
	for (i = 0, next = sii->numcores; i < numcores && next < SB_BUS_MAXCORES; i++, next++) {
		cores_info->coresba[next] = sbba + (i * SI_CORE_SIZE);

		/* keep and reuse the initial register mapping */
		if ((BUSTYPE(sii->pub.bustype) == SI_BUS) && (cores_info->coresba[next] == sba)) {
			SI_VMSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs, next));
			cores_info->regs[next] = regs;
		}

		/* change core to 'next' and read its coreid */
		sii->curmap = _sb_setcoreidx(sii, next);
		sii->curidx = next;

		cores_info->coreid[next] = sb_coreid(&sii->pub);

		/* core specific processing... */
		/* chipc provides # cores */
		if (cores_info->coreid[next] == CC_CORE_ID) {
			chipcregs_t *cc = (chipcregs_t *)sii->curmap;
			uint32 ccrev = sb_corerev(&sii->pub);

			/* determine numcores - this is the total # cores in the chip */
			if (((ccrev == 4) || (ccrev >= 6))) {
				ASSERT(cc);
				numcores = (R_REG(sii->osh, &cc->chipid) & CID_CC_MASK) >>
					CID_CC_SHIFT;
			} else {
				/* Older chips: chipid has no core count; hardcode per chip */
				uint chip = CHIPID(sii->pub.chip);

				if (chip == BCM4704_CHIP_ID)
					numcores = 9;
				else if (chip == BCM5365_CHIP_ID)
					numcores = 7;
				else {
					SI_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n",
						chip));
					ASSERT(0);
					numcores = 1;
				}
			}
			SI_VMSG(("_sb_scan: there are %u cores in the chip %s\n", numcores,
				sii->pub.issim ? "QT" : ""));
		}
		/* scan bridged SB(s) and add results to the end of the list */
		else if (cores_info->coreid[next] == OCP_CORE_ID) {
			sbconfig_t *sb = REGS2SB(sii->curmap);
			uint32 nsbba = R_SBREG(sii, &sb->sbadmatch1);	/* bridged bus base */
			uint nsbcc;

			sii->numcores = next + 1;

			/* only follow the bridge if it points into the enum space */
			if ((nsbba & 0xfff00000) != si_enum_base(devid))
				continue;
			nsbba &= 0xfffff000;
			/* skip buses we have already scanned */
			if (_sb_coreidx(sii, nsbba) != BADIDX)
				continue;

			/* bits 16..19 of sbtmstatehigh hold the child core count */
			nsbcc = (R_SBREG(sii, &sb->sbtmstatehigh) & 0x000f0000) >> 16;
			nsbcc = _sb_scan(sii, sba, regs, bus + 1, nsbba, nsbcc, devid);
			if (sbba == si_enum_base(devid))
				numcores -= nsbcc;	/* child cores were counted in the chip total */
			ncc += nsbcc;
		}
	}

	SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i, sbba));

	sii->numcores = i + ncc;
	return sii->numcores;
}
623
/* Scan the sb enumerated space to identify all cores; fills in
 * sii->numcores and the per-core info via _sb_scan.
 */
void
sb_scan(si_t *sih, volatile void *regs, uint devid)
{
	uint32 origsba;
	sbconfig_t *sb;
	si_info_t *sii = SI_INFO(sih);
	BCM_REFERENCE(devid);

	sb = REGS2SB(sii->curmap);

	/* SonicsRev is read from the current core's sbidlow register */
	sii->pub.socirev = (R_SBREG(sii, &sb->sbidlow) & SBIDL_RV_MASK) >> SBIDL_RV_SHIFT;

	/* Save the current core info and validate it later till we know
	 * for sure what is good and what is bad.
	 */
	origsba = _sb_coresba(sii);

	/* scan all SB(s) starting from the enumeration base; assume 1 core
	 * until chipcommon reports the real count
	 */
	sii->numcores = _sb_scan(sii, origsba, regs, 0, si_enum_base(devid), 1, devid);
}
645
646 /*
647 * This function changes logical "focus" to the indicated core;
648 * must be called with interrupts off.
649 * Moreover, callers should keep interrupts off during switching out of and back to d11 core
650 */
651 volatile void *
sb_setcoreidx(si_t * sih,uint coreidx)652 sb_setcoreidx(si_t *sih, uint coreidx)
653 {
654 si_info_t *sii = SI_INFO(sih);
655
656 if (coreidx >= sii->numcores)
657 return (NULL);
658
659 /*
660 * If the user has provided an interrupt mask enabled function,
661 * then assert interrupts are disabled before switching the core.
662 */
663 ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
664
665 sii->curmap = _sb_setcoreidx(sii, coreidx);
666 sii->curidx = coreidx;
667
668 return (sii->curmap);
669 }
670
671 /* This function changes the logical "focus" to the indicated core.
672 * Return the current core's virtual address.
673 */
static volatile void *
_sb_setcoreidx(si_info_t *sii, uint coreidx)
{
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
	uint32 sbaddr = cores_info->coresba[coreidx];	/* target core's backplane address */
	volatile void *regs;

	switch (BUSTYPE(sii->pub.bustype)) {
	case SI_BUS:
		/* map new one */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = REG_MAP(sbaddr, SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		regs = cores_info->regs[coreidx];
		break;

	case PCI_BUS:
		/* point bar0 window */
		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, sbaddr);
		regs = sii->curmap;	/* same mapping, new core behind the window */
		break;

	case PCMCIA_BUS: {
		/* program the three attribute-space address bytes */
		uint8 tmp = (sbaddr >> 12) & 0x0f;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
		tmp = (sbaddr >> 16) & 0xff;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
		tmp = (sbaddr >> 24) & 0xff;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
		regs = sii->curmap;
		break;
	}
#ifdef BCMSDIO
	case SPI_BUS:
	case SDIO_BUS:
		/* map new one */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = (void *)(uintptr)sbaddr;
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		regs = cores_info->regs[coreidx];
		break;
#endif /* BCMSDIO */

	default:
		ASSERT(0);	/* unsupported bus type */
		regs = NULL;
		break;
	}

	return regs;
}
727
728 /* Return the address of sbadmatch0/1/2/3 register */
729 static volatile uint32 *
sb_admatch(si_info_t * sii,uint asidx)730 sb_admatch(si_info_t *sii, uint asidx)
731 {
732 sbconfig_t *sb;
733 volatile uint32 *addrm;
734
735 sb = REGS2SB(sii->curmap);
736
737 switch (asidx) {
738 case 0:
739 addrm = &sb->sbadmatch0;
740 break;
741
742 case 1:
743 addrm = &sb->sbadmatch1;
744 break;
745
746 case 2:
747 addrm = &sb->sbadmatch2;
748 break;
749
750 case 3:
751 addrm = &sb->sbadmatch3;
752 break;
753
754 default:
755 SI_ERROR(("%s: Address space index (%d) out of range\n", __FUNCTION__, asidx));
756 return 0;
757 }
758
759 return (addrm);
760 }
761
762 /* Return the number of address spaces in current core */
763 int
sb_numaddrspaces(si_t * sih)764 sb_numaddrspaces(si_t *sih)
765 {
766 si_info_t *sii;
767 sbconfig_t *sb;
768
769 sii = SI_INFO(sih);
770 sb = REGS2SB(sii->curmap);
771
772 /* + 1 because of enumeration space */
773 return ((R_SBREG(sii, &sb->sbidlow) & SBIDL_AR_MASK) >> SBIDL_AR_SHIFT) + 1;
774 }
775
776 /* Return the address of the nth address space in the current core */
777 uint32
sb_addrspace(si_t * sih,uint asidx)778 sb_addrspace(si_t *sih, uint asidx)
779 {
780 si_info_t *sii;
781
782 sii = SI_INFO(sih);
783
784 return (sb_base(R_SBREG(sii, sb_admatch(sii, asidx))));
785 }
786
787 /* Return the size of the nth address space in the current core */
788 uint32
sb_addrspacesize(si_t * sih,uint asidx)789 sb_addrspacesize(si_t *sih, uint asidx)
790 {
791 si_info_t *sii;
792
793 sii = SI_INFO(sih);
794
795 return (sb_size(R_SBREG(sii, sb_admatch(sii, asidx))));
796 }
797
/* Do a buffered-register update: broadcast a commit through chipcommon
 * so that writes to buffered registers (e.g. sbimconfiglow, see
 * sb_set_initiator_to) take effect. Asserts if there is no chipcommon.
 */
void
sb_commit(si_t *sih)
{
	si_info_t *sii = SI_INFO(sih);
	uint origidx;
	uint intr_val = 0;

	origidx = sii->curidx;
	ASSERT(GOODIDX(origidx));

	INTR_OFF(sii, intr_val);

	/* switch over to chipcommon core if there is one, else use pci */
	if (sii->pub.ccrev != NOREV) {
		chipcregs_t *ccregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
		ASSERT(ccregs != NULL);

		/* do the buffer registers update */
		W_REG(sii->osh, &ccregs->broadcastaddress, SB_COMMIT);
		W_REG(sii->osh, &ccregs->broadcastdata, 0x0);
	} else
		ASSERT(0);	/* no chipcommon core to broadcast through */

	/* restore core index */
	sb_setcoreidx(sih, origidx);
	INTR_RESTORE(sii, intr_val);
}
826
/* Put the current core into reset, leaving 'bits' (core-specific SICF
 * flags) set in sbtmstatelow. Follows the documented reject/busy-wait
 * handshake when the core is running; if the core is already in reset
 * this is a no-op.
 */
void
sb_core_disable(si_t *sih, uint32 bits)
{
	si_info_t *sii;
	volatile uint32 dummy;	/* readbacks used only to flush posted writes */
	sbconfig_t *sb;

	sii = SI_INFO(sih);

	ASSERT(GOODREGS(sii->curmap));
	sb = REGS2SB(sii->curmap);

	/* if core is already in reset, just return */
	if (R_SBREG(sii, &sb->sbtmstatelow) & SBTML_RESET)
		return;

	/* if clocks are not enabled, put into reset and return */
	if ((R_SBREG(sii, &sb->sbtmstatelow) & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) == 0)
		goto disable;

	/* set target reject and spin until busy is clear (preserve core-specific bits) */
	OR_SBREG(sii, &sb->sbtmstatelow, SBTML_REJ);
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);
	SPINWAIT((R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000);
	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY)
		SI_ERROR(("%s: target state still busy\n", __FUNCTION__));

	/* initiator cores additionally need initiator reject + busy-wait */
	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) {
		OR_SBREG(sii, &sb->sbimstate, SBIM_RJ);
		dummy = R_SBREG(sii, &sb->sbimstate);
		BCM_REFERENCE(dummy);
		OSL_DELAY(1);
		SPINWAIT((R_SBREG(sii, &sb->sbimstate) & SBIM_BY), 100000);
	}

	/* set reset and reject while enabling the clocks */
	W_SBREG(sii, &sb->sbtmstatelow,
		(((bits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
		SBTML_REJ | SBTML_RESET));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(10);

	/* don't forget to clear the initiator reject bit */
	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT)
		AND_SBREG(sii, &sb->sbimstate, ~SBIM_RJ);

disable:
	/* leave reset and reject asserted */
	W_SBREG(sii, &sb->sbtmstatelow, ((bits << SBTML_SICF_SHIFT) | SBTML_REJ | SBTML_RESET));
	OSL_DELAY(1);
}
881
882 /* reset and re-enable a core
883 * inputs:
884 * bits - core specific bits that are set during and after reset sequence
885 * resetbits - core specific bits that are set only during reset sequence
886 */
void
sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
{
	si_info_t *sii;
	sbconfig_t *sb;
	volatile uint32 dummy;	/* readbacks used only to flush posted writes */

	sii = SI_INFO(sih);
	ASSERT(GOODREGS(sii->curmap));
	sb = REGS2SB(sii->curmap);

	/*
	 * Must do the disable sequence first to work for arbitrary current core state.
	 */
	sb_core_disable(sih, (bits | resetbits));

	/*
	 * Now do the initialization sequence.
	 */

	/* set reset while enabling the clock and forcing them on throughout the core */
	W_SBREG(sii, &sb->sbtmstatelow,
		(((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
		SBTML_RESET));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);

	/* clear any pending serror / initiator error state before releasing reset */
	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_SERR) {
		W_SBREG(sii, &sb->sbtmstatehigh, 0);
	}
	if ((dummy = R_SBREG(sii, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) {
		AND_SBREG(sii, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
	}

	/* clear reset and allow it to propagate throughout the core */
	W_SBREG(sii, &sb->sbtmstatelow,
		((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);

	/* leave clock enabled (drop FGC and the one-shot resetbits) */
	W_SBREG(sii, &sb->sbtmstatelow, ((bits | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);
}
935
936 /*
937 * Set the initiator timeout for the "master core".
938 * The master core is defined to be the core in control
939 * of the chip and so it issues accesses to non-memory
 * locations (because with DMA *any* core can access memory).
941 *
942 * The routine uses the bus to decide who is the master:
943 * SI_BUS => mips
944 * JTAG_BUS => chipc
945 * PCI_BUS => pci or pcie
946 * PCMCIA_BUS => pcmcia
947 * SDIO_BUS => pcmcia
948 *
949 * This routine exists so callers can disable initiator
950 * timeouts so accesses to very slow devices like otp
951 * won't cause an abort. The routine allows arbitrary
952 * settings of the service and request timeouts, though.
953 *
954 * Returns the timeout state before changing it or -1
955 * on error.
956 */
957
958 #define TO_MASK (SBIMCL_RTO_MASK | SBIMCL_STO_MASK)
959
uint32
sb_set_initiator_to(si_t *sih, uint32 to, uint idx)
{
	si_info_t *sii = SI_INFO(sih);
	uint origidx;
	uint intr_val = 0;
	uint32 tmp, ret = 0xffffffff;	/* 0xffffffff signals an error to the caller */
	sbconfig_t *sb;

	/* 'to' must fit inside the request/service timeout fields */
	if ((to & ~TO_MASK) != 0)
		return ret;

	/* Figure out the master core */
	if (idx == BADIDX) {
		switch (BUSTYPE(sii->pub.bustype)) {
		case PCI_BUS:
			idx = sii->pub.buscoreidx;
			break;
		case JTAG_BUS:
			idx = SI_CC_IDX;
			break;
		case PCMCIA_BUS:
#ifdef BCMSDIO
		case SDIO_BUS:
#endif // endif
			idx = si_findcoreidx(sih, PCMCIA_CORE_ID, 0);
			break;
		case SI_BUS:
			idx = si_findcoreidx(sih, MIPS33_CORE_ID, 0);
			break;
		default:
			ASSERT(0);
		}
		if (idx == BADIDX)
			return ret;	/* no master core found */
	}

	INTR_OFF(sii, intr_val);
	origidx = si_coreidx(sih);

	sb = REGS2SB(sb_setcoreidx(sih, idx));

	tmp = R_SBREG(sii, &sb->sbimconfiglow);
	ret = tmp & TO_MASK;	/* previous timeout bits, returned to the caller */
	W_SBREG(sii, &sb->sbimconfiglow, (tmp & ~TO_MASK) | to);

	sb_commit(sih);	/* sbimconfiglow is buffered; broadcast the update */
	sb_setcoreidx(sih, origidx);
	INTR_RESTORE(sii, intr_val);
	return ret;
}
1011
1012 uint32
sb_base(uint32 admatch)1013 sb_base(uint32 admatch)
1014 {
1015 uint32 base;
1016 uint type;
1017
1018 type = admatch & SBAM_TYPE_MASK;
1019 ASSERT(type < 3);
1020
1021 base = 0;
1022
1023 if (type == 0) {
1024 base = admatch & SBAM_BASE0_MASK;
1025 } else if (type == 1) {
1026 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
1027 base = admatch & SBAM_BASE1_MASK;
1028 } else if (type == 2) {
1029 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
1030 base = admatch & SBAM_BASE2_MASK;
1031 }
1032
1033 return (base);
1034 }
1035
1036 uint32
sb_size(uint32 admatch)1037 sb_size(uint32 admatch)
1038 {
1039 uint32 size;
1040 uint type;
1041
1042 type = admatch & SBAM_TYPE_MASK;
1043 ASSERT(type < 3);
1044
1045 size = 0;
1046
1047 if (type == 0) {
1048 size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1);
1049 } else if (type == 1) {
1050 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
1051 size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1);
1052 } else if (type == 2) {
1053 ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
1054 size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1);
1055 }
1056
1057 return (size);
1058 }
1059
#if defined(BCMDBG_PHYDUMP)
/* Print interesting sbconfig registers of every scanned core into 'b'.
 * Iterates with interrupts blocked and restores the original core focus
 * before returning.
 */
void
sb_dumpregs(si_t *sih, struct bcmstrbuf *b)
{
	sbconfig_t *sb;
	uint origidx, i, intr_val = 0;
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

	origidx = sii->curidx;

	INTR_OFF(sii, intr_val);

	for (i = 0; i < sii->numcores; i++) {
		sb = REGS2SB(sb_setcoreidx(sih, i));

		bcm_bprintf(b, "core 0x%x: \n", cores_info->coreid[i]);

		/* imerrlog registers are only read on SonicsRev > 2.2 parts */
		if (sii->pub.socirev > SONICS_2_2)
			bcm_bprintf(b, "sbimerrlog 0x%x sbimerrloga 0x%x\n",
			          sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOG, 0, 0),
			          sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOGA, 0, 0));

		bcm_bprintf(b, "sbtmstatelow 0x%x sbtmstatehigh 0x%x sbidhigh 0x%x "
		            "sbimstate 0x%x\n sbimconfiglow 0x%x sbimconfighigh 0x%x\n",
		            R_SBREG(sii, &sb->sbtmstatelow), R_SBREG(sii, &sb->sbtmstatehigh),
		            R_SBREG(sii, &sb->sbidhigh), R_SBREG(sii, &sb->sbimstate),
		            R_SBREG(sii, &sb->sbimconfiglow), R_SBREG(sii, &sb->sbimconfighigh));
	}

	sb_setcoreidx(sih, origidx);
	INTR_RESTORE(sii, intr_val);
}
#endif // endif
1095