/*
 * Misc utility routines for accessing chip-specific features
 * of the SiliconBackplane-based Broadcom chips.
 *
 * Copyright (C) 1999-2009, Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 * $Id: sbutils.c,v 1.662.4.10.2.7.4.1 2009/09/25 00:32:01 Exp $
 */

#include <typedefs.h>
#include <bcmdefs.h>
#include <osl.h>
#include <bcmutils.h>
#include <siutils.h>
#include <bcmdevs.h>
#include <hndsoc.h>
#include <sbchipc.h>
#include <pcicfg.h>
#include <sbpcmcia.h>

#include "siutils_priv.h"

/* local prototypes */
static uint _sb_coreidx(si_info_t *sii, uint32 sba);
static uint _sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba,
                     uint ncores);
static uint32 _sb_coresba(si_info_t *sii);
static void *_sb_setcoreidx(si_info_t *sii, uint coreidx);

#define SET_SBREG(sii, r, mask, val) \
		W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val)))
#define REGS2SB(va)	(sbconfig_t *)((int8 *)(va) + SBCONFIGOFF)

/* sonicsrev */
#define SONICS_2_2	(SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
#define SONICS_2_3	(SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)

#define R_SBREG(sii, sbr)	sb_read_sbreg((sii), (sbr))
#define W_SBREG(sii, sbr, v)	sb_write_sbreg((sii), (sbr), (v))
#define AND_SBREG(sii, sbr, v)	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v)))
#define OR_SBREG(sii, sbr, v)	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v)))

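/*
 * All sbconfig space accesses go through R_SBREG/W_SBREG so that the PCMCIA
 * MEM_SEG window trick and the split 16-bit writes handled below are applied
 * transparently. A typical read-modify-write is therefore just (sketch,
 * with 'sb' pointing at the current core's sbconfig_t):
 *
 *	w = (R_SBREG(sii, &sb->sbtmstatelow) & ~mask) | val;
 *	W_SBREG(sii, &sb->sbtmstatelow, w);
 *
 * or, equivalently, SET_SBREG(sii, &sb->sbtmstatelow, mask, val).
 */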
static uint32
sb_read_sbreg(si_info_t *sii, volatile uint32 *sbr)
{
	uint8 tmp;
	uint32 val, intr_val = 0;


	/*
	 * Compact flash only has an 11-bit address, while we need a 12-bit address.
	 * MEM_SEG will be OR'd with the other 11 address bits in hardware,
	 * so we program MEM_SEG with the 12th bit when necessary (i.e. when accessing
	 * sb registers). For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special.
	 */
	if (PCMCIA(sii)) {
		INTR_OFF(sii, intr_val);
		tmp = 1;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
		sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11));	/* mask out bit 11 */
	}

	val = R_REG(sii->osh, sbr);

	if (PCMCIA(sii)) {
		tmp = 0;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
		INTR_RESTORE(sii, intr_val);
	}

	return (val);
}

static void
sb_write_sbreg(si_info_t *sii, volatile uint32 *sbr, uint32 v)
{
	uint8 tmp;
	volatile uint32 dummy;
	uint32 intr_val = 0;


	/*
	 * Compact flash only has an 11-bit address, while we need a 12-bit address.
	 * MEM_SEG will be OR'd with the other 11 address bits in hardware,
	 * so we program MEM_SEG with the 12th bit when necessary (i.e. when accessing
	 * sb registers). For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special.
	 */
	if (PCMCIA(sii)) {
		INTR_OFF(sii, intr_val);
		tmp = 1;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
		sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11));	/* mask out bit 11 */
	}

	if (BUSTYPE(sii->pub.bustype) == PCMCIA_BUS) {
#ifdef IL_BIGENDIAN
		dummy = R_REG(sii->osh, sbr);
		W_REG(sii->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
		dummy = R_REG(sii->osh, sbr);
		W_REG(sii->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
#else
		dummy = R_REG(sii->osh, sbr);
		W_REG(sii->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
		dummy = R_REG(sii->osh, sbr);
		W_REG(sii->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
#endif	/* IL_BIGENDIAN */
	} else
		W_REG(sii->osh, sbr, v);

	if (PCMCIA(sii)) {
		tmp = 0;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
		INTR_RESTORE(sii, intr_val);
	}
}

uint
sb_coreid(si_t *sih)
{
	si_info_t *sii;
	sbconfig_t *sb;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT);
}

uint
sb_flag(si_t *sih)
{
	si_info_t *sii;
	sbconfig_t *sb;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	return R_SBREG(sii, &sb->sbtpsflag) & SBTPS_NUM0_MASK;
}

void
sb_setint(si_t *sih, int siflag)
{
	si_info_t *sii;
	sbconfig_t *sb;
	uint32 vec;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	if (siflag == -1)
		vec = 0;
	else
		vec = 1 << siflag;
	W_SBREG(sii, &sb->sbintvec, vec);
}

/* return core index of the core with address 'sba' */
static uint
_sb_coreidx(si_info_t *sii, uint32 sba)
{
	uint i;

	for (i = 0; i < sii->numcores; i++)
		if (sba == sii->common_info->coresba[i])
			return i;
	return BADIDX;
}

/* return core address of the current core */
static uint32
_sb_coresba(si_info_t *sii)
{
	uint32 sbaddr;


	switch (BUSTYPE(sii->pub.bustype)) {
	case SI_BUS: {
		sbconfig_t *sb = REGS2SB(sii->curmap);
		sbaddr = sb_base(R_SBREG(sii, &sb->sbadmatch0));
		break;
	}

	case PCI_BUS:
		sbaddr = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
		break;

	case PCMCIA_BUS: {
		uint8 tmp = 0;
		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
		sbaddr = (uint32)tmp << 12;
		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
		sbaddr |= (uint32)tmp << 16;
		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
		sbaddr |= (uint32)tmp << 24;
		break;
	}

	case SPI_BUS:
	case SDIO_BUS:
		sbaddr = (uint32)(uintptr)sii->curmap;
		break;


	default:
		sbaddr = BADCOREADDR;
		break;
	}

	return sbaddr;
}

uint
sb_corevendor(si_t *sih)
{
	si_info_t *sii;
	sbconfig_t *sb;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT);
}

uint
sb_corerev(si_t *sih)
{
	si_info_t *sii;
	sbconfig_t *sb;
	uint sbidh;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);
	sbidh = R_SBREG(sii, &sb->sbidhigh);

	return (SBCOREREV(sbidh));
}

/* set core-specific control flags */
void
sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
{
	si_info_t *sii;
	sbconfig_t *sb;
	uint32 w;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	ASSERT((val & ~mask) == 0);

	/* mask and set */
	w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
	    (val << SBTML_SICF_SHIFT);
	W_SBREG(sii, &sb->sbtmstatelow, w);
}

/* set/clear core-specific control flags */
uint32
sb_core_cflags(si_t *sih, uint32 mask, uint32 val)
{
	si_info_t *sii;
	sbconfig_t *sb;
	uint32 w;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	ASSERT((val & ~mask) == 0);

	/* mask and set */
	if (mask || val) {
		w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
		    (val << SBTML_SICF_SHIFT);
		W_SBREG(sii, &sb->sbtmstatelow, w);
	}

	/* return the new value
	 * for a write operation, the following readback ensures completion of the write.
	 */
	return (R_SBREG(sii, &sb->sbtmstatelow) >> SBTML_SICF_SHIFT);
}

/* set/clear core-specific status flags */
uint32
sb_core_sflags(si_t *sih, uint32 mask, uint32 val)
{
	si_info_t *sii;
	sbconfig_t *sb;
	uint32 w;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	ASSERT((val & ~mask) == 0);
	ASSERT((mask & ~SISF_CORE_BITS) == 0);

	/* mask and set */
	if (mask || val) {
		w = (R_SBREG(sii, &sb->sbtmstatehigh) & ~(mask << SBTMH_SISF_SHIFT)) |
		    (val << SBTMH_SISF_SHIFT);
		W_SBREG(sii, &sb->sbtmstatehigh, w);
	}

	/* return the new value */
	return (R_SBREG(sii, &sb->sbtmstatehigh) >> SBTMH_SISF_SHIFT);
}

bool
sb_iscoreup(si_t *sih)
{
	si_info_t *sii;
	sbconfig_t *sb;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	return ((R_SBREG(sii, &sb->sbtmstatelow) &
	         (SBTML_RESET | SBTML_REJ_MASK | (SICF_CLOCK_EN << SBTML_SICF_SHIFT))) ==
	        (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
}

/*
 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
 * switch back to the original core, and return the new value.
 *
 * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
 *
 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
 * and (on newer pci cores) chipcommon registers.
 */
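/*
 * Illustrative sketch only: a caller can, for example, poke the chipcommon
 * watchdog register without doing its own core switch or interrupt handling:
 *
 *	sb_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, ticks);
 *
 * ('ticks' stands for a caller-supplied value; any regoff below SI_CORE_SIZE
 * of any valid core index may be used in the same way.)
 */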
uint
sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
{
	uint origidx = 0;
	uint32 *r = NULL;
	uint w;
	uint intr_val = 0;
	bool fast = FALSE;
	si_info_t *sii;

	sii = SI_INFO(sih);

	ASSERT(GOODIDX(coreidx));
	ASSERT(regoff < SI_CORE_SIZE);
	ASSERT((val & ~mask) == 0);

	if (coreidx >= SI_MAXCORES)
		return 0;

	if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
		/* If internal bus, we can always get at everything */
		fast = TRUE;
		/* map if it does not exist */
		if (!sii->common_info->regs[coreidx]) {
			sii->common_info->regs[coreidx] =
			    REG_MAP(sii->common_info->coresba[coreidx], SI_CORE_SIZE);
			ASSERT(GOODREGS(sii->common_info->regs[coreidx]));
		}
		r = (uint32 *)((uchar *)sii->common_info->regs[coreidx] + regoff);
	} else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
		/* If pci/pcie, we can get at pci/pcie regs and, on newer cores, at chipc */

		if ((sii->common_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
			/* Chipc registers are mapped at 12KB */

			fast = TRUE;
			r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
		} else if (sii->pub.buscoreidx == coreidx) {
			/* pci registers are either in the last 2KB of an 8KB window
			 * or, in pcie and pci rev 13, at 8KB
			 */
			fast = TRUE;
			if (SI_FAST(sii))
				r = (uint32 *)((char *)sii->curmap +
				               PCI_16KB0_PCIREGS_OFFSET + regoff);
			else
				r = (uint32 *)((char *)sii->curmap +
				               ((regoff >= SBCONFIGOFF) ?
				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
				               regoff);
		}
	}

	if (!fast) {
		INTR_OFF(sii, intr_val);

		/* save current core index */
		origidx = si_coreidx(&sii->pub);

		/* switch core */
		r = (uint32 *)((uchar *)sb_setcoreidx(&sii->pub, coreidx) + regoff);
	}
	ASSERT(r != NULL);

	/* mask and set */
	if (mask || val) {
		if (regoff >= SBCONFIGOFF) {
			w = (R_SBREG(sii, r) & ~mask) | val;
			W_SBREG(sii, r, w);
		} else {
			w = (R_REG(sii->osh, r) & ~mask) | val;
			W_REG(sii->osh, r, w);
		}
	}

	/* readback */
	if (regoff >= SBCONFIGOFF)
		w = R_SBREG(sii, r);
	else {
		if ((CHIPID(sii->pub.chip) == BCM5354_CHIP_ID) &&
		    (coreidx == SI_CC_IDX) &&
		    (regoff == OFFSETOF(chipcregs_t, watchdog))) {
			w = val;
		} else
			w = R_REG(sii->osh, r);
	}

	if (!fast) {
		/* restore core index */
		if (origidx != coreidx)
			sb_setcoreidx(&sii->pub, origidx);

		INTR_RESTORE(sii, intr_val);
	}

	return (w);
}

/* Scan the enumeration space to find all cores starting from the given
 * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba'
 * is the default core address at chip POR time and 'regs' is the virtual
 * address that the default core is mapped at. 'ncores' is the number of
 * cores expected on bus 'sbba'. It returns the total number of cores
 * starting from bus 'sbba', inclusive.
 */
#define SB_MAXBUSES	2
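/* Note: with SB_MAXBUSES set to 2, _sb_scan() below walks the primary bus at
 * SI_ENUM_BASE plus at most one level of bridged (OCP) bus; anything deeper is
 * rejected as too deep to scan.
 */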
static uint
_sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba, uint numcores)
{
	uint next;
	uint ncc = 0;
	uint i;

	if (bus >= SB_MAXBUSES) {
		SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba, bus));
		return 0;
	}
	SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba, numcores));

	/* Scan all cores on the bus starting from core 0.
	 * Core addresses must be contiguous on each bus.
	 */
	for (i = 0, next = sii->numcores; i < numcores && next < SB_BUS_MAXCORES; i++, next++) {
		sii->common_info->coresba[next] = sbba + (i * SI_CORE_SIZE);

		/* keep and reuse the initial register mapping */
		if ((BUSTYPE(sii->pub.bustype) == SI_BUS) &&
		    (sii->common_info->coresba[next] == sba)) {
			SI_MSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs, next));
			sii->common_info->regs[next] = regs;
		}

		/* change core to 'next' and read its coreid */
		sii->curmap = _sb_setcoreidx(sii, next);
		sii->curidx = next;

		sii->common_info->coreid[next] = sb_coreid(&sii->pub);

		/* core specific processing... */
		/* chipc provides # cores */
		if (sii->common_info->coreid[next] == CC_CORE_ID) {
			chipcregs_t *cc = (chipcregs_t *)sii->curmap;
			uint32 ccrev = sb_corerev(&sii->pub);

			/* determine numcores - this is the total # cores in the chip */
			if (((ccrev == 4) || (ccrev >= 6)))
				numcores = (R_REG(sii->osh, &cc->chipid) & CID_CC_MASK) >>
				           CID_CC_SHIFT;
			else {
				/* Older chips */
				uint chip = sii->pub.chip;

				if (chip == BCM4306_CHIP_ID)	/* < 4306c0 */
					numcores = 6;
				else if (chip == BCM4704_CHIP_ID)
					numcores = 9;
				else if (chip == BCM5365_CHIP_ID)
					numcores = 7;
				else {
					SI_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n",
					          chip));
					ASSERT(0);
					numcores = 1;
				}
			}
			SI_MSG(("_sb_scan: there are %u cores in the chip %s\n", numcores,
			        sii->pub.issim ? "QT" : ""));
		}
		/* scan bridged SB(s) and add results to the end of the list */
		else if (sii->common_info->coreid[next] == OCP_CORE_ID) {
			sbconfig_t *sb = REGS2SB(sii->curmap);
			uint32 nsbba = R_SBREG(sii, &sb->sbadmatch1);
			uint nsbcc;

			sii->numcores = next + 1;

			if ((nsbba & 0xfff00000) != SI_ENUM_BASE)
				continue;
			nsbba &= 0xfffff000;
			if (_sb_coreidx(sii, nsbba) != BADIDX)
				continue;

			nsbcc = (R_SBREG(sii, &sb->sbtmstatehigh) & 0x000f0000) >> 16;
			nsbcc = _sb_scan(sii, sba, regs, bus + 1, nsbba, nsbcc);
			if (sbba == SI_ENUM_BASE)
				numcores -= nsbcc;
			ncc += nsbcc;
		}
	}

	SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i, sbba));

	sii->numcores = i + ncc;
	return sii->numcores;
}

/* scan the sb enumerated space to identify all cores */
void
sb_scan(si_t *sih, void *regs, uint devid)
{
	si_info_t *sii;
	uint32 origsba;

	sii = SI_INFO(sih);

	/* Save the current core info; defer validating it until we know
	 * for sure what is good and what is bad.
	 */
	origsba = _sb_coresba(sii);

	/* scan all SB(s) starting from SI_ENUM_BASE */
	sii->numcores = _sb_scan(sii, origsba, regs, 0, SI_ENUM_BASE, 1);
}

/*
 * This function changes logical "focus" to the indicated core;
 * must be called with interrupts off.
 * Moreover, callers should keep interrupts off while switching out of and back to the d11 core.
 */
void *
sb_setcoreidx(si_t *sih, uint coreidx)
{
	si_info_t *sii;

	sii = SI_INFO(sih);

	if (coreidx >= sii->numcores)
		return (NULL);

	/*
	 * If the user has provided an interrupt mask enabled function,
	 * then assert interrupts are disabled before switching the core.
	 */
	ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));

	sii->curmap = _sb_setcoreidx(sii, coreidx);
	sii->curidx = coreidx;

	return (sii->curmap);
}
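/*
 * Callers normally bracket a temporary core switch with interrupts disabled and
 * restore the original index when done; a minimal sketch (the same pattern
 * sb_corereg() uses internally):
 *
 *	INTR_OFF(sii, intr_val);
 *	origidx = si_coreidx(&sii->pub);
 *	regs = sb_setcoreidx(&sii->pub, coreidx);
 *	... access the selected core through 'regs' ...
 *	sb_setcoreidx(&sii->pub, origidx);
 *	INTR_RESTORE(sii, intr_val);
 */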

/* This function changes the logical "focus" to the indicated core.
 * Return the current core's virtual address.
 */
static void *
_sb_setcoreidx(si_info_t *sii, uint coreidx)
{
	uint32 sbaddr = sii->common_info->coresba[coreidx];
	void *regs;

	switch (BUSTYPE(sii->pub.bustype)) {
	case SI_BUS:
		/* map new one */
		if (!sii->common_info->regs[coreidx]) {
			sii->common_info->regs[coreidx] = REG_MAP(sbaddr, SI_CORE_SIZE);
			ASSERT(GOODREGS(sii->common_info->regs[coreidx]));
		}
		regs = sii->common_info->regs[coreidx];
		break;

	case PCI_BUS:
		/* point bar0 window */
		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, sbaddr);
		regs = sii->curmap;
		break;

	case PCMCIA_BUS: {
		uint8 tmp = (sbaddr >> 12) & 0x0f;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
		tmp = (sbaddr >> 16) & 0xff;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
		tmp = (sbaddr >> 24) & 0xff;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
		regs = sii->curmap;
		break;
	}
	case SPI_BUS:
	case SDIO_BUS:
		/* map new one */
		if (!sii->common_info->regs[coreidx]) {
			sii->common_info->regs[coreidx] = (void *)(uintptr)sbaddr;
			ASSERT(GOODREGS(sii->common_info->regs[coreidx]));
		}
		regs = sii->common_info->regs[coreidx];
		break;


	default:
		ASSERT(0);
		regs = NULL;
		break;
	}

	return regs;
}

/* Return the address of the sbadmatch0/1/2/3 register */
static volatile uint32 *
sb_admatch(si_info_t *sii, uint asidx)
{
	sbconfig_t *sb;
	volatile uint32 *addrm;

	sb = REGS2SB(sii->curmap);

	switch (asidx) {
	case 0:
		addrm = &sb->sbadmatch0;
		break;

	case 1:
		addrm = &sb->sbadmatch1;
		break;

	case 2:
		addrm = &sb->sbadmatch2;
		break;

	case 3:
		addrm = &sb->sbadmatch3;
		break;

	default:
		SI_ERROR(("%s: Address space index (%d) out of range\n", __FUNCTION__, asidx));
		return 0;
	}

	return (addrm);
}

/* Return the number of address spaces in the current core */
int
sb_numaddrspaces(si_t *sih)
{
	si_info_t *sii;
	sbconfig_t *sb;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	/* + 1 because of enumeration space */
	return ((R_SBREG(sii, &sb->sbidlow) & SBIDL_AR_MASK) >> SBIDL_AR_SHIFT) + 1;
}

/* Return the address of the nth address space in the current core */
uint32
sb_addrspace(si_t *sih, uint asidx)
{
	si_info_t *sii;

	sii = SI_INFO(sih);

	return (sb_base(R_SBREG(sii, sb_admatch(sii, asidx))));
}

/* Return the size of the nth address space in the current core */
uint32
sb_addrspacesize(si_t *sih, uint asidx)
{
	si_info_t *sii;

	sii = SI_INFO(sih);

	return (sb_size(R_SBREG(sii, sb_admatch(sii, asidx))));
}

/* do buffered register updates */
void
sb_commit(si_t *sih)
{
	si_info_t *sii;
	uint origidx;
	uint intr_val = 0;

	sii = SI_INFO(sih);

	origidx = sii->curidx;
	ASSERT(GOODIDX(origidx));

	INTR_OFF(sii, intr_val);

	/* switch over to chipcommon core if there is one, else use pci */
	if (sii->pub.ccrev != NOREV) {
		chipcregs_t *ccregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);

		/* do the buffered register update */
		W_REG(sii->osh, &ccregs->broadcastaddress, SB_COMMIT);
		W_REG(sii->osh, &ccregs->broadcastdata, 0x0);
	} else
		ASSERT(0);

	/* restore core index */
	sb_setcoreidx(sih, origidx);
	INTR_RESTORE(sii, intr_val);
}
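/* Note: routines that modify buffered backplane configuration registers
 * (e.g. the sbimconfiglow writes in sb_core_tofixup() and sb_set_initiator_to()
 * below) call sb_commit() afterwards so the new values actually take effect.
 */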

void
sb_core_disable(si_t *sih, uint32 bits)
{
	si_info_t *sii;
	volatile uint32 dummy;
	sbconfig_t *sb;

	sii = SI_INFO(sih);

	ASSERT(GOODREGS(sii->curmap));
	sb = REGS2SB(sii->curmap);

	/* if core is already in reset, just return */
	if (R_SBREG(sii, &sb->sbtmstatelow) & SBTML_RESET)
		return;

	/* if clocks are not enabled, put into reset and return */
	if ((R_SBREG(sii, &sb->sbtmstatelow) & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) == 0)
		goto disable;

	/* set target reject and spin until busy is clear (preserve core-specific bits) */
	OR_SBREG(sii, &sb->sbtmstatelow, SBTML_REJ);
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	OSL_DELAY(1);
	SPINWAIT((R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000);
	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY)
		SI_ERROR(("%s: target state still busy\n", __FUNCTION__));

	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) {
		OR_SBREG(sii, &sb->sbimstate, SBIM_RJ);
		dummy = R_SBREG(sii, &sb->sbimstate);
		OSL_DELAY(1);
		SPINWAIT((R_SBREG(sii, &sb->sbimstate) & SBIM_BY), 100000);
	}

	/* set reset and reject while enabling the clocks */
	W_SBREG(sii, &sb->sbtmstatelow,
	        (((bits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
	         SBTML_REJ | SBTML_RESET));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	OSL_DELAY(10);

	/* don't forget to clear the initiator reject bit */
	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT)
		AND_SBREG(sii, &sb->sbimstate, ~SBIM_RJ);

disable:
	/* leave reset and reject asserted */
	W_SBREG(sii, &sb->sbtmstatelow, ((bits << SBTML_SICF_SHIFT) | SBTML_REJ | SBTML_RESET));
	OSL_DELAY(1);
}

/* reset and re-enable a core
 * inputs:
 * bits - core specific bits that are set during and after reset sequence
 * resetbits - core specific bits that are set only during reset sequence
 */
void
sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
{
	si_info_t *sii;
	sbconfig_t *sb;
	volatile uint32 dummy;

	sii = SI_INFO(sih);
	ASSERT(GOODREGS(sii->curmap));
	sb = REGS2SB(sii->curmap);

	/*
	 * Must do the disable sequence first to work for arbitrary current core state.
	 */
	sb_core_disable(sih, (bits | resetbits));

	/*
	 * Now do the initialization sequence.
	 */

	/* set reset while enabling the clock and forcing them on throughout the core */
	W_SBREG(sii, &sb->sbtmstatelow,
	        (((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
	         SBTML_RESET));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	OSL_DELAY(1);

	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_SERR) {
		W_SBREG(sii, &sb->sbtmstatehigh, 0);
	}
	if ((dummy = R_SBREG(sii, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) {
		AND_SBREG(sii, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
	}

	/* clear reset and allow it to propagate throughout the core */
	W_SBREG(sii, &sb->sbtmstatelow,
	        ((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	OSL_DELAY(1);

	/* leave clock enabled */
	W_SBREG(sii, &sb->sbtmstatelow, ((bits | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	OSL_DELAY(1);
}
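/* For instance, a caller needing a plain reset with no core-specific flags can
 * simply use sb_core_reset(sih, 0, 0): per the sequence above, 'bits' remain set
 * in sbtmstatelow after the reset while 'resetbits' are applied only while reset
 * is asserted.
 */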

void
sb_core_tofixup(si_t *sih)
{
	si_info_t *sii;
	sbconfig_t *sb;

	sii = SI_INFO(sih);

	if ((BUSTYPE(sii->pub.bustype) != PCI_BUS) || PCIE(sii) ||
	    (PCI(sii) && (sii->pub.buscorerev >= 5)))
		return;

	ASSERT(GOODREGS(sii->curmap));
	sb = REGS2SB(sii->curmap);

	if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
		SET_SBREG(sii, &sb->sbimconfiglow,
		          SBIMCL_RTO_MASK | SBIMCL_STO_MASK,
		          (0x5 << SBIMCL_RTO_SHIFT) | 0x3);
	} else {
		if (sb_coreid(sih) == PCI_CORE_ID) {
			SET_SBREG(sii, &sb->sbimconfiglow,
			          SBIMCL_RTO_MASK | SBIMCL_STO_MASK,
			          (0x3 << SBIMCL_RTO_SHIFT) | 0x2);
		} else {
			SET_SBREG(sii, &sb->sbimconfiglow, (SBIMCL_RTO_MASK | SBIMCL_STO_MASK), 0);
		}
	}

	sb_commit(sih);
}

/*
 * Set the initiator timeout for the "master core".
 * The master core is defined to be the core in control
 * of the chip and so it issues accesses to non-memory
 * locations (because of DMA *any* core can access memory).
 *
 * The routine uses the bus to decide who is the master:
 *	SI_BUS => mips
 *	JTAG_BUS => chipc
 *	PCI_BUS => pci or pcie
 *	PCMCIA_BUS => pcmcia
 *	SDIO_BUS => pcmcia
 *
 * This routine exists so callers can disable initiator
 * timeouts so accesses to very slow devices like otp
 * won't cause an abort. The routine allows arbitrary
 * settings of the service and request timeouts, though.
 *
 * Returns the timeout state before changing it or -1
 * on error.
 */

#define	TO_MASK	(SBIMCL_RTO_MASK | SBIMCL_STO_MASK)
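/*
 * A sketch of the intended use (illustrative only): a caller about to touch a
 * very slow device can clear the timeouts and later restore them:
 *
 *	prev = sb_set_initiator_to(sih, 0, BADIDX);	(timeouts off, master core)
 *	... access the slow device (e.g. otp) ...
 *	sb_set_initiator_to(sih, prev, BADIDX);		(restore previous setting)
 */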

uint32
sb_set_initiator_to(si_t *sih, uint32 to, uint idx)
{
	si_info_t *sii;
	uint origidx;
	uint intr_val = 0;
	uint32 tmp, ret = 0xffffffff;
	sbconfig_t *sb;

	sii = SI_INFO(sih);

	if ((to & ~TO_MASK) != 0)
		return ret;

	/* Figure out the master core */
	if (idx == BADIDX) {
		switch (BUSTYPE(sii->pub.bustype)) {
		case PCI_BUS:
			idx = sii->pub.buscoreidx;
			break;
		case JTAG_BUS:
			idx = SI_CC_IDX;
			break;
		case PCMCIA_BUS:
		case SDIO_BUS:
			idx = si_findcoreidx(sih, PCMCIA_CORE_ID, 0);
			break;
		case SI_BUS:
			idx = si_findcoreidx(sih, MIPS33_CORE_ID, 0);
			break;
		default:
			ASSERT(0);
		}
		if (idx == BADIDX)
			return ret;
	}

	INTR_OFF(sii, intr_val);
	origidx = si_coreidx(sih);

	sb = REGS2SB(sb_setcoreidx(sih, idx));

	tmp = R_SBREG(sii, &sb->sbimconfiglow);
	ret = tmp & TO_MASK;
	W_SBREG(sii, &sb->sbimconfiglow, (tmp & ~TO_MASK) | to);

	sb_commit(sih);
	sb_setcoreidx(sih, origidx);
	INTR_RESTORE(sii, intr_val);
	return ret;
}
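
/* In sb_base() and sb_size() below, an admatch register value is decoded
 * according to its type field (SBAM_TYPE_MASK): the base address comes from
 * the type-specific SBAM_BASEn_MASK bits and the size is 1 << (ADINT + 1)
 * bytes; negative (SBAM_ADNEG) decoding is not supported here.
 */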
uint32
sb_base(uint32 admatch)
{
	uint32 base;
	uint type;

	type = admatch & SBAM_TYPE_MASK;
	ASSERT(type < 3);

	base = 0;

	if (type == 0) {
		base = admatch & SBAM_BASE0_MASK;
	} else if (type == 1) {
		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
		base = admatch & SBAM_BASE1_MASK;
	} else if (type == 2) {
		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
		base = admatch & SBAM_BASE2_MASK;
	}

	return (base);
}

uint32
sb_size(uint32 admatch)
{
	uint32 size;
	uint type;

	type = admatch & SBAM_TYPE_MASK;
	ASSERT(type < 3);

	size = 0;

	if (type == 0) {
		size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1);
	} else if (type == 1) {
		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
		size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1);
	} else if (type == 2) {
		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
		size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1);
	}

	return (size);
}