/*
 * Misc utility routines for accessing chip-specific features
 * of the SiliconBackplane-based Broadcom chips.
 *
 * Copyright (C) 2020, Broadcom.
 *
 *      Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 *      As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module.  An independent module is a module which is not
 * derived from this software.  The special exception does not apply to any
 * modifications of the software.
 *
 *
 * <<Broadcom-WL-IPTag/Dual:>>
 */

#include <typedefs.h>
#include <bcmdefs.h>
#include <osl.h>
#include <bcmutils.h>
#include <siutils.h>
#include <bcmdevs.h>
#include <hndsoc.h>
#include <sbchipc.h>
#if !defined(BCMDONGLEHOST)
#include <pci_core.h>
#endif /* !defined(BCMDONGLEHOST) */
#include <pcicfg.h>
#include <sbpcmcia.h>

#include "siutils_priv.h"

/* local prototypes */
static uint _sb_coreidx(const si_info_t *sii, uint32 sba);
static uint _sb_scan(si_info_t *sii, uint32 sba, volatile void *regs, uint bus, uint32 sbba,
                     uint ncores, uint devid);
static uint32 _sb_coresba(const si_info_t *sii);
static volatile void *_sb_setcoreidx(const si_info_t *sii, uint coreidx);
#define	SET_SBREG(sii, r, mask, val)	\
		W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val)))
#define	REGS2SB(va)	(sbconfig_t*) ((volatile int8*)(va) + SBCONFIGOFF)

/* sonicsrev */
#define	SONICS_2_2	(SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
#define	SONICS_2_3	(SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)

/*
 * Macros to read/write sbconfig registers.
 */
#define	R_SBREG(sii, sbr)	sb_read_sbreg((sii), (sbr))
#define	W_SBREG(sii, sbr, v)	sb_write_sbreg((sii), (sbr), (v))
#define	AND_SBREG(sii, sbr, v)	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v)))
#define	OR_SBREG(sii, sbr, v)	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v)))
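
/*
 * Illustrative usage sketch (not called by the driver itself): the macros above
 * compose into read-modify-write accesses on sbconfig registers, e.g. setting and
 * then clearing the target-reject bit of the current core:
 *
 *	OR_SBREG(sii, &sb->sbtmstatelow, SBTML_REJ);
 *	AND_SBREG(sii, &sb->sbtmstatelow, ~SBTML_REJ);
 *
 * 'sii' and 'sb' stand for the usual si_info_t/sbconfig_t pointers used throughout
 * this file.
 */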

static uint32
sb_read_sbreg(const si_info_t *sii, volatile uint32 *sbr)
{
	return R_REG(sii->osh, sbr);
}

static void
sb_write_sbreg(const si_info_t *sii, volatile uint32 *sbr, uint32 v)
{
	W_REG(sii->osh, sbr, v);
}

uint
sb_coreid(const si_t *sih)
{
	const si_info_t *sii = SI_INFO(sih);
	sbconfig_t *sb = REGS2SB(sii->curmap);

	return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT);
}

uint
sb_intflag(si_t *sih)
{
	const si_info_t *sii = SI_INFO(sih);
	volatile void *corereg;
	sbconfig_t *sb;
	uint origidx, intflag;
	bcm_int_bitmask_t intr_val;

	INTR_OFF(sii, &intr_val);
	origidx = si_coreidx(sih);
	corereg = si_setcore(sih, CC_CORE_ID, 0);
	ASSERT(corereg != NULL);
	sb = REGS2SB(corereg);
	intflag = R_SBREG(sii, &sb->sbflagst);
	sb_setcoreidx(sih, origidx);
	INTR_RESTORE(sii, &intr_val);

	return intflag;
}

uint
sb_flag(const si_t *sih)
{
	const si_info_t *sii = SI_INFO(sih);
	sbconfig_t *sb = REGS2SB(sii->curmap);

	return R_SBREG(sii, &sb->sbtpsflag) & SBTPS_NUM0_MASK;
}

void
sb_setint(const si_t *sih, int siflag)
{
	const si_info_t *sii = SI_INFO(sih);
	sbconfig_t *sb = REGS2SB(sii->curmap);
	uint32 vec;

	if (siflag == -1)
		vec = 0;
	else
		vec = 1 << siflag;
	W_SBREG(sii, &sb->sbintvec, vec);
}

/* return core index of the core with address 'sba' */
static uint
BCMATTACHFN(_sb_coreidx)(const si_info_t *sii, uint32 sba)
{
	uint i;
	const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;

	for (i = 0; i < sii->numcores; i ++)
		if (sba == cores_info->coresba[i])
			return i;
	return BADIDX;
}

/* return core address of the current core */
static uint32
BCMATTACHFN(_sb_coresba)(const si_info_t *sii)
{
	uint32 sbaddr;

	switch (BUSTYPE(sii->pub.bustype)) {
	case SI_BUS: {
		sbconfig_t *sb = REGS2SB(sii->curmap);
		sbaddr = sb_base(R_SBREG(sii, &sb->sbadmatch0));
		break;
	}

	case PCI_BUS:
		sbaddr = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
		break;

#ifdef BCMSDIO
	case SPI_BUS:
	case SDIO_BUS:
		sbaddr = (uint32)(uintptr)sii->curmap;
		break;
#endif

	default:
		sbaddr = BADCOREADDR;
		break;
	}

	return sbaddr;
}

uint
sb_corevendor(const si_t *sih)
{
	const si_info_t *sii;
	sbconfig_t *sb;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT);
}

uint
sb_corerev(const si_t *sih)
{
	const si_info_t *sii;
	sbconfig_t *sb;
	uint sbidh;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);
	sbidh = R_SBREG(sii, &sb->sbidhigh);

	return (SBCOREREV(sbidh));
}

/* set core-specific control flags */
void
sb_core_cflags_wo(const si_t *sih, uint32 mask, uint32 val)
{
	const si_info_t *sii = SI_INFO(sih);
	sbconfig_t *sb = REGS2SB(sii->curmap);
	uint32 w;

	ASSERT((val & ~mask) == 0);

	/* mask and set */
	w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
	        (val << SBTML_SICF_SHIFT);
	W_SBREG(sii, &sb->sbtmstatelow, w);
}

/* set/clear core-specific control flags */
uint32
sb_core_cflags(const si_t *sih, uint32 mask, uint32 val)
{
	const si_info_t *sii = SI_INFO(sih);
	sbconfig_t *sb = REGS2SB(sii->curmap);
	uint32 w;

	ASSERT((val & ~mask) == 0);

	/* mask and set */
	if (mask || val) {
		w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
		        (val << SBTML_SICF_SHIFT);
		W_SBREG(sii, &sb->sbtmstatelow, w);
	}

	/* return the new value
	 * for write operations, the following readback ensures completion of the write operation.
	 */
	return (R_SBREG(sii, &sb->sbtmstatelow) >> SBTML_SICF_SHIFT);
}
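
/*
 * Usage sketch for sb_core_cflags() (illustrative only): 'mask' selects which SICF_*
 * bits to touch and 'val' gives their new values; bits outside 'mask' are preserved,
 * and mask == val == 0 is a pure read.
 *
 *	(void)sb_core_cflags(sih, SICF_FGC, SICF_FGC);	force gated clocks on
 *	(void)sb_core_cflags(sih, SICF_FGC, 0);		and release them again
 *	flags = sb_core_cflags(sih, 0, 0);		read back the current flags
 */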

/* set/clear core-specific status flags */
uint32
sb_core_sflags(const si_t *sih, uint32 mask, uint32 val)
{
	const si_info_t *sii = SI_INFO(sih);
	sbconfig_t *sb = REGS2SB(sii->curmap);
	uint32 w;

	ASSERT((val & ~mask) == 0);
	ASSERT((mask & ~SISF_CORE_BITS) == 0);

	/* mask and set */
	if (mask || val) {
		w = (R_SBREG(sii, &sb->sbtmstatehigh) & ~(mask << SBTMH_SISF_SHIFT)) |
		        (val << SBTMH_SISF_SHIFT);
		W_SBREG(sii, &sb->sbtmstatehigh, w);
	}

	/* return the new value */
	return (R_SBREG(sii, &sb->sbtmstatehigh) >> SBTMH_SISF_SHIFT);
}

bool
sb_iscoreup(const si_t *sih)
{
	const si_info_t *sii = SI_INFO(sih);
	sbconfig_t *sb = REGS2SB(sii->curmap);

	return ((R_SBREG(sii, &sb->sbtmstatelow) &
	         (SBTML_RESET | SBTML_REJ_MASK | (SICF_CLOCK_EN << SBTML_SICF_SHIFT))) ==
	        (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
}

/*
 * Switch to 'coreidx', issue a single arbitrary 32-bit register mask&set operation,
 * switch back to the original core, and return the new value.
 *
 * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
 *
 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
 * and (on newer pci cores) chipcommon registers.
 */
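/*
 * Usage sketch (illustrative only; SI_CC_IDX is assumed to be the chipcommon core
 * index from siutils.h): read a register without modifying it by passing
 * mask == val == 0, or do a masked update in a single call.
 *
 *	chipid = sb_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, chipid), 0, 0);
 *	sb_corereg(sih, coreidx, regoff, 0x1, 0x1);	set only bit 0, preserve the rest
 */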
uint
sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
{
	uint origidx = 0;
	volatile uint32 *r = NULL;
	uint w;
	bcm_int_bitmask_t intr_val;
	bool fast = FALSE;
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

	ASSERT(GOODIDX(coreidx, sii->numcores));
	ASSERT(regoff < SI_CORE_SIZE);
	ASSERT((val & ~mask) == 0);

	if (coreidx >= SI_MAXCORES)
		return 0;

	if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
		/* If internal bus, we can always get at everything */
		fast = TRUE;
		/* map if it does not exist */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
			                            SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
	} else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
		/* If pci/pcie, we can get at pci/pcie regs and, on newer cores, at chipc */

		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
			/* Chipc registers are mapped at 12KB */

			fast = TRUE;
			r = (volatile uint32 *)((volatile char *)sii->curmap +
			               PCI_16KB0_CCREGS_OFFSET + regoff);
		} else if (sii->pub.buscoreidx == coreidx) {
			/* pci registers are either in the last 2KB of an 8KB window
			 * or, in pcie and pci rev 13, at 8KB
			 */
			fast = TRUE;
			if (SI_FAST(sii))
				r = (volatile uint32 *)((volatile char *)sii->curmap +
				               PCI_16KB0_PCIREGS_OFFSET + regoff);
			else
				r = (volatile uint32 *)((volatile char *)sii->curmap +
				               ((regoff >= SBCONFIGOFF) ?
				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
				               regoff);
		}
	}

	if (!fast) {
		INTR_OFF(sii, &intr_val);

		/* save current core index */
		origidx = si_coreidx(&sii->pub);

		/* switch core */
		r = (volatile uint32*) ((volatile uchar*)sb_setcoreidx(&sii->pub, coreidx) +
		               regoff);
	}
	ASSERT(r != NULL);

	/* mask and set */
	if (mask || val) {
		if (regoff >= SBCONFIGOFF) {
			w = (R_SBREG(sii, r) & ~mask) | val;
			W_SBREG(sii, r, w);
		} else {
			w = (R_REG(sii->osh, r) & ~mask) | val;
			W_REG(sii->osh, r, w);
		}
	}

	/* readback */
	if (regoff >= SBCONFIGOFF)
		w = R_SBREG(sii, r);
	else {
		w = R_REG(sii->osh, r);
	}

	if (!fast) {
		/* restore core index */
		if (origidx != coreidx)
			sb_setcoreidx(&sii->pub, origidx);

		INTR_RESTORE(sii, &intr_val);
	}

	return (w);
}

/*
 * If there is no need for fiddling with interrupts or core switches (typically silicon
 * backplane registers, pci registers and chipcommon registers), this function
 * translates the register offset on this core to a mapped address. This address can
 * be used for W_REG/R_REG directly.
 *
 * For accessing registers that would need a core switch, this function will return
 * NULL.
 */
volatile uint32 *
sb_corereg_addr(const si_t *sih, uint coreidx, uint regoff)
{
	volatile uint32 *r = NULL;
	bool fast = FALSE;
	const si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

	ASSERT(GOODIDX(coreidx, sii->numcores));
	ASSERT(regoff < SI_CORE_SIZE);

	if (coreidx >= SI_MAXCORES)
		return 0;

	if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
		/* If internal bus, we can always get at everything */
		fast = TRUE;
		/* map if it does not exist */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
			                            SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
	} else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
		/* If pci/pcie, we can get at pci/pcie regs and, on newer cores, at chipc */

		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
			/* Chipc registers are mapped at 12KB */

			fast = TRUE;
			r = (volatile uint32 *)((volatile char *)sii->curmap +
			               PCI_16KB0_CCREGS_OFFSET + regoff);
		} else if (sii->pub.buscoreidx == coreidx) {
			/* pci registers are either in the last 2KB of an 8KB window
			 * or, in pcie and pci rev 13, at 8KB
			 */
			fast = TRUE;
			if (SI_FAST(sii))
				r = (volatile uint32 *)((volatile char *)sii->curmap +
				               PCI_16KB0_PCIREGS_OFFSET + regoff);
			else
				r = (volatile uint32 *)((volatile char *)sii->curmap +
				               ((regoff >= SBCONFIGOFF) ?
				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
				               regoff);
		}
	}

	if (!fast)
		return 0;

	return (r);
}
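
/*
 * Usage sketch (illustrative only): when a non-NULL address comes back, the register
 * can be accessed directly; otherwise fall back to sb_corereg(), which performs the
 * core switch ('sii' as elsewhere in this file).
 *
 *	volatile uint32 *r = sb_corereg_addr(sih, coreidx, regoff);
 *	if (r != NULL)
 *		val = R_REG(sii->osh, r);
 *	else
 *		val = sb_corereg(sih, coreidx, regoff, 0, 0);
 */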

/* Scan the enumeration space to find all cores starting from the given
 * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba'
 * is the default core address at chip POR time and 'regs' is the virtual
 * address that the default core is mapped at. 'ncores' is the number of
 * cores expected on bus 'sbba'. It returns the total number of cores
 * starting from bus 'sbba', inclusive.
 */
#define SB_MAXBUSES	2
static uint
BCMATTACHFN(_sb_scan)(si_info_t *sii, uint32 sba, volatile void *regs, uint bus,
	uint32 sbba, uint numcores, uint devid)
{
	uint next;
	uint ncc = 0;
	uint i;
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

	/* bail out in case it is too deep to scan at the specified bus level */
	if (bus >= SB_MAXBUSES) {
		SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba, bus));
		return 0;
	}
	SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba, numcores));

	/* Scan all cores on the bus starting from core 0.
	 * Core addresses must be contiguous on each bus.
	 */
	for (i = 0, next = sii->numcores; i < numcores && next < SB_BUS_MAXCORES; i++, next++) {
		cores_info->coresba[next] = sbba + (i * SI_CORE_SIZE);

		/* keep and reuse the initial register mapping */
		if ((BUSTYPE(sii->pub.bustype) == SI_BUS) && (cores_info->coresba[next] == sba)) {
			SI_VMSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs, next));
			cores_info->regs[next] = regs;
		}

		/* change core to 'next' and read its coreid */
		sii->curmap = _sb_setcoreidx(sii, next);
		sii->curidx = next;

		cores_info->coreid[next] = sb_coreid(&sii->pub);

		/* core specific processing... */
		/* chipc provides # cores */
		if (cores_info->coreid[next] == CC_CORE_ID) {
			chipcregs_t *cc = (chipcregs_t *)sii->curmap;

			/* determine numcores - this is the total # cores in the chip */
			ASSERT(cc);
			numcores = (R_REG(sii->osh, &cc->chipid) & CID_CC_MASK) >> CID_CC_SHIFT;
			SI_VMSG(("_sb_scan: there are %u cores in the chip %s\n", numcores,
				sii->pub.issim ? "QT" : ""));
		}
		/* scan bridged SB(s) and add results to the end of the list */
		else if (cores_info->coreid[next] == OCP_CORE_ID) {
			sbconfig_t *sb = REGS2SB(sii->curmap);
			uint32 nsbba = R_SBREG(sii, &sb->sbadmatch1);
			uint nsbcc;

			sii->numcores = next + 1;

			if ((nsbba & 0xfff00000) != si_enum_base(devid))
				continue;
			nsbba &= 0xfffff000;
			if (_sb_coreidx(sii, nsbba) != BADIDX)
				continue;

			nsbcc = (R_SBREG(sii, &sb->sbtmstatehigh) & 0x000f0000) >> 16;
			nsbcc = _sb_scan(sii, sba, regs, bus + 1, nsbba, nsbcc, devid);
			if (sbba == si_enum_base(devid))
				numcores -= nsbcc;
			ncc += nsbcc;
		}
	}

	SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i, sbba));

	sii->numcores = i + ncc;
	return sii->numcores;
}

/* scan the sb enumerated space to identify all cores */
void
BCMATTACHFN(sb_scan)(si_t *sih, volatile void *regs, uint devid)
{
	uint32 origsba;
	sbconfig_t *sb;
	si_info_t *sii = SI_INFO(sih);
	BCM_REFERENCE(devid);

	sb = REGS2SB(sii->curmap);

	sii->pub.socirev = (R_SBREG(sii, &sb->sbidlow) & SBIDL_RV_MASK) >> SBIDL_RV_SHIFT;

	/* Save the current core info and validate it later, once we know
	 * for sure what is good and what is bad.
	 */
	origsba = _sb_coresba(sii);

	/* scan all SB(s) starting from SI_ENUM_BASE_DEFAULT */
	sii->numcores = _sb_scan(sii, origsba, regs, 0, si_enum_base(devid), 1, devid);
}

/*
 * This function changes logical "focus" to the indicated core;
 * must be called with interrupts off.
 * Moreover, callers should keep interrupts off while switching out of and back to the d11 core.
 */
volatile void *
sb_setcoreidx(si_t *sih, uint coreidx)
{
	si_info_t *sii = SI_INFO(sih);

	if (coreidx >= sii->numcores)
		return (NULL);

	/*
	 * If the user has provided an interrupt mask enabled function,
	 * then assert interrupts are disabled before switching the core.
	 */
	ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));

	sii->curmap = _sb_setcoreidx(sii, coreidx);
	sii->curidx = coreidx;

	return (sii->curmap);
}
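
/*
 * Usage sketch (illustrative only): the interrupt-off / switch / restore pattern
 * used throughout this file, e.g. in sb_intflag() above.
 *
 *	INTR_OFF(sii, &intr_val);
 *	origidx = si_coreidx(sih);
 *	regs = sb_setcoreidx(sih, coreidx);
 *	... access the selected core's registers ...
 *	sb_setcoreidx(sih, origidx);
 *	INTR_RESTORE(sii, &intr_val);
 */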

/* This function changes the logical "focus" to the indicated core.
 * Return the current core's virtual address.
 */
static volatile void *
_sb_setcoreidx(const si_info_t *sii, uint coreidx)
{
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
	uint32 sbaddr = cores_info->coresba[coreidx];
	volatile void *regs;

	switch (BUSTYPE(sii->pub.bustype)) {
	case SI_BUS:
		/* map new one */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = REG_MAP(sbaddr, SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		regs = cores_info->regs[coreidx];
		break;

	case PCI_BUS:
		/* point bar0 window */
		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, sbaddr);
		regs = sii->curmap;
		break;

#ifdef BCMSDIO
	case SPI_BUS:
	case SDIO_BUS:
		/* map new one */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = (void *)(uintptr)sbaddr;
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		regs = cores_info->regs[coreidx];
		break;
#endif	/* BCMSDIO */

	default:
		ASSERT(0);
		regs = NULL;
		break;
	}

	return regs;
}

/* Return the address of sbadmatch0/1/2/3 register */
static volatile uint32 *
sb_admatch(const si_info_t *sii, uint asidx)
{
	sbconfig_t *sb;
	volatile uint32 *addrm;

	sb = REGS2SB(sii->curmap);

	switch (asidx) {
	case 0:
		addrm =  &sb->sbadmatch0;
		break;

	case 1:
		addrm =  &sb->sbadmatch1;
		break;

	case 2:
		addrm =  &sb->sbadmatch2;
		break;

	case 3:
		addrm =  &sb->sbadmatch3;
		break;

	default:
		SI_ERROR(("sb_admatch: Address space index (%d) out of range\n", asidx));
		return 0;
	}

	return (addrm);
}

/* Return the number of address spaces in the current core */
int
sb_numaddrspaces(const si_t *sih)
{
	const si_info_t *sii = SI_INFO(sih);
	sbconfig_t *sb = REGS2SB(sii->curmap);

	/* + 1 because of enumeration space */
	return ((R_SBREG(sii, &sb->sbidlow) & SBIDL_AR_MASK) >> SBIDL_AR_SHIFT) + 1;
}

/* Return the address of the nth address space in the current core */
uint32
sb_addrspace(const si_t *sih, uint asidx)
{
	const si_info_t *sii = SI_INFO(sih);

	return (sb_base(R_SBREG(sii, sb_admatch(sii, asidx))));
}

/* Return the size of the nth address space in the current core */
uint32
sb_addrspacesize(const si_t *sih, uint asidx)
{
	const si_info_t *sii = SI_INFO(sih);

	return (sb_size(R_SBREG(sii, sb_admatch(sii, asidx))));
}

#if defined(BCMDBG_ERR) || defined(BCMASSERT_SUPPORT) || \
	defined(BCMDBG_DUMP)
/* traverse all cores to find and clear the source of an SError */
static void
sb_serr_clear(si_info_t *sii)
{
	sbconfig_t *sb;
	uint origidx;
	uint i;
	bcm_int_bitmask_t intr_val;
	volatile void *corereg = NULL;

	INTR_OFF(sii, &intr_val);
	origidx = si_coreidx(&sii->pub);

	for (i = 0; i < sii->numcores; i++) {
		corereg = sb_setcoreidx(&sii->pub, i);
		if (NULL != corereg) {
			sb = REGS2SB(corereg);
			if ((R_SBREG(sii, &sb->sbtmstatehigh)) & SBTMH_SERR) {
				AND_SBREG(sii, &sb->sbtmstatehigh, ~SBTMH_SERR);
				SI_ERROR(("sb_serr_clear: SError at core 0x%x\n",
				          sb_coreid(&sii->pub)));
			}
		}
	}

	sb_setcoreidx(&sii->pub, origidx);
	INTR_RESTORE(sii, &intr_val);
}

/*
 * Check if any inband, outband or timeout errors have happened and clear them.
 * Must be called with the chip clock on!
 */
bool
sb_taclear(si_t *sih, bool details)
{
	si_info_t *sii = SI_INFO(sih);
	bool rc = FALSE;
	uint32 inband = 0, serror = 0, timeout = 0;
	volatile uint32 imstate;

	BCM_REFERENCE(sii);

	if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
		volatile uint32 stcmd;

		/* inband error is Target abort for PCI */
		stcmd = OSL_PCI_READ_CONFIG(sii->osh, PCI_CFG_CMD, sizeof(uint32));
		inband = stcmd & PCI_STAT_TA;
		if (inband) {
#ifdef BCMDBG
			if (details) {
				SI_ERROR(("\ninband:\n"));
				si_viewall(sih, FALSE);
			}
#endif
			OSL_PCI_WRITE_CONFIG(sii->osh, PCI_CFG_CMD, sizeof(uint32), stcmd);
		}

		/* serror */
		stcmd = OSL_PCI_READ_CONFIG(sii->osh, PCI_INT_STATUS, sizeof(uint32));
		serror = stcmd & PCI_SBIM_STATUS_SERR;
		if (serror) {
#ifdef BCMDBG
			if (details) {
				SI_ERROR(("\nserror:\n"));
				si_viewall(sih, FALSE);
			}
#endif
			sb_serr_clear(sii);
			OSL_PCI_WRITE_CONFIG(sii->osh, PCI_INT_STATUS, sizeof(uint32), stcmd);
		}

		/* timeout */
		imstate = sb_corereg(sih, sii->pub.buscoreidx,
		                     SBCONFIGOFF + OFFSETOF(sbconfig_t, sbimstate), 0, 0);
		if ((imstate != 0xffffffff) && (imstate & (SBIM_IBE | SBIM_TO))) {
			sb_corereg(sih, sii->pub.buscoreidx,
			           SBCONFIGOFF + OFFSETOF(sbconfig_t, sbimstate), ~0,
			           (imstate & ~(SBIM_IBE | SBIM_TO)));
			/* inband = imstate & SBIM_IBE; same as TA above */
			timeout = imstate & SBIM_TO;
			if (timeout) {
#ifdef BCMDBG
				if (details) {
					SI_ERROR(("\ntimeout:\n"));
					si_viewall(sih, FALSE);
				}
#endif
			}
		}

		if (inband) {
			/* dump errlog for sonics >= 2.3 */
			if (sii->pub.socirev == SONICS_2_2)
				;
			else {
				uint32 imerrlog, imerrloga;
				imerrlog = sb_corereg(sih, sii->pub.buscoreidx, SBIMERRLOG, 0, 0);
				if (imerrlog & SBTMEL_EC) {
					imerrloga = sb_corereg(sih, sii->pub.buscoreidx,
					                       SBIMERRLOGA, 0, 0);
					BCM_REFERENCE(imerrloga);
					/* clear errlog */
					sb_corereg(sih, sii->pub.buscoreidx, SBIMERRLOG, ~0, 0);
					SI_ERROR(("sb_taclear: ImErrLog 0x%x, ImErrLogA 0x%x\n",
						imerrlog, imerrloga));
				}
			}
		}
	}
#ifdef BCMSDIO
	else if ((BUSTYPE(sii->pub.bustype) == SDIO_BUS) ||
	         (BUSTYPE(sii->pub.bustype) == SPI_BUS)) {
		sbconfig_t *sb;
		uint origidx;
		bcm_int_bitmask_t intr_val;
		volatile void *corereg = NULL;
		volatile uint32 tmstate;

		INTR_OFF(sii, &intr_val);
		origidx = si_coreidx(sih);

		corereg = si_setcore(sih, SDIOD_CORE_ID, 0);
		if (corereg != NULL) {
			sb = REGS2SB(corereg);

			imstate = R_SBREG(sii, &sb->sbimstate);
			if ((imstate != 0xffffffff) && (imstate & (SBIM_IBE | SBIM_TO))) {
				AND_SBREG(sii, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
				/* inband = imstate & SBIM_IBE; cmd error */
				timeout = imstate & SBIM_TO;
			}
			tmstate = R_SBREG(sii, &sb->sbtmstatehigh);
			if ((tmstate != 0xffffffff) && (tmstate & SBTMH_INT_STATUS)) {
				sb_serr_clear(sii);
				serror = 1;
				OR_SBREG(sii, &sb->sbtmstatelow, SBTML_INT_ACK);
				AND_SBREG(sii, &sb->sbtmstatelow, ~SBTML_INT_ACK);
			}
		}

		sb_setcoreidx(sih, origidx);
		INTR_RESTORE(sii, &intr_val);
	}
#endif /* BCMSDIO */

	if (inband | timeout | serror) {
		rc = TRUE;
		SI_ERROR(("sb_taclear: inband 0x%x, serror 0x%x, timeout 0x%x!\n",
		          inband, serror, timeout));
	}

	return (rc);
}
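
/*
 * Usage sketch (illustrative only): debug callers invoke this after suspicious bus
 * behaviour; a TRUE return means at least one inband/serror/timeout condition was
 * found and cleared (pass TRUE for 'details' to get the verbose dump on BCMDBG builds).
 *
 *	if (sb_taclear(sih, TRUE))
 *		SI_ERROR(("backplane error detected and cleared\n"));
 */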
#endif /* BCMDBG_ERR || BCMASSERT_SUPPORT || BCMDBG_DUMP */

/* do the buffered register update */
void
sb_commit(si_t *sih)
{
	const si_info_t *sii = SI_INFO(sih);
	uint origidx;
	bcm_int_bitmask_t intr_val;

	origidx = sii->curidx;
	ASSERT(GOODIDX(origidx, sii->numcores));

	INTR_OFF(sii, &intr_val);

	/* switch over to chipcommon core if there is one, else use pci */
	if (sii->pub.ccrev != NOREV) {
		chipcregs_t *ccregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
		ASSERT(ccregs != NULL);

		/* do the buffered register update */
		W_REG(sii->osh, &ccregs->broadcastaddress, SB_COMMIT);
		W_REG(sii->osh, &ccregs->broadcastdata, 0x0);
#if !defined(BCMDONGLEHOST)
	} else if (PCI(sii)) {
		sbpciregs_t *pciregs = (sbpciregs_t *)si_setcore(sih, PCI_CORE_ID, 0);
		ASSERT(pciregs != NULL);

		/* do the buffered register update */
		W_REG(sii->osh, &pciregs->bcastaddr, SB_COMMIT);
		W_REG(sii->osh, &pciregs->bcastdata, 0x0);
#endif /* !defined(BCMDONGLEHOST) */
	} else
		ASSERT(0);

	/* restore core index */
	sb_setcoreidx(sih, origidx);
	INTR_RESTORE(sii, &intr_val);
}

void
sb_core_disable(const si_t *sih, uint32 bits)
{
	const si_info_t *sii = SI_INFO(sih);
	volatile uint32 dummy;
	sbconfig_t *sb;

	ASSERT(GOODREGS(sii->curmap));
	sb = REGS2SB(sii->curmap);

	/* if core is already in reset, just return */
	if (R_SBREG(sii, &sb->sbtmstatelow) & SBTML_RESET)
		return;

	/* if clocks are not enabled, put into reset and return */
	if ((R_SBREG(sii, &sb->sbtmstatelow) & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) == 0)
		goto disable;

	/* set target reject and spin until busy is clear (preserve core-specific bits) */
	OR_SBREG(sii, &sb->sbtmstatelow, SBTML_REJ);
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);
	SPINWAIT((R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000);
	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY)
		SI_ERROR(("sb_core_disable: target state still busy\n"));

	/*
	 * If core is initiator, set the Reject bit and allow Busy to clear.
	 * sonicsrev < 2.3 chips don't have the Reject and Busy bits (nops).
	 * Don't assert - dma engine might be stuck (PR4871).
	 */
	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) {
		OR_SBREG(sii, &sb->sbimstate, SBIM_RJ);
		dummy = R_SBREG(sii, &sb->sbimstate);
		BCM_REFERENCE(dummy);
		OSL_DELAY(1);
		SPINWAIT((R_SBREG(sii, &sb->sbimstate) & SBIM_BY), 100000);
	}

	/* set reset and reject while enabling the clocks */
	W_SBREG(sii, &sb->sbtmstatelow,
	        (((bits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
	         SBTML_REJ | SBTML_RESET));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(10);

	/* don't forget to clear the initiator reject bit */
	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT)
		AND_SBREG(sii, &sb->sbimstate, ~SBIM_RJ);

disable:
	/* leave reset and reject asserted */
	W_SBREG(sii, &sb->sbtmstatelow, ((bits << SBTML_SICF_SHIFT) | SBTML_REJ | SBTML_RESET));
	OSL_DELAY(1);
}

/* reset and re-enable a core
 * inputs:
 * bits - core-specific bits that are set during and after the reset sequence
 * resetbits - core-specific bits that are set only during the reset sequence
 */
void
sb_core_reset(const si_t *sih, uint32 bits, uint32 resetbits)
{
	const si_info_t *sii = SI_INFO(sih);
	sbconfig_t *sb;
	volatile uint32 dummy;

	ASSERT(GOODREGS(sii->curmap));
	sb = REGS2SB(sii->curmap);

	/*
	 * Must do the disable sequence first to work for arbitrary current core state.
	 */
	sb_core_disable(sih, (bits | resetbits));

	/*
	 * Now do the initialization sequence.
	 */

	/* set reset while enabling the clocks and forcing them on throughout the core */
	W_SBREG(sii, &sb->sbtmstatelow,
	        (((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
	         SBTML_RESET));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);

	/* PR3158 - clear any serror */
	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_SERR) {
		W_SBREG(sii, &sb->sbtmstatehigh, 0);
	}
	if ((dummy = R_SBREG(sii, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) {
		AND_SBREG(sii, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
	}

	/* clear reset and allow it to propagate throughout the core */
	W_SBREG(sii, &sb->sbtmstatelow,
	        ((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);

	/* leave clock enabled */
	W_SBREG(sii, &sb->sbtmstatelow, ((bits | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);
}
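
/*
 * Usage sketch (illustrative only): a plain reset leaves only the clock enabled
 * afterwards; SICF_* values passed in 'bits' stay set after the sequence, while
 * values in 'resetbits' are asserted only while reset is held.
 *
 *	sb_core_reset(sih, 0, 0);
 */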

uint32
sb_base(uint32 admatch)
{
	uint32 base;
	uint type;

	type = admatch & SBAM_TYPE_MASK;
	ASSERT(type < 3);

	base = 0;

	if (type == 0) {
		base = admatch & SBAM_BASE0_MASK;
	} else if (type == 1) {
		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
		base = admatch & SBAM_BASE1_MASK;
	} else if (type == 2) {
		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
		base = admatch & SBAM_BASE2_MASK;
	}

	return (base);
}

uint32
sb_size(uint32 admatch)
{
	uint32 size;
	uint type;

	type = admatch & SBAM_TYPE_MASK;
	ASSERT(type < 3);

	size = 0;

	if (type == 0) {
		size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1);
	} else if (type == 1) {
		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
		size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1);
	} else if (type == 2) {
		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
		size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1);
	}

	return (size);
}
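
/*
 * Worked example (illustrative): for a type-0 admatch word whose ADINT0 field holds
 * the value n, sb_size() returns 1 << (n + 1); n == 11 gives 1 << 12 == 0x1000 bytes,
 * i.e. the standard SI_CORE_SIZE register window, while sb_base() masks the same
 * word with SBAM_BASE0_MASK to recover the window's backplane address.
 */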

#if defined(BCMDBG) || defined(BCMDBG_DUMP) || defined(BCMDBG_PHYDUMP)
/* print interesting sbconfig registers */
void
sb_dumpregs(si_t *sih, struct bcmstrbuf *b)
{
	sbconfig_t *sb;
	uint origidx, i;
	bcm_int_bitmask_t intr_val;
	const si_info_t *sii = SI_INFO(sih);
	const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;

	origidx = sii->curidx;

	INTR_OFF(sii, &intr_val);

	for (i = 0; i < sii->numcores; i++) {
		sb = REGS2SB(sb_setcoreidx(sih, i));

		bcm_bprintf(b, "core 0x%x: \n", cores_info->coreid[i]);

		if (sii->pub.socirev > SONICS_2_2)
			bcm_bprintf(b, "sbimerrlog 0x%x sbimerrloga 0x%x\n",
			          sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOG, 0, 0),
			          sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOGA, 0, 0));

		bcm_bprintf(b, "sbtmstatelow 0x%x sbtmstatehigh 0x%x sbidhigh 0x%x "
		            "sbimstate 0x%x\n sbimconfiglow 0x%x sbimconfighigh 0x%x\n",
		            R_SBREG(sii, &sb->sbtmstatelow), R_SBREG(sii, &sb->sbtmstatehigh),
		            R_SBREG(sii, &sb->sbidhigh), R_SBREG(sii, &sb->sbimstate),
		            R_SBREG(sii, &sb->sbimconfiglow), R_SBREG(sii, &sb->sbimconfighigh));
	}

	sb_setcoreidx(sih, origidx);
	INTR_RESTORE(sii, &intr_val);
}
#endif	/* BCMDBG || BCMDBG_DUMP || BCMDBG_PHYDUMP */

#if defined(BCMDBG)
void
sb_view(si_t *sih, bool verbose)
{
	const si_info_t *sii = SI_INFO(sih);
	sbconfig_t *sb = REGS2SB(sii->curmap);

	SI_ERROR(("\nCore ID: 0x%x\n", sb_coreid(&sii->pub)));

	if (sii->pub.socirev > SONICS_2_2)
		SI_ERROR(("sbimerrlog 0x%x sbimerrloga 0x%x\n",
		         sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOG, 0, 0),
		         sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOGA, 0, 0)));

	/* Print important or helpful registers */
	SI_ERROR(("sbtmerrloga 0x%x sbtmerrlog 0x%x\n",
	          R_SBREG(sii, &sb->sbtmerrloga), R_SBREG(sii, &sb->sbtmerrlog)));
	SI_ERROR(("sbimstate 0x%x sbtmstatelow 0x%x sbtmstatehigh 0x%x\n",
	          R_SBREG(sii, &sb->sbimstate),
	          R_SBREG(sii, &sb->sbtmstatelow), R_SBREG(sii, &sb->sbtmstatehigh)));
	SI_ERROR(("sbimconfiglow 0x%x sbtmconfiglow 0x%x\nsbtmconfighigh 0x%x sbidhigh 0x%x\n",
	          R_SBREG(sii, &sb->sbimconfiglow), R_SBREG(sii, &sb->sbtmconfiglow),
	          R_SBREG(sii, &sb->sbtmconfighigh), R_SBREG(sii, &sb->sbidhigh)));

	/* Print more detailed registers that are otherwise not relevant */
	if (verbose) {
		SI_ERROR(("sbipsflag 0x%x sbtpsflag 0x%x\n",
		          R_SBREG(sii, &sb->sbipsflag), R_SBREG(sii, &sb->sbtpsflag)));
		SI_ERROR(("sbadmatch3 0x%x sbadmatch2 0x%x\nsbadmatch1 0x%x sbadmatch0 0x%x\n",
		          R_SBREG(sii, &sb->sbadmatch3), R_SBREG(sii, &sb->sbadmatch2),
		          R_SBREG(sii, &sb->sbadmatch1), R_SBREG(sii, &sb->sbadmatch0)));
		SI_ERROR(("sbintvec 0x%x sbbwa0 0x%x sbimconfighigh 0x%x\n",
		          R_SBREG(sii, &sb->sbintvec), R_SBREG(sii, &sb->sbbwa0),
		          R_SBREG(sii, &sb->sbimconfighigh)));
		SI_ERROR(("sbbconfig 0x%x sbbstate 0x%x\n",
		          R_SBREG(sii, &sb->sbbconfig), R_SBREG(sii, &sb->sbbstate)));
		SI_ERROR(("sbactcnfg 0x%x sbflagst 0x%x sbidlow 0x%x \n\n",
		          R_SBREG(sii, &sb->sbactcnfg), R_SBREG(sii, &sb->sbflagst),
		          R_SBREG(sii, &sb->sbidlow)));
	}
}
#endif	/* BCMDBG */