1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun * Misc utility routines for accessing chip-specific features
3*4882a593Smuzhiyun * of the SiliconBackplane-based Broadcom chips.
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright (C) 2020, Broadcom.
6*4882a593Smuzhiyun *
7*4882a593Smuzhiyun * Unless you and Broadcom execute a separate written software license
8*4882a593Smuzhiyun * agreement governing use of this software, this software is licensed to you
9*4882a593Smuzhiyun * under the terms of the GNU General Public License version 2 (the "GPL"),
10*4882a593Smuzhiyun * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11*4882a593Smuzhiyun * following added to such license:
12*4882a593Smuzhiyun *
13*4882a593Smuzhiyun * As a special exception, the copyright holders of this software give you
14*4882a593Smuzhiyun * permission to link this software with independent modules, and to copy and
15*4882a593Smuzhiyun * distribute the resulting executable under terms of your choice, provided that
16*4882a593Smuzhiyun * you also meet, for each linked independent module, the terms and conditions of
17*4882a593Smuzhiyun * the license of that module. An independent module is a module which is not
18*4882a593Smuzhiyun * derived from this software. The special exception does not apply to any
19*4882a593Smuzhiyun * modifications of the software.
20*4882a593Smuzhiyun *
21*4882a593Smuzhiyun *
22*4882a593Smuzhiyun * <<Broadcom-WL-IPTag/Dual:>>
23*4882a593Smuzhiyun */
24*4882a593Smuzhiyun
25*4882a593Smuzhiyun #include <typedefs.h>
26*4882a593Smuzhiyun #include <bcmdefs.h>
27*4882a593Smuzhiyun #include <osl.h>
28*4882a593Smuzhiyun #include <bcmutils.h>
29*4882a593Smuzhiyun #include <siutils.h>
30*4882a593Smuzhiyun #include <bcmdevs.h>
31*4882a593Smuzhiyun #include <hndsoc.h>
32*4882a593Smuzhiyun #include <sbchipc.h>
33*4882a593Smuzhiyun #if !defined(BCMDONGLEHOST)
34*4882a593Smuzhiyun #include <pci_core.h>
35*4882a593Smuzhiyun #endif /* !defined(BCMDONGLEHOST) */
36*4882a593Smuzhiyun #include <pcicfg.h>
37*4882a593Smuzhiyun #include <sbpcmcia.h>
38*4882a593Smuzhiyun
39*4882a593Smuzhiyun #include "siutils_priv.h"
40*4882a593Smuzhiyun
41*4882a593Smuzhiyun /* local prototypes */
42*4882a593Smuzhiyun static uint _sb_coreidx(const si_info_t *sii, uint32 sba);
43*4882a593Smuzhiyun static uint _sb_scan(si_info_t *sii, uint32 sba, volatile void *regs, uint bus, uint32 sbba,
44*4882a593Smuzhiyun uint ncores, uint devid);
45*4882a593Smuzhiyun static uint32 _sb_coresba(const si_info_t *sii);
46*4882a593Smuzhiyun static volatile void *_sb_setcoreidx(const si_info_t *sii, uint coreidx);
47*4882a593Smuzhiyun #define SET_SBREG(sii, r, mask, val) \
48*4882a593Smuzhiyun W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val)))
49*4882a593Smuzhiyun #define REGS2SB(va) (sbconfig_t*) ((volatile int8*)(va) + SBCONFIGOFF)
50*4882a593Smuzhiyun
51*4882a593Smuzhiyun /* sonicsrev */
52*4882a593Smuzhiyun #define SONICS_2_2 (SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
53*4882a593Smuzhiyun #define SONICS_2_3 (SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)
54*4882a593Smuzhiyun
55*4882a593Smuzhiyun /*
56*4882a593Smuzhiyun * Macros to read/write sbconfig registers.
57*4882a593Smuzhiyun */
58*4882a593Smuzhiyun #define R_SBREG(sii, sbr) sb_read_sbreg((sii), (sbr))
59*4882a593Smuzhiyun #define W_SBREG(sii, sbr, v) sb_write_sbreg((sii), (sbr), (v))
60*4882a593Smuzhiyun #define AND_SBREG(sii, sbr, v) W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v)))
61*4882a593Smuzhiyun #define OR_SBREG(sii, sbr, v) W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v)))
62*4882a593Smuzhiyun
63*4882a593Smuzhiyun static uint32
sb_read_sbreg(const si_info_t * sii,volatile uint32 * sbr)64*4882a593Smuzhiyun sb_read_sbreg(const si_info_t *sii, volatile uint32 *sbr)
65*4882a593Smuzhiyun {
66*4882a593Smuzhiyun return R_REG(sii->osh, sbr);
67*4882a593Smuzhiyun }
68*4882a593Smuzhiyun
69*4882a593Smuzhiyun static void
sb_write_sbreg(const si_info_t * sii,volatile uint32 * sbr,uint32 v)70*4882a593Smuzhiyun sb_write_sbreg(const si_info_t *sii, volatile uint32 *sbr, uint32 v)
71*4882a593Smuzhiyun {
72*4882a593Smuzhiyun W_REG(sii->osh, sbr, v);
73*4882a593Smuzhiyun }
74*4882a593Smuzhiyun
75*4882a593Smuzhiyun uint
sb_coreid(const si_t * sih)76*4882a593Smuzhiyun sb_coreid(const si_t *sih)
77*4882a593Smuzhiyun {
78*4882a593Smuzhiyun const si_info_t *sii = SI_INFO(sih);
79*4882a593Smuzhiyun sbconfig_t *sb = REGS2SB(sii->curmap);
80*4882a593Smuzhiyun
81*4882a593Smuzhiyun return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT);
82*4882a593Smuzhiyun }
83*4882a593Smuzhiyun
84*4882a593Smuzhiyun uint
sb_intflag(si_t * sih)85*4882a593Smuzhiyun sb_intflag(si_t *sih)
86*4882a593Smuzhiyun {
87*4882a593Smuzhiyun const si_info_t *sii = SI_INFO(sih);
88*4882a593Smuzhiyun volatile void *corereg;
89*4882a593Smuzhiyun sbconfig_t *sb;
90*4882a593Smuzhiyun uint origidx, intflag;
91*4882a593Smuzhiyun bcm_int_bitmask_t intr_val;
92*4882a593Smuzhiyun
93*4882a593Smuzhiyun INTR_OFF(sii, &intr_val);
94*4882a593Smuzhiyun origidx = si_coreidx(sih);
95*4882a593Smuzhiyun corereg = si_setcore(sih, CC_CORE_ID, 0);
96*4882a593Smuzhiyun ASSERT(corereg != NULL);
97*4882a593Smuzhiyun sb = REGS2SB(corereg);
98*4882a593Smuzhiyun intflag = R_SBREG(sii, &sb->sbflagst);
99*4882a593Smuzhiyun sb_setcoreidx(sih, origidx);
100*4882a593Smuzhiyun INTR_RESTORE(sii, &intr_val);
101*4882a593Smuzhiyun
102*4882a593Smuzhiyun return intflag;
103*4882a593Smuzhiyun }
104*4882a593Smuzhiyun
105*4882a593Smuzhiyun uint
sb_flag(const si_t * sih)106*4882a593Smuzhiyun sb_flag(const si_t *sih)
107*4882a593Smuzhiyun {
108*4882a593Smuzhiyun const si_info_t *sii = SI_INFO(sih);
109*4882a593Smuzhiyun sbconfig_t *sb = REGS2SB(sii->curmap);
110*4882a593Smuzhiyun
111*4882a593Smuzhiyun return R_SBREG(sii, &sb->sbtpsflag) & SBTPS_NUM0_MASK;
112*4882a593Smuzhiyun }
113*4882a593Smuzhiyun
114*4882a593Smuzhiyun void
sb_setint(const si_t * sih,int siflag)115*4882a593Smuzhiyun sb_setint(const si_t *sih, int siflag)
116*4882a593Smuzhiyun {
117*4882a593Smuzhiyun const si_info_t *sii = SI_INFO(sih);
118*4882a593Smuzhiyun sbconfig_t *sb = REGS2SB(sii->curmap);
119*4882a593Smuzhiyun uint32 vec;
120*4882a593Smuzhiyun
121*4882a593Smuzhiyun if (siflag == -1)
122*4882a593Smuzhiyun vec = 0;
123*4882a593Smuzhiyun else
124*4882a593Smuzhiyun vec = 1 << siflag;
125*4882a593Smuzhiyun W_SBREG(sii, &sb->sbintvec, vec);
126*4882a593Smuzhiyun }
127*4882a593Smuzhiyun
128*4882a593Smuzhiyun /* return core index of the core with address 'sba' */
129*4882a593Smuzhiyun static uint
BCMATTACHFN(_sb_coreidx)130*4882a593Smuzhiyun BCMATTACHFN(_sb_coreidx)(const si_info_t *sii, uint32 sba)
131*4882a593Smuzhiyun {
132*4882a593Smuzhiyun uint i;
133*4882a593Smuzhiyun const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
134*4882a593Smuzhiyun
135*4882a593Smuzhiyun for (i = 0; i < sii->numcores; i ++)
136*4882a593Smuzhiyun if (sba == cores_info->coresba[i])
137*4882a593Smuzhiyun return i;
138*4882a593Smuzhiyun return BADIDX;
139*4882a593Smuzhiyun }
140*4882a593Smuzhiyun
141*4882a593Smuzhiyun /* return core address of the current core */
142*4882a593Smuzhiyun static uint32
BCMATTACHFN(_sb_coresba)143*4882a593Smuzhiyun BCMATTACHFN(_sb_coresba)(const si_info_t *sii)
144*4882a593Smuzhiyun {
145*4882a593Smuzhiyun uint32 sbaddr;
146*4882a593Smuzhiyun
147*4882a593Smuzhiyun switch (BUSTYPE(sii->pub.bustype)) {
148*4882a593Smuzhiyun case SI_BUS: {
149*4882a593Smuzhiyun sbconfig_t *sb = REGS2SB(sii->curmap);
150*4882a593Smuzhiyun sbaddr = sb_base(R_SBREG(sii, &sb->sbadmatch0));
151*4882a593Smuzhiyun break;
152*4882a593Smuzhiyun }
153*4882a593Smuzhiyun
154*4882a593Smuzhiyun case PCI_BUS:
155*4882a593Smuzhiyun sbaddr = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
156*4882a593Smuzhiyun break;
157*4882a593Smuzhiyun
158*4882a593Smuzhiyun #ifdef BCMSDIO
159*4882a593Smuzhiyun case SPI_BUS:
160*4882a593Smuzhiyun case SDIO_BUS:
161*4882a593Smuzhiyun sbaddr = (uint32)(uintptr)sii->curmap;
162*4882a593Smuzhiyun break;
163*4882a593Smuzhiyun #endif
164*4882a593Smuzhiyun
165*4882a593Smuzhiyun default:
166*4882a593Smuzhiyun sbaddr = BADCOREADDR;
167*4882a593Smuzhiyun break;
168*4882a593Smuzhiyun }
169*4882a593Smuzhiyun
170*4882a593Smuzhiyun return sbaddr;
171*4882a593Smuzhiyun }
172*4882a593Smuzhiyun
173*4882a593Smuzhiyun uint
sb_corevendor(const si_t * sih)174*4882a593Smuzhiyun sb_corevendor(const si_t *sih)
175*4882a593Smuzhiyun {
176*4882a593Smuzhiyun const si_info_t *sii;
177*4882a593Smuzhiyun sbconfig_t *sb;
178*4882a593Smuzhiyun
179*4882a593Smuzhiyun sii = SI_INFO(sih);
180*4882a593Smuzhiyun sb = REGS2SB(sii->curmap);
181*4882a593Smuzhiyun
182*4882a593Smuzhiyun return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT);
183*4882a593Smuzhiyun }
184*4882a593Smuzhiyun
185*4882a593Smuzhiyun uint
sb_corerev(const si_t * sih)186*4882a593Smuzhiyun sb_corerev(const si_t *sih)
187*4882a593Smuzhiyun {
188*4882a593Smuzhiyun const si_info_t *sii;
189*4882a593Smuzhiyun sbconfig_t *sb;
190*4882a593Smuzhiyun uint sbidh;
191*4882a593Smuzhiyun
192*4882a593Smuzhiyun sii = SI_INFO(sih);
193*4882a593Smuzhiyun sb = REGS2SB(sii->curmap);
194*4882a593Smuzhiyun sbidh = R_SBREG(sii, &sb->sbidhigh);
195*4882a593Smuzhiyun
196*4882a593Smuzhiyun return (SBCOREREV(sbidh));
197*4882a593Smuzhiyun }
198*4882a593Smuzhiyun
199*4882a593Smuzhiyun /* set core-specific control flags */
200*4882a593Smuzhiyun void
sb_core_cflags_wo(const si_t * sih,uint32 mask,uint32 val)201*4882a593Smuzhiyun sb_core_cflags_wo(const si_t *sih, uint32 mask, uint32 val)
202*4882a593Smuzhiyun {
203*4882a593Smuzhiyun const si_info_t *sii = SI_INFO(sih);
204*4882a593Smuzhiyun sbconfig_t *sb = REGS2SB(sii->curmap);
205*4882a593Smuzhiyun uint32 w;
206*4882a593Smuzhiyun
207*4882a593Smuzhiyun ASSERT((val & ~mask) == 0);
208*4882a593Smuzhiyun
209*4882a593Smuzhiyun /* mask and set */
210*4882a593Smuzhiyun w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
211*4882a593Smuzhiyun (val << SBTML_SICF_SHIFT);
212*4882a593Smuzhiyun W_SBREG(sii, &sb->sbtmstatelow, w);
213*4882a593Smuzhiyun }
214*4882a593Smuzhiyun
215*4882a593Smuzhiyun /* set/clear core-specific control flags */
216*4882a593Smuzhiyun uint32
sb_core_cflags(const si_t * sih,uint32 mask,uint32 val)217*4882a593Smuzhiyun sb_core_cflags(const si_t *sih, uint32 mask, uint32 val)
218*4882a593Smuzhiyun {
219*4882a593Smuzhiyun const si_info_t *sii = SI_INFO(sih);
220*4882a593Smuzhiyun sbconfig_t *sb = REGS2SB(sii->curmap);
221*4882a593Smuzhiyun uint32 w;
222*4882a593Smuzhiyun
223*4882a593Smuzhiyun ASSERT((val & ~mask) == 0);
224*4882a593Smuzhiyun
225*4882a593Smuzhiyun /* mask and set */
226*4882a593Smuzhiyun if (mask || val) {
227*4882a593Smuzhiyun w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
228*4882a593Smuzhiyun (val << SBTML_SICF_SHIFT);
229*4882a593Smuzhiyun W_SBREG(sii, &sb->sbtmstatelow, w);
230*4882a593Smuzhiyun }
231*4882a593Smuzhiyun
232*4882a593Smuzhiyun /* return the new value
233*4882a593Smuzhiyun * for write operation, the following readback ensures the completion of write opration.
234*4882a593Smuzhiyun */
235*4882a593Smuzhiyun return (R_SBREG(sii, &sb->sbtmstatelow) >> SBTML_SICF_SHIFT);
236*4882a593Smuzhiyun }
237*4882a593Smuzhiyun
238*4882a593Smuzhiyun /* set/clear core-specific status flags */
239*4882a593Smuzhiyun uint32
sb_core_sflags(const si_t * sih,uint32 mask,uint32 val)240*4882a593Smuzhiyun sb_core_sflags(const si_t *sih, uint32 mask, uint32 val)
241*4882a593Smuzhiyun {
242*4882a593Smuzhiyun const si_info_t *sii = SI_INFO(sih);
243*4882a593Smuzhiyun sbconfig_t *sb = REGS2SB(sii->curmap);
244*4882a593Smuzhiyun uint32 w;
245*4882a593Smuzhiyun
246*4882a593Smuzhiyun ASSERT((val & ~mask) == 0);
247*4882a593Smuzhiyun ASSERT((mask & ~SISF_CORE_BITS) == 0);
248*4882a593Smuzhiyun
249*4882a593Smuzhiyun /* mask and set */
250*4882a593Smuzhiyun if (mask || val) {
251*4882a593Smuzhiyun w = (R_SBREG(sii, &sb->sbtmstatehigh) & ~(mask << SBTMH_SISF_SHIFT)) |
252*4882a593Smuzhiyun (val << SBTMH_SISF_SHIFT);
253*4882a593Smuzhiyun W_SBREG(sii, &sb->sbtmstatehigh, w);
254*4882a593Smuzhiyun }
255*4882a593Smuzhiyun
256*4882a593Smuzhiyun /* return the new value */
257*4882a593Smuzhiyun return (R_SBREG(sii, &sb->sbtmstatehigh) >> SBTMH_SISF_SHIFT);
258*4882a593Smuzhiyun }
259*4882a593Smuzhiyun
260*4882a593Smuzhiyun bool
sb_iscoreup(const si_t * sih)261*4882a593Smuzhiyun sb_iscoreup(const si_t *sih)
262*4882a593Smuzhiyun {
263*4882a593Smuzhiyun const si_info_t *sii = SI_INFO(sih);
264*4882a593Smuzhiyun sbconfig_t *sb = REGS2SB(sii->curmap);
265*4882a593Smuzhiyun
266*4882a593Smuzhiyun return ((R_SBREG(sii, &sb->sbtmstatelow) &
267*4882a593Smuzhiyun (SBTML_RESET | SBTML_REJ_MASK | (SICF_CLOCK_EN << SBTML_SICF_SHIFT))) ==
268*4882a593Smuzhiyun (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
269*4882a593Smuzhiyun }
270*4882a593Smuzhiyun
271*4882a593Smuzhiyun /*
272*4882a593Smuzhiyun * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
273*4882a593Smuzhiyun * switch back to the original core, and return the new value.
274*4882a593Smuzhiyun *
275*4882a593Smuzhiyun * When using the silicon backplane, no fidleing with interrupts or core switches are needed.
276*4882a593Smuzhiyun *
277*4882a593Smuzhiyun * Also, when using pci/pcie, we can optimize away the core switching for pci registers
278*4882a593Smuzhiyun * and (on newer pci cores) chipcommon registers.
279*4882a593Smuzhiyun */
280*4882a593Smuzhiyun uint
sb_corereg(si_t * sih,uint coreidx,uint regoff,uint mask,uint val)281*4882a593Smuzhiyun sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
282*4882a593Smuzhiyun {
283*4882a593Smuzhiyun uint origidx = 0;
284*4882a593Smuzhiyun volatile uint32 *r = NULL;
285*4882a593Smuzhiyun uint w;
286*4882a593Smuzhiyun bcm_int_bitmask_t intr_val;
287*4882a593Smuzhiyun bool fast = FALSE;
288*4882a593Smuzhiyun si_info_t *sii = SI_INFO(sih);
289*4882a593Smuzhiyun si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
290*4882a593Smuzhiyun
291*4882a593Smuzhiyun ASSERT(GOODIDX(coreidx, sii->numcores));
292*4882a593Smuzhiyun ASSERT(regoff < SI_CORE_SIZE);
293*4882a593Smuzhiyun ASSERT((val & ~mask) == 0);
294*4882a593Smuzhiyun
295*4882a593Smuzhiyun if (coreidx >= SI_MAXCORES)
296*4882a593Smuzhiyun return 0;
297*4882a593Smuzhiyun
298*4882a593Smuzhiyun if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
299*4882a593Smuzhiyun /* If internal bus, we can always get at everything */
300*4882a593Smuzhiyun fast = TRUE;
301*4882a593Smuzhiyun /* map if does not exist */
302*4882a593Smuzhiyun if (!cores_info->regs[coreidx]) {
303*4882a593Smuzhiyun cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
304*4882a593Smuzhiyun SI_CORE_SIZE);
305*4882a593Smuzhiyun ASSERT(GOODREGS(cores_info->regs[coreidx]));
306*4882a593Smuzhiyun }
307*4882a593Smuzhiyun r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
308*4882a593Smuzhiyun } else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
309*4882a593Smuzhiyun /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
310*4882a593Smuzhiyun
311*4882a593Smuzhiyun if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
312*4882a593Smuzhiyun /* Chipc registers are mapped at 12KB */
313*4882a593Smuzhiyun
314*4882a593Smuzhiyun fast = TRUE;
315*4882a593Smuzhiyun r = (volatile uint32 *)((volatile char *)sii->curmap +
316*4882a593Smuzhiyun PCI_16KB0_CCREGS_OFFSET + regoff);
317*4882a593Smuzhiyun } else if (sii->pub.buscoreidx == coreidx) {
318*4882a593Smuzhiyun /* pci registers are at either in the last 2KB of an 8KB window
319*4882a593Smuzhiyun * or, in pcie and pci rev 13 at 8KB
320*4882a593Smuzhiyun */
321*4882a593Smuzhiyun fast = TRUE;
322*4882a593Smuzhiyun if (SI_FAST(sii))
323*4882a593Smuzhiyun r = (volatile uint32 *)((volatile char *)sii->curmap +
324*4882a593Smuzhiyun PCI_16KB0_PCIREGS_OFFSET + regoff);
325*4882a593Smuzhiyun else
326*4882a593Smuzhiyun r = (volatile uint32 *)((volatile char *)sii->curmap +
327*4882a593Smuzhiyun ((regoff >= SBCONFIGOFF) ?
328*4882a593Smuzhiyun PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
329*4882a593Smuzhiyun regoff);
330*4882a593Smuzhiyun }
331*4882a593Smuzhiyun }
332*4882a593Smuzhiyun
333*4882a593Smuzhiyun if (!fast) {
334*4882a593Smuzhiyun INTR_OFF(sii, &intr_val);
335*4882a593Smuzhiyun
336*4882a593Smuzhiyun /* save current core index */
337*4882a593Smuzhiyun origidx = si_coreidx(&sii->pub);
338*4882a593Smuzhiyun
339*4882a593Smuzhiyun /* switch core */
340*4882a593Smuzhiyun r = (volatile uint32*) ((volatile uchar*)sb_setcoreidx(&sii->pub, coreidx) +
341*4882a593Smuzhiyun regoff);
342*4882a593Smuzhiyun }
343*4882a593Smuzhiyun ASSERT(r != NULL);
344*4882a593Smuzhiyun
345*4882a593Smuzhiyun /* mask and set */
346*4882a593Smuzhiyun if (mask || val) {
347*4882a593Smuzhiyun if (regoff >= SBCONFIGOFF) {
348*4882a593Smuzhiyun w = (R_SBREG(sii, r) & ~mask) | val;
349*4882a593Smuzhiyun W_SBREG(sii, r, w);
350*4882a593Smuzhiyun } else {
351*4882a593Smuzhiyun w = (R_REG(sii->osh, r) & ~mask) | val;
352*4882a593Smuzhiyun W_REG(sii->osh, r, w);
353*4882a593Smuzhiyun }
354*4882a593Smuzhiyun }
355*4882a593Smuzhiyun
356*4882a593Smuzhiyun /* readback */
357*4882a593Smuzhiyun if (regoff >= SBCONFIGOFF)
358*4882a593Smuzhiyun w = R_SBREG(sii, r);
359*4882a593Smuzhiyun else {
360*4882a593Smuzhiyun w = R_REG(sii->osh, r);
361*4882a593Smuzhiyun }
362*4882a593Smuzhiyun
363*4882a593Smuzhiyun if (!fast) {
364*4882a593Smuzhiyun /* restore core index */
365*4882a593Smuzhiyun if (origidx != coreidx)
366*4882a593Smuzhiyun sb_setcoreidx(&sii->pub, origidx);
367*4882a593Smuzhiyun
368*4882a593Smuzhiyun INTR_RESTORE(sii, &intr_val);
369*4882a593Smuzhiyun }
370*4882a593Smuzhiyun
371*4882a593Smuzhiyun return (w);
372*4882a593Smuzhiyun }
373*4882a593Smuzhiyun
374*4882a593Smuzhiyun /*
375*4882a593Smuzhiyun * If there is no need for fiddling with interrupts or core switches (typically silicon
376*4882a593Smuzhiyun * back plane registers, pci registers and chipcommon registers), this function
377*4882a593Smuzhiyun * returns the register offset on this core to a mapped address. This address can
378*4882a593Smuzhiyun * be used for W_REG/R_REG directly.
379*4882a593Smuzhiyun *
380*4882a593Smuzhiyun * For accessing registers that would need a core switch, this function will return
381*4882a593Smuzhiyun * NULL.
382*4882a593Smuzhiyun */
383*4882a593Smuzhiyun volatile uint32 *
sb_corereg_addr(const si_t * sih,uint coreidx,uint regoff)384*4882a593Smuzhiyun sb_corereg_addr(const si_t *sih, uint coreidx, uint regoff)
385*4882a593Smuzhiyun {
386*4882a593Smuzhiyun volatile uint32 *r = NULL;
387*4882a593Smuzhiyun bool fast = FALSE;
388*4882a593Smuzhiyun const si_info_t *sii = SI_INFO(sih);
389*4882a593Smuzhiyun si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
390*4882a593Smuzhiyun
391*4882a593Smuzhiyun ASSERT(GOODIDX(coreidx, sii->numcores));
392*4882a593Smuzhiyun ASSERT(regoff < SI_CORE_SIZE);
393*4882a593Smuzhiyun
394*4882a593Smuzhiyun if (coreidx >= SI_MAXCORES)
395*4882a593Smuzhiyun return 0;
396*4882a593Smuzhiyun
397*4882a593Smuzhiyun if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
398*4882a593Smuzhiyun /* If internal bus, we can always get at everything */
399*4882a593Smuzhiyun fast = TRUE;
400*4882a593Smuzhiyun /* map if does not exist */
401*4882a593Smuzhiyun if (!cores_info->regs[coreidx]) {
402*4882a593Smuzhiyun cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
403*4882a593Smuzhiyun SI_CORE_SIZE);
404*4882a593Smuzhiyun ASSERT(GOODREGS(cores_info->regs[coreidx]));
405*4882a593Smuzhiyun }
406*4882a593Smuzhiyun r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
407*4882a593Smuzhiyun } else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
408*4882a593Smuzhiyun /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
409*4882a593Smuzhiyun
410*4882a593Smuzhiyun if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
411*4882a593Smuzhiyun /* Chipc registers are mapped at 12KB */
412*4882a593Smuzhiyun
413*4882a593Smuzhiyun fast = TRUE;
414*4882a593Smuzhiyun r = (volatile uint32 *)((volatile char *)sii->curmap +
415*4882a593Smuzhiyun PCI_16KB0_CCREGS_OFFSET + regoff);
416*4882a593Smuzhiyun } else if (sii->pub.buscoreidx == coreidx) {
417*4882a593Smuzhiyun /* pci registers are at either in the last 2KB of an 8KB window
418*4882a593Smuzhiyun * or, in pcie and pci rev 13 at 8KB
419*4882a593Smuzhiyun */
420*4882a593Smuzhiyun fast = TRUE;
421*4882a593Smuzhiyun if (SI_FAST(sii))
422*4882a593Smuzhiyun r = (volatile uint32 *)((volatile char *)sii->curmap +
423*4882a593Smuzhiyun PCI_16KB0_PCIREGS_OFFSET + regoff);
424*4882a593Smuzhiyun else
425*4882a593Smuzhiyun r = (volatile uint32 *)((volatile char *)sii->curmap +
426*4882a593Smuzhiyun ((regoff >= SBCONFIGOFF) ?
427*4882a593Smuzhiyun PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
428*4882a593Smuzhiyun regoff);
429*4882a593Smuzhiyun }
430*4882a593Smuzhiyun }
431*4882a593Smuzhiyun
432*4882a593Smuzhiyun if (!fast)
433*4882a593Smuzhiyun return 0;
434*4882a593Smuzhiyun
435*4882a593Smuzhiyun return (r);
436*4882a593Smuzhiyun }
437*4882a593Smuzhiyun
438*4882a593Smuzhiyun /* Scan the enumeration space to find all cores starting from the given
439*4882a593Smuzhiyun * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba'
440*4882a593Smuzhiyun * is the default core address at chip POR time and 'regs' is the virtual
441*4882a593Smuzhiyun * address that the default core is mapped at. 'ncores' is the number of
442*4882a593Smuzhiyun * cores expected on bus 'sbba'. It returns the total number of cores
443*4882a593Smuzhiyun * starting from bus 'sbba', inclusive.
444*4882a593Smuzhiyun */
445*4882a593Smuzhiyun #define SB_MAXBUSES 2
446*4882a593Smuzhiyun static uint
BCMATTACHFN(_sb_scan)447*4882a593Smuzhiyun BCMATTACHFN(_sb_scan)(si_info_t *sii, uint32 sba, volatile void *regs, uint bus,
448*4882a593Smuzhiyun uint32 sbba, uint numcores, uint devid)
449*4882a593Smuzhiyun {
450*4882a593Smuzhiyun uint next;
451*4882a593Smuzhiyun uint ncc = 0;
452*4882a593Smuzhiyun uint i;
453*4882a593Smuzhiyun si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
454*4882a593Smuzhiyun
455*4882a593Smuzhiyun /* bail out in case it is too deep to scan at the specified bus level */
456*4882a593Smuzhiyun if (bus >= SB_MAXBUSES) {
457*4882a593Smuzhiyun SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba, bus));
458*4882a593Smuzhiyun return 0;
459*4882a593Smuzhiyun }
460*4882a593Smuzhiyun SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba, numcores));
461*4882a593Smuzhiyun
462*4882a593Smuzhiyun /* Scan all cores on the bus starting from core 0.
463*4882a593Smuzhiyun * Core addresses must be contiguous on each bus.
464*4882a593Smuzhiyun */
465*4882a593Smuzhiyun for (i = 0, next = sii->numcores; i < numcores && next < SB_BUS_MAXCORES; i++, next++) {
466*4882a593Smuzhiyun cores_info->coresba[next] = sbba + (i * SI_CORE_SIZE);
467*4882a593Smuzhiyun
468*4882a593Smuzhiyun /* keep and reuse the initial register mapping */
469*4882a593Smuzhiyun if ((BUSTYPE(sii->pub.bustype) == SI_BUS) && (cores_info->coresba[next] == sba)) {
470*4882a593Smuzhiyun SI_VMSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs, next));
471*4882a593Smuzhiyun cores_info->regs[next] = regs;
472*4882a593Smuzhiyun }
473*4882a593Smuzhiyun
474*4882a593Smuzhiyun /* change core to 'next' and read its coreid */
475*4882a593Smuzhiyun sii->curmap = _sb_setcoreidx(sii, next);
476*4882a593Smuzhiyun sii->curidx = next;
477*4882a593Smuzhiyun
478*4882a593Smuzhiyun cores_info->coreid[next] = sb_coreid(&sii->pub);
479*4882a593Smuzhiyun
480*4882a593Smuzhiyun /* core specific processing... */
481*4882a593Smuzhiyun /* chipc provides # cores */
482*4882a593Smuzhiyun if (cores_info->coreid[next] == CC_CORE_ID) {
483*4882a593Smuzhiyun chipcregs_t *cc = (chipcregs_t *)sii->curmap;
484*4882a593Smuzhiyun
485*4882a593Smuzhiyun /* determine numcores - this is the total # cores in the chip */
486*4882a593Smuzhiyun ASSERT(cc);
487*4882a593Smuzhiyun numcores = (R_REG(sii->osh, &cc->chipid) & CID_CC_MASK) >> CID_CC_SHIFT;
488*4882a593Smuzhiyun SI_VMSG(("_sb_scan: there are %u cores in the chip %s\n", numcores,
489*4882a593Smuzhiyun sii->pub.issim ? "QT" : ""));
490*4882a593Smuzhiyun }
491*4882a593Smuzhiyun /* scan bridged SB(s) and add results to the end of the list */
492*4882a593Smuzhiyun else if (cores_info->coreid[next] == OCP_CORE_ID) {
493*4882a593Smuzhiyun sbconfig_t *sb = REGS2SB(sii->curmap);
494*4882a593Smuzhiyun uint32 nsbba = R_SBREG(sii, &sb->sbadmatch1);
495*4882a593Smuzhiyun uint nsbcc;
496*4882a593Smuzhiyun
497*4882a593Smuzhiyun sii->numcores = next + 1;
498*4882a593Smuzhiyun
499*4882a593Smuzhiyun if ((nsbba & 0xfff00000) != si_enum_base(devid))
500*4882a593Smuzhiyun continue;
501*4882a593Smuzhiyun nsbba &= 0xfffff000;
502*4882a593Smuzhiyun if (_sb_coreidx(sii, nsbba) != BADIDX)
503*4882a593Smuzhiyun continue;
504*4882a593Smuzhiyun
505*4882a593Smuzhiyun nsbcc = (R_SBREG(sii, &sb->sbtmstatehigh) & 0x000f0000) >> 16;
506*4882a593Smuzhiyun nsbcc = _sb_scan(sii, sba, regs, bus + 1, nsbba, nsbcc, devid);
507*4882a593Smuzhiyun if (sbba == si_enum_base(devid))
508*4882a593Smuzhiyun numcores -= nsbcc;
509*4882a593Smuzhiyun ncc += nsbcc;
510*4882a593Smuzhiyun }
511*4882a593Smuzhiyun }
512*4882a593Smuzhiyun
513*4882a593Smuzhiyun SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i, sbba));
514*4882a593Smuzhiyun
515*4882a593Smuzhiyun sii->numcores = i + ncc;
516*4882a593Smuzhiyun return sii->numcores;
517*4882a593Smuzhiyun }
518*4882a593Smuzhiyun
519*4882a593Smuzhiyun /* scan the sb enumerated space to identify all cores */
520*4882a593Smuzhiyun void
BCMATTACHFN(sb_scan)521*4882a593Smuzhiyun BCMATTACHFN(sb_scan)(si_t *sih, volatile void *regs, uint devid)
522*4882a593Smuzhiyun {
523*4882a593Smuzhiyun uint32 origsba;
524*4882a593Smuzhiyun sbconfig_t *sb;
525*4882a593Smuzhiyun si_info_t *sii = SI_INFO(sih);
526*4882a593Smuzhiyun BCM_REFERENCE(devid);
527*4882a593Smuzhiyun
528*4882a593Smuzhiyun sb = REGS2SB(sii->curmap);
529*4882a593Smuzhiyun
530*4882a593Smuzhiyun sii->pub.socirev = (R_SBREG(sii, &sb->sbidlow) & SBIDL_RV_MASK) >> SBIDL_RV_SHIFT;
531*4882a593Smuzhiyun
532*4882a593Smuzhiyun /* Save the current core info and validate it later till we know
533*4882a593Smuzhiyun * for sure what is good and what is bad.
534*4882a593Smuzhiyun */
535*4882a593Smuzhiyun origsba = _sb_coresba(sii);
536*4882a593Smuzhiyun
537*4882a593Smuzhiyun /* scan all SB(s) starting from SI_ENUM_BASE_DEFAULT */
538*4882a593Smuzhiyun sii->numcores = _sb_scan(sii, origsba, regs, 0, si_enum_base(devid), 1, devid);
539*4882a593Smuzhiyun }
540*4882a593Smuzhiyun
541*4882a593Smuzhiyun /*
542*4882a593Smuzhiyun * This function changes logical "focus" to the indicated core;
543*4882a593Smuzhiyun * must be called with interrupts off.
544*4882a593Smuzhiyun * Moreover, callers should keep interrupts off during switching out of and back to d11 core
545*4882a593Smuzhiyun */
546*4882a593Smuzhiyun volatile void *
sb_setcoreidx(si_t * sih,uint coreidx)547*4882a593Smuzhiyun sb_setcoreidx(si_t *sih, uint coreidx)
548*4882a593Smuzhiyun {
549*4882a593Smuzhiyun si_info_t *sii = SI_INFO(sih);
550*4882a593Smuzhiyun
551*4882a593Smuzhiyun if (coreidx >= sii->numcores)
552*4882a593Smuzhiyun return (NULL);
553*4882a593Smuzhiyun
554*4882a593Smuzhiyun /*
555*4882a593Smuzhiyun * If the user has provided an interrupt mask enabled function,
556*4882a593Smuzhiyun * then assert interrupts are disabled before switching the core.
557*4882a593Smuzhiyun */
558*4882a593Smuzhiyun ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
559*4882a593Smuzhiyun
560*4882a593Smuzhiyun sii->curmap = _sb_setcoreidx(sii, coreidx);
561*4882a593Smuzhiyun sii->curidx = coreidx;
562*4882a593Smuzhiyun
563*4882a593Smuzhiyun return (sii->curmap);
564*4882a593Smuzhiyun }
565*4882a593Smuzhiyun
566*4882a593Smuzhiyun /* This function changes the logical "focus" to the indicated core.
567*4882a593Smuzhiyun * Return the current core's virtual address.
568*4882a593Smuzhiyun */
569*4882a593Smuzhiyun static volatile void *
_sb_setcoreidx(const si_info_t * sii,uint coreidx)570*4882a593Smuzhiyun _sb_setcoreidx(const si_info_t *sii, uint coreidx)
571*4882a593Smuzhiyun {
572*4882a593Smuzhiyun si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
573*4882a593Smuzhiyun uint32 sbaddr = cores_info->coresba[coreidx];
574*4882a593Smuzhiyun volatile void *regs;
575*4882a593Smuzhiyun
576*4882a593Smuzhiyun switch (BUSTYPE(sii->pub.bustype)) {
577*4882a593Smuzhiyun case SI_BUS:
578*4882a593Smuzhiyun /* map new one */
579*4882a593Smuzhiyun if (!cores_info->regs[coreidx]) {
580*4882a593Smuzhiyun cores_info->regs[coreidx] = REG_MAP(sbaddr, SI_CORE_SIZE);
581*4882a593Smuzhiyun ASSERT(GOODREGS(cores_info->regs[coreidx]));
582*4882a593Smuzhiyun }
583*4882a593Smuzhiyun regs = cores_info->regs[coreidx];
584*4882a593Smuzhiyun break;
585*4882a593Smuzhiyun
586*4882a593Smuzhiyun case PCI_BUS:
587*4882a593Smuzhiyun /* point bar0 window */
588*4882a593Smuzhiyun OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, sbaddr);
589*4882a593Smuzhiyun regs = sii->curmap;
590*4882a593Smuzhiyun break;
591*4882a593Smuzhiyun
592*4882a593Smuzhiyun #ifdef BCMSDIO
593*4882a593Smuzhiyun case SPI_BUS:
594*4882a593Smuzhiyun case SDIO_BUS:
595*4882a593Smuzhiyun /* map new one */
596*4882a593Smuzhiyun if (!cores_info->regs[coreidx]) {
597*4882a593Smuzhiyun cores_info->regs[coreidx] = (void *)(uintptr)sbaddr;
598*4882a593Smuzhiyun ASSERT(GOODREGS(cores_info->regs[coreidx]));
599*4882a593Smuzhiyun }
600*4882a593Smuzhiyun regs = cores_info->regs[coreidx];
601*4882a593Smuzhiyun break;
602*4882a593Smuzhiyun #endif /* BCMSDIO */
603*4882a593Smuzhiyun
604*4882a593Smuzhiyun default:
605*4882a593Smuzhiyun ASSERT(0);
606*4882a593Smuzhiyun regs = NULL;
607*4882a593Smuzhiyun break;
608*4882a593Smuzhiyun }
609*4882a593Smuzhiyun
610*4882a593Smuzhiyun return regs;
611*4882a593Smuzhiyun }
612*4882a593Smuzhiyun
613*4882a593Smuzhiyun /* Return the address of sbadmatch0/1/2/3 register */
614*4882a593Smuzhiyun static volatile uint32 *
sb_admatch(const si_info_t * sii,uint asidx)615*4882a593Smuzhiyun sb_admatch(const si_info_t *sii, uint asidx)
616*4882a593Smuzhiyun {
617*4882a593Smuzhiyun sbconfig_t *sb;
618*4882a593Smuzhiyun volatile uint32 *addrm;
619*4882a593Smuzhiyun
620*4882a593Smuzhiyun sb = REGS2SB(sii->curmap);
621*4882a593Smuzhiyun
622*4882a593Smuzhiyun switch (asidx) {
623*4882a593Smuzhiyun case 0:
624*4882a593Smuzhiyun addrm = &sb->sbadmatch0;
625*4882a593Smuzhiyun break;
626*4882a593Smuzhiyun
627*4882a593Smuzhiyun case 1:
628*4882a593Smuzhiyun addrm = &sb->sbadmatch1;
629*4882a593Smuzhiyun break;
630*4882a593Smuzhiyun
631*4882a593Smuzhiyun case 2:
632*4882a593Smuzhiyun addrm = &sb->sbadmatch2;
633*4882a593Smuzhiyun break;
634*4882a593Smuzhiyun
635*4882a593Smuzhiyun case 3:
636*4882a593Smuzhiyun addrm = &sb->sbadmatch3;
637*4882a593Smuzhiyun break;
638*4882a593Smuzhiyun
639*4882a593Smuzhiyun default:
640*4882a593Smuzhiyun SI_ERROR(("sb_admatch: Address space index (%d) out of range\n", asidx));
641*4882a593Smuzhiyun return 0;
642*4882a593Smuzhiyun }
643*4882a593Smuzhiyun
644*4882a593Smuzhiyun return (addrm);
645*4882a593Smuzhiyun }
646*4882a593Smuzhiyun
647*4882a593Smuzhiyun /* Return the number of address spaces in current core */
648*4882a593Smuzhiyun int
sb_numaddrspaces(const si_t * sih)649*4882a593Smuzhiyun sb_numaddrspaces(const si_t *sih)
650*4882a593Smuzhiyun {
651*4882a593Smuzhiyun const si_info_t *sii = SI_INFO(sih);
652*4882a593Smuzhiyun sbconfig_t *sb = REGS2SB(sii->curmap);
653*4882a593Smuzhiyun
654*4882a593Smuzhiyun /* + 1 because of enumeration space */
655*4882a593Smuzhiyun return ((R_SBREG(sii, &sb->sbidlow) & SBIDL_AR_MASK) >> SBIDL_AR_SHIFT) + 1;
656*4882a593Smuzhiyun }
657*4882a593Smuzhiyun
658*4882a593Smuzhiyun /* Return the address of the nth address space in the current core */
659*4882a593Smuzhiyun uint32
sb_addrspace(const si_t * sih,uint asidx)660*4882a593Smuzhiyun sb_addrspace(const si_t *sih, uint asidx)
661*4882a593Smuzhiyun {
662*4882a593Smuzhiyun const si_info_t *sii = SI_INFO(sih);
663*4882a593Smuzhiyun
664*4882a593Smuzhiyun return (sb_base(R_SBREG(sii, sb_admatch(sii, asidx))));
665*4882a593Smuzhiyun }
666*4882a593Smuzhiyun
667*4882a593Smuzhiyun /* Return the size of the nth address space in the current core */
668*4882a593Smuzhiyun uint32
sb_addrspacesize(const si_t * sih,uint asidx)669*4882a593Smuzhiyun sb_addrspacesize(const si_t *sih, uint asidx)
670*4882a593Smuzhiyun {
671*4882a593Smuzhiyun const si_info_t *sii = SI_INFO(sih);
672*4882a593Smuzhiyun
673*4882a593Smuzhiyun return (sb_size(R_SBREG(sii, sb_admatch(sii, asidx))));
674*4882a593Smuzhiyun }
675*4882a593Smuzhiyun
676*4882a593Smuzhiyun #if defined(BCMDBG_ERR) || defined(BCMASSERT_SUPPORT) || \
677*4882a593Smuzhiyun defined(BCMDBG_DUMP)
678*4882a593Smuzhiyun /* traverse all cores to find and clear source of serror */
679*4882a593Smuzhiyun static void
sb_serr_clear(si_info_t * sii)680*4882a593Smuzhiyun sb_serr_clear(si_info_t *sii)
681*4882a593Smuzhiyun {
682*4882a593Smuzhiyun sbconfig_t *sb;
683*4882a593Smuzhiyun uint origidx;
684*4882a593Smuzhiyun uint i;
685*4882a593Smuzhiyun bcm_int_bitmask_t intr_val;
686*4882a593Smuzhiyun volatile void *corereg = NULL;
687*4882a593Smuzhiyun
688*4882a593Smuzhiyun INTR_OFF(sii, &intr_val);
689*4882a593Smuzhiyun origidx = si_coreidx(&sii->pub);
690*4882a593Smuzhiyun
691*4882a593Smuzhiyun for (i = 0; i < sii->numcores; i++) {
692*4882a593Smuzhiyun corereg = sb_setcoreidx(&sii->pub, i);
693*4882a593Smuzhiyun if (NULL != corereg) {
694*4882a593Smuzhiyun sb = REGS2SB(corereg);
695*4882a593Smuzhiyun if ((R_SBREG(sii, &sb->sbtmstatehigh)) & SBTMH_SERR) {
696*4882a593Smuzhiyun AND_SBREG(sii, &sb->sbtmstatehigh, ~SBTMH_SERR);
697*4882a593Smuzhiyun SI_ERROR(("sb_serr_clear: SError at core 0x%x\n",
698*4882a593Smuzhiyun sb_coreid(&sii->pub)));
699*4882a593Smuzhiyun }
700*4882a593Smuzhiyun }
701*4882a593Smuzhiyun }
702*4882a593Smuzhiyun
703*4882a593Smuzhiyun sb_setcoreidx(&sii->pub, origidx);
704*4882a593Smuzhiyun INTR_RESTORE(sii, &intr_val);
705*4882a593Smuzhiyun }
706*4882a593Smuzhiyun
/*
 * Check if any inband, outband or timeout errors has happened and clear them.
 * Must be called with chip clk on !
 *
 * sih     - silicon interface handle
 * details - when TRUE (BCMDBG builds only), dump register state for each
 *           error class found
 *
 * Returns TRUE if any inband / serror / timeout condition was detected
 * (and cleared), FALSE otherwise.
 */
bool
sb_taclear(si_t *sih, bool details)
{
	si_info_t *sii = SI_INFO(sih);
	bool rc = FALSE;
	uint32 inband = 0, serror = 0, timeout = 0;
	volatile uint32 imstate;

	BCM_REFERENCE(sii);

	if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
		volatile uint32 stcmd;

		/* inband error is Target abort for PCI */
		stcmd = OSL_PCI_READ_CONFIG(sii->osh, PCI_CFG_CMD, sizeof(uint32));
		inband = stcmd & PCI_STAT_TA;
		if (inband) {
#ifdef BCMDBG
			if (details) {
				SI_ERROR(("\ninband:\n"));
				si_viewall(sih, FALSE);
			}
#endif
			/* PCI status bits are write-one-to-clear; writing back the
			 * value just read clears the asserted error bits
			 */
			OSL_PCI_WRITE_CONFIG(sii->osh, PCI_CFG_CMD, sizeof(uint32), stcmd);
		}

		/* serror */
		stcmd = OSL_PCI_READ_CONFIG(sii->osh, PCI_INT_STATUS, sizeof(uint32));
		serror = stcmd & PCI_SBIM_STATUS_SERR;
		if (serror) {
#ifdef BCMDBG
			if (details) {
				SI_ERROR(("\nserror:\n"));
				si_viewall(sih, FALSE);
			}
#endif
			/* clear the core that sourced the SError, then the latched
			 * status in PCI config space
			 */
			sb_serr_clear(sii);
			OSL_PCI_WRITE_CONFIG(sii->osh, PCI_INT_STATUS, sizeof(uint32), stcmd);
		}

		/* timeout */
		imstate = sb_corereg(sih, sii->pub.buscoreidx,
		            SBCONFIGOFF + OFFSETOF(sbconfig_t, sbimstate), 0, 0);
		/* 0xffffffff indicates the register read failed; otherwise clear
		 * only the inband-error / timeout bits, preserving the rest
		 */
		if ((imstate != 0xffffffff) && (imstate & (SBIM_IBE | SBIM_TO))) {
			sb_corereg(sih, sii->pub.buscoreidx,
			           SBCONFIGOFF + OFFSETOF(sbconfig_t, sbimstate), ~0,
			           (imstate & ~(SBIM_IBE | SBIM_TO)));
			/* inband = imstate & SBIM_IBE; same as TA above */
			timeout = imstate & SBIM_TO;
			if (timeout) {
#ifdef BCMDBG
				if (details) {
					SI_ERROR(("\ntimeout:\n"));
					si_viewall(sih, FALSE);
				}
#endif
			}
		}

		if (inband) {
			/* dump errlog for sonics >= 2.3 */
			if (sii->pub.socirev == SONICS_2_2)
				;	/* SONICS_2_2 has no imerrlog registers */
			else {
				uint32 imerrlog, imerrloga;
				imerrlog = sb_corereg(sih, sii->pub.buscoreidx, SBIMERRLOG, 0, 0);
				if (imerrlog & SBTMEL_EC) {
					imerrloga = sb_corereg(sih, sii->pub.buscoreidx,
						SBIMERRLOGA, 0, 0);
					BCM_REFERENCE(imerrloga);
					/* clear errlog */
					sb_corereg(sih, sii->pub.buscoreidx, SBIMERRLOG, ~0, 0);
					SI_ERROR(("sb_taclear: ImErrLog 0x%x, ImErrLogA 0x%x\n",
						imerrlog, imerrloga));
				}
			}
		}
	}
#ifdef BCMSDIO
	else if ((BUSTYPE(sii->pub.bustype) == SDIO_BUS) ||
	         (BUSTYPE(sii->pub.bustype) == SPI_BUS)) {
		sbconfig_t *sb;
		uint origidx;
		bcm_int_bitmask_t intr_val;
		volatile void *corereg = NULL;
		volatile uint32 tmstate;

		INTR_OFF(sii, &intr_val);
		origidx = si_coreidx(sih);

		/* errors are checked on the SDIO device core for these buses */
		corereg = si_setcore(sih, SDIOD_CORE_ID, 0);
		if (corereg != NULL) {
			sb = REGS2SB(corereg);

			imstate = R_SBREG(sii, &sb->sbimstate);
			if ((imstate != 0xffffffff) && (imstate & (SBIM_IBE | SBIM_TO))) {
				AND_SBREG(sii, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
				/* inband = imstate & SBIM_IBE; cmd error */
				timeout = imstate & SBIM_TO;
			}
			tmstate = R_SBREG(sii, &sb->sbtmstatehigh);
			if ((tmstate != 0xffffffff) && (tmstate & SBTMH_INT_STATUS)) {
				/* clear the SError source, then ack the interrupt by
				 * pulsing SBTML_INT_ACK
				 */
				sb_serr_clear(sii);
				serror = 1;
				OR_SBREG(sii, &sb->sbtmstatelow, SBTML_INT_ACK);
				AND_SBREG(sii, &sb->sbtmstatelow, ~SBTML_INT_ACK);
			}
		}

		sb_setcoreidx(sih, origidx);
		INTR_RESTORE(sii, &intr_val);
	}
#endif /* BCMSDIO */

	if (inband | timeout | serror) {
		rc = TRUE;
		SI_ERROR(("sb_taclear: inband 0x%x, serror 0x%x, timeout 0x%x!\n",
			inband, serror, timeout));
	}

	return (rc);
}
833*4882a593Smuzhiyun #endif /* BCMDBG_ERR || BCMASSERT_SUPPORT || BCMDBG_DUMP */
834*4882a593Smuzhiyun
/* Do buffered registers update: issue a broadcast write (broadcastaddress =
 * SB_COMMIT, broadcastdata = 0) through chipcommon if present, else through
 * the PCI core, so that pending buffered register writes take effect.
 * Interrupts are blocked for the duration and the original core index is
 * restored before returning.
 */
void
sb_commit(si_t *sih)
{
	const si_info_t *sii = SI_INFO(sih);
	uint origidx;
	bcm_int_bitmask_t intr_val;

	origidx = sii->curidx;
	ASSERT(GOODIDX(origidx, sii->numcores));

	INTR_OFF(sii, &intr_val);

	/* switch over to chipcommon core if there is one, else use pci */
	if (sii->pub.ccrev != NOREV) {
		chipcregs_t *ccregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
		ASSERT(ccregs != NULL);

		/* do the buffer registers update */
		W_REG(sii->osh, &ccregs->broadcastaddress, SB_COMMIT);
		W_REG(sii->osh, &ccregs->broadcastdata, 0x0);
#if !defined(BCMDONGLEHOST)
	} else if (PCI(sii)) {
		sbpciregs_t *pciregs = (sbpciregs_t *)si_setcore(sih, PCI_CORE_ID, 0);
		ASSERT(pciregs != NULL);

		/* do the buffer registers update */
		W_REG(sii->osh, &pciregs->bcastaddr, SB_COMMIT);
		W_REG(sii->osh, &pciregs->bcastdata, 0x0);
#endif /* !defined(BCMDONGLEHOST) */
	} else
		/* no core capable of broadcasting the commit */
		ASSERT(0);

	/* restore core index */
	sb_setcoreidx(sih, origidx);
	INTR_RESTORE(sii, &intr_val);
}
872*4882a593Smuzhiyun
/* Put the current core into reset, leaving reset and reject asserted.
 *
 * sih  - silicon interface handle; the target core must already be selected
 * bits - core-specific SICF flags to keep set (shifted into sbtmstatelow)
 *
 * The sequence (reject -> wait !busy -> initiator reject -> reset) follows
 * the Sonics backplane disable protocol; statement order and delays matter.
 */
void
sb_core_disable(const si_t *sih, uint32 bits)
{
	const si_info_t *sii = SI_INFO(sih);
	volatile uint32 dummy;
	sbconfig_t *sb;

	ASSERT(GOODREGS(sii->curmap));
	sb = REGS2SB(sii->curmap);

	/* if core is already in reset, just return */
	if (R_SBREG(sii, &sb->sbtmstatelow) & SBTML_RESET)
		return;

	/* if clocks are not enabled, put into reset and return */
	if ((R_SBREG(sii, &sb->sbtmstatelow) & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) == 0)
		goto disable;

	/* set target reject and spin until busy is clear (preserve core-specific bits) */
	OR_SBREG(sii, &sb->sbtmstatelow, SBTML_REJ);
	/* read back to flush the posted write before the delay */
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);
	SPINWAIT((R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000);
	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY)
		SI_ERROR(("sb_core_disable: target state still busy\n"));

	/*
	 * If core is initiator, set the Reject bit and allow Busy to clear.
	 * sonicsrev < 2.3 chips don't have the Reject and Busy bits (nops).
	 * Don't assert - dma engine might be stuck (PR4871).
	 */
	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) {
		OR_SBREG(sii, &sb->sbimstate, SBIM_RJ);
		dummy = R_SBREG(sii, &sb->sbimstate);
		BCM_REFERENCE(dummy);
		OSL_DELAY(1);
		SPINWAIT((R_SBREG(sii, &sb->sbimstate) & SBIM_BY), 100000);
	}

	/* set reset and reject while enabling the clocks */
	W_SBREG(sii, &sb->sbtmstatelow,
	        (((bits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
	         SBTML_REJ | SBTML_RESET));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(10);

	/* don't forget to clear the initiator reject bit */
	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT)
		AND_SBREG(sii, &sb->sbimstate, ~SBIM_RJ);

disable:
	/* leave reset and reject asserted */
	W_SBREG(sii, &sb->sbtmstatelow, ((bits << SBTML_SICF_SHIFT) | SBTML_REJ | SBTML_RESET));
	OSL_DELAY(1);
}
930*4882a593Smuzhiyun
/* reset and re-enable a core
 * inputs:
 * bits - core specific bits that are set during and after reset sequence
 * resetbits - core specific bits that are set only during reset sequence
 *
 * Sequence: full disable first, then reset+clocks, clear any latched errors,
 * release reset, and finally drop the force-gated-clock (FGC) bit. Each write
 * is followed by a read-back (to flush the posted write) and a short delay;
 * the ordering is part of the Sonics reset protocol and must not change.
 */
void
sb_core_reset(const si_t *sih, uint32 bits, uint32 resetbits)
{
	const si_info_t *sii = SI_INFO(sih);
	sbconfig_t *sb;
	volatile uint32 dummy;

	ASSERT(GOODREGS(sii->curmap));
	sb = REGS2SB(sii->curmap);

	/*
	 * Must do the disable sequence first to work for arbitrary current core state.
	 */
	sb_core_disable(sih, (bits | resetbits));

	/*
	 * Now do the initialization sequence.
	 */

	/* set reset while enabling the clock and forcing them on throughout the core */
	W_SBREG(sii, &sb->sbtmstatelow,
	        (((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
	         SBTML_RESET));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);

	/* PR3158 - clear any serror */
	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_SERR) {
		W_SBREG(sii, &sb->sbtmstatehigh, 0);
	}
	/* clear any latched inband-error / timeout state as well */
	if ((dummy = R_SBREG(sii, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) {
		AND_SBREG(sii, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
	}

	/* clear reset and allow it to propagate throughout the core */
	W_SBREG(sii, &sb->sbtmstatelow,
	        ((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);

	/* leave clock enabled */
	W_SBREG(sii, &sb->sbtmstatelow, ((bits | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);
}
984*4882a593Smuzhiyun
985*4882a593Smuzhiyun uint32
sb_base(uint32 admatch)986*4882a593Smuzhiyun sb_base(uint32 admatch)
987*4882a593Smuzhiyun {
988*4882a593Smuzhiyun uint32 base;
989*4882a593Smuzhiyun uint type;
990*4882a593Smuzhiyun
991*4882a593Smuzhiyun type = admatch & SBAM_TYPE_MASK;
992*4882a593Smuzhiyun ASSERT(type < 3);
993*4882a593Smuzhiyun
994*4882a593Smuzhiyun base = 0;
995*4882a593Smuzhiyun
996*4882a593Smuzhiyun if (type == 0) {
997*4882a593Smuzhiyun base = admatch & SBAM_BASE0_MASK;
998*4882a593Smuzhiyun } else if (type == 1) {
999*4882a593Smuzhiyun ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
1000*4882a593Smuzhiyun base = admatch & SBAM_BASE1_MASK;
1001*4882a593Smuzhiyun } else if (type == 2) {
1002*4882a593Smuzhiyun ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
1003*4882a593Smuzhiyun base = admatch & SBAM_BASE2_MASK;
1004*4882a593Smuzhiyun }
1005*4882a593Smuzhiyun
1006*4882a593Smuzhiyun return (base);
1007*4882a593Smuzhiyun }
1008*4882a593Smuzhiyun
1009*4882a593Smuzhiyun uint32
sb_size(uint32 admatch)1010*4882a593Smuzhiyun sb_size(uint32 admatch)
1011*4882a593Smuzhiyun {
1012*4882a593Smuzhiyun uint32 size;
1013*4882a593Smuzhiyun uint type;
1014*4882a593Smuzhiyun
1015*4882a593Smuzhiyun type = admatch & SBAM_TYPE_MASK;
1016*4882a593Smuzhiyun ASSERT(type < 3);
1017*4882a593Smuzhiyun
1018*4882a593Smuzhiyun size = 0;
1019*4882a593Smuzhiyun
1020*4882a593Smuzhiyun if (type == 0) {
1021*4882a593Smuzhiyun size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1);
1022*4882a593Smuzhiyun } else if (type == 1) {
1023*4882a593Smuzhiyun ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
1024*4882a593Smuzhiyun size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1);
1025*4882a593Smuzhiyun } else if (type == 2) {
1026*4882a593Smuzhiyun ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
1027*4882a593Smuzhiyun size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1);
1028*4882a593Smuzhiyun }
1029*4882a593Smuzhiyun
1030*4882a593Smuzhiyun return (size);
1031*4882a593Smuzhiyun }
1032*4882a593Smuzhiyun
1033*4882a593Smuzhiyun #if defined(BCMDBG) || defined(BCMDBG_DUMP)|| defined(BCMDBG_PHYDUMP)
/* Print interesting sbconfig registers for every core into strbuf 'b'.
 * Debug-only (BCMDBG / BCMDBG_DUMP / BCMDBG_PHYDUMP). Iterates all cores
 * with interrupts blocked and restores the original core index on exit.
 */
void
sb_dumpregs(si_t *sih, struct bcmstrbuf *b)
{
	sbconfig_t *sb;
	uint origidx, i;
	bcm_int_bitmask_t intr_val;
	const si_info_t *sii = SI_INFO(sih);
	const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;

	origidx = sii->curidx;

	INTR_OFF(sii, &intr_val);

	for (i = 0; i < sii->numcores; i++) {
		/* select core i; its sbconfig space is what we dump below */
		sb = REGS2SB(sb_setcoreidx(sih, i));

		bcm_bprintf(b, "core 0x%x: \n", cores_info->coreid[i]);

		/* imerrlog registers only exist on sonics revs newer than 2.2 */
		if (sii->pub.socirev > SONICS_2_2)
			bcm_bprintf(b, "sbimerrlog 0x%x sbimerrloga 0x%x\n",
			            sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOG, 0, 0),
			            sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOGA, 0, 0));

		bcm_bprintf(b, "sbtmstatelow 0x%x sbtmstatehigh 0x%x sbidhigh 0x%x "
		            "sbimstate 0x%x\n sbimconfiglow 0x%x sbimconfighigh 0x%x\n",
		            R_SBREG(sii, &sb->sbtmstatelow), R_SBREG(sii, &sb->sbtmstatehigh),
		            R_SBREG(sii, &sb->sbidhigh), R_SBREG(sii, &sb->sbimstate),
		            R_SBREG(sii, &sb->sbimconfiglow), R_SBREG(sii, &sb->sbimconfighigh));
	}

	sb_setcoreidx(sih, origidx);
	INTR_RESTORE(sii, &intr_val);
}
1068*4882a593Smuzhiyun #endif /* BCMDBG || BCMDBG_DUMP || BCMDBG_PHYDUMP */
1069*4882a593Smuzhiyun
1070*4882a593Smuzhiyun #if defined(BCMDBG)
/* Dump the current core's sbconfig registers via SI_ERROR (BCMDBG only).
 *
 * sih     - silicon interface handle; dumps the currently selected core
 * verbose - when TRUE, also print the less commonly needed registers
 */
void
sb_view(si_t *sih, bool verbose)
{
	const si_info_t *sii = SI_INFO(sih);
	sbconfig_t *sb = REGS2SB(sii->curmap);

	SI_ERROR(("\nCore ID: 0x%x\n", sb_coreid(&sii->pub)));

	/* imerrlog registers only exist on sonics revs newer than 2.2 */
	if (sii->pub.socirev > SONICS_2_2)
		SI_ERROR(("sbimerrlog 0x%x sbimerrloga 0x%x\n",
		          sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOG, 0, 0),
		          sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOGA, 0, 0)));

	/* Print important or helpful registers */
	SI_ERROR(("sbtmerrloga 0x%x sbtmerrlog 0x%x\n",
	          R_SBREG(sii, &sb->sbtmerrloga), R_SBREG(sii, &sb->sbtmerrlog)));
	SI_ERROR(("sbimstate 0x%x sbtmstatelow 0x%x sbtmstatehigh 0x%x\n",
	          R_SBREG(sii, &sb->sbimstate),
	          R_SBREG(sii, &sb->sbtmstatelow), R_SBREG(sii, &sb->sbtmstatehigh)));
	SI_ERROR(("sbimconfiglow 0x%x sbtmconfiglow 0x%x\nsbtmconfighigh 0x%x sbidhigh 0x%x\n",
	          R_SBREG(sii, &sb->sbimconfiglow), R_SBREG(sii, &sb->sbtmconfiglow),
	          R_SBREG(sii, &sb->sbtmconfighigh), R_SBREG(sii, &sb->sbidhigh)));

	/* Print more detailed registers that are otherwise not relevant */
	if (verbose) {
		SI_ERROR(("sbipsflag 0x%x sbtpsflag 0x%x\n",
		          R_SBREG(sii, &sb->sbipsflag), R_SBREG(sii, &sb->sbtpsflag)));
		SI_ERROR(("sbadmatch3 0x%x sbadmatch2 0x%x\nsbadmatch1 0x%x sbadmatch0 0x%x\n",
		          R_SBREG(sii, &sb->sbadmatch3), R_SBREG(sii, &sb->sbadmatch2),
		          R_SBREG(sii, &sb->sbadmatch1), R_SBREG(sii, &sb->sbadmatch0)));
		SI_ERROR(("sbintvec 0x%x sbbwa0 0x%x sbimconfighigh 0x%x\n",
		          R_SBREG(sii, &sb->sbintvec), R_SBREG(sii, &sb->sbbwa0),
		          R_SBREG(sii, &sb->sbimconfighigh)));
		SI_ERROR(("sbbconfig 0x%x sbbstate 0x%x\n",
		          R_SBREG(sii, &sb->sbbconfig), R_SBREG(sii, &sb->sbbstate)));
		SI_ERROR(("sbactcnfg 0x%x sbflagst 0x%x sbidlow 0x%x \n\n",
		          R_SBREG(sii, &sb->sbactcnfg), R_SBREG(sii, &sb->sbflagst),
		          R_SBREG(sii, &sb->sbidlow)));
	}
}
1111*4882a593Smuzhiyun #endif /* BCMDBG */
1112