1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun * Misc utility routines for accessing chip-specific features
3*4882a593Smuzhiyun * of the SiliconBackplane-based Broadcom chips.
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
6*4882a593Smuzhiyun *
7*4882a593Smuzhiyun * Copyright (C) 1999-2017, Broadcom Corporation
8*4882a593Smuzhiyun *
9*4882a593Smuzhiyun * Unless you and Broadcom execute a separate written software license
10*4882a593Smuzhiyun * agreement governing use of this software, this software is licensed to you
11*4882a593Smuzhiyun * under the terms of the GNU General Public License version 2 (the "GPL"),
12*4882a593Smuzhiyun * available at http://www.broadcom.com/licenses/GPLv2.php, with the
13*4882a593Smuzhiyun * following added to such license:
14*4882a593Smuzhiyun *
15*4882a593Smuzhiyun * As a special exception, the copyright holders of this software give you
16*4882a593Smuzhiyun * permission to link this software with independent modules, and to copy and
17*4882a593Smuzhiyun * distribute the resulting executable under terms of your choice, provided that
18*4882a593Smuzhiyun * you also meet, for each linked independent module, the terms and conditions of
19*4882a593Smuzhiyun * the license of that module. An independent module is a module which is not
20*4882a593Smuzhiyun * derived from this software. The special exception does not apply to any
21*4882a593Smuzhiyun * modifications of the software.
22*4882a593Smuzhiyun *
23*4882a593Smuzhiyun * Notwithstanding the above, under no circumstances may you combine this
24*4882a593Smuzhiyun * software in any way with any other Broadcom software provided under a license
25*4882a593Smuzhiyun * other than the GPL, without Broadcom's express prior written consent.
26*4882a593Smuzhiyun *
27*4882a593Smuzhiyun *
28*4882a593Smuzhiyun * <<Broadcom-WL-IPTag/Open:>>
29*4882a593Smuzhiyun *
30*4882a593Smuzhiyun * $Id: sbutils.c 700323 2017-05-18 16:12:11Z $
31*4882a593Smuzhiyun */
32*4882a593Smuzhiyun
33*4882a593Smuzhiyun #include <bcm_cfg.h>
34*4882a593Smuzhiyun #include <typedefs.h>
35*4882a593Smuzhiyun #include <bcmdefs.h>
36*4882a593Smuzhiyun #include <osl.h>
37*4882a593Smuzhiyun #include <bcmutils.h>
38*4882a593Smuzhiyun #include <siutils.h>
39*4882a593Smuzhiyun #include <bcmdevs.h>
40*4882a593Smuzhiyun #include <hndsoc.h>
41*4882a593Smuzhiyun #include <sbchipc.h>
42*4882a593Smuzhiyun #include <pcicfg.h>
43*4882a593Smuzhiyun #include <sbpcmcia.h>
44*4882a593Smuzhiyun
45*4882a593Smuzhiyun #include "siutils_priv.h"
46*4882a593Smuzhiyun
47*4882a593Smuzhiyun /* local prototypes */
48*4882a593Smuzhiyun static uint _sb_coreidx(si_info_t *sii, uint32 sba);
49*4882a593Smuzhiyun static uint _sb_scan(si_info_t *sii, uint32 sba, volatile void *regs, uint bus, uint32 sbba,
50*4882a593Smuzhiyun uint ncores, uint devid);
51*4882a593Smuzhiyun static uint32 _sb_coresba(si_info_t *sii);
52*4882a593Smuzhiyun static volatile void *_sb_setcoreidx(si_info_t *sii, uint coreidx);
53*4882a593Smuzhiyun #define SET_SBREG(sii, r, mask, val) \
54*4882a593Smuzhiyun W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val)))
55*4882a593Smuzhiyun #define REGS2SB(va) (sbconfig_t*) ((volatile int8*)(va) + SBCONFIGOFF)
56*4882a593Smuzhiyun
57*4882a593Smuzhiyun /* sonicsrev */
58*4882a593Smuzhiyun #define SONICS_2_2 (SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
59*4882a593Smuzhiyun #define SONICS_2_3 (SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)
60*4882a593Smuzhiyun
61*4882a593Smuzhiyun #define R_SBREG(sii, sbr) sb_read_sbreg((sii), (sbr))
62*4882a593Smuzhiyun #define W_SBREG(sii, sbr, v) sb_write_sbreg((sii), (sbr), (v))
63*4882a593Smuzhiyun #define AND_SBREG(sii, sbr, v) W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v)))
64*4882a593Smuzhiyun #define OR_SBREG(sii, sbr, v) W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v)))
65*4882a593Smuzhiyun
66*4882a593Smuzhiyun static uint32
sb_read_sbreg(si_info_t * sii,volatile uint32 * sbr)67*4882a593Smuzhiyun sb_read_sbreg(si_info_t *sii, volatile uint32 *sbr)
68*4882a593Smuzhiyun {
69*4882a593Smuzhiyun uint8 tmp;
70*4882a593Smuzhiyun uint32 val, intr_val = 0;
71*4882a593Smuzhiyun
72*4882a593Smuzhiyun /*
73*4882a593Smuzhiyun * compact flash only has 11 bits address, while we needs 12 bits address.
74*4882a593Smuzhiyun * MEM_SEG will be OR'd with other 11 bits address in hardware,
75*4882a593Smuzhiyun * so we program MEM_SEG with 12th bit when necessary(access sb regsiters).
76*4882a593Smuzhiyun * For normal PCMCIA bus(CFTable_regwinsz > 2k), do nothing special
77*4882a593Smuzhiyun */
78*4882a593Smuzhiyun if (PCMCIA(sii)) {
79*4882a593Smuzhiyun INTR_OFF(sii, intr_val);
80*4882a593Smuzhiyun tmp = 1;
81*4882a593Smuzhiyun OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
82*4882a593Smuzhiyun sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
83*4882a593Smuzhiyun }
84*4882a593Smuzhiyun
85*4882a593Smuzhiyun val = R_REG(sii->osh, sbr);
86*4882a593Smuzhiyun
87*4882a593Smuzhiyun if (PCMCIA(sii)) {
88*4882a593Smuzhiyun tmp = 0;
89*4882a593Smuzhiyun OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
90*4882a593Smuzhiyun INTR_RESTORE(sii, intr_val);
91*4882a593Smuzhiyun }
92*4882a593Smuzhiyun
93*4882a593Smuzhiyun return (val);
94*4882a593Smuzhiyun }
95*4882a593Smuzhiyun
96*4882a593Smuzhiyun static void
sb_write_sbreg(si_info_t * sii,volatile uint32 * sbr,uint32 v)97*4882a593Smuzhiyun sb_write_sbreg(si_info_t *sii, volatile uint32 *sbr, uint32 v)
98*4882a593Smuzhiyun {
99*4882a593Smuzhiyun uint8 tmp;
100*4882a593Smuzhiyun volatile uint32 dummy;
101*4882a593Smuzhiyun uint32 intr_val = 0;
102*4882a593Smuzhiyun
103*4882a593Smuzhiyun /*
104*4882a593Smuzhiyun * compact flash only has 11 bits address, while we needs 12 bits address.
105*4882a593Smuzhiyun * MEM_SEG will be OR'd with other 11 bits address in hardware,
106*4882a593Smuzhiyun * so we program MEM_SEG with 12th bit when necessary(access sb regsiters).
107*4882a593Smuzhiyun * For normal PCMCIA bus(CFTable_regwinsz > 2k), do nothing special
108*4882a593Smuzhiyun */
109*4882a593Smuzhiyun if (PCMCIA(sii)) {
110*4882a593Smuzhiyun INTR_OFF(sii, intr_val);
111*4882a593Smuzhiyun tmp = 1;
112*4882a593Smuzhiyun OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
113*4882a593Smuzhiyun sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
114*4882a593Smuzhiyun }
115*4882a593Smuzhiyun
116*4882a593Smuzhiyun if (BUSTYPE(sii->pub.bustype) == PCMCIA_BUS) {
117*4882a593Smuzhiyun dummy = R_REG(sii->osh, sbr);
118*4882a593Smuzhiyun BCM_REFERENCE(dummy);
119*4882a593Smuzhiyun W_REG(sii->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
120*4882a593Smuzhiyun dummy = R_REG(sii->osh, sbr);
121*4882a593Smuzhiyun BCM_REFERENCE(dummy);
122*4882a593Smuzhiyun W_REG(sii->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
123*4882a593Smuzhiyun } else
124*4882a593Smuzhiyun W_REG(sii->osh, sbr, v);
125*4882a593Smuzhiyun
126*4882a593Smuzhiyun if (PCMCIA(sii)) {
127*4882a593Smuzhiyun tmp = 0;
128*4882a593Smuzhiyun OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
129*4882a593Smuzhiyun INTR_RESTORE(sii, intr_val);
130*4882a593Smuzhiyun }
131*4882a593Smuzhiyun }
132*4882a593Smuzhiyun
133*4882a593Smuzhiyun uint
sb_coreid(si_t * sih)134*4882a593Smuzhiyun sb_coreid(si_t *sih)
135*4882a593Smuzhiyun {
136*4882a593Smuzhiyun si_info_t *sii;
137*4882a593Smuzhiyun sbconfig_t *sb;
138*4882a593Smuzhiyun
139*4882a593Smuzhiyun sii = SI_INFO(sih);
140*4882a593Smuzhiyun sb = REGS2SB(sii->curmap);
141*4882a593Smuzhiyun
142*4882a593Smuzhiyun return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT);
143*4882a593Smuzhiyun }
144*4882a593Smuzhiyun
145*4882a593Smuzhiyun uint
sb_intflag(si_t * sih)146*4882a593Smuzhiyun sb_intflag(si_t *sih)
147*4882a593Smuzhiyun {
148*4882a593Smuzhiyun si_info_t *sii = SI_INFO(sih);
149*4882a593Smuzhiyun volatile void *corereg;
150*4882a593Smuzhiyun sbconfig_t *sb;
151*4882a593Smuzhiyun uint origidx, intflag, intr_val = 0;
152*4882a593Smuzhiyun
153*4882a593Smuzhiyun INTR_OFF(sii, intr_val);
154*4882a593Smuzhiyun origidx = si_coreidx(sih);
155*4882a593Smuzhiyun corereg = si_setcore(sih, CC_CORE_ID, 0);
156*4882a593Smuzhiyun ASSERT(corereg != NULL);
157*4882a593Smuzhiyun sb = REGS2SB(corereg);
158*4882a593Smuzhiyun intflag = R_SBREG(sii, &sb->sbflagst);
159*4882a593Smuzhiyun sb_setcoreidx(sih, origidx);
160*4882a593Smuzhiyun INTR_RESTORE(sii, intr_val);
161*4882a593Smuzhiyun
162*4882a593Smuzhiyun return intflag;
163*4882a593Smuzhiyun }
164*4882a593Smuzhiyun
165*4882a593Smuzhiyun uint
sb_flag(si_t * sih)166*4882a593Smuzhiyun sb_flag(si_t *sih)
167*4882a593Smuzhiyun {
168*4882a593Smuzhiyun si_info_t *sii;
169*4882a593Smuzhiyun sbconfig_t *sb;
170*4882a593Smuzhiyun
171*4882a593Smuzhiyun sii = SI_INFO(sih);
172*4882a593Smuzhiyun sb = REGS2SB(sii->curmap);
173*4882a593Smuzhiyun
174*4882a593Smuzhiyun return R_SBREG(sii, &sb->sbtpsflag) & SBTPS_NUM0_MASK;
175*4882a593Smuzhiyun }
176*4882a593Smuzhiyun
177*4882a593Smuzhiyun void
sb_setint(si_t * sih,int siflag)178*4882a593Smuzhiyun sb_setint(si_t *sih, int siflag)
179*4882a593Smuzhiyun {
180*4882a593Smuzhiyun si_info_t *sii;
181*4882a593Smuzhiyun sbconfig_t *sb;
182*4882a593Smuzhiyun uint32 vec;
183*4882a593Smuzhiyun
184*4882a593Smuzhiyun sii = SI_INFO(sih);
185*4882a593Smuzhiyun sb = REGS2SB(sii->curmap);
186*4882a593Smuzhiyun
187*4882a593Smuzhiyun if (siflag == -1)
188*4882a593Smuzhiyun vec = 0;
189*4882a593Smuzhiyun else
190*4882a593Smuzhiyun vec = 1 << siflag;
191*4882a593Smuzhiyun W_SBREG(sii, &sb->sbintvec, vec);
192*4882a593Smuzhiyun }
193*4882a593Smuzhiyun
194*4882a593Smuzhiyun /* return core index of the core with address 'sba' */
195*4882a593Smuzhiyun static uint
_sb_coreidx(si_info_t * sii,uint32 sba)196*4882a593Smuzhiyun _sb_coreidx(si_info_t *sii, uint32 sba)
197*4882a593Smuzhiyun {
198*4882a593Smuzhiyun uint i;
199*4882a593Smuzhiyun si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
200*4882a593Smuzhiyun
201*4882a593Smuzhiyun for (i = 0; i < sii->numcores; i ++)
202*4882a593Smuzhiyun if (sba == cores_info->coresba[i])
203*4882a593Smuzhiyun return i;
204*4882a593Smuzhiyun return BADIDX;
205*4882a593Smuzhiyun }
206*4882a593Smuzhiyun
207*4882a593Smuzhiyun /* return core address of the current core */
208*4882a593Smuzhiyun static uint32
_sb_coresba(si_info_t * sii)209*4882a593Smuzhiyun _sb_coresba(si_info_t *sii)
210*4882a593Smuzhiyun {
211*4882a593Smuzhiyun uint32 sbaddr;
212*4882a593Smuzhiyun
213*4882a593Smuzhiyun switch (BUSTYPE(sii->pub.bustype)) {
214*4882a593Smuzhiyun case SI_BUS: {
215*4882a593Smuzhiyun sbconfig_t *sb = REGS2SB(sii->curmap);
216*4882a593Smuzhiyun sbaddr = sb_base(R_SBREG(sii, &sb->sbadmatch0));
217*4882a593Smuzhiyun break;
218*4882a593Smuzhiyun }
219*4882a593Smuzhiyun
220*4882a593Smuzhiyun case PCI_BUS:
221*4882a593Smuzhiyun sbaddr = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
222*4882a593Smuzhiyun break;
223*4882a593Smuzhiyun
224*4882a593Smuzhiyun case PCMCIA_BUS: {
225*4882a593Smuzhiyun uint8 tmp = 0;
226*4882a593Smuzhiyun OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
227*4882a593Smuzhiyun sbaddr = (uint32)tmp << 12;
228*4882a593Smuzhiyun OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
229*4882a593Smuzhiyun sbaddr |= (uint32)tmp << 16;
230*4882a593Smuzhiyun OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
231*4882a593Smuzhiyun sbaddr |= (uint32)tmp << 24;
232*4882a593Smuzhiyun break;
233*4882a593Smuzhiyun }
234*4882a593Smuzhiyun
235*4882a593Smuzhiyun #ifdef BCMSDIO
236*4882a593Smuzhiyun case SPI_BUS:
237*4882a593Smuzhiyun case SDIO_BUS:
238*4882a593Smuzhiyun sbaddr = (uint32)(uintptr)sii->curmap;
239*4882a593Smuzhiyun break;
240*4882a593Smuzhiyun #endif // endif
241*4882a593Smuzhiyun
242*4882a593Smuzhiyun default:
243*4882a593Smuzhiyun sbaddr = BADCOREADDR;
244*4882a593Smuzhiyun break;
245*4882a593Smuzhiyun }
246*4882a593Smuzhiyun
247*4882a593Smuzhiyun return sbaddr;
248*4882a593Smuzhiyun }
249*4882a593Smuzhiyun
250*4882a593Smuzhiyun uint
sb_corevendor(si_t * sih)251*4882a593Smuzhiyun sb_corevendor(si_t *sih)
252*4882a593Smuzhiyun {
253*4882a593Smuzhiyun si_info_t *sii;
254*4882a593Smuzhiyun sbconfig_t *sb;
255*4882a593Smuzhiyun
256*4882a593Smuzhiyun sii = SI_INFO(sih);
257*4882a593Smuzhiyun sb = REGS2SB(sii->curmap);
258*4882a593Smuzhiyun
259*4882a593Smuzhiyun return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT);
260*4882a593Smuzhiyun }
261*4882a593Smuzhiyun
262*4882a593Smuzhiyun uint
sb_corerev(si_t * sih)263*4882a593Smuzhiyun sb_corerev(si_t *sih)
264*4882a593Smuzhiyun {
265*4882a593Smuzhiyun si_info_t *sii;
266*4882a593Smuzhiyun sbconfig_t *sb;
267*4882a593Smuzhiyun uint sbidh;
268*4882a593Smuzhiyun
269*4882a593Smuzhiyun sii = SI_INFO(sih);
270*4882a593Smuzhiyun sb = REGS2SB(sii->curmap);
271*4882a593Smuzhiyun sbidh = R_SBREG(sii, &sb->sbidhigh);
272*4882a593Smuzhiyun
273*4882a593Smuzhiyun return (SBCOREREV(sbidh));
274*4882a593Smuzhiyun }
275*4882a593Smuzhiyun
276*4882a593Smuzhiyun /* set core-specific control flags */
277*4882a593Smuzhiyun void
sb_core_cflags_wo(si_t * sih,uint32 mask,uint32 val)278*4882a593Smuzhiyun sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
279*4882a593Smuzhiyun {
280*4882a593Smuzhiyun si_info_t *sii;
281*4882a593Smuzhiyun sbconfig_t *sb;
282*4882a593Smuzhiyun uint32 w;
283*4882a593Smuzhiyun
284*4882a593Smuzhiyun sii = SI_INFO(sih);
285*4882a593Smuzhiyun sb = REGS2SB(sii->curmap);
286*4882a593Smuzhiyun
287*4882a593Smuzhiyun ASSERT((val & ~mask) == 0);
288*4882a593Smuzhiyun
289*4882a593Smuzhiyun /* mask and set */
290*4882a593Smuzhiyun w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
291*4882a593Smuzhiyun (val << SBTML_SICF_SHIFT);
292*4882a593Smuzhiyun W_SBREG(sii, &sb->sbtmstatelow, w);
293*4882a593Smuzhiyun }
294*4882a593Smuzhiyun
295*4882a593Smuzhiyun /* set/clear core-specific control flags */
296*4882a593Smuzhiyun uint32
sb_core_cflags(si_t * sih,uint32 mask,uint32 val)297*4882a593Smuzhiyun sb_core_cflags(si_t *sih, uint32 mask, uint32 val)
298*4882a593Smuzhiyun {
299*4882a593Smuzhiyun si_info_t *sii;
300*4882a593Smuzhiyun sbconfig_t *sb;
301*4882a593Smuzhiyun uint32 w;
302*4882a593Smuzhiyun
303*4882a593Smuzhiyun sii = SI_INFO(sih);
304*4882a593Smuzhiyun sb = REGS2SB(sii->curmap);
305*4882a593Smuzhiyun
306*4882a593Smuzhiyun ASSERT((val & ~mask) == 0);
307*4882a593Smuzhiyun
308*4882a593Smuzhiyun /* mask and set */
309*4882a593Smuzhiyun if (mask || val) {
310*4882a593Smuzhiyun w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
311*4882a593Smuzhiyun (val << SBTML_SICF_SHIFT);
312*4882a593Smuzhiyun W_SBREG(sii, &sb->sbtmstatelow, w);
313*4882a593Smuzhiyun }
314*4882a593Smuzhiyun
315*4882a593Smuzhiyun /* return the new value
316*4882a593Smuzhiyun * for write operation, the following readback ensures the completion of write opration.
317*4882a593Smuzhiyun */
318*4882a593Smuzhiyun return (R_SBREG(sii, &sb->sbtmstatelow) >> SBTML_SICF_SHIFT);
319*4882a593Smuzhiyun }
320*4882a593Smuzhiyun
321*4882a593Smuzhiyun /* set/clear core-specific status flags */
322*4882a593Smuzhiyun uint32
sb_core_sflags(si_t * sih,uint32 mask,uint32 val)323*4882a593Smuzhiyun sb_core_sflags(si_t *sih, uint32 mask, uint32 val)
324*4882a593Smuzhiyun {
325*4882a593Smuzhiyun si_info_t *sii;
326*4882a593Smuzhiyun sbconfig_t *sb;
327*4882a593Smuzhiyun uint32 w;
328*4882a593Smuzhiyun
329*4882a593Smuzhiyun sii = SI_INFO(sih);
330*4882a593Smuzhiyun sb = REGS2SB(sii->curmap);
331*4882a593Smuzhiyun
332*4882a593Smuzhiyun ASSERT((val & ~mask) == 0);
333*4882a593Smuzhiyun ASSERT((mask & ~SISF_CORE_BITS) == 0);
334*4882a593Smuzhiyun
335*4882a593Smuzhiyun /* mask and set */
336*4882a593Smuzhiyun if (mask || val) {
337*4882a593Smuzhiyun w = (R_SBREG(sii, &sb->sbtmstatehigh) & ~(mask << SBTMH_SISF_SHIFT)) |
338*4882a593Smuzhiyun (val << SBTMH_SISF_SHIFT);
339*4882a593Smuzhiyun W_SBREG(sii, &sb->sbtmstatehigh, w);
340*4882a593Smuzhiyun }
341*4882a593Smuzhiyun
342*4882a593Smuzhiyun /* return the new value */
343*4882a593Smuzhiyun return (R_SBREG(sii, &sb->sbtmstatehigh) >> SBTMH_SISF_SHIFT);
344*4882a593Smuzhiyun }
345*4882a593Smuzhiyun
346*4882a593Smuzhiyun bool
sb_iscoreup(si_t * sih)347*4882a593Smuzhiyun sb_iscoreup(si_t *sih)
348*4882a593Smuzhiyun {
349*4882a593Smuzhiyun si_info_t *sii;
350*4882a593Smuzhiyun sbconfig_t *sb;
351*4882a593Smuzhiyun
352*4882a593Smuzhiyun sii = SI_INFO(sih);
353*4882a593Smuzhiyun sb = REGS2SB(sii->curmap);
354*4882a593Smuzhiyun
355*4882a593Smuzhiyun return ((R_SBREG(sii, &sb->sbtmstatelow) &
356*4882a593Smuzhiyun (SBTML_RESET | SBTML_REJ_MASK | (SICF_CLOCK_EN << SBTML_SICF_SHIFT))) ==
357*4882a593Smuzhiyun (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
358*4882a593Smuzhiyun }
359*4882a593Smuzhiyun
360*4882a593Smuzhiyun /*
361*4882a593Smuzhiyun * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
362*4882a593Smuzhiyun * switch back to the original core, and return the new value.
363*4882a593Smuzhiyun *
364*4882a593Smuzhiyun * When using the silicon backplane, no fidleing with interrupts or core switches are needed.
365*4882a593Smuzhiyun *
366*4882a593Smuzhiyun * Also, when using pci/pcie, we can optimize away the core switching for pci registers
367*4882a593Smuzhiyun * and (on newer pci cores) chipcommon registers.
368*4882a593Smuzhiyun */
369*4882a593Smuzhiyun uint
sb_corereg(si_t * sih,uint coreidx,uint regoff,uint mask,uint val)370*4882a593Smuzhiyun sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
371*4882a593Smuzhiyun {
372*4882a593Smuzhiyun uint origidx = 0;
373*4882a593Smuzhiyun volatile uint32 *r = NULL;
374*4882a593Smuzhiyun uint w;
375*4882a593Smuzhiyun uint intr_val = 0;
376*4882a593Smuzhiyun bool fast = FALSE;
377*4882a593Smuzhiyun si_info_t *sii = SI_INFO(sih);
378*4882a593Smuzhiyun si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
379*4882a593Smuzhiyun
380*4882a593Smuzhiyun ASSERT(GOODIDX(coreidx));
381*4882a593Smuzhiyun ASSERT(regoff < SI_CORE_SIZE);
382*4882a593Smuzhiyun ASSERT((val & ~mask) == 0);
383*4882a593Smuzhiyun
384*4882a593Smuzhiyun if (coreidx >= SI_MAXCORES)
385*4882a593Smuzhiyun return 0;
386*4882a593Smuzhiyun
387*4882a593Smuzhiyun if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
388*4882a593Smuzhiyun /* If internal bus, we can always get at everything */
389*4882a593Smuzhiyun fast = TRUE;
390*4882a593Smuzhiyun /* map if does not exist */
391*4882a593Smuzhiyun if (!cores_info->regs[coreidx]) {
392*4882a593Smuzhiyun cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
393*4882a593Smuzhiyun SI_CORE_SIZE);
394*4882a593Smuzhiyun ASSERT(GOODREGS(cores_info->regs[coreidx]));
395*4882a593Smuzhiyun }
396*4882a593Smuzhiyun r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
397*4882a593Smuzhiyun } else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
398*4882a593Smuzhiyun /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
399*4882a593Smuzhiyun
400*4882a593Smuzhiyun if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
401*4882a593Smuzhiyun /* Chipc registers are mapped at 12KB */
402*4882a593Smuzhiyun
403*4882a593Smuzhiyun fast = TRUE;
404*4882a593Smuzhiyun r = (volatile uint32 *)((volatile char *)sii->curmap +
405*4882a593Smuzhiyun PCI_16KB0_CCREGS_OFFSET + regoff);
406*4882a593Smuzhiyun } else if (sii->pub.buscoreidx == coreidx) {
407*4882a593Smuzhiyun /* pci registers are at either in the last 2KB of an 8KB window
408*4882a593Smuzhiyun * or, in pcie and pci rev 13 at 8KB
409*4882a593Smuzhiyun */
410*4882a593Smuzhiyun fast = TRUE;
411*4882a593Smuzhiyun if (SI_FAST(sii))
412*4882a593Smuzhiyun r = (volatile uint32 *)((volatile char *)sii->curmap +
413*4882a593Smuzhiyun PCI_16KB0_PCIREGS_OFFSET + regoff);
414*4882a593Smuzhiyun else
415*4882a593Smuzhiyun r = (volatile uint32 *)((volatile char *)sii->curmap +
416*4882a593Smuzhiyun ((regoff >= SBCONFIGOFF) ?
417*4882a593Smuzhiyun PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
418*4882a593Smuzhiyun regoff);
419*4882a593Smuzhiyun }
420*4882a593Smuzhiyun }
421*4882a593Smuzhiyun
422*4882a593Smuzhiyun if (!fast) {
423*4882a593Smuzhiyun INTR_OFF(sii, intr_val);
424*4882a593Smuzhiyun
425*4882a593Smuzhiyun /* save current core index */
426*4882a593Smuzhiyun origidx = si_coreidx(&sii->pub);
427*4882a593Smuzhiyun
428*4882a593Smuzhiyun /* switch core */
429*4882a593Smuzhiyun r = (volatile uint32*) ((volatile uchar*)sb_setcoreidx(&sii->pub, coreidx) +
430*4882a593Smuzhiyun regoff);
431*4882a593Smuzhiyun }
432*4882a593Smuzhiyun ASSERT(r != NULL);
433*4882a593Smuzhiyun
434*4882a593Smuzhiyun /* mask and set */
435*4882a593Smuzhiyun if (mask || val) {
436*4882a593Smuzhiyun if (regoff >= SBCONFIGOFF) {
437*4882a593Smuzhiyun w = (R_SBREG(sii, r) & ~mask) | val;
438*4882a593Smuzhiyun W_SBREG(sii, r, w);
439*4882a593Smuzhiyun } else {
440*4882a593Smuzhiyun w = (R_REG(sii->osh, r) & ~mask) | val;
441*4882a593Smuzhiyun W_REG(sii->osh, r, w);
442*4882a593Smuzhiyun }
443*4882a593Smuzhiyun }
444*4882a593Smuzhiyun
445*4882a593Smuzhiyun /* readback */
446*4882a593Smuzhiyun if (regoff >= SBCONFIGOFF)
447*4882a593Smuzhiyun w = R_SBREG(sii, r);
448*4882a593Smuzhiyun else {
449*4882a593Smuzhiyun w = R_REG(sii->osh, r);
450*4882a593Smuzhiyun }
451*4882a593Smuzhiyun
452*4882a593Smuzhiyun if (!fast) {
453*4882a593Smuzhiyun /* restore core index */
454*4882a593Smuzhiyun if (origidx != coreidx)
455*4882a593Smuzhiyun sb_setcoreidx(&sii->pub, origidx);
456*4882a593Smuzhiyun
457*4882a593Smuzhiyun INTR_RESTORE(sii, intr_val);
458*4882a593Smuzhiyun }
459*4882a593Smuzhiyun
460*4882a593Smuzhiyun return (w);
461*4882a593Smuzhiyun }
462*4882a593Smuzhiyun
463*4882a593Smuzhiyun /*
464*4882a593Smuzhiyun * If there is no need for fiddling with interrupts or core switches (typically silicon
465*4882a593Smuzhiyun * back plane registers, pci registers and chipcommon registers), this function
466*4882a593Smuzhiyun * returns the register offset on this core to a mapped address. This address can
467*4882a593Smuzhiyun * be used for W_REG/R_REG directly.
468*4882a593Smuzhiyun *
469*4882a593Smuzhiyun * For accessing registers that would need a core switch, this function will return
470*4882a593Smuzhiyun * NULL.
471*4882a593Smuzhiyun */
472*4882a593Smuzhiyun volatile uint32 *
sb_corereg_addr(si_t * sih,uint coreidx,uint regoff)473*4882a593Smuzhiyun sb_corereg_addr(si_t *sih, uint coreidx, uint regoff)
474*4882a593Smuzhiyun {
475*4882a593Smuzhiyun volatile uint32 *r = NULL;
476*4882a593Smuzhiyun bool fast = FALSE;
477*4882a593Smuzhiyun si_info_t *sii = SI_INFO(sih);
478*4882a593Smuzhiyun si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
479*4882a593Smuzhiyun
480*4882a593Smuzhiyun ASSERT(GOODIDX(coreidx));
481*4882a593Smuzhiyun ASSERT(regoff < SI_CORE_SIZE);
482*4882a593Smuzhiyun
483*4882a593Smuzhiyun if (coreidx >= SI_MAXCORES)
484*4882a593Smuzhiyun return 0;
485*4882a593Smuzhiyun
486*4882a593Smuzhiyun if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
487*4882a593Smuzhiyun /* If internal bus, we can always get at everything */
488*4882a593Smuzhiyun fast = TRUE;
489*4882a593Smuzhiyun /* map if does not exist */
490*4882a593Smuzhiyun if (!cores_info->regs[coreidx]) {
491*4882a593Smuzhiyun cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
492*4882a593Smuzhiyun SI_CORE_SIZE);
493*4882a593Smuzhiyun ASSERT(GOODREGS(cores_info->regs[coreidx]));
494*4882a593Smuzhiyun }
495*4882a593Smuzhiyun r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
496*4882a593Smuzhiyun } else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
497*4882a593Smuzhiyun /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
498*4882a593Smuzhiyun
499*4882a593Smuzhiyun if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
500*4882a593Smuzhiyun /* Chipc registers are mapped at 12KB */
501*4882a593Smuzhiyun
502*4882a593Smuzhiyun fast = TRUE;
503*4882a593Smuzhiyun r = (volatile uint32 *)((volatile char *)sii->curmap +
504*4882a593Smuzhiyun PCI_16KB0_CCREGS_OFFSET + regoff);
505*4882a593Smuzhiyun } else if (sii->pub.buscoreidx == coreidx) {
506*4882a593Smuzhiyun /* pci registers are at either in the last 2KB of an 8KB window
507*4882a593Smuzhiyun * or, in pcie and pci rev 13 at 8KB
508*4882a593Smuzhiyun */
509*4882a593Smuzhiyun fast = TRUE;
510*4882a593Smuzhiyun if (SI_FAST(sii))
511*4882a593Smuzhiyun r = (volatile uint32 *)((volatile char *)sii->curmap +
512*4882a593Smuzhiyun PCI_16KB0_PCIREGS_OFFSET + regoff);
513*4882a593Smuzhiyun else
514*4882a593Smuzhiyun r = (volatile uint32 *)((volatile char *)sii->curmap +
515*4882a593Smuzhiyun ((regoff >= SBCONFIGOFF) ?
516*4882a593Smuzhiyun PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
517*4882a593Smuzhiyun regoff);
518*4882a593Smuzhiyun }
519*4882a593Smuzhiyun }
520*4882a593Smuzhiyun
521*4882a593Smuzhiyun if (!fast)
522*4882a593Smuzhiyun return 0;
523*4882a593Smuzhiyun
524*4882a593Smuzhiyun return (r);
525*4882a593Smuzhiyun }
526*4882a593Smuzhiyun
527*4882a593Smuzhiyun /* Scan the enumeration space to find all cores starting from the given
528*4882a593Smuzhiyun * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba'
529*4882a593Smuzhiyun * is the default core address at chip POR time and 'regs' is the virtual
530*4882a593Smuzhiyun * address that the default core is mapped at. 'ncores' is the number of
531*4882a593Smuzhiyun * cores expected on bus 'sbba'. It returns the total number of cores
532*4882a593Smuzhiyun * starting from bus 'sbba', inclusive.
533*4882a593Smuzhiyun */
534*4882a593Smuzhiyun #define SB_MAXBUSES 2
535*4882a593Smuzhiyun static uint
_sb_scan(si_info_t * sii,uint32 sba,volatile void * regs,uint bus,uint32 sbba,uint numcores,uint devid)536*4882a593Smuzhiyun _sb_scan(si_info_t *sii, uint32 sba, volatile void *regs, uint bus,
537*4882a593Smuzhiyun uint32 sbba, uint numcores, uint devid)
538*4882a593Smuzhiyun {
539*4882a593Smuzhiyun uint next;
540*4882a593Smuzhiyun uint ncc = 0;
541*4882a593Smuzhiyun uint i;
542*4882a593Smuzhiyun si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
543*4882a593Smuzhiyun
544*4882a593Smuzhiyun if (bus >= SB_MAXBUSES) {
545*4882a593Smuzhiyun SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba, bus));
546*4882a593Smuzhiyun return 0;
547*4882a593Smuzhiyun }
548*4882a593Smuzhiyun SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba, numcores));
549*4882a593Smuzhiyun
550*4882a593Smuzhiyun /* Scan all cores on the bus starting from core 0.
551*4882a593Smuzhiyun * Core addresses must be contiguous on each bus.
552*4882a593Smuzhiyun */
553*4882a593Smuzhiyun for (i = 0, next = sii->numcores; i < numcores && next < SB_BUS_MAXCORES; i++, next++) {
554*4882a593Smuzhiyun cores_info->coresba[next] = sbba + (i * SI_CORE_SIZE);
555*4882a593Smuzhiyun
556*4882a593Smuzhiyun /* keep and reuse the initial register mapping */
557*4882a593Smuzhiyun if ((BUSTYPE(sii->pub.bustype) == SI_BUS) && (cores_info->coresba[next] == sba)) {
558*4882a593Smuzhiyun SI_VMSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs, next));
559*4882a593Smuzhiyun cores_info->regs[next] = regs;
560*4882a593Smuzhiyun }
561*4882a593Smuzhiyun
562*4882a593Smuzhiyun /* change core to 'next' and read its coreid */
563*4882a593Smuzhiyun sii->curmap = _sb_setcoreidx(sii, next);
564*4882a593Smuzhiyun sii->curidx = next;
565*4882a593Smuzhiyun
566*4882a593Smuzhiyun cores_info->coreid[next] = sb_coreid(&sii->pub);
567*4882a593Smuzhiyun
568*4882a593Smuzhiyun /* core specific processing... */
569*4882a593Smuzhiyun /* chipc provides # cores */
570*4882a593Smuzhiyun if (cores_info->coreid[next] == CC_CORE_ID) {
571*4882a593Smuzhiyun chipcregs_t *cc = (chipcregs_t *)sii->curmap;
572*4882a593Smuzhiyun uint32 ccrev = sb_corerev(&sii->pub);
573*4882a593Smuzhiyun
574*4882a593Smuzhiyun /* determine numcores - this is the total # cores in the chip */
575*4882a593Smuzhiyun if (((ccrev == 4) || (ccrev >= 6))) {
576*4882a593Smuzhiyun ASSERT(cc);
577*4882a593Smuzhiyun numcores = (R_REG(sii->osh, &cc->chipid) & CID_CC_MASK) >>
578*4882a593Smuzhiyun CID_CC_SHIFT;
579*4882a593Smuzhiyun } else {
580*4882a593Smuzhiyun /* Older chips */
581*4882a593Smuzhiyun uint chip = CHIPID(sii->pub.chip);
582*4882a593Smuzhiyun
583*4882a593Smuzhiyun if (chip == BCM4704_CHIP_ID)
584*4882a593Smuzhiyun numcores = 9;
585*4882a593Smuzhiyun else if (chip == BCM5365_CHIP_ID)
586*4882a593Smuzhiyun numcores = 7;
587*4882a593Smuzhiyun else {
588*4882a593Smuzhiyun SI_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n",
589*4882a593Smuzhiyun chip));
590*4882a593Smuzhiyun ASSERT(0);
591*4882a593Smuzhiyun numcores = 1;
592*4882a593Smuzhiyun }
593*4882a593Smuzhiyun }
594*4882a593Smuzhiyun SI_VMSG(("_sb_scan: there are %u cores in the chip %s\n", numcores,
595*4882a593Smuzhiyun sii->pub.issim ? "QT" : ""));
596*4882a593Smuzhiyun }
597*4882a593Smuzhiyun /* scan bridged SB(s) and add results to the end of the list */
598*4882a593Smuzhiyun else if (cores_info->coreid[next] == OCP_CORE_ID) {
599*4882a593Smuzhiyun sbconfig_t *sb = REGS2SB(sii->curmap);
600*4882a593Smuzhiyun uint32 nsbba = R_SBREG(sii, &sb->sbadmatch1);
601*4882a593Smuzhiyun uint nsbcc;
602*4882a593Smuzhiyun
603*4882a593Smuzhiyun sii->numcores = next + 1;
604*4882a593Smuzhiyun
605*4882a593Smuzhiyun if ((nsbba & 0xfff00000) != si_enum_base(devid))
606*4882a593Smuzhiyun continue;
607*4882a593Smuzhiyun nsbba &= 0xfffff000;
608*4882a593Smuzhiyun if (_sb_coreidx(sii, nsbba) != BADIDX)
609*4882a593Smuzhiyun continue;
610*4882a593Smuzhiyun
611*4882a593Smuzhiyun nsbcc = (R_SBREG(sii, &sb->sbtmstatehigh) & 0x000f0000) >> 16;
612*4882a593Smuzhiyun nsbcc = _sb_scan(sii, sba, regs, bus + 1, nsbba, nsbcc, devid);
613*4882a593Smuzhiyun if (sbba == si_enum_base(devid))
614*4882a593Smuzhiyun numcores -= nsbcc;
615*4882a593Smuzhiyun ncc += nsbcc;
616*4882a593Smuzhiyun }
617*4882a593Smuzhiyun }
618*4882a593Smuzhiyun
619*4882a593Smuzhiyun SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i, sbba));
620*4882a593Smuzhiyun
621*4882a593Smuzhiyun sii->numcores = i + ncc;
622*4882a593Smuzhiyun return sii->numcores;
623*4882a593Smuzhiyun }
624*4882a593Smuzhiyun
625*4882a593Smuzhiyun /* scan the sb enumerated space to identify all cores */
626*4882a593Smuzhiyun void
sb_scan(si_t * sih,volatile void * regs,uint devid)627*4882a593Smuzhiyun sb_scan(si_t *sih, volatile void *regs, uint devid)
628*4882a593Smuzhiyun {
629*4882a593Smuzhiyun uint32 origsba;
630*4882a593Smuzhiyun sbconfig_t *sb;
631*4882a593Smuzhiyun si_info_t *sii = SI_INFO(sih);
632*4882a593Smuzhiyun BCM_REFERENCE(devid);
633*4882a593Smuzhiyun
634*4882a593Smuzhiyun sb = REGS2SB(sii->curmap);
635*4882a593Smuzhiyun
636*4882a593Smuzhiyun sii->pub.socirev = (R_SBREG(sii, &sb->sbidlow) & SBIDL_RV_MASK) >> SBIDL_RV_SHIFT;
637*4882a593Smuzhiyun
638*4882a593Smuzhiyun /* Save the current core info and validate it later till we know
639*4882a593Smuzhiyun * for sure what is good and what is bad.
640*4882a593Smuzhiyun */
641*4882a593Smuzhiyun origsba = _sb_coresba(sii);
642*4882a593Smuzhiyun
643*4882a593Smuzhiyun /* scan all SB(s) starting from SI_ENUM_BASE_DEFAULT */
644*4882a593Smuzhiyun sii->numcores = _sb_scan(sii, origsba, regs, 0, si_enum_base(devid), 1, devid);
645*4882a593Smuzhiyun }
646*4882a593Smuzhiyun
647*4882a593Smuzhiyun /*
648*4882a593Smuzhiyun * This function changes logical "focus" to the indicated core;
649*4882a593Smuzhiyun * must be called with interrupts off.
650*4882a593Smuzhiyun * Moreover, callers should keep interrupts off during switching out of and back to d11 core
651*4882a593Smuzhiyun */
652*4882a593Smuzhiyun volatile void *
sb_setcoreidx(si_t * sih,uint coreidx)653*4882a593Smuzhiyun sb_setcoreidx(si_t *sih, uint coreidx)
654*4882a593Smuzhiyun {
655*4882a593Smuzhiyun si_info_t *sii = SI_INFO(sih);
656*4882a593Smuzhiyun
657*4882a593Smuzhiyun if (coreidx >= sii->numcores)
658*4882a593Smuzhiyun return (NULL);
659*4882a593Smuzhiyun
660*4882a593Smuzhiyun /*
661*4882a593Smuzhiyun * If the user has provided an interrupt mask enabled function,
662*4882a593Smuzhiyun * then assert interrupts are disabled before switching the core.
663*4882a593Smuzhiyun */
664*4882a593Smuzhiyun ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
665*4882a593Smuzhiyun
666*4882a593Smuzhiyun sii->curmap = _sb_setcoreidx(sii, coreidx);
667*4882a593Smuzhiyun sii->curidx = coreidx;
668*4882a593Smuzhiyun
669*4882a593Smuzhiyun return (sii->curmap);
670*4882a593Smuzhiyun }
671*4882a593Smuzhiyun
672*4882a593Smuzhiyun /* This function changes the logical "focus" to the indicated core.
673*4882a593Smuzhiyun * Return the current core's virtual address.
674*4882a593Smuzhiyun */
static volatile void *
_sb_setcoreidx(si_info_t *sii, uint coreidx)
{
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
	uint32 sbaddr = cores_info->coresba[coreidx];	/* backplane address of the target core */
	volatile void *regs;

	switch (BUSTYPE(sii->pub.bustype)) {
	case SI_BUS:
		/* map new one */
		/* Lazily create and cache a permanent mapping per core. */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = REG_MAP(sbaddr, SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		regs = cores_info->regs[coreidx];
		break;

	case PCI_BUS:
		/* point bar0 window */
		/* Single shared window: retarget BAR0 at the core; the virtual
		 * address (curmap) stays the same.
		 */
		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, sbaddr);
		regs = sii->curmap;
		break;

	case PCMCIA_BUS: {
		/* Program the 28-bit window address one attribute byte at a time:
		 * ADDR0 = bits 15:12, ADDR1 = bits 23:16, ADDR2 = bits 31:24.
		 */
		uint8 tmp = (sbaddr >> 12) & 0x0f;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
		tmp = (sbaddr >> 16) & 0xff;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
		tmp = (sbaddr >> 24) & 0xff;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
		regs = sii->curmap;
		break;
	}
#ifdef BCMSDIO
	case SPI_BUS:
	case SDIO_BUS:
		/* map new one */
		/* On SDIO/SPI the backplane address itself is used as the
		 * "register pointer"; bus accessors translate it per access.
		 */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = (void *)(uintptr)sbaddr;
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		regs = cores_info->regs[coreidx];
		break;
#endif /* BCMSDIO */

	default:
		ASSERT(0);
		regs = NULL;
		break;
	}

	return regs;
}
728*4882a593Smuzhiyun
729*4882a593Smuzhiyun /* Return the address of sbadmatch0/1/2/3 register */
730*4882a593Smuzhiyun static volatile uint32 *
sb_admatch(si_info_t * sii,uint asidx)731*4882a593Smuzhiyun sb_admatch(si_info_t *sii, uint asidx)
732*4882a593Smuzhiyun {
733*4882a593Smuzhiyun sbconfig_t *sb;
734*4882a593Smuzhiyun volatile uint32 *addrm;
735*4882a593Smuzhiyun
736*4882a593Smuzhiyun sb = REGS2SB(sii->curmap);
737*4882a593Smuzhiyun
738*4882a593Smuzhiyun switch (asidx) {
739*4882a593Smuzhiyun case 0:
740*4882a593Smuzhiyun addrm = &sb->sbadmatch0;
741*4882a593Smuzhiyun break;
742*4882a593Smuzhiyun
743*4882a593Smuzhiyun case 1:
744*4882a593Smuzhiyun addrm = &sb->sbadmatch1;
745*4882a593Smuzhiyun break;
746*4882a593Smuzhiyun
747*4882a593Smuzhiyun case 2:
748*4882a593Smuzhiyun addrm = &sb->sbadmatch2;
749*4882a593Smuzhiyun break;
750*4882a593Smuzhiyun
751*4882a593Smuzhiyun case 3:
752*4882a593Smuzhiyun addrm = &sb->sbadmatch3;
753*4882a593Smuzhiyun break;
754*4882a593Smuzhiyun
755*4882a593Smuzhiyun default:
756*4882a593Smuzhiyun SI_ERROR(("%s: Address space index (%d) out of range\n", __FUNCTION__, asidx));
757*4882a593Smuzhiyun return 0;
758*4882a593Smuzhiyun }
759*4882a593Smuzhiyun
760*4882a593Smuzhiyun return (addrm);
761*4882a593Smuzhiyun }
762*4882a593Smuzhiyun
763*4882a593Smuzhiyun /* Return the number of address spaces in current core */
764*4882a593Smuzhiyun int
sb_numaddrspaces(si_t * sih)765*4882a593Smuzhiyun sb_numaddrspaces(si_t *sih)
766*4882a593Smuzhiyun {
767*4882a593Smuzhiyun si_info_t *sii;
768*4882a593Smuzhiyun sbconfig_t *sb;
769*4882a593Smuzhiyun
770*4882a593Smuzhiyun sii = SI_INFO(sih);
771*4882a593Smuzhiyun sb = REGS2SB(sii->curmap);
772*4882a593Smuzhiyun
773*4882a593Smuzhiyun /* + 1 because of enumeration space */
774*4882a593Smuzhiyun return ((R_SBREG(sii, &sb->sbidlow) & SBIDL_AR_MASK) >> SBIDL_AR_SHIFT) + 1;
775*4882a593Smuzhiyun }
776*4882a593Smuzhiyun
777*4882a593Smuzhiyun /* Return the address of the nth address space in the current core */
778*4882a593Smuzhiyun uint32
sb_addrspace(si_t * sih,uint asidx)779*4882a593Smuzhiyun sb_addrspace(si_t *sih, uint asidx)
780*4882a593Smuzhiyun {
781*4882a593Smuzhiyun si_info_t *sii;
782*4882a593Smuzhiyun
783*4882a593Smuzhiyun sii = SI_INFO(sih);
784*4882a593Smuzhiyun
785*4882a593Smuzhiyun return (sb_base(R_SBREG(sii, sb_admatch(sii, asidx))));
786*4882a593Smuzhiyun }
787*4882a593Smuzhiyun
788*4882a593Smuzhiyun /* Return the size of the nth address space in the current core */
789*4882a593Smuzhiyun uint32
sb_addrspacesize(si_t * sih,uint asidx)790*4882a593Smuzhiyun sb_addrspacesize(si_t *sih, uint asidx)
791*4882a593Smuzhiyun {
792*4882a593Smuzhiyun si_info_t *sii;
793*4882a593Smuzhiyun
794*4882a593Smuzhiyun sii = SI_INFO(sih);
795*4882a593Smuzhiyun
796*4882a593Smuzhiyun return (sb_size(R_SBREG(sii, sb_admatch(sii, asidx))));
797*4882a593Smuzhiyun }
798*4882a593Smuzhiyun
799*4882a593Smuzhiyun /* do buffered registers update */
/* do buffered registers update */
void
sb_commit(si_t *sih)
{
	si_info_t *sii = SI_INFO(sih);
	uint origidx;		/* core index to restore when done */
	uint intr_val = 0;	/* saved interrupt state for INTR_OFF/RESTORE */

	origidx = sii->curidx;
	ASSERT(GOODIDX(origidx));

	/* Hold interrupts off across the core switch (see sb_setcoreidx contract). */
	INTR_OFF(sii, intr_val);

	/* switch over to chipcommon core if there is one, else use pci */
	if (sii->pub.ccrev != NOREV) {
		chipcregs_t *ccregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
		ASSERT(ccregs != NULL);

		/* do the buffer registers update */
		/* Broadcast address SB_COMMIT with data 0 flushes buffered
		 * register writes across the backplane.
		 */
		W_REG(sii->osh, &ccregs->broadcastaddress, SB_COMMIT);
		W_REG(sii->osh, &ccregs->broadcastdata, 0x0);
	} else
		ASSERT(0);

	/* restore core index */
	sb_setcoreidx(sih, origidx);
	INTR_RESTORE(sii, intr_val);
}
827*4882a593Smuzhiyun
/* Put the current core into reset, following the Sonics target-disable
 * sequence: reject new transactions, wait for in-flight ones to drain,
 * then assert reset.  'bits' are core-specific sbtmstatelow flags to
 * leave set while disabled.
 */
void
sb_core_disable(si_t *sih, uint32 bits)
{
	si_info_t *sii;
	volatile uint32 dummy;	/* read-back to flush posted writes */
	sbconfig_t *sb;

	sii = SI_INFO(sih);

	ASSERT(GOODREGS(sii->curmap));
	sb = REGS2SB(sii->curmap);

	/* if core is already in reset, just return */
	if (R_SBREG(sii, &sb->sbtmstatelow) & SBTML_RESET)
		return;

	/* if clocks are not enabled, put into reset and return */
	if ((R_SBREG(sii, &sb->sbtmstatelow) & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) == 0)
		goto disable;

	/* set target reject and spin until busy is clear (preserve core-specific bits) */
	OR_SBREG(sii, &sb->sbtmstatelow, SBTML_REJ);
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);
	SPINWAIT((R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000);
	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY)
		SI_ERROR(("%s: target state still busy\n", __FUNCTION__));

	/* If the core is also an initiator, reject its outgoing transactions
	 * too and wait for them to drain.
	 */
	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) {
		OR_SBREG(sii, &sb->sbimstate, SBIM_RJ);
		dummy = R_SBREG(sii, &sb->sbimstate);
		BCM_REFERENCE(dummy);
		OSL_DELAY(1);
		SPINWAIT((R_SBREG(sii, &sb->sbimstate) & SBIM_BY), 100000);
	}

	/* set reset and reject while enabling the clocks */
	W_SBREG(sii, &sb->sbtmstatelow,
	        (((bits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
	         SBTML_REJ | SBTML_RESET));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(10);

	/* don't forget to clear the initiator reject bit */
	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT)
		AND_SBREG(sii, &sb->sbimstate, ~SBIM_RJ);

disable:
	/* leave reset and reject asserted */
	W_SBREG(sii, &sb->sbtmstatelow, ((bits << SBTML_SICF_SHIFT) | SBTML_REJ | SBTML_RESET));
	OSL_DELAY(1);
}
882*4882a593Smuzhiyun
883*4882a593Smuzhiyun /* reset and re-enable a core
884*4882a593Smuzhiyun * inputs:
885*4882a593Smuzhiyun * bits - core specific bits that are set during and after reset sequence
886*4882a593Smuzhiyun * resetbits - core specific bits that are set only during reset sequence
887*4882a593Smuzhiyun */
void
sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
{
	si_info_t *sii;
	sbconfig_t *sb;
	volatile uint32 dummy;	/* read-back to flush posted writes */

	sii = SI_INFO(sih);
	ASSERT(GOODREGS(sii->curmap));
	sb = REGS2SB(sii->curmap);

	/*
	 * Must do the disable sequence first to work for arbitrary current core state.
	 */
	sb_core_disable(sih, (bits | resetbits));

	/*
	 * Now do the initialization sequence.
	 */

	/* set reset while enabling the clock and forcing them on throughout the core */
	W_SBREG(sii, &sb->sbtmstatelow,
	        (((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
	         SBTML_RESET));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);

	/* Clear any latched serror / initiator error state left over from
	 * before the reset.
	 */
	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_SERR) {
		W_SBREG(sii, &sb->sbtmstatehigh, 0);
	}
	if ((dummy = R_SBREG(sii, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) {
		AND_SBREG(sii, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
	}

	/* clear reset and allow it to propagate throughout the core */
	W_SBREG(sii, &sb->sbtmstatelow,
	        ((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);

	/* leave clock enabled */
	/* Drop FGC (forced gated clocks) and the transient resetbits; only
	 * 'bits' plus the clock enable remain set after this call.
	 */
	W_SBREG(sii, &sb->sbtmstatelow, ((bits | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);
}
936*4882a593Smuzhiyun
937*4882a593Smuzhiyun /*
938*4882a593Smuzhiyun * Set the initiator timeout for the "master core".
939*4882a593Smuzhiyun * The master core is defined to be the core in control
940*4882a593Smuzhiyun * of the chip and so it issues accesses to non-memory
941*4882a593Smuzhiyun * locations (Because of dma *any* core can access memeory).
942*4882a593Smuzhiyun *
943*4882a593Smuzhiyun * The routine uses the bus to decide who is the master:
944*4882a593Smuzhiyun * SI_BUS => mips
945*4882a593Smuzhiyun * JTAG_BUS => chipc
946*4882a593Smuzhiyun * PCI_BUS => pci or pcie
947*4882a593Smuzhiyun * PCMCIA_BUS => pcmcia
948*4882a593Smuzhiyun * SDIO_BUS => pcmcia
949*4882a593Smuzhiyun *
950*4882a593Smuzhiyun * This routine exists so callers can disable initiator
951*4882a593Smuzhiyun * timeouts so accesses to very slow devices like otp
952*4882a593Smuzhiyun * won't cause an abort. The routine allows arbitrary
953*4882a593Smuzhiyun * settings of the service and request timeouts, though.
954*4882a593Smuzhiyun *
955*4882a593Smuzhiyun * Returns the timeout state before changing it or -1
956*4882a593Smuzhiyun * on error.
957*4882a593Smuzhiyun */
958*4882a593Smuzhiyun
#define TO_MASK (SBIMCL_RTO_MASK | SBIMCL_STO_MASK)

uint32
sb_set_initiator_to(si_t *sih, uint32 to, uint idx)
{
	si_info_t *sii = SI_INFO(sih);
	uint origidx;
	uint intr_val = 0;
	uint32 tmp, ret = 0xffffffff;	/* 0xffffffff == error sentinel */
	sbconfig_t *sb;

	/* Reject timeout values with bits outside the request/service fields. */
	if ((to & ~TO_MASK) != 0)
		return ret;

	/* Figure out the master core */
	if (idx == BADIDX) {
		switch (BUSTYPE(sii->pub.bustype)) {
		case PCI_BUS:
			idx = sii->pub.buscoreidx;
			break;
		case JTAG_BUS:
			idx = SI_CC_IDX;
			break;
		case PCMCIA_BUS:
#ifdef BCMSDIO
		case SDIO_BUS:
#endif // endif
			idx = si_findcoreidx(sih, PCMCIA_CORE_ID, 0);
			break;
		case SI_BUS:
			idx = si_findcoreidx(sih, MIPS33_CORE_ID, 0);
			break;
		default:
			ASSERT(0);
		}
		/* si_findcoreidx may itself return BADIDX if the core is absent. */
		if (idx == BADIDX)
			return ret;
	}

	/* Switch focus to the master core with interrupts off (required by
	 * sb_setcoreidx), read-modify-write the timeout fields, then commit
	 * and restore the original core.
	 */
	INTR_OFF(sii, intr_val);
	origidx = si_coreidx(sih);

	sb = REGS2SB(sb_setcoreidx(sih, idx));

	tmp = R_SBREG(sii, &sb->sbimconfiglow);
	ret = tmp & TO_MASK;	/* previous timeout setting is the return value */
	W_SBREG(sii, &sb->sbimconfiglow, (tmp & ~TO_MASK) | to);

	sb_commit(sih);
	sb_setcoreidx(sih, origidx);
	INTR_RESTORE(sii, intr_val);
	return ret;
}
1012*4882a593Smuzhiyun
1013*4882a593Smuzhiyun uint32
sb_base(uint32 admatch)1014*4882a593Smuzhiyun sb_base(uint32 admatch)
1015*4882a593Smuzhiyun {
1016*4882a593Smuzhiyun uint32 base;
1017*4882a593Smuzhiyun uint type;
1018*4882a593Smuzhiyun
1019*4882a593Smuzhiyun type = admatch & SBAM_TYPE_MASK;
1020*4882a593Smuzhiyun ASSERT(type < 3);
1021*4882a593Smuzhiyun
1022*4882a593Smuzhiyun base = 0;
1023*4882a593Smuzhiyun
1024*4882a593Smuzhiyun if (type == 0) {
1025*4882a593Smuzhiyun base = admatch & SBAM_BASE0_MASK;
1026*4882a593Smuzhiyun } else if (type == 1) {
1027*4882a593Smuzhiyun ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
1028*4882a593Smuzhiyun base = admatch & SBAM_BASE1_MASK;
1029*4882a593Smuzhiyun } else if (type == 2) {
1030*4882a593Smuzhiyun ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
1031*4882a593Smuzhiyun base = admatch & SBAM_BASE2_MASK;
1032*4882a593Smuzhiyun }
1033*4882a593Smuzhiyun
1034*4882a593Smuzhiyun return (base);
1035*4882a593Smuzhiyun }
1036*4882a593Smuzhiyun
1037*4882a593Smuzhiyun uint32
sb_size(uint32 admatch)1038*4882a593Smuzhiyun sb_size(uint32 admatch)
1039*4882a593Smuzhiyun {
1040*4882a593Smuzhiyun uint32 size;
1041*4882a593Smuzhiyun uint type;
1042*4882a593Smuzhiyun
1043*4882a593Smuzhiyun type = admatch & SBAM_TYPE_MASK;
1044*4882a593Smuzhiyun ASSERT(type < 3);
1045*4882a593Smuzhiyun
1046*4882a593Smuzhiyun size = 0;
1047*4882a593Smuzhiyun
1048*4882a593Smuzhiyun if (type == 0) {
1049*4882a593Smuzhiyun size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1);
1050*4882a593Smuzhiyun } else if (type == 1) {
1051*4882a593Smuzhiyun ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
1052*4882a593Smuzhiyun size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1);
1053*4882a593Smuzhiyun } else if (type == 2) {
1054*4882a593Smuzhiyun ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */
1055*4882a593Smuzhiyun size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1);
1056*4882a593Smuzhiyun }
1057*4882a593Smuzhiyun
1058*4882a593Smuzhiyun return (size);
1059*4882a593Smuzhiyun }
1060*4882a593Smuzhiyun
1061*4882a593Smuzhiyun #if defined(BCMDBG_PHYDUMP)
1062*4882a593Smuzhiyun /* print interesting sbconfig registers */
/* print interesting sbconfig registers */
void
sb_dumpregs(si_t *sih, struct bcmstrbuf *b)
{
	sbconfig_t *sb;
	uint origidx, i, intr_val = 0;
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

	/* Remember the focused core so it can be restored after the dump. */
	origidx = sii->curidx;

	INTR_OFF(sii, intr_val);

	/* Walk every discovered core, focusing each in turn. */
	for (i = 0; i < sii->numcores; i++) {
		sb = REGS2SB(sb_setcoreidx(sih, i));

		bcm_bprintf(b, "core 0x%x: \n", cores_info->coreid[i]);

		/* Error-log registers only exist on Sonics revs newer than 2.2. */
		if (sii->pub.socirev > SONICS_2_2)
			bcm_bprintf(b, "sbimerrlog 0x%x sbimerrloga 0x%x\n",
			            sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOG, 0, 0),
			            sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOGA, 0, 0));

		bcm_bprintf(b, "sbtmstatelow 0x%x sbtmstatehigh 0x%x sbidhigh 0x%x "
		            "sbimstate 0x%x\n sbimconfiglow 0x%x sbimconfighigh 0x%x\n",
		            R_SBREG(sii, &sb->sbtmstatelow), R_SBREG(sii, &sb->sbtmstatehigh),
		            R_SBREG(sii, &sb->sbidhigh), R_SBREG(sii, &sb->sbimstate),
		            R_SBREG(sii, &sb->sbimconfiglow), R_SBREG(sii, &sb->sbimconfighigh));
	}

	sb_setcoreidx(sih, origidx);
	INTR_RESTORE(sii, intr_val);
}
1095*4882a593Smuzhiyun #endif // endif
1096