1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun * Misc utility routines for accessing chip-specific features
3*4882a593Smuzhiyun * of the SiliconBackplane-based Broadcom chips.
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright (C) 2020, Broadcom.
6*4882a593Smuzhiyun *
7*4882a593Smuzhiyun * Unless you and Broadcom execute a separate written software license
8*4882a593Smuzhiyun * agreement governing use of this software, this software is licensed to you
9*4882a593Smuzhiyun * under the terms of the GNU General Public License version 2 (the "GPL"),
10*4882a593Smuzhiyun * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11*4882a593Smuzhiyun * following added to such license:
12*4882a593Smuzhiyun *
13*4882a593Smuzhiyun * As a special exception, the copyright holders of this software give you
14*4882a593Smuzhiyun * permission to link this software with independent modules, and to copy and
15*4882a593Smuzhiyun * distribute the resulting executable under terms of your choice, provided that
16*4882a593Smuzhiyun * you also meet, for each linked independent module, the terms and conditions of
17*4882a593Smuzhiyun * the license of that module. An independent module is a module which is not
18*4882a593Smuzhiyun * derived from this software. The special exception does not apply to any
19*4882a593Smuzhiyun * modifications of the software.
20*4882a593Smuzhiyun *
21*4882a593Smuzhiyun *
22*4882a593Smuzhiyun * <<Broadcom-WL-IPTag/Dual:>>
23*4882a593Smuzhiyun */
24*4882a593Smuzhiyun
25*4882a593Smuzhiyun #include <typedefs.h>
26*4882a593Smuzhiyun #include <bcmdefs.h>
27*4882a593Smuzhiyun #include <osl.h>
28*4882a593Smuzhiyun #include <bcmutils.h>
29*4882a593Smuzhiyun #include <siutils.h>
30*4882a593Smuzhiyun #include <hndsoc.h>
31*4882a593Smuzhiyun #include <sbchipc.h>
32*4882a593Smuzhiyun #include <pcicfg.h>
33*4882a593Smuzhiyun #include <pcie_core.h>
34*4882a593Smuzhiyun
35*4882a593Smuzhiyun #include "siutils_priv.h"
36*4882a593Smuzhiyun #include <bcmdevs.h>
37*4882a593Smuzhiyun
38*4882a593Smuzhiyun #if defined(ETD)
39*4882a593Smuzhiyun #include <etd.h>
40*4882a593Smuzhiyun #endif
41*4882a593Smuzhiyun
/*
 * PMU_DMP()/GCI_DMP(): TRUE when the currently focused core (sii->curidx)
 * is the PMU / GCI core.  On BCMDONGLEHOST builds both collapse to 0 —
 * presumably because the host side keeps no cores_info list; confirm
 * against the dongle-host build before relying on this.
 */
42*4882a593Smuzhiyun #if !defined(BCMDONGLEHOST)
43*4882a593Smuzhiyun #define PMU_DMP() (cores_info->coreid[sii->curidx] == PMU_CORE_ID)
44*4882a593Smuzhiyun #define GCI_DMP() (cores_info->coreid[sii->curidx] == GCI_CORE_ID)
45*4882a593Smuzhiyun #else
46*4882a593Smuzhiyun #define PMU_DMP() (0)
47*4882a593Smuzhiyun #define GCI_DMP() (0)
48*4882a593Smuzhiyun #endif /* !defined(BCMDONGLEHOST) */
49*4882a593Smuzhiyun
50*4882a593Smuzhiyun #if defined(AXI_TIMEOUTS_NIC)
51*4882a593Smuzhiyun static bool ai_get_apb_bridge(const si_t *sih, uint32 coreidx, uint32 *apb_id,
52*4882a593Smuzhiyun uint32 *apb_coreunit);
53*4882a593Smuzhiyun #endif /* AXI_TIMEOUTS_NIC */
54*4882a593Smuzhiyun
55*4882a593Smuzhiyun #if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC)
56*4882a593Smuzhiyun static void ai_reset_axi_to(const si_info_t *sii, aidmp_t *ai);
57*4882a593Smuzhiyun #endif /* defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC) */
58*4882a593Smuzhiyun
59*4882a593Smuzhiyun #ifdef DONGLEBUILD
60*4882a593Smuzhiyun static uint32 ai_get_sizeof_wrapper_offsets_to_dump(void);
61*4882a593Smuzhiyun static uint32 ai_get_wrapper_base_addr(uint32 **offset);
62*4882a593Smuzhiyun #endif /* DONGLEBUILD */
63*4882a593Smuzhiyun
64*4882a593Smuzhiyun /* AXI ID to CoreID + unit mappings */
/* One entry per 4-bit AXI master ID; the table index IS the AXI ID.
 * Table is specific to the 4369 chip (see name); NODEV_CORE_ID marks
 * IDs with no backing core.
 */
65*4882a593Smuzhiyun typedef struct axi_to_coreidx {
66*4882a593Smuzhiyun 	uint coreid;
67*4882a593Smuzhiyun 	uint coreunit;
68*4882a593Smuzhiyun } axi_to_coreidx_t;
69*4882a593Smuzhiyun
70*4882a593Smuzhiyun static const axi_to_coreidx_t axi2coreidx_4369[] = {
71*4882a593Smuzhiyun 	{CC_CORE_ID, 0},      /* 00 Chipcommon */
72*4882a593Smuzhiyun 	{PCIE2_CORE_ID, 0},   /* 01 PCIe */
73*4882a593Smuzhiyun 	{D11_CORE_ID, 0},     /* 02 D11 Main */
74*4882a593Smuzhiyun 	{ARMCR4_CORE_ID, 0},  /* 03 ARM */
75*4882a593Smuzhiyun 	{BT_CORE_ID, 0},      /* 04 BT AHB */
76*4882a593Smuzhiyun 	{D11_CORE_ID, 1},     /* 05 D11 Aux */
77*4882a593Smuzhiyun 	{D11_CORE_ID, 0},     /* 06 D11 Main l1 */
78*4882a593Smuzhiyun 	{D11_CORE_ID, 1},     /* 07 D11 Aux l1 */
79*4882a593Smuzhiyun 	{D11_CORE_ID, 0},     /* 08 D11 Main l2 */
80*4882a593Smuzhiyun 	{D11_CORE_ID, 1},     /* 09 D11 Aux l2 */
81*4882a593Smuzhiyun 	{NODEV_CORE_ID, 0},   /* 10 M2M DMA */
82*4882a593Smuzhiyun 	{NODEV_CORE_ID, 0},   /* 11 unused */
83*4882a593Smuzhiyun 	{NODEV_CORE_ID, 0},   /* 12 unused */
84*4882a593Smuzhiyun 	{NODEV_CORE_ID, 0},   /* 13 unused */
85*4882a593Smuzhiyun 	{NODEV_CORE_ID, 0},   /* 14 unused */
86*4882a593Smuzhiyun 	{NODEV_CORE_ID, 0}    /* 15 unused */
87*4882a593Smuzhiyun };
88*4882a593Smuzhiyun
89*4882a593Smuzhiyun /* EROM parsing */
90*4882a593Smuzhiyun
91*4882a593Smuzhiyun static uint32
get_erom_ent(const si_t * sih,uint32 ** eromptr,uint32 mask,uint32 match)92*4882a593Smuzhiyun get_erom_ent(const si_t *sih, uint32 **eromptr, uint32 mask, uint32 match)
93*4882a593Smuzhiyun {
94*4882a593Smuzhiyun uint32 ent;
95*4882a593Smuzhiyun uint inv = 0, nom = 0;
96*4882a593Smuzhiyun uint32 size = 0;
97*4882a593Smuzhiyun
98*4882a593Smuzhiyun while (TRUE) {
99*4882a593Smuzhiyun ent = R_REG(SI_INFO(sih)->osh, *eromptr);
100*4882a593Smuzhiyun (*eromptr)++;
101*4882a593Smuzhiyun
102*4882a593Smuzhiyun if (mask == 0)
103*4882a593Smuzhiyun break;
104*4882a593Smuzhiyun
105*4882a593Smuzhiyun if ((ent & ER_VALID) == 0) {
106*4882a593Smuzhiyun inv++;
107*4882a593Smuzhiyun continue;
108*4882a593Smuzhiyun }
109*4882a593Smuzhiyun
110*4882a593Smuzhiyun if (ent == (ER_END | ER_VALID))
111*4882a593Smuzhiyun break;
112*4882a593Smuzhiyun
113*4882a593Smuzhiyun if ((ent & mask) == match)
114*4882a593Smuzhiyun break;
115*4882a593Smuzhiyun
116*4882a593Smuzhiyun /* escape condition related EROM size if it has invalid values */
117*4882a593Smuzhiyun size += sizeof(*eromptr);
118*4882a593Smuzhiyun if (size >= ER_SZ_MAX) {
119*4882a593Smuzhiyun SI_ERROR(("Failed to find end of EROM marker\n"));
120*4882a593Smuzhiyun break;
121*4882a593Smuzhiyun }
122*4882a593Smuzhiyun
123*4882a593Smuzhiyun nom++;
124*4882a593Smuzhiyun }
125*4882a593Smuzhiyun
126*4882a593Smuzhiyun SI_VMSG(("get_erom_ent: Returning ent 0x%08x\n", ent));
127*4882a593Smuzhiyun if (inv + nom) {
128*4882a593Smuzhiyun SI_VMSG((" after %d invalid and %d non-matching entries\n", inv, nom));
129*4882a593Smuzhiyun }
130*4882a593Smuzhiyun return ent;
131*4882a593Smuzhiyun }
132*4882a593Smuzhiyun
133*4882a593Smuzhiyun static uint32
get_asd(const si_t * sih,uint32 ** eromptr,uint sp,uint ad,uint st,uint32 * addrl,uint32 * addrh,uint32 * sizel,uint32 * sizeh)134*4882a593Smuzhiyun get_asd(const si_t *sih, uint32 **eromptr, uint sp, uint ad, uint st, uint32 *addrl, uint32 *addrh,
135*4882a593Smuzhiyun uint32 *sizel, uint32 *sizeh)
136*4882a593Smuzhiyun {
137*4882a593Smuzhiyun uint32 asd, sz, szd;
138*4882a593Smuzhiyun
139*4882a593Smuzhiyun BCM_REFERENCE(ad);
140*4882a593Smuzhiyun
141*4882a593Smuzhiyun asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
142*4882a593Smuzhiyun if (((asd & ER_TAG1) != ER_ADD) ||
143*4882a593Smuzhiyun (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
144*4882a593Smuzhiyun ((asd & AD_ST_MASK) != st)) {
145*4882a593Smuzhiyun /* This is not what we want, "push" it back */
146*4882a593Smuzhiyun (*eromptr)--;
147*4882a593Smuzhiyun return 0;
148*4882a593Smuzhiyun }
149*4882a593Smuzhiyun *addrl = asd & AD_ADDR_MASK;
150*4882a593Smuzhiyun if (asd & AD_AG32)
151*4882a593Smuzhiyun *addrh = get_erom_ent(sih, eromptr, 0, 0);
152*4882a593Smuzhiyun else
153*4882a593Smuzhiyun *addrh = 0;
154*4882a593Smuzhiyun *sizeh = 0;
155*4882a593Smuzhiyun sz = asd & AD_SZ_MASK;
156*4882a593Smuzhiyun if (sz == AD_SZ_SZD) {
157*4882a593Smuzhiyun szd = get_erom_ent(sih, eromptr, 0, 0);
158*4882a593Smuzhiyun *sizel = szd & SD_SZ_MASK;
159*4882a593Smuzhiyun if (szd & SD_SG32)
160*4882a593Smuzhiyun *sizeh = get_erom_ent(sih, eromptr, 0, 0);
161*4882a593Smuzhiyun } else
162*4882a593Smuzhiyun *sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);
163*4882a593Smuzhiyun
164*4882a593Smuzhiyun SI_VMSG((" SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
165*4882a593Smuzhiyun sp, ad, st, *sizeh, *sizel, *addrh, *addrl));
166*4882a593Smuzhiyun
167*4882a593Smuzhiyun return asd;
168*4882a593Smuzhiyun }
169*4882a593Smuzhiyun
170*4882a593Smuzhiyun /* Parse the enumeration rom to identify all cores */
171*4882a593Smuzhiyun void
BCMATTACHFN(ai_scan)172*4882a593Smuzhiyun BCMATTACHFN(ai_scan)(si_t *sih, void *regs, uint devid)
173*4882a593Smuzhiyun {
174*4882a593Smuzhiyun si_info_t *sii = SI_INFO(sih);
175*4882a593Smuzhiyun si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
176*4882a593Smuzhiyun chipcregs_t *cc = (chipcregs_t *)regs;
177*4882a593Smuzhiyun uint32 erombase, *eromptr, *eromlim;
178*4882a593Smuzhiyun axi_wrapper_t * axi_wrapper = sii->axi_wrapper;
179*4882a593Smuzhiyun
180*4882a593Smuzhiyun BCM_REFERENCE(devid);
181*4882a593Smuzhiyun
182*4882a593Smuzhiyun erombase = R_REG(sii->osh, &cc->eromptr);
183*4882a593Smuzhiyun
184*4882a593Smuzhiyun switch (BUSTYPE(sih->bustype)) {
185*4882a593Smuzhiyun case SI_BUS:
186*4882a593Smuzhiyun eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
187*4882a593Smuzhiyun break;
188*4882a593Smuzhiyun
189*4882a593Smuzhiyun case PCI_BUS:
190*4882a593Smuzhiyun /* Set wrappers address */
191*4882a593Smuzhiyun sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE);
192*4882a593Smuzhiyun
193*4882a593Smuzhiyun /* Now point the window at the erom */
194*4882a593Smuzhiyun OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase);
195*4882a593Smuzhiyun eromptr = regs;
196*4882a593Smuzhiyun break;
197*4882a593Smuzhiyun
198*4882a593Smuzhiyun #ifdef BCMSDIO
199*4882a593Smuzhiyun case SPI_BUS:
200*4882a593Smuzhiyun case SDIO_BUS:
201*4882a593Smuzhiyun eromptr = (uint32 *)(uintptr)erombase;
202*4882a593Smuzhiyun break;
203*4882a593Smuzhiyun #endif /* BCMSDIO */
204*4882a593Smuzhiyun
205*4882a593Smuzhiyun default:
206*4882a593Smuzhiyun SI_ERROR(("Don't know how to do AXI enumeration on bus %d\n", sih->bustype));
207*4882a593Smuzhiyun ASSERT(0);
208*4882a593Smuzhiyun return;
209*4882a593Smuzhiyun }
210*4882a593Smuzhiyun eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
211*4882a593Smuzhiyun sii->axi_num_wrappers = 0;
212*4882a593Smuzhiyun
213*4882a593Smuzhiyun SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n",
214*4882a593Smuzhiyun OSL_OBFUSCATE_BUF(regs), erombase,
215*4882a593Smuzhiyun OSL_OBFUSCATE_BUF(eromptr), OSL_OBFUSCATE_BUF(eromlim)));
216*4882a593Smuzhiyun while (eromptr < eromlim) {
217*4882a593Smuzhiyun uint32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
218*4882a593Smuzhiyun uint32 mpd, asd, addrl, addrh, sizel, sizeh;
219*4882a593Smuzhiyun uint i, j, idx;
220*4882a593Smuzhiyun bool br;
221*4882a593Smuzhiyun
222*4882a593Smuzhiyun br = FALSE;
223*4882a593Smuzhiyun
224*4882a593Smuzhiyun /* Grok a component */
225*4882a593Smuzhiyun cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
226*4882a593Smuzhiyun if (cia == (ER_END | ER_VALID)) {
227*4882a593Smuzhiyun SI_VMSG(("Found END of erom after %d cores\n", sii->numcores));
228*4882a593Smuzhiyun return;
229*4882a593Smuzhiyun }
230*4882a593Smuzhiyun
231*4882a593Smuzhiyun cib = get_erom_ent(sih, &eromptr, 0, 0);
232*4882a593Smuzhiyun
233*4882a593Smuzhiyun if ((cib & ER_TAG) != ER_CI) {
234*4882a593Smuzhiyun SI_ERROR(("CIA not followed by CIB\n"));
235*4882a593Smuzhiyun goto error;
236*4882a593Smuzhiyun }
237*4882a593Smuzhiyun
238*4882a593Smuzhiyun cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
239*4882a593Smuzhiyun mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
240*4882a593Smuzhiyun crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
241*4882a593Smuzhiyun nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
242*4882a593Smuzhiyun nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
243*4882a593Smuzhiyun nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
244*4882a593Smuzhiyun nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
245*4882a593Smuzhiyun
246*4882a593Smuzhiyun #ifdef BCMDBG_SI
247*4882a593Smuzhiyun SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, "
248*4882a593Smuzhiyun "nsw = %d, nmp = %d & nsp = %d\n",
249*4882a593Smuzhiyun mfg, cid, crev, OSL_OBFUSCATE_BUF(eromptr - 1), nmw, nsw, nmp, nsp));
250*4882a593Smuzhiyun #else
251*4882a593Smuzhiyun BCM_REFERENCE(crev);
252*4882a593Smuzhiyun #endif
253*4882a593Smuzhiyun
254*4882a593Smuzhiyun /* Include Default slave wrapper for timeout monitoring */
255*4882a593Smuzhiyun if ((nsp == 0 && nsw == 0) ||
256*4882a593Smuzhiyun #if !defined(AXI_TIMEOUTS) && !defined(AXI_TIMEOUTS_NIC)
257*4882a593Smuzhiyun ((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) ||
258*4882a593Smuzhiyun #else
259*4882a593Smuzhiyun ((CHIPTYPE(sii->pub.socitype) == SOCI_NAI) &&
260*4882a593Smuzhiyun (mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) ||
261*4882a593Smuzhiyun #endif /* !defined(AXI_TIMEOUTS) && !defined(AXI_TIMEOUTS_NIC) */
262*4882a593Smuzhiyun FALSE) {
263*4882a593Smuzhiyun continue;
264*4882a593Smuzhiyun }
265*4882a593Smuzhiyun
266*4882a593Smuzhiyun if ((nmw + nsw == 0)) {
267*4882a593Smuzhiyun /* A component which is not a core */
268*4882a593Smuzhiyun /* Should record some info */
269*4882a593Smuzhiyun if (cid == OOB_ROUTER_CORE_ID) {
270*4882a593Smuzhiyun asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
271*4882a593Smuzhiyun &addrl, &addrh, &sizel, &sizeh);
272*4882a593Smuzhiyun if (asd != 0) {
273*4882a593Smuzhiyun if ((sii->oob_router != 0) && (sii->oob_router != addrl)) {
274*4882a593Smuzhiyun sii->oob_router1 = addrl;
275*4882a593Smuzhiyun } else {
276*4882a593Smuzhiyun sii->oob_router = addrl;
277*4882a593Smuzhiyun }
278*4882a593Smuzhiyun }
279*4882a593Smuzhiyun }
280*4882a593Smuzhiyun if ((cid != NS_CCB_CORE_ID) && (cid != PMU_CORE_ID) &&
281*4882a593Smuzhiyun (cid != GCI_CORE_ID) && (cid != SR_CORE_ID) &&
282*4882a593Smuzhiyun (cid != HUB_CORE_ID) && (cid != HND_OOBR_CORE_ID) &&
283*4882a593Smuzhiyun (cid != CCI400_CORE_ID) && (cid != SPMI_SLAVE_CORE_ID)) {
284*4882a593Smuzhiyun continue;
285*4882a593Smuzhiyun }
286*4882a593Smuzhiyun }
287*4882a593Smuzhiyun
288*4882a593Smuzhiyun idx = sii->numcores;
289*4882a593Smuzhiyun
290*4882a593Smuzhiyun cores_info->cia[idx] = cia;
291*4882a593Smuzhiyun cores_info->cib[idx] = cib;
292*4882a593Smuzhiyun cores_info->coreid[idx] = cid;
293*4882a593Smuzhiyun
294*4882a593Smuzhiyun /* workaround the fact the variable buscoretype is used in _ai_set_coreidx()
295*4882a593Smuzhiyun * when checking PCIE_GEN2() for PCI_BUS case before it is setup later...,
296*4882a593Smuzhiyun * both use and setup happen in si_buscore_setup().
297*4882a593Smuzhiyun */
298*4882a593Smuzhiyun if (BUSTYPE(sih->bustype) == PCI_BUS &&
299*4882a593Smuzhiyun (cid == PCI_CORE_ID || cid == PCIE_CORE_ID || cid == PCIE2_CORE_ID)) {
300*4882a593Smuzhiyun sii->pub.buscoretype = (uint16)cid;
301*4882a593Smuzhiyun }
302*4882a593Smuzhiyun
303*4882a593Smuzhiyun for (i = 0; i < nmp; i++) {
304*4882a593Smuzhiyun mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
305*4882a593Smuzhiyun if ((mpd & ER_TAG) != ER_MP) {
306*4882a593Smuzhiyun SI_ERROR(("Not enough MP entries for component 0x%x\n", cid));
307*4882a593Smuzhiyun goto error;
308*4882a593Smuzhiyun }
309*4882a593Smuzhiyun /* Record something? */
310*4882a593Smuzhiyun SI_VMSG((" Master port %d, mp: %d id: %d\n", i,
311*4882a593Smuzhiyun (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT,
312*4882a593Smuzhiyun (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT));
313*4882a593Smuzhiyun }
314*4882a593Smuzhiyun
315*4882a593Smuzhiyun /* First Slave Address Descriptor should be port 0:
316*4882a593Smuzhiyun * the main register space for the core
317*4882a593Smuzhiyun */
318*4882a593Smuzhiyun asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
319*4882a593Smuzhiyun if (asd == 0) {
320*4882a593Smuzhiyun do {
321*4882a593Smuzhiyun /* Try again to see if it is a bridge */
322*4882a593Smuzhiyun asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
323*4882a593Smuzhiyun &sizel, &sizeh);
324*4882a593Smuzhiyun if (asd != 0)
325*4882a593Smuzhiyun br = TRUE;
326*4882a593Smuzhiyun else {
327*4882a593Smuzhiyun break;
328*4882a593Smuzhiyun }
329*4882a593Smuzhiyun } while (1);
330*4882a593Smuzhiyun } else {
331*4882a593Smuzhiyun if (addrl == 0 || sizel == 0) {
332*4882a593Smuzhiyun SI_ERROR((" Invalid ASD %x for slave port \n", asd));
333*4882a593Smuzhiyun goto error;
334*4882a593Smuzhiyun }
335*4882a593Smuzhiyun cores_info->coresba[idx] = addrl;
336*4882a593Smuzhiyun cores_info->coresba_size[idx] = sizel;
337*4882a593Smuzhiyun }
338*4882a593Smuzhiyun
339*4882a593Smuzhiyun /* Get any more ASDs in first port */
340*4882a593Smuzhiyun j = 1;
341*4882a593Smuzhiyun do {
342*4882a593Smuzhiyun asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
343*4882a593Smuzhiyun &sizel, &sizeh);
344*4882a593Smuzhiyun /* Support ARM debug core ASD with address space > 4K */
345*4882a593Smuzhiyun if ((asd != 0) && (j == 1)) {
346*4882a593Smuzhiyun SI_VMSG(("Warning: sizel > 0x1000\n"));
347*4882a593Smuzhiyun cores_info->coresba2[idx] = addrl;
348*4882a593Smuzhiyun cores_info->coresba2_size[idx] = sizel;
349*4882a593Smuzhiyun }
350*4882a593Smuzhiyun j++;
351*4882a593Smuzhiyun } while (asd != 0);
352*4882a593Smuzhiyun
353*4882a593Smuzhiyun /* Go through the ASDs for other slave ports */
354*4882a593Smuzhiyun for (i = 1; i < nsp; i++) {
355*4882a593Smuzhiyun j = 0;
356*4882a593Smuzhiyun do {
357*4882a593Smuzhiyun asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
358*4882a593Smuzhiyun &sizel, &sizeh);
359*4882a593Smuzhiyun /* To get the first base address of second slave port */
360*4882a593Smuzhiyun if ((asd != 0) && (i == 1) && (j == 0)) {
361*4882a593Smuzhiyun cores_info->csp2ba[idx] = addrl;
362*4882a593Smuzhiyun cores_info->csp2ba_size[idx] = sizel;
363*4882a593Smuzhiyun }
364*4882a593Smuzhiyun if (asd == 0)
365*4882a593Smuzhiyun break;
366*4882a593Smuzhiyun j++;
367*4882a593Smuzhiyun } while (1);
368*4882a593Smuzhiyun if (j == 0) {
369*4882a593Smuzhiyun SI_ERROR((" SP %d has no address descriptors\n", i));
370*4882a593Smuzhiyun goto error;
371*4882a593Smuzhiyun }
372*4882a593Smuzhiyun }
373*4882a593Smuzhiyun
374*4882a593Smuzhiyun /* Now get master wrappers */
375*4882a593Smuzhiyun for (i = 0; i < nmw; i++) {
376*4882a593Smuzhiyun asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl, &addrh,
377*4882a593Smuzhiyun &sizel, &sizeh);
378*4882a593Smuzhiyun if (asd == 0) {
379*4882a593Smuzhiyun SI_ERROR(("Missing descriptor for MW %d\n", i));
380*4882a593Smuzhiyun goto error;
381*4882a593Smuzhiyun }
382*4882a593Smuzhiyun if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
383*4882a593Smuzhiyun SI_ERROR(("Master wrapper %d is not 4KB\n", i));
384*4882a593Smuzhiyun goto error;
385*4882a593Smuzhiyun }
386*4882a593Smuzhiyun if (i == 0) {
387*4882a593Smuzhiyun cores_info->wrapba[idx] = addrl;
388*4882a593Smuzhiyun } else if (i == 1) {
389*4882a593Smuzhiyun cores_info->wrapba2[idx] = addrl;
390*4882a593Smuzhiyun } else if (i == 2) {
391*4882a593Smuzhiyun cores_info->wrapba3[idx] = addrl;
392*4882a593Smuzhiyun }
393*4882a593Smuzhiyun
394*4882a593Smuzhiyun if (axi_wrapper && (sii->axi_num_wrappers < SI_MAX_AXI_WRAPPERS)) {
395*4882a593Smuzhiyun axi_wrapper[sii->axi_num_wrappers].mfg = mfg;
396*4882a593Smuzhiyun axi_wrapper[sii->axi_num_wrappers].cid = cid;
397*4882a593Smuzhiyun axi_wrapper[sii->axi_num_wrappers].rev = crev;
398*4882a593Smuzhiyun axi_wrapper[sii->axi_num_wrappers].wrapper_type = AI_MASTER_WRAPPER;
399*4882a593Smuzhiyun axi_wrapper[sii->axi_num_wrappers].wrapper_addr = addrl;
400*4882a593Smuzhiyun sii->axi_num_wrappers++;
401*4882a593Smuzhiyun SI_VMSG(("MASTER WRAPPER: %d, mfg:%x, cid:%x,"
402*4882a593Smuzhiyun "rev:%x, addr:%x, size:%x\n",
403*4882a593Smuzhiyun sii->axi_num_wrappers, mfg, cid, crev, addrl, sizel));
404*4882a593Smuzhiyun }
405*4882a593Smuzhiyun }
406*4882a593Smuzhiyun
407*4882a593Smuzhiyun /* And finally slave wrappers */
408*4882a593Smuzhiyun for (i = 0; i < nsw; i++) {
409*4882a593Smuzhiyun uint fwp = (nsp <= 1) ? 0 : 1;
410*4882a593Smuzhiyun asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, &addrl, &addrh,
411*4882a593Smuzhiyun &sizel, &sizeh);
412*4882a593Smuzhiyun if (asd == 0) {
413*4882a593Smuzhiyun SI_ERROR(("Missing descriptor for SW %d cid %x eromp %p fwp %d \n",
414*4882a593Smuzhiyun i, cid, eromptr, fwp));
415*4882a593Smuzhiyun goto error;
416*4882a593Smuzhiyun }
417*4882a593Smuzhiyun
418*4882a593Smuzhiyun if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
419*4882a593Smuzhiyun SI_ERROR(("Slave wrapper %d is not 4KB\n", i));
420*4882a593Smuzhiyun goto error;
421*4882a593Smuzhiyun }
422*4882a593Smuzhiyun
423*4882a593Smuzhiyun /* cache APB bridge wrapper address for set/clear timeout */
424*4882a593Smuzhiyun if ((mfg == MFGID_ARM) && (cid == APB_BRIDGE_ID)) {
425*4882a593Smuzhiyun ASSERT(sii->num_br < SI_MAXBR);
426*4882a593Smuzhiyun sii->br_wrapba[sii->num_br++] = addrl;
427*4882a593Smuzhiyun }
428*4882a593Smuzhiyun
429*4882a593Smuzhiyun if ((mfg == MFGID_ARM) && (cid == ADB_BRIDGE_ID)) {
430*4882a593Smuzhiyun br = TRUE;
431*4882a593Smuzhiyun }
432*4882a593Smuzhiyun
433*4882a593Smuzhiyun BCM_REFERENCE(br);
434*4882a593Smuzhiyun
435*4882a593Smuzhiyun if ((nmw == 0) && (i == 0)) {
436*4882a593Smuzhiyun cores_info->wrapba[idx] = addrl;
437*4882a593Smuzhiyun } else if ((nmw == 0) && (i == 1)) {
438*4882a593Smuzhiyun cores_info->wrapba2[idx] = addrl;
439*4882a593Smuzhiyun } else if ((nmw == 0) && (i == 2)) {
440*4882a593Smuzhiyun cores_info->wrapba3[idx] = addrl;
441*4882a593Smuzhiyun }
442*4882a593Smuzhiyun
443*4882a593Smuzhiyun /* Include all slave wrappers to the list to
444*4882a593Smuzhiyun * enable and monitor watchdog timeouts
445*4882a593Smuzhiyun */
446*4882a593Smuzhiyun
447*4882a593Smuzhiyun if (axi_wrapper && (sii->axi_num_wrappers < SI_MAX_AXI_WRAPPERS)) {
448*4882a593Smuzhiyun axi_wrapper[sii->axi_num_wrappers].mfg = mfg;
449*4882a593Smuzhiyun axi_wrapper[sii->axi_num_wrappers].cid = cid;
450*4882a593Smuzhiyun axi_wrapper[sii->axi_num_wrappers].rev = crev;
451*4882a593Smuzhiyun axi_wrapper[sii->axi_num_wrappers].wrapper_type = AI_SLAVE_WRAPPER;
452*4882a593Smuzhiyun axi_wrapper[sii->axi_num_wrappers].wrapper_addr = addrl;
453*4882a593Smuzhiyun
454*4882a593Smuzhiyun sii->axi_num_wrappers++;
455*4882a593Smuzhiyun
456*4882a593Smuzhiyun SI_VMSG(("SLAVE WRAPPER: %d, mfg:%x, cid:%x,"
457*4882a593Smuzhiyun "rev:%x, addr:%x, size:%x\n",
458*4882a593Smuzhiyun sii->axi_num_wrappers, mfg, cid, crev, addrl, sizel));
459*4882a593Smuzhiyun }
460*4882a593Smuzhiyun }
461*4882a593Smuzhiyun
462*4882a593Smuzhiyun #ifndef AXI_TIMEOUTS_NIC
463*4882a593Smuzhiyun /* Don't record bridges and core with 0 slave ports */
464*4882a593Smuzhiyun if (br || (nsp == 0)) {
465*4882a593Smuzhiyun continue;
466*4882a593Smuzhiyun }
467*4882a593Smuzhiyun #endif
468*4882a593Smuzhiyun
469*4882a593Smuzhiyun /* Done with core */
470*4882a593Smuzhiyun sii->numcores++;
471*4882a593Smuzhiyun }
472*4882a593Smuzhiyun
473*4882a593Smuzhiyun SI_ERROR(("Reached end of erom without finding END\n"));
474*4882a593Smuzhiyun
475*4882a593Smuzhiyun error:
476*4882a593Smuzhiyun sii->numcores = 0;
477*4882a593Smuzhiyun return;
478*4882a593Smuzhiyun }
479*4882a593Smuzhiyun
/* Register-window size to map for a core: NS_CCB maps 15 consecutive 4KB
 * spaces (15 * SI_CORE_SIZE); every other core maps a single 4KB window.
 */
480*4882a593Smuzhiyun #define AI_SETCOREIDX_MAPSIZE(coreid) \
481*4882a593Smuzhiyun 	(((coreid) == NS_CCB_CORE_ID) ? 15 * SI_CORE_SIZE : SI_CORE_SIZE)
482*4882a593Smuzhiyun
483*4882a593Smuzhiyun /* This function changes the logical "focus" to the indicated core.
484*4882a593Smuzhiyun * Return the current core's virtual address.
485*4882a593Smuzhiyun */
486*4882a593Smuzhiyun static volatile void *
BCMPOSTTRAPFN(_ai_setcoreidx)487*4882a593Smuzhiyun BCMPOSTTRAPFN(_ai_setcoreidx)(si_t *sih, uint coreidx, uint use_wrapn)
488*4882a593Smuzhiyun {
489*4882a593Smuzhiyun si_info_t *sii = SI_INFO(sih);
490*4882a593Smuzhiyun si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
491*4882a593Smuzhiyun uint32 addr, wrap, wrap2, wrap3;
492*4882a593Smuzhiyun volatile void *regs;
493*4882a593Smuzhiyun
494*4882a593Smuzhiyun if (coreidx >= MIN(sii->numcores, SI_MAXCORES))
495*4882a593Smuzhiyun return (NULL);
496*4882a593Smuzhiyun
497*4882a593Smuzhiyun addr = cores_info->coresba[coreidx];
498*4882a593Smuzhiyun wrap = cores_info->wrapba[coreidx];
499*4882a593Smuzhiyun wrap2 = cores_info->wrapba2[coreidx];
500*4882a593Smuzhiyun wrap3 = cores_info->wrapba3[coreidx];
501*4882a593Smuzhiyun
502*4882a593Smuzhiyun #ifdef AXI_TIMEOUTS_NIC
503*4882a593Smuzhiyun /* No need to disable interrupts while entering/exiting APB bridge core */
504*4882a593Smuzhiyun if ((cores_info->coreid[coreidx] != APB_BRIDGE_CORE_ID) &&
505*4882a593Smuzhiyun (cores_info->coreid[sii->curidx] != APB_BRIDGE_CORE_ID))
506*4882a593Smuzhiyun #endif /* AXI_TIMEOUTS_NIC */
507*4882a593Smuzhiyun {
508*4882a593Smuzhiyun /*
509*4882a593Smuzhiyun * If the user has provided an interrupt mask enabled function,
510*4882a593Smuzhiyun * then assert interrupts are disabled before switching the core.
511*4882a593Smuzhiyun */
512*4882a593Smuzhiyun ASSERT((sii->intrsenabled_fn == NULL) ||
513*4882a593Smuzhiyun !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
514*4882a593Smuzhiyun }
515*4882a593Smuzhiyun
516*4882a593Smuzhiyun switch (BUSTYPE(sih->bustype)) {
517*4882a593Smuzhiyun case SI_BUS:
518*4882a593Smuzhiyun /* map new one */
519*4882a593Smuzhiyun if (!cores_info->regs[coreidx]) {
520*4882a593Smuzhiyun cores_info->regs[coreidx] = REG_MAP(addr,
521*4882a593Smuzhiyun AI_SETCOREIDX_MAPSIZE(cores_info->coreid[coreidx]));
522*4882a593Smuzhiyun ASSERT(GOODREGS(cores_info->regs[coreidx]));
523*4882a593Smuzhiyun }
524*4882a593Smuzhiyun sii->curmap = regs = cores_info->regs[coreidx];
525*4882a593Smuzhiyun if (!cores_info->wrappers[coreidx] && (wrap != 0)) {
526*4882a593Smuzhiyun cores_info->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
527*4882a593Smuzhiyun ASSERT(GOODREGS(cores_info->wrappers[coreidx]));
528*4882a593Smuzhiyun }
529*4882a593Smuzhiyun if (!cores_info->wrappers2[coreidx] && (wrap2 != 0)) {
530*4882a593Smuzhiyun cores_info->wrappers2[coreidx] = REG_MAP(wrap2, SI_CORE_SIZE);
531*4882a593Smuzhiyun ASSERT(GOODREGS(cores_info->wrappers2[coreidx]));
532*4882a593Smuzhiyun }
533*4882a593Smuzhiyun if (!cores_info->wrappers3[coreidx] && (wrap3 != 0)) {
534*4882a593Smuzhiyun cores_info->wrappers3[coreidx] = REG_MAP(wrap3, SI_CORE_SIZE);
535*4882a593Smuzhiyun ASSERT(GOODREGS(cores_info->wrappers3[coreidx]));
536*4882a593Smuzhiyun }
537*4882a593Smuzhiyun
538*4882a593Smuzhiyun if (use_wrapn == 2) {
539*4882a593Smuzhiyun sii->curwrap = cores_info->wrappers3[coreidx];
540*4882a593Smuzhiyun } else if (use_wrapn == 1) {
541*4882a593Smuzhiyun sii->curwrap = cores_info->wrappers2[coreidx];
542*4882a593Smuzhiyun } else {
543*4882a593Smuzhiyun sii->curwrap = cores_info->wrappers[coreidx];
544*4882a593Smuzhiyun }
545*4882a593Smuzhiyun break;
546*4882a593Smuzhiyun
547*4882a593Smuzhiyun case PCI_BUS:
548*4882a593Smuzhiyun regs = sii->curmap;
549*4882a593Smuzhiyun
550*4882a593Smuzhiyun /* point bar0 2nd 4KB window to the primary wrapper */
551*4882a593Smuzhiyun if (use_wrapn == 2) {
552*4882a593Smuzhiyun wrap = wrap3;
553*4882a593Smuzhiyun } else if (use_wrapn == 1) {
554*4882a593Smuzhiyun wrap = wrap2;
555*4882a593Smuzhiyun }
556*4882a593Smuzhiyun
557*4882a593Smuzhiyun /* Use BAR0 Window to support dual mac chips... */
558*4882a593Smuzhiyun
559*4882a593Smuzhiyun /* TODO: the other mac unit can't be supportd by the current BAR0 window.
560*4882a593Smuzhiyun * need to find other ways to access these cores.
561*4882a593Smuzhiyun */
562*4882a593Smuzhiyun
563*4882a593Smuzhiyun switch (sii->slice) {
564*4882a593Smuzhiyun case 0: /* main/first slice */
565*4882a593Smuzhiyun #ifdef AXI_TIMEOUTS_NIC
566*4882a593Smuzhiyun /* No need to set the BAR0 if core is APB Bridge.
567*4882a593Smuzhiyun * This is to reduce 2 PCI writes while checkng for errlog
568*4882a593Smuzhiyun */
569*4882a593Smuzhiyun if (cores_info->coreid[coreidx] != APB_BRIDGE_CORE_ID)
570*4882a593Smuzhiyun #endif /* AXI_TIMEOUTS_NIC */
571*4882a593Smuzhiyun {
572*4882a593Smuzhiyun /* point bar0 window */
573*4882a593Smuzhiyun OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, addr);
574*4882a593Smuzhiyun }
575*4882a593Smuzhiyun
576*4882a593Smuzhiyun if (PCIE_GEN2(sii))
577*4882a593Smuzhiyun OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_WIN2, 4, wrap);
578*4882a593Smuzhiyun else
579*4882a593Smuzhiyun OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN2, 4, wrap);
580*4882a593Smuzhiyun
581*4882a593Smuzhiyun break;
582*4882a593Smuzhiyun
583*4882a593Smuzhiyun case 1: /* aux/second slice */
584*4882a593Smuzhiyun /* PCIE GEN2 only for other slices */
585*4882a593Smuzhiyun if (!PCIE_GEN2(sii)) {
586*4882a593Smuzhiyun /* other slices not supported */
587*4882a593Smuzhiyun SI_ERROR(("PCI GEN not supported for slice %d\n", sii->slice));
588*4882a593Smuzhiyun ASSERT(0);
589*4882a593Smuzhiyun break;
590*4882a593Smuzhiyun }
591*4882a593Smuzhiyun
592*4882a593Smuzhiyun /* 0x4000 - 0x4fff: enum space 0x5000 - 0x5fff: wrapper space */
593*4882a593Smuzhiyun regs = (volatile uint8 *)regs + PCI_SEC_BAR0_WIN_OFFSET;
594*4882a593Smuzhiyun sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE);
595*4882a593Smuzhiyun
596*4882a593Smuzhiyun /* point bar0 window */
597*4882a593Smuzhiyun OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_CORE2_WIN, 4, addr);
598*4882a593Smuzhiyun OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_CORE2_WIN2, 4, wrap);
599*4882a593Smuzhiyun break;
600*4882a593Smuzhiyun
601*4882a593Smuzhiyun case 2: /* scan/third slice */
602*4882a593Smuzhiyun /* PCIE GEN2 only for other slices */
603*4882a593Smuzhiyun if (!PCIE_GEN2(sii)) {
604*4882a593Smuzhiyun /* other slices not supported */
605*4882a593Smuzhiyun SI_ERROR(("PCI GEN not supported for slice %d\n", sii->slice));
606*4882a593Smuzhiyun ASSERT(0);
607*4882a593Smuzhiyun break;
608*4882a593Smuzhiyun }
609*4882a593Smuzhiyun
610*4882a593Smuzhiyun /* 0x9000 - 0x9fff: enum space 0xa000 - 0xafff: wrapper space */
611*4882a593Smuzhiyun regs = (volatile uint8 *)regs + PCI_TER_BAR0_WIN_OFFSET;
612*4882a593Smuzhiyun sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE);
613*4882a593Smuzhiyun
614*4882a593Smuzhiyun /* point bar0 window */
615*4882a593Smuzhiyun ai_corereg(sih, sih->buscoreidx, PCIE_TER_BAR0_WIN, ~0, addr);
616*4882a593Smuzhiyun ai_corereg(sih, sih->buscoreidx, PCIE_TER_BAR0_WRAPPER, ~0, wrap);
617*4882a593Smuzhiyun break;
618*4882a593Smuzhiyun
619*4882a593Smuzhiyun default: /* other slices */
620*4882a593Smuzhiyun SI_ERROR(("BAR0 Window not supported for slice %d\n", sii->slice));
621*4882a593Smuzhiyun ASSERT(0);
622*4882a593Smuzhiyun break;
623*4882a593Smuzhiyun }
624*4882a593Smuzhiyun
625*4882a593Smuzhiyun break;
626*4882a593Smuzhiyun
627*4882a593Smuzhiyun #ifdef BCMSDIO
628*4882a593Smuzhiyun case SPI_BUS:
629*4882a593Smuzhiyun case SDIO_BUS:
630*4882a593Smuzhiyun sii->curmap = regs = (void *)((uintptr)addr);
631*4882a593Smuzhiyun if (use_wrapn)
632*4882a593Smuzhiyun sii->curwrap = (void *)((uintptr)wrap2);
633*4882a593Smuzhiyun else
634*4882a593Smuzhiyun sii->curwrap = (void *)((uintptr)wrap);
635*4882a593Smuzhiyun break;
636*4882a593Smuzhiyun #endif /* BCMSDIO */
637*4882a593Smuzhiyun
638*4882a593Smuzhiyun default:
639*4882a593Smuzhiyun ASSERT(0);
640*4882a593Smuzhiyun sii->curmap = regs = NULL;
641*4882a593Smuzhiyun break;
642*4882a593Smuzhiyun }
643*4882a593Smuzhiyun
644*4882a593Smuzhiyun sii->curidx = coreidx;
645*4882a593Smuzhiyun
646*4882a593Smuzhiyun return regs;
647*4882a593Smuzhiyun }
648*4882a593Smuzhiyun
649*4882a593Smuzhiyun volatile void *
BCMPOSTTRAPFN(ai_setcoreidx)650*4882a593Smuzhiyun BCMPOSTTRAPFN(ai_setcoreidx)(si_t *sih, uint coreidx)
651*4882a593Smuzhiyun {
652*4882a593Smuzhiyun return _ai_setcoreidx(sih, coreidx, 0);
653*4882a593Smuzhiyun }
654*4882a593Smuzhiyun
655*4882a593Smuzhiyun volatile void *
BCMPOSTTRAPFN(ai_setcoreidx_2ndwrap)656*4882a593Smuzhiyun BCMPOSTTRAPFN(ai_setcoreidx_2ndwrap)(si_t *sih, uint coreidx)
657*4882a593Smuzhiyun {
658*4882a593Smuzhiyun return _ai_setcoreidx(sih, coreidx, 1);
659*4882a593Smuzhiyun }
660*4882a593Smuzhiyun
661*4882a593Smuzhiyun volatile void *
BCMPOSTTRAPFN(ai_setcoreidx_3rdwrap)662*4882a593Smuzhiyun BCMPOSTTRAPFN(ai_setcoreidx_3rdwrap)(si_t *sih, uint coreidx)
663*4882a593Smuzhiyun {
664*4882a593Smuzhiyun return _ai_setcoreidx(sih, coreidx, 2);
665*4882a593Smuzhiyun }
666*4882a593Smuzhiyun
/* Look up the address/size of the 'asidx'-th slave-port address space
 * descriptor (ASD) of the current core by walking the EROM.  Only ASDs of
 * slave ports 1..nsp-1 are counted; all ASDs of slave port 0 are skipped.
 * On success *addr/*size receive the low 32 bits of the descriptor.
 * On failure (no chipcommon core, or asidx out of range) only *size is set
 * to 0 -- *addr is left unmodified, so callers must check *size first.
 */
void
ai_coreaddrspaceX(const si_t *sih, uint asidx, uint32 *addr, uint32 *size)
{
	const si_info_t *sii = SI_INFO(sih);
	const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
	chipcregs_t *cc = NULL;
	uint32 erombase, *eromptr, *eromlim;
	uint i, j, cidx;
	uint32 cia, cib, nmp, nsp;
	uint32 asd, addrl, addrh, sizel, sizeh;

	/* locate chipcommon, which holds the EROM base pointer */
	for (i = 0; i < sii->numcores; i++) {
		if (cores_info->coreid[i] == CC_CORE_ID) {
			cc = (chipcregs_t *)cores_info->regs[i];
			break;
		}
	}
	if (cc == NULL)
		goto error;

	BCM_REFERENCE(erombase);
	erombase = R_REG(sii->osh, &cc->eromptr);
	eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
	eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));

	cidx = sii->curidx;
	cia = cores_info->cia[cidx];
	cib = cores_info->cib[cidx];

	/* master/slave port counts of the current core, from its CIB word */
	nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
	nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;

	/* scan for cores: advance eromptr to the CIA/CIB pair of the current core */
	while (eromptr < eromlim) {
		if ((get_erom_ent(sih, &eromptr, ER_TAG, ER_CI) == cia) &&
			(get_erom_ent(sih, &eromptr, 0, 0) == cib)) {
			break;
		}
	}
	/* NOTE(review): if the scan exhausts the EROM without matching, parsing
	 * continues at eromlim anyway -- presumably cannot happen for a valid
	 * current core; confirm.
	 */

	/* skip master ports */
	for (i = 0; i < nmp; i++)
		get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);

	/* Skip ASDs in port 0 */
	asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
	if (asd == 0) {
		/* Try again to see if it is a bridge */
		asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
		              &sizel, &sizeh);
	}

	/* consume any remaining ASDs of slave port 0 */
	j = 1;
	do {
		asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
		              &sizel, &sizeh);
		j++;
	} while (asd != 0);

	/* Go through the ASDs for other slave ports */
	for (i = 1; i < nsp; i++) {
		j = 0;
		do {
			asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
			              &sizel, &sizeh);
			if (asd == 0)
				break;

			/* count down until the requested descriptor is reached */
			if (!asidx--) {
				*addr = addrl;
				*size = sizel;
				return;
			}
			j++;
		} while (1);

		if (j == 0) {
			SI_ERROR((" SP %d has no address descriptors\n", i));
			break;
		}
	}

error:
	/* not found: report zero size (caller's *addr is untouched) */
	*size = 0;
	return;
}
753*4882a593Smuzhiyun
754*4882a593Smuzhiyun /* Return the number of address spaces in current core */
755*4882a593Smuzhiyun int
ai_numaddrspaces(const si_t * sih)756*4882a593Smuzhiyun ai_numaddrspaces(const si_t *sih)
757*4882a593Smuzhiyun {
758*4882a593Smuzhiyun /* TODO: Either save it or parse the EROM on demand, currently hardcode 2 */
759*4882a593Smuzhiyun BCM_REFERENCE(sih);
760*4882a593Smuzhiyun
761*4882a593Smuzhiyun return 2;
762*4882a593Smuzhiyun }
763*4882a593Smuzhiyun
764*4882a593Smuzhiyun /* Return the address of the nth address space in the current core
765*4882a593Smuzhiyun * Arguments:
766*4882a593Smuzhiyun * sih : Pointer to struct si_t
767*4882a593Smuzhiyun * spidx : slave port index
768*4882a593Smuzhiyun * baidx : base address index
769*4882a593Smuzhiyun */
770*4882a593Smuzhiyun uint32
ai_addrspace(const si_t * sih,uint spidx,uint baidx)771*4882a593Smuzhiyun ai_addrspace(const si_t *sih, uint spidx, uint baidx)
772*4882a593Smuzhiyun {
773*4882a593Smuzhiyun const si_info_t *sii = SI_INFO(sih);
774*4882a593Smuzhiyun const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
775*4882a593Smuzhiyun uint cidx;
776*4882a593Smuzhiyun
777*4882a593Smuzhiyun cidx = sii->curidx;
778*4882a593Smuzhiyun
779*4882a593Smuzhiyun if (spidx == CORE_SLAVE_PORT_0) {
780*4882a593Smuzhiyun if (baidx == CORE_BASE_ADDR_0)
781*4882a593Smuzhiyun return cores_info->coresba[cidx];
782*4882a593Smuzhiyun else if (baidx == CORE_BASE_ADDR_1)
783*4882a593Smuzhiyun return cores_info->coresba2[cidx];
784*4882a593Smuzhiyun }
785*4882a593Smuzhiyun else if (spidx == CORE_SLAVE_PORT_1) {
786*4882a593Smuzhiyun if (baidx == CORE_BASE_ADDR_0)
787*4882a593Smuzhiyun return cores_info->csp2ba[cidx];
788*4882a593Smuzhiyun }
789*4882a593Smuzhiyun
790*4882a593Smuzhiyun SI_ERROR(("ai_addrspace: Need to parse the erom again to find %d base addr"
791*4882a593Smuzhiyun " in %d slave port\n",
792*4882a593Smuzhiyun baidx, spidx));
793*4882a593Smuzhiyun
794*4882a593Smuzhiyun return 0;
795*4882a593Smuzhiyun
796*4882a593Smuzhiyun }
797*4882a593Smuzhiyun
798*4882a593Smuzhiyun /* Return the size of the nth address space in the current core
799*4882a593Smuzhiyun * Arguments:
800*4882a593Smuzhiyun * sih : Pointer to struct si_t
801*4882a593Smuzhiyun * spidx : slave port index
802*4882a593Smuzhiyun * baidx : base address index
803*4882a593Smuzhiyun */
804*4882a593Smuzhiyun uint32
ai_addrspacesize(const si_t * sih,uint spidx,uint baidx)805*4882a593Smuzhiyun ai_addrspacesize(const si_t *sih, uint spidx, uint baidx)
806*4882a593Smuzhiyun {
807*4882a593Smuzhiyun const si_info_t *sii = SI_INFO(sih);
808*4882a593Smuzhiyun const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
809*4882a593Smuzhiyun uint cidx;
810*4882a593Smuzhiyun
811*4882a593Smuzhiyun cidx = sii->curidx;
812*4882a593Smuzhiyun if (spidx == CORE_SLAVE_PORT_0) {
813*4882a593Smuzhiyun if (baidx == CORE_BASE_ADDR_0)
814*4882a593Smuzhiyun return cores_info->coresba_size[cidx];
815*4882a593Smuzhiyun else if (baidx == CORE_BASE_ADDR_1)
816*4882a593Smuzhiyun return cores_info->coresba2_size[cidx];
817*4882a593Smuzhiyun }
818*4882a593Smuzhiyun else if (spidx == CORE_SLAVE_PORT_1) {
819*4882a593Smuzhiyun if (baidx == CORE_BASE_ADDR_0)
820*4882a593Smuzhiyun return cores_info->csp2ba_size[cidx];
821*4882a593Smuzhiyun }
822*4882a593Smuzhiyun
823*4882a593Smuzhiyun SI_ERROR(("ai_addrspacesize: Need to parse the erom again to find %d"
824*4882a593Smuzhiyun " base addr in %d slave port\n",
825*4882a593Smuzhiyun baidx, spidx));
826*4882a593Smuzhiyun
827*4882a593Smuzhiyun return 0;
828*4882a593Smuzhiyun }
829*4882a593Smuzhiyun
/* Return the current core's OOB (out-of-band) select flag: the low 5 bits
 * of the wrapper's oobselouta30 register.  PMU DMP cores are read via the
 * chipcommon wrapper instead (see below).
 */
uint
ai_flag(si_t *sih)
{
	const si_info_t *sii = SI_INFO(sih);
#if !defined(BCMDONGLEHOST)
	/* cores_info is presumably referenced by the PMU_DMP() macro -- verify */
	const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
#endif
	aidmp_t *ai;

	if (PMU_DMP()) {
		uint idx, flag;
		idx = sii->curidx;
		/* for PMU DMP cores, read the flag through chipcommon's wrapper,
		 * then restore the originally selected core
		 */
		ai_setcoreidx(sih, SI_CC_IDX);
		flag = ai_flag_alt(sih);
		ai_setcoreidx(sih, idx);
		return flag;
	}

	ai = sii->curwrap;
	ASSERT(ai != NULL);

	/* low 5 bits of oobselouta30 hold the first OOB select value */
	return (R_REG(sii->osh, &ai->oobselouta30) & 0x1f);
}
853*4882a593Smuzhiyun
854*4882a593Smuzhiyun uint
ai_flag_alt(const si_t * sih)855*4882a593Smuzhiyun ai_flag_alt(const si_t *sih)
856*4882a593Smuzhiyun {
857*4882a593Smuzhiyun const si_info_t *sii = SI_INFO(sih);
858*4882a593Smuzhiyun aidmp_t *ai = sii->curwrap;
859*4882a593Smuzhiyun
860*4882a593Smuzhiyun return ((R_REG(sii->osh, &ai->oobselouta30) >> AI_OOBSEL_1_SHIFT) & AI_OOBSEL_MASK);
861*4882a593Smuzhiyun }
862*4882a593Smuzhiyun
863*4882a593Smuzhiyun void
ai_setint(const si_t * sih,int siflag)864*4882a593Smuzhiyun ai_setint(const si_t *sih, int siflag)
865*4882a593Smuzhiyun {
866*4882a593Smuzhiyun BCM_REFERENCE(sih);
867*4882a593Smuzhiyun BCM_REFERENCE(siflag);
868*4882a593Smuzhiyun
869*4882a593Smuzhiyun /* TODO: Figure out how to set interrupt mask in ai */
870*4882a593Smuzhiyun }
871*4882a593Smuzhiyun
872*4882a593Smuzhiyun uint
BCMPOSTTRAPFN(ai_wrap_reg)873*4882a593Smuzhiyun BCMPOSTTRAPFN(ai_wrap_reg)(const si_t *sih, uint32 offset, uint32 mask, uint32 val)
874*4882a593Smuzhiyun {
875*4882a593Smuzhiyun const si_info_t *sii = SI_INFO(sih);
876*4882a593Smuzhiyun uint32 *addr = (uint32 *) ((uchar *)(sii->curwrap) + offset);
877*4882a593Smuzhiyun
878*4882a593Smuzhiyun if (mask || val) {
879*4882a593Smuzhiyun uint32 w = R_REG(sii->osh, addr);
880*4882a593Smuzhiyun w &= ~mask;
881*4882a593Smuzhiyun w |= val;
882*4882a593Smuzhiyun W_REG(sii->osh, addr, w);
883*4882a593Smuzhiyun }
884*4882a593Smuzhiyun return (R_REG(sii->osh, addr));
885*4882a593Smuzhiyun }
886*4882a593Smuzhiyun
887*4882a593Smuzhiyun uint
ai_corevendor(const si_t * sih)888*4882a593Smuzhiyun ai_corevendor(const si_t *sih)
889*4882a593Smuzhiyun {
890*4882a593Smuzhiyun const si_info_t *sii = SI_INFO(sih);
891*4882a593Smuzhiyun const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
892*4882a593Smuzhiyun uint32 cia;
893*4882a593Smuzhiyun
894*4882a593Smuzhiyun cia = cores_info->cia[sii->curidx];
895*4882a593Smuzhiyun return ((cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT);
896*4882a593Smuzhiyun }
897*4882a593Smuzhiyun
898*4882a593Smuzhiyun uint
BCMPOSTTRAPFN(ai_corerev)899*4882a593Smuzhiyun BCMPOSTTRAPFN(ai_corerev)(const si_t *sih)
900*4882a593Smuzhiyun {
901*4882a593Smuzhiyun const si_info_t *sii = SI_INFO(sih);
902*4882a593Smuzhiyun const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
903*4882a593Smuzhiyun uint32 cib;
904*4882a593Smuzhiyun
905*4882a593Smuzhiyun cib = cores_info->cib[sii->curidx];
906*4882a593Smuzhiyun return ((cib & CIB_REV_MASK) >> CIB_REV_SHIFT);
907*4882a593Smuzhiyun }
908*4882a593Smuzhiyun
909*4882a593Smuzhiyun uint
ai_corerev_minor(const si_t * sih)910*4882a593Smuzhiyun ai_corerev_minor(const si_t *sih)
911*4882a593Smuzhiyun {
912*4882a593Smuzhiyun return (ai_core_sflags(sih, 0, 0) >> SISF_MINORREV_D11_SHIFT) &
913*4882a593Smuzhiyun SISF_MINORREV_D11_MASK;
914*4882a593Smuzhiyun }
915*4882a593Smuzhiyun
916*4882a593Smuzhiyun bool
BCMPOSTTRAPFN(ai_iscoreup)917*4882a593Smuzhiyun BCMPOSTTRAPFN(ai_iscoreup)(const si_t *sih)
918*4882a593Smuzhiyun {
919*4882a593Smuzhiyun const si_info_t *sii = SI_INFO(sih);
920*4882a593Smuzhiyun aidmp_t *ai = sii->curwrap;
921*4882a593Smuzhiyun
922*4882a593Smuzhiyun return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) == SICF_CLOCK_EN) &&
923*4882a593Smuzhiyun ((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0));
924*4882a593Smuzhiyun }
925*4882a593Smuzhiyun
926*4882a593Smuzhiyun /*
927*4882a593Smuzhiyun * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
928*4882a593Smuzhiyun * switch back to the original core, and return the new value.
929*4882a593Smuzhiyun *
930*4882a593Smuzhiyun * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
931*4882a593Smuzhiyun *
932*4882a593Smuzhiyun * Also, when using pci/pcie, we can optimize away the core switching for pci registers
933*4882a593Smuzhiyun * and (on newer pci cores) chipcommon registers.
934*4882a593Smuzhiyun */
uint
BCMPOSTTRAPFN(ai_corereg)(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
{
	uint origidx = 0;
	volatile uint32 *r = NULL;
	uint w;
	bcm_int_bitmask_t intr_val;
	bool fast = FALSE;	/* TRUE when no core switch / interrupt-off is needed */
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

	ASSERT(GOODIDX(coreidx, sii->numcores));
	ASSERT(regoff < SI_CORE_SIZE);
	ASSERT((val & ~mask) == 0);	/* val bits must lie within mask */

	if (coreidx >= SI_MAXCORES)
		return 0;

	if (BUSTYPE(sih->bustype) == SI_BUS) {
		/* If internal bus, we can always get at everything */
		fast = TRUE;
		/* map if does not exist */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
				SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
	} else if (BUSTYPE(sih->bustype) == PCI_BUS) {
		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */

		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
			/* Chipc registers are mapped at 12KB */

			fast = TRUE;
			r = (volatile uint32 *)((volatile char *)sii->curmap +
				PCI_16KB0_CCREGS_OFFSET + regoff);
		} else if (sii->pub.buscoreidx == coreidx) {
			/* pci registers are at either in the last 2KB of an 8KB window
			 * or, in pcie and pci rev 13 at 8KB
			 */
			fast = TRUE;
			if (SI_FAST(sii))
				r = (volatile uint32 *)((volatile char *)sii->curmap +
					PCI_16KB0_PCIREGS_OFFSET + regoff);
			else
				r = (volatile uint32 *)((volatile char *)sii->curmap +
					((regoff >= SBCONFIGOFF) ?
					PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
					regoff);
		}
	}

	if (!fast) {
		/* slow path: block interrupts and switch to the target core */
		INTR_OFF(sii, &intr_val);

		/* save current core index */
		origidx = si_coreidx(&sii->pub);

		/* switch core */
		r = (volatile uint32*) ((volatile uchar*) ai_setcoreidx(&sii->pub, coreidx) +
			regoff);
	}
	ASSERT(r != NULL);

	/* mask and set */
	if (mask || val) {
		w = (R_REG(sii->osh, r) & ~mask) | val;
		W_REG(sii->osh, r, w);
	}

	/* readback */
	w = R_REG(sii->osh, r);

	if (!fast) {
		/* restore core index */
		if (origidx != coreidx)
			ai_setcoreidx(&sii->pub, origidx);

		INTR_RESTORE(sii, &intr_val);
	}

	return (w);
}
1019*4882a593Smuzhiyun
/*
 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
 * switch back to the original core, and return the value written.
 *
 * Unlike ai_corereg(), no readback of the register is performed after the
 * write; if both mask and val are zero, nothing is written and 0 is returned.
 *
 * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
 *
 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
 * and (on newer pci cores) chipcommon registers.
 */
/* Write-only variant of ai_corereg(): identical core-selection logic, but
 * no final readback -- returns the value written, or 0 when both mask and
 * val are zero.
 */
uint
ai_corereg_writeonly(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
{
	uint origidx = 0;
	volatile uint32 *r = NULL;
	uint w = 0;
	bcm_int_bitmask_t intr_val;
	bool fast = FALSE;	/* TRUE when no core switch / interrupt-off is needed */
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

	ASSERT(GOODIDX(coreidx, sii->numcores));
	ASSERT(regoff < SI_CORE_SIZE);
	ASSERT((val & ~mask) == 0);	/* val bits must lie within mask */

	if (coreidx >= SI_MAXCORES)
		return 0;

	if (BUSTYPE(sih->bustype) == SI_BUS) {
		/* If internal bus, we can always get at everything */
		fast = TRUE;
		/* map if does not exist */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
				SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
	} else if (BUSTYPE(sih->bustype) == PCI_BUS) {
		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */

		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
			/* Chipc registers are mapped at 12KB */

			fast = TRUE;
			r = (volatile uint32 *)((volatile char *)sii->curmap +
				PCI_16KB0_CCREGS_OFFSET + regoff);
		} else if (sii->pub.buscoreidx == coreidx) {
			/* pci registers are at either in the last 2KB of an 8KB window
			 * or, in pcie and pci rev 13 at 8KB
			 */
			fast = TRUE;
			if (SI_FAST(sii))
				r = (volatile uint32 *)((volatile char *)sii->curmap +
					PCI_16KB0_PCIREGS_OFFSET + regoff);
			else
				r = (volatile uint32 *)((volatile char *)sii->curmap +
					((regoff >= SBCONFIGOFF) ?
					PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
					regoff);
		}
	}

	if (!fast) {
		/* slow path: block interrupts and switch to the target core */
		INTR_OFF(sii, &intr_val);

		/* save current core index */
		origidx = si_coreidx(&sii->pub);

		/* switch core */
		r = (volatile uint32*) ((volatile uchar*) ai_setcoreidx(&sii->pub, coreidx) +
			regoff);
	}
	ASSERT(r != NULL);

	/* mask and set (no readback afterwards) */
	if (mask || val) {
		w = (R_REG(sii->osh, r) & ~mask) | val;
		W_REG(sii->osh, r, w);
	}

	if (!fast) {
		/* restore core index */
		if (origidx != coreidx)
			ai_setcoreidx(&sii->pub, origidx);

		INTR_RESTORE(sii, &intr_val);
	}

	return (w);
}
1110*4882a593Smuzhiyun
1111*4882a593Smuzhiyun /*
1112*4882a593Smuzhiyun * If there is no need for fiddling with interrupts or core switches (typically silicon
1113*4882a593Smuzhiyun * back plane registers, pci registers and chipcommon registers), this function
1114*4882a593Smuzhiyun * returns the register offset on this core to a mapped address. This address can
1115*4882a593Smuzhiyun * be used for W_REG/R_REG directly.
1116*4882a593Smuzhiyun *
1117*4882a593Smuzhiyun * For accessing registers that would need a core switch, this function will return
1118*4882a593Smuzhiyun * NULL.
1119*4882a593Smuzhiyun */
volatile uint32 *
BCMPOSTTRAPFN(ai_corereg_addr)(si_t *sih, uint coreidx, uint regoff)
{
	volatile uint32 *r = NULL;
	bool fast = FALSE;	/* TRUE when the register is reachable without a core switch */
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

	ASSERT(GOODIDX(coreidx, sii->numcores));
	ASSERT(regoff < SI_CORE_SIZE);

	if (coreidx >= SI_MAXCORES)
		return 0;

	if (BUSTYPE(sih->bustype) == SI_BUS) {
		/* If internal bus, we can always get at everything */
		fast = TRUE;
		/* map if does not exist */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
				SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
	} else if (BUSTYPE(sih->bustype) == PCI_BUS) {
		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */

		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
			/* Chipc registers are mapped at 12KB */

			fast = TRUE;
			r = (volatile uint32 *)((volatile char *)sii->curmap +
				PCI_16KB0_CCREGS_OFFSET + regoff);
		} else if (sii->pub.buscoreidx == coreidx) {
			/* pci registers are at either in the last 2KB of an 8KB window
			 * or, in pcie and pci rev 13 at 8KB
			 */
			fast = TRUE;
			if (SI_FAST(sii))
				r = (volatile uint32 *)((volatile char *)sii->curmap +
					PCI_16KB0_PCIREGS_OFFSET + regoff);
			else
				r = (volatile uint32 *)((volatile char *)sii->curmap +
					((regoff >= SBCONFIGOFF) ?
					PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
					regoff);
		}
	}

	if (!fast) {
		/* no fast mapping: only valid if 'coreidx' is already the current
		 * core; the register is then reachable through the current map
		 */
		ASSERT(sii->curidx == coreidx);
		r = (volatile uint32*) ((volatile uchar*)sii->curmap + regoff);
	}

	return (r);
}
1176*4882a593Smuzhiyun
/* Put the current core into reset and apply 'bits' to its ioctrl register.
 * Waits for pending backplane operations to drain before asserting reset.
 */
void
ai_core_disable(const si_t *sih, uint32 bits)
{
	const si_info_t *sii = SI_INFO(sih);
	volatile uint32 dummy;
	uint32 status;
	aidmp_t *ai;

	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	/* if core is already in reset, just return */
	if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) {
		return;
	}

	/* ensure there are no pending backplane operations */
	SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);

	/* if pending backplane ops still, try waiting longer */
	if (status != 0) {
		/* 300usecs was sufficient to allow backplane ops to clear for big hammer */
		/* during driver load we may need more time */
		SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 10000);
		/* if still pending ops, continue on and try disable anyway */
		/* this is in big hammer path, so don't call wl_reinit in this case... */
#ifdef BCMDBG
		if (status != 0) {
			SI_ERROR(("ai_core_disable: WARN: resetstatus=%0x on core disable\n",
				status));
		}
#endif
	}

	/* assert reset; the readback flushes the posted write */
	W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
	dummy = R_REG(sii->osh, &ai->resetctrl);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);

	/* apply the caller's ioctrl bits while the core is held in reset */
	W_REG(sii->osh, &ai->ioctrl, bits);
	dummy = R_REG(sii->osh, &ai->ioctrl);
	BCM_REFERENCE(dummy);
	OSL_DELAY(10);
}
1221*4882a593Smuzhiyun
1222*4882a593Smuzhiyun /* reset and re-enable a core
1223*4882a593Smuzhiyun * inputs:
1224*4882a593Smuzhiyun * bits - core specific bits that are set during and after reset sequence
1225*4882a593Smuzhiyun * resetbits - core specific bits that are set only during reset sequence
1226*4882a593Smuzhiyun */
static void
BCMPOSTTRAPFN(_ai_core_reset)(const si_t *sih, uint32 bits, uint32 resetbits)
{
	const si_info_t *sii = SI_INFO(sih);
	aidmp_t *ai;
	volatile uint32 dummy;
	uint loop_counter = 10;	/* max attempts at taking the core out of reset */

	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	/* ensure there are no pending backplane operations */
	SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);

#ifdef BCMDBG_ERR
	if (dummy != 0) {
		SI_ERROR(("_ai_core_reset: WARN1: resetstatus=0x%0x\n", dummy));
	}
#endif /* BCMDBG_ERR */

	/* put core into reset state */
	W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
	OSL_DELAY(10);

	/* ensure there are no pending backplane operations */
	SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);

	/* enable clocks (with force-gated-clock) and the reset-time bits while
	 * the core is still held in reset
	 */
	W_REG(sii->osh, &ai->ioctrl, (bits | resetbits | SICF_FGC | SICF_CLOCK_EN));
	dummy = R_REG(sii->osh, &ai->ioctrl);
	BCM_REFERENCE(dummy);
#ifdef UCM_CORRUPTION_WAR
	if (si_coreid(sih) == D11_CORE_ID) {
		/* Reset FGC */
		OSL_DELAY(1);
		W_REG(sii->osh, &ai->ioctrl, (dummy & (~SICF_FGC)));
	}
#endif /* UCM_CORRUPTION_WAR */
	/* ensure there are no pending backplane operations */
	SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);

#ifdef BCMDBG_ERR
	if (dummy != 0)
		SI_ERROR(("_ai_core_reset: WARN2: resetstatus=0x%0x\n", dummy));
#endif

	/* retry de-asserting reset until resetctrl reads back 0 */
	while (R_REG(sii->osh, &ai->resetctrl) != 0 && --loop_counter != 0) {
		/* ensure there are no pending backplane operations */
		SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);

#ifdef BCMDBG_ERR
		if (dummy != 0)
			SI_ERROR(("_ai_core_reset: WARN3 resetstatus=0x%0x\n", dummy));
#endif

		/* take core out of reset */
		W_REG(sii->osh, &ai->resetctrl, 0);

		/* ensure there are no pending backplane operations */
		SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
	}

#ifdef BCMDBG_ERR
	if (loop_counter == 0) {
		SI_ERROR(("_ai_core_reset: Failed to take core 0x%x out of reset\n",
			si_coreid(sih)));
	}
#endif

#ifdef UCM_CORRUPTION_WAR
	/* Pulse FGC after lifting Reset */
	W_REG(sii->osh, &ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
#else
	/* drop resetbits and FGC now that the core is out of reset */
	W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN));
#endif /* UCM_CORRUPTION_WAR */
	dummy = R_REG(sii->osh, &ai->ioctrl);
	BCM_REFERENCE(dummy);
#ifdef UCM_CORRUPTION_WAR
	if (si_coreid(sih) == D11_CORE_ID) {
		/* Reset FGC */
		OSL_DELAY(1);
		W_REG(sii->osh, &ai->ioctrl, (dummy & (~SICF_FGC)));
	}
#endif /* UCM_CORRUPTION_WAR */
	OSL_DELAY(1);
}
1312*4882a593Smuzhiyun
/*
 * Reset the currently-selected core through every wrapper it owns.
 *
 * Some cores are fronted by up to three AXI wrappers; a full reset must be
 * applied through each of them. The tertiary and secondary wrappers (when
 * present, i.e. wrapba3/wrapba2 non-zero) are handled first, restoring the
 * default wrapper mapping after each, and the primary wrapper is reset last.
 *
 * @param sih        silicon interface handle (current core = sih->curidx)
 * @param bits       core-specific ioctrl bits to assert during reset
 * @param resetbits  core-specific ioctrl bits to leave asserted after reset
 */
void
BCMPOSTTRAPFN(ai_core_reset)(si_t *sih, uint32 bits, uint32 resetbits)
{
	si_info_t *sii = SI_INFO(sih);
	const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
	uint idx = sii->curidx;

	/* Reset via the 3rd wrapper, if this core has one */
	if (cores_info->wrapba3[idx] != 0) {
		ai_setcoreidx_3rdwrap(sih, idx);
		_ai_core_reset(sih, bits, resetbits);
		ai_setcoreidx(sih, idx);	/* restore default wrapper mapping */
	}

	/* Reset via the 2nd wrapper, if this core has one */
	if (cores_info->wrapba2[idx] != 0) {
		ai_setcoreidx_2ndwrap(sih, idx);
		_ai_core_reset(sih, bits, resetbits);
		ai_setcoreidx(sih, idx);	/* restore default wrapper mapping */
	}

	/* Finally reset via the primary wrapper */
	_ai_core_reset(sih, bits, resetbits);
}
1334*4882a593Smuzhiyun
#ifdef BOOKER_NIC400_INF
/*
 * Reset the current core through its currently-selected wrapper only.
 * Unlike ai_core_reset(), this does not walk the secondary/tertiary wrappers.
 */
void
BCMPOSTTRAPFN(ai_core_reset_ext)(const si_t *sih, uint32 bits, uint32 resetbits)
{
	_ai_core_reset(sih, bits, resetbits);
}
#endif /* BOOKER_NIC400_INF */
1342*4882a593Smuzhiyun
1343*4882a593Smuzhiyun void
ai_core_cflags_wo(const si_t * sih,uint32 mask,uint32 val)1344*4882a593Smuzhiyun ai_core_cflags_wo(const si_t *sih, uint32 mask, uint32 val)
1345*4882a593Smuzhiyun {
1346*4882a593Smuzhiyun const si_info_t *sii = SI_INFO(sih);
1347*4882a593Smuzhiyun #if !defined(BCMDONGLEHOST)
1348*4882a593Smuzhiyun const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
1349*4882a593Smuzhiyun #endif
1350*4882a593Smuzhiyun aidmp_t *ai;
1351*4882a593Smuzhiyun uint32 w;
1352*4882a593Smuzhiyun
1353*4882a593Smuzhiyun if (PMU_DMP()) {
1354*4882a593Smuzhiyun SI_ERROR(("ai_core_cflags_wo: Accessing PMU DMP register (ioctrl)\n"));
1355*4882a593Smuzhiyun return;
1356*4882a593Smuzhiyun }
1357*4882a593Smuzhiyun
1358*4882a593Smuzhiyun ASSERT(GOODREGS(sii->curwrap));
1359*4882a593Smuzhiyun ai = sii->curwrap;
1360*4882a593Smuzhiyun
1361*4882a593Smuzhiyun ASSERT((val & ~mask) == 0);
1362*4882a593Smuzhiyun
1363*4882a593Smuzhiyun if (mask || val) {
1364*4882a593Smuzhiyun w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
1365*4882a593Smuzhiyun W_REG(sii->osh, &ai->ioctrl, w);
1366*4882a593Smuzhiyun }
1367*4882a593Smuzhiyun }
1368*4882a593Smuzhiyun
1369*4882a593Smuzhiyun uint32
BCMPOSTTRAPFN(ai_core_cflags)1370*4882a593Smuzhiyun BCMPOSTTRAPFN(ai_core_cflags)(const si_t *sih, uint32 mask, uint32 val)
1371*4882a593Smuzhiyun {
1372*4882a593Smuzhiyun const si_info_t *sii = SI_INFO(sih);
1373*4882a593Smuzhiyun #if !defined(BCMDONGLEHOST)
1374*4882a593Smuzhiyun const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
1375*4882a593Smuzhiyun #endif
1376*4882a593Smuzhiyun aidmp_t *ai;
1377*4882a593Smuzhiyun uint32 w;
1378*4882a593Smuzhiyun
1379*4882a593Smuzhiyun if (PMU_DMP()) {
1380*4882a593Smuzhiyun SI_ERROR(("ai_core_cflags: Accessing PMU DMP register (ioctrl)\n"));
1381*4882a593Smuzhiyun return 0;
1382*4882a593Smuzhiyun }
1383*4882a593Smuzhiyun ASSERT(GOODREGS(sii->curwrap));
1384*4882a593Smuzhiyun ai = sii->curwrap;
1385*4882a593Smuzhiyun
1386*4882a593Smuzhiyun ASSERT((val & ~mask) == 0);
1387*4882a593Smuzhiyun
1388*4882a593Smuzhiyun if (mask || val) {
1389*4882a593Smuzhiyun w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
1390*4882a593Smuzhiyun W_REG(sii->osh, &ai->ioctrl, w);
1391*4882a593Smuzhiyun }
1392*4882a593Smuzhiyun
1393*4882a593Smuzhiyun return R_REG(sii->osh, &ai->ioctrl);
1394*4882a593Smuzhiyun }
1395*4882a593Smuzhiyun
1396*4882a593Smuzhiyun uint32
ai_core_sflags(const si_t * sih,uint32 mask,uint32 val)1397*4882a593Smuzhiyun ai_core_sflags(const si_t *sih, uint32 mask, uint32 val)
1398*4882a593Smuzhiyun {
1399*4882a593Smuzhiyun const si_info_t *sii = SI_INFO(sih);
1400*4882a593Smuzhiyun #if !defined(BCMDONGLEHOST)
1401*4882a593Smuzhiyun const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
1402*4882a593Smuzhiyun #endif
1403*4882a593Smuzhiyun aidmp_t *ai;
1404*4882a593Smuzhiyun uint32 w;
1405*4882a593Smuzhiyun
1406*4882a593Smuzhiyun if (PMU_DMP()) {
1407*4882a593Smuzhiyun SI_ERROR(("ai_core_sflags: Accessing PMU DMP register (ioctrl)\n"));
1408*4882a593Smuzhiyun return 0;
1409*4882a593Smuzhiyun }
1410*4882a593Smuzhiyun
1411*4882a593Smuzhiyun ASSERT(GOODREGS(sii->curwrap));
1412*4882a593Smuzhiyun ai = sii->curwrap;
1413*4882a593Smuzhiyun
1414*4882a593Smuzhiyun ASSERT((val & ~mask) == 0);
1415*4882a593Smuzhiyun ASSERT((mask & ~SISF_CORE_BITS) == 0);
1416*4882a593Smuzhiyun
1417*4882a593Smuzhiyun if (mask || val) {
1418*4882a593Smuzhiyun w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val);
1419*4882a593Smuzhiyun W_REG(sii->osh, &ai->iostatus, w);
1420*4882a593Smuzhiyun }
1421*4882a593Smuzhiyun
1422*4882a593Smuzhiyun return R_REG(sii->osh, &ai->iostatus);
1423*4882a593Smuzhiyun }
1424*4882a593Smuzhiyun
#if defined(BCMDBG) || defined(BCMDBG_DUMP) || defined(BCMDBG_PHYDUMP)
/*
 * Dump the interesting AIDMP (wrapper) registers of every AXI wrapper into
 * the given string buffer. On PCI, the BAR0 secondary window is temporarily
 * re-pointed at each wrapper and restored afterwards.
 *
 * @param sih  silicon interface handle
 * @param b    output string buffer (bcm_bprintf target)
 */
void
ai_dumpregs(const si_t *sih, struct bcmstrbuf *b)
{
	const si_info_t *sii = SI_INFO(sih);
	osl_t *osh;
	aidmp_t *ai;
	uint i;
	uint32 prev_value = 0;	/* saved BAR0 window contents (PCI only) */
	const axi_wrapper_t * axi_wrapper = sii->axi_wrapper;
	uint32 cfg_reg = 0;	/* config-space register holding the window */
	uint bar0_win_offset = 0;	/* offset of the window within BAR0 */

	osh = sii->osh;

	/* Save and restore wrapper access window */
	if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
		if (PCIE_GEN2(sii)) {
			cfg_reg = PCIE2_BAR0_CORE2_WIN2;
			bar0_win_offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
		} else {
			cfg_reg = PCI_BAR0_WIN2;
			bar0_win_offset = PCI_BAR0_WIN2_OFFSET;
		}

		prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4);

		/* an all-ones read usually means the device fell off the bus */
		if (prev_value == ID32_INVALID) {
			SI_PRINT(("ai_dumpregs, PCI_BAR0_WIN2 - %x\n", prev_value));
			return;
		}
	}

	/* BUGFIX: was "ChipRev;%x" (stray semicolon in the dump header) */
	bcm_bprintf(b, "ChipNum:%x, ChipRev:%x, BusType:%x, BoardType:%x, BoardVendor:%x\n\n",
		sih->chip, sih->chiprev, sih->bustype, sih->boardtype, sih->boardvendor);

	for (i = 0; i < sii->axi_num_wrappers; i++) {

		if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
			/* Set BAR0 window to bridge wrapper base address */
			OSL_PCI_WRITE_CONFIG(osh,
				cfg_reg, 4, axi_wrapper[i].wrapper_addr);

			ai = (aidmp_t *) ((volatile uint8*)sii->curmap + bar0_win_offset);
		} else {
			/* direct (memory-mapped) access on non-PCI buses */
			ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr;
		}

		bcm_bprintf(b, "core 0x%x: core_rev:%d, %s_WR ADDR:%x \n", axi_wrapper[i].cid,
			axi_wrapper[i].rev,
			axi_wrapper[i].wrapper_type == AI_SLAVE_WRAPPER ? "SLAVE" : "MASTER",
			axi_wrapper[i].wrapper_addr);

		bcm_bprintf(b, "ioctrlset 0x%x ioctrlclear 0x%x ioctrl 0x%x iostatus 0x%x "
			"ioctrlwidth 0x%x iostatuswidth 0x%x\n"
			"resetctrl 0x%x resetstatus 0x%x resetreadid 0x%x resetwriteid 0x%x\n"
			"errlogctrl 0x%x errlogdone 0x%x errlogstatus 0x%x "
			"errlogaddrlo 0x%x errlogaddrhi 0x%x\n"
			"errlogid 0x%x errloguser 0x%x errlogflags 0x%x\n"
			"intstatus 0x%x config 0x%x itcr 0x%x\n\n",
			R_REG(osh, &ai->ioctrlset),
			R_REG(osh, &ai->ioctrlclear),
			R_REG(osh, &ai->ioctrl),
			R_REG(osh, &ai->iostatus),
			R_REG(osh, &ai->ioctrlwidth),
			R_REG(osh, &ai->iostatuswidth),
			R_REG(osh, &ai->resetctrl),
			R_REG(osh, &ai->resetstatus),
			R_REG(osh, &ai->resetreadid),
			R_REG(osh, &ai->resetwriteid),
			R_REG(osh, &ai->errlogctrl),
			R_REG(osh, &ai->errlogdone),
			R_REG(osh, &ai->errlogstatus),
			R_REG(osh, &ai->errlogaddrlo),
			R_REG(osh, &ai->errlogaddrhi),
			R_REG(osh, &ai->errlogid),
			R_REG(osh, &ai->errloguser),
			R_REG(osh, &ai->errlogflags),
			R_REG(osh, &ai->intstatus),
			R_REG(osh, &ai->config),
			R_REG(osh, &ai->itcr));
	}

	/* Restore the initial wrapper space */
	if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
		if (prev_value && cfg_reg) {
			OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value);
		}
	}
}
#endif /* BCMDBG || BCMDBG_DUMP || BCMDBG_PHYDUMP */
1517*4882a593Smuzhiyun
1518*4882a593Smuzhiyun #ifdef BCMDBG
/*
 * Print the registers of a single AIDMP wrapper. The wrapper's config
 * register advertises which optional register groups (reset, ioctrl,
 * iostatus, error log, OOB routing) are implemented; only the groups
 * present are dumped. The OOB routing group is printed only when
 * 'verbose' is set.
 */
static void
_ai_view(osl_t *osh, aidmp_t *ai, uint32 cid, uint32 addr, bool verbose)
{
	uint32 config;

	config = R_REG(osh, &ai->config);
	SI_PRINT(("\nCore ID: 0x%x, addr 0x%x, config 0x%x\n", cid, addr, config));

	/* reset control/status group */
	if (config & AICFG_RST)
		SI_PRINT(("resetctrl 0x%x, resetstatus 0x%x, resetreadid 0x%x, resetwriteid 0x%x\n",
			R_REG(osh, &ai->resetctrl), R_REG(osh, &ai->resetstatus),
			R_REG(osh, &ai->resetreadid), R_REG(osh, &ai->resetwriteid)));

	/* I/O control group */
	if (config & AICFG_IOC)
		SI_PRINT(("ioctrl 0x%x, width %d\n", R_REG(osh, &ai->ioctrl),
			R_REG(osh, &ai->ioctrlwidth)));

	/* I/O status group */
	if (config & AICFG_IOS)
		SI_PRINT(("iostatus 0x%x, width %d\n", R_REG(osh, &ai->iostatus),
			R_REG(osh, &ai->iostatuswidth)));

	/* error logging group */
	if (config & AICFG_ERRL) {
		SI_PRINT(("errlogctrl 0x%x, errlogdone 0x%x, errlogstatus 0x%x, intstatus 0x%x\n",
			R_REG(osh, &ai->errlogctrl), R_REG(osh, &ai->errlogdone),
			R_REG(osh, &ai->errlogstatus), R_REG(osh, &ai->intstatus)));
		SI_PRINT(("errlogid 0x%x, errloguser 0x%x, errlogflags 0x%x, errlogaddr "
			"0x%x/0x%x\n",
			R_REG(osh, &ai->errlogid), R_REG(osh, &ai->errloguser),
			R_REG(osh, &ai->errlogflags), R_REG(osh, &ai->errlogaddrhi),
			R_REG(osh, &ai->errlogaddrlo)));
	}

	/* out-of-band routing group (verbose only) */
	if (verbose && (config & AICFG_OOB)) {
		SI_PRINT(("oobselina30 0x%x, oobselina74 0x%x\n",
			R_REG(osh, &ai->oobselina30), R_REG(osh, &ai->oobselina74)));
		SI_PRINT(("oobselinb30 0x%x, oobselinb74 0x%x\n",
			R_REG(osh, &ai->oobselinb30), R_REG(osh, &ai->oobselinb74)));
		SI_PRINT(("oobselinc30 0x%x, oobselinc74 0x%x\n",
			R_REG(osh, &ai->oobselinc30), R_REG(osh, &ai->oobselinc74)));
		SI_PRINT(("oobselind30 0x%x, oobselind74 0x%x\n",
			R_REG(osh, &ai->oobselind30), R_REG(osh, &ai->oobselind74)));
		SI_PRINT(("oobselouta30 0x%x, oobselouta74 0x%x\n",
			R_REG(osh, &ai->oobselouta30), R_REG(osh, &ai->oobselouta74)));
		SI_PRINT(("oobseloutb30 0x%x, oobseloutb74 0x%x\n",
			R_REG(osh, &ai->oobseloutb30), R_REG(osh, &ai->oobseloutb74)));
		SI_PRINT(("oobseloutc30 0x%x, oobseloutc74 0x%x\n",
			R_REG(osh, &ai->oobseloutc30), R_REG(osh, &ai->oobseloutc74)));
		SI_PRINT(("oobseloutd30 0x%x, oobseloutd74 0x%x\n",
			R_REG(osh, &ai->oobseloutd30), R_REG(osh, &ai->oobseloutd74)));
		SI_PRINT(("oobsynca 0x%x, oobseloutaen 0x%x\n",
			R_REG(osh, &ai->oobsynca), R_REG(osh, &ai->oobseloutaen)));
		SI_PRINT(("oobsyncb 0x%x, oobseloutben 0x%x\n",
			R_REG(osh, &ai->oobsyncb), R_REG(osh, &ai->oobseloutben)));
		SI_PRINT(("oobsyncc 0x%x, oobseloutcen 0x%x\n",
			R_REG(osh, &ai->oobsyncc), R_REG(osh, &ai->oobseloutcen)));
		SI_PRINT(("oobsyncd 0x%x, oobseloutden 0x%x\n",
			R_REG(osh, &ai->oobsyncd), R_REG(osh, &ai->oobseloutden)));
		SI_PRINT(("oobaextwidth 0x%x, oobainwidth 0x%x, oobaoutwidth 0x%x\n",
			R_REG(osh, &ai->oobaextwidth), R_REG(osh, &ai->oobainwidth),
			R_REG(osh, &ai->oobaoutwidth)));
		SI_PRINT(("oobbextwidth 0x%x, oobbinwidth 0x%x, oobboutwidth 0x%x\n",
			R_REG(osh, &ai->oobbextwidth), R_REG(osh, &ai->oobbinwidth),
			R_REG(osh, &ai->oobboutwidth)));
		SI_PRINT(("oobcextwidth 0x%x, oobcinwidth 0x%x, oobcoutwidth 0x%x\n",
			R_REG(osh, &ai->oobcextwidth), R_REG(osh, &ai->oobcinwidth),
			R_REG(osh, &ai->oobcoutwidth)));
		SI_PRINT(("oobdextwidth 0x%x, oobdinwidth 0x%x, oobdoutwidth 0x%x\n",
			R_REG(osh, &ai->oobdextwidth), R_REG(osh, &ai->oobdinwidth),
			R_REG(osh, &ai->oobdoutwidth)));
	}
}
1590*4882a593Smuzhiyun
1591*4882a593Smuzhiyun void
ai_view(const si_t * sih,bool verbose)1592*4882a593Smuzhiyun ai_view(const si_t *sih, bool verbose)
1593*4882a593Smuzhiyun {
1594*4882a593Smuzhiyun const si_info_t *sii = SI_INFO(sih);
1595*4882a593Smuzhiyun const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
1596*4882a593Smuzhiyun osl_t *osh;
1597*4882a593Smuzhiyun aidmp_t *ai;
1598*4882a593Smuzhiyun uint32 cid, addr;
1599*4882a593Smuzhiyun
1600*4882a593Smuzhiyun ai = sii->curwrap;
1601*4882a593Smuzhiyun osh = sii->osh;
1602*4882a593Smuzhiyun
1603*4882a593Smuzhiyun if (PMU_DMP()) {
1604*4882a593Smuzhiyun SI_ERROR(("Cannot access pmu DMP\n"));
1605*4882a593Smuzhiyun return;
1606*4882a593Smuzhiyun }
1607*4882a593Smuzhiyun cid = cores_info->coreid[sii->curidx];
1608*4882a593Smuzhiyun addr = cores_info->wrapba[sii->curidx];
1609*4882a593Smuzhiyun _ai_view(osh, ai, cid, addr, verbose);
1610*4882a593Smuzhiyun }
1611*4882a593Smuzhiyun
1612*4882a593Smuzhiyun void
ai_viewall(si_t * sih,bool verbose)1613*4882a593Smuzhiyun ai_viewall(si_t *sih, bool verbose)
1614*4882a593Smuzhiyun {
1615*4882a593Smuzhiyun const si_info_t *sii = SI_INFO(sih);
1616*4882a593Smuzhiyun const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
1617*4882a593Smuzhiyun osl_t *osh;
1618*4882a593Smuzhiyun aidmp_t *ai;
1619*4882a593Smuzhiyun uint32 cid, addr;
1620*4882a593Smuzhiyun uint i;
1621*4882a593Smuzhiyun
1622*4882a593Smuzhiyun osh = sii->osh;
1623*4882a593Smuzhiyun for (i = 0; i < sii->numcores; i++) {
1624*4882a593Smuzhiyun si_setcoreidx(sih, i);
1625*4882a593Smuzhiyun
1626*4882a593Smuzhiyun if (PMU_DMP()) {
1627*4882a593Smuzhiyun SI_ERROR(("Skipping pmu DMP\n"));
1628*4882a593Smuzhiyun continue;
1629*4882a593Smuzhiyun }
1630*4882a593Smuzhiyun ai = sii->curwrap;
1631*4882a593Smuzhiyun cid = cores_info->coreid[sii->curidx];
1632*4882a593Smuzhiyun addr = cores_info->wrapba[sii->curidx];
1633*4882a593Smuzhiyun _ai_view(osh, ai, cid, addr, verbose);
1634*4882a593Smuzhiyun }
1635*4882a593Smuzhiyun }
1636*4882a593Smuzhiyun #endif /* BCMDBG */
1637*4882a593Smuzhiyun
/*
 * Enable/disable the backplane (AXI) timeout detection in every slave
 * wrapper's errlogctrl register, or in a single core's wrapper when 'cid'
 * is non-zero.
 *
 * @param sih          silicon interface handle
 * @param enable       TRUE to enable timeout detection
 * @param timeout_exp  timeout exponent programmed into AIELC_TO_EXP field
 * @param cid          core id to restrict the update to (0 = all wrappers)
 *
 * Compiled to a no-op unless AXI_TIMEOUTS or AXI_TIMEOUTS_NIC is defined.
 */
void
ai_update_backplane_timeouts(const si_t *sih, bool enable, uint32 timeout_exp, uint32 cid)
{
#if defined(AXI_TIMEOUTS) || defined(AXI_TIMEOUTS_NIC)
	const si_info_t *sii = SI_INFO(sih);
	aidmp_t *ai;
	uint32 i;
	axi_wrapper_t * axi_wrapper = sii->axi_wrapper;
	/* compose errlogctrl: timeout-enable bit plus timeout-exponent field */
	uint32 errlogctrl = (enable << AIELC_TO_ENAB_SHIFT) |
		((timeout_exp << AIELC_TO_EXP_SHIFT) & AIELC_TO_EXP_MASK);

#ifdef AXI_TIMEOUTS_NIC
	uint32 prev_value = 0;	/* saved BAR0 window contents */
	osl_t *osh = sii->osh;
	uint32 cfg_reg = 0;	/* config-space register holding the window */
	uint32 offset = 0;	/* wrapper window offset within BAR0 */
#endif /* AXI_TIMEOUTS_NIC */

	/* nothing to do without wrappers (and, in NIC mode, without PCIe) */
	if ((sii->axi_num_wrappers == 0) ||
#ifdef AXI_TIMEOUTS_NIC
		(!PCIE(sii)) ||
#endif /* AXI_TIMEOUTS_NIC */
		FALSE) {
		SI_VMSG((" iai_update_backplane_timeouts, axi_num_wrappers:%d, Is_PCIE:%d,"
			" BUS_TYPE:%d, ID:%x\n",
			sii->axi_num_wrappers, PCIE(sii),
			BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
		return;
	}

#ifdef AXI_TIMEOUTS_NIC
	/* Save and restore the wrapper access window */
	if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
		if (PCIE_GEN1(sii)) {
			cfg_reg = PCI_BAR0_WIN2;
			offset = PCI_BAR0_WIN2_OFFSET;
		} else if (PCIE_GEN2(sii)) {
			cfg_reg = PCIE2_BAR0_CORE2_WIN2;
			offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
		}
		else {
			ASSERT(!"!PCIE_GEN1 && !PCIE_GEN2");
		}

		prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4);
		/* all-ones read: device unreachable, bail out */
		if (prev_value == ID32_INVALID) {
			SI_PRINT(("ai_update_backplane_timeouts, PCI_BAR0_WIN2 - %x\n",
				prev_value));
			return;
		}
	}
#endif /* AXI_TIMEOUTS_NIC */

	for (i = 0; i < sii->axi_num_wrappers; ++i) {
		/* WAR for wrong EROM entries w.r.t slave and master wrapper
		 * for ADB bridge core...so checking actual wrapper config to determine type
		 * http://jira.broadcom.com/browse/HW4388-905
		 */
		if ((cid == 0 || cid == ADB_BRIDGE_ID) &&
			(axi_wrapper[i].cid == ADB_BRIDGE_ID)) {
			/* WAR is applicable only to 89B0 and 89C0 */
			if (CCREV(sih->ccrev) == 70) {
				ai = (aidmp_t *)(uintptr)axi_wrapper[i].wrapper_addr;
				if (R_REG(sii->osh, &ai->config) & WRAPPER_TIMEOUT_CONFIG) {
					axi_wrapper[i].wrapper_type = AI_SLAVE_WRAPPER;
				} else {
					axi_wrapper[i].wrapper_type = AI_MASTER_WRAPPER;
				}
			}
		}

		/* only slave wrappers carry timeout logic; additionally skip the
		 * WL bridge slaves on 4388/4389
		 */
		if (axi_wrapper[i].wrapper_type != AI_SLAVE_WRAPPER || ((BCM4389_CHIP(sih->chip) ||
			BCM4388_CHIP(sih->chip)) &&
			(axi_wrapper[i].wrapper_addr == WL_BRIDGE1_S ||
			axi_wrapper[i].wrapper_addr == WL_BRIDGE2_S))) {
			SI_VMSG(("SKIP ENABLE BPT: MFG:%x, CID:%x, ADDR:%x\n",
				axi_wrapper[i].mfg,
				axi_wrapper[i].cid,
				axi_wrapper[i].wrapper_addr));
			continue;
		}

		/* Update only given core if requested */
		if ((cid != 0) && (axi_wrapper[i].cid != cid)) {
			continue;
		}

#ifdef AXI_TIMEOUTS_NIC
		if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
			/* Set BAR0_CORE2_WIN2 to bridge wapper base address */
			OSL_PCI_WRITE_CONFIG(osh,
				cfg_reg, 4, axi_wrapper[i].wrapper_addr);

			/* set AI to BAR0 + Offset corresponding to Gen1 or gen2 */
			ai = (aidmp_t *) (DISCARD_QUAL(sii->curmap, uint8) + offset);
		}
		else
#endif /* AXI_TIMEOUTS_NIC */
		{
			/* direct (memory-mapped) access on non-PCI buses */
			ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr;
		}

		W_REG(sii->osh, &ai->errlogctrl, errlogctrl);

		SI_VMSG(("ENABLED BPT: MFG:%x, CID:%x, ADDR:%x, ERR_CTRL:%x\n",
			axi_wrapper[i].mfg,
			axi_wrapper[i].cid,
			axi_wrapper[i].wrapper_addr,
			R_REG(sii->osh, &ai->errlogctrl)));
	}

#ifdef AXI_TIMEOUTS_NIC
	/* Restore the initial wrapper space */
	if (prev_value) {
		OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value);
	}
#endif /* AXI_TIMEOUTS_NIC */

#endif /* AXI_TIMEOUTS || AXI_TIMEOUTS_NIC */
}
1757*4882a593Smuzhiyun
1758*4882a593Smuzhiyun #if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC)
1759*4882a593Smuzhiyun
/* slave error is ignored, so account for those cases */
/* NOTE(review): counter is declared here but incremented outside this chunk — confirm */
static uint32 si_ignore_errlog_cnt = 0;

/*
 * Decide whether a logged AXI error can be safely ignored.
 *
 * Per-chip table of known benign error sources (e.g. BT accesses over the
 * AHB-AXI bridge, ARM prefetch): an error is ignored when its masked AXI id
 * and error status match one of (axi_id, ignore_errsts) or
 * (axi_id2, ignore_errsts_2), and — when address_check is TRUE — the faulting
 * address falls in the known-bad BT/CC SPROM shadow window.
 *
 * @return TRUE if the error should be ignored, FALSE otherwise.
 */
static bool
BCMPOSTTRAPFN(ai_ignore_errlog)(const si_info_t *sii, const aidmp_t *ai,
	uint32 lo_addr, uint32 hi_addr, uint32 err_axi_id, uint32 errsts)
{
	/* defaults: ignore a slave error to the BT/CC SPROM bad-register window */
	uint32 ignore_errsts = AIELS_SLAVE_ERR;
	uint32 ignore_errsts_2 = 0;
	uint32 ignore_hi = BT_CC_SPROM_BADREG_HI;
	uint32 ignore_lo = BT_CC_SPROM_BADREG_LO;
	uint32 ignore_size = BT_CC_SPROM_BADREG_SIZE;
	bool address_check = TRUE;
	uint32 axi_id = 0;
	uint32 axi_id2 = 0;
	bool extd_axi_id_mask = FALSE;	/* use the extended AXI id mask? */
	uint32 axi_id_mask;

	SI_PRINT(("err check: core %p, error %d, axi id 0x%04x, addr(0x%08x:%08x)\n",
		ai, errsts, err_axi_id, hi_addr, lo_addr));

	/* ignore the BT slave errors if the errlog is to chipcommon addr 0x190 */
	switch (CHIPID(sii->pub.chip)) {
#if defined(BT_WLAN_REG_ON_WAR)
	/*
	 * 4389B0/C0 - WL and BT turn on WAR, ignore AXI error originating from
	 * AHB-AXI bridge i.e, any slave error or timeout from BT access
	 */
	case BCM4389_CHIP_GRPID:
		axi_id = BCM4389_BT_AXI_ID;
		ignore_errsts = AIELS_SLAVE_ERR;
		axi_id2 = BCM4389_BT_AXI_ID;
		ignore_errsts_2 = AIELS_TIMEOUT;
		address_check = FALSE;
		extd_axi_id_mask = TRUE;
		break;
#endif /* BT_WLAN_REG_ON_WAR */
#ifdef BTOVERPCIE
	case BCM4388_CHIP_GRPID:
		axi_id = BCM4388_BT_AXI_ID;
		/* For BT over PCIE, ignore any slave error from BT. */
		/* No need to check any address range */
		address_check = FALSE;
		ignore_errsts_2 = AIELS_DECODE;
		break;
	case BCM4369_CHIP_GRPID:
		axi_id = BCM4369_BT_AXI_ID;
		/* For BT over PCIE, ignore any slave error from BT. */
		/* No need to check any address range */
		address_check = FALSE;
		ignore_errsts_2 = AIELS_DECODE;
		break;
#endif /* BTOVERPCIE */
	case BCM4376_CHIP_GRPID:
	case BCM4378_CHIP_GRPID:
	case BCM4385_CHIP_GRPID:
	case BCM4387_CHIP_GRPID:
#ifdef BTOVERPCIE
		axi_id = BCM4378_BT_AXI_ID;
		/* For BT over PCIE, ignore any slave error from BT. */
		/* No need to check any address range */
		address_check = FALSE;
#endif /* BTOVERPCIE */
		axi_id2 = BCM4378_ARM_PREFETCH_AXI_ID;
		extd_axi_id_mask = TRUE;
		ignore_errsts_2 = AIELS_DECODE;
		break;
#ifdef USE_HOSTMEM
	case BCM43602_CHIP_ID:
		axi_id = BCM43602_BT_AXI_ID;
		address_check = FALSE;
		break;
#endif /* USE_HOSTMEM */
	default:
		/* unknown chip: never ignore */
		return FALSE;
	}

	axi_id_mask = extd_axi_id_mask ? AI_ERRLOGID_AXI_ID_MASK_EXTD : AI_ERRLOGID_AXI_ID_MASK;

	/* AXI ID check */
	err_axi_id &= axi_id_mask;
	errsts &= AIELS_ERROR_MASK;

	/* check the ignore error cases. 2 checks */
	if (!(((err_axi_id == axi_id) && (errsts == ignore_errsts)) ||
		((err_axi_id == axi_id2) && (errsts == ignore_errsts_2)))) {
		/* not the error ignore cases */
		return FALSE;

	}

	/* check the specific address checks now, if specified */
	if (address_check) {
		/* address range check */
		if ((hi_addr != ignore_hi) ||
			(lo_addr < ignore_lo) || (lo_addr >= (ignore_lo + ignore_size))) {
			return FALSE;
		}
	}

	SI_PRINT(("err check: ignored\n"));
	return TRUE;
}
1863*4882a593Smuzhiyun #endif /* defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC) */
1864*4882a593Smuzhiyun
1865*4882a593Smuzhiyun #ifdef AXI_TIMEOUTS_NIC
1866*4882a593Smuzhiyun
1867*4882a593Smuzhiyun /* Function to return the APB bridge details corresponding to the core */
1868*4882a593Smuzhiyun static bool
ai_get_apb_bridge(const si_t * sih,uint32 coreidx,uint32 * apb_id,uint32 * apb_coreunit)1869*4882a593Smuzhiyun ai_get_apb_bridge(const si_t * sih, uint32 coreidx, uint32 *apb_id, uint32 * apb_coreunit)
1870*4882a593Smuzhiyun {
1871*4882a593Smuzhiyun uint i;
1872*4882a593Smuzhiyun uint32 core_base, core_end;
1873*4882a593Smuzhiyun const si_info_t *sii = SI_INFO(sih);
1874*4882a593Smuzhiyun static uint32 coreidx_cached = 0, apb_id_cached = 0, apb_coreunit_cached = 0;
1875*4882a593Smuzhiyun uint32 tmp_coreunit = 0;
1876*4882a593Smuzhiyun const si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
1877*4882a593Smuzhiyun
1878*4882a593Smuzhiyun if (coreidx >= MIN(sii->numcores, SI_MAXCORES))
1879*4882a593Smuzhiyun return FALSE;
1880*4882a593Smuzhiyun
1881*4882a593Smuzhiyun /* Most of the time apb bridge query will be for d11 core.
1882*4882a593Smuzhiyun * Maintain the last cache and return if found rather than iterating the table
1883*4882a593Smuzhiyun */
1884*4882a593Smuzhiyun if (coreidx_cached == coreidx) {
1885*4882a593Smuzhiyun *apb_id = apb_id_cached;
1886*4882a593Smuzhiyun *apb_coreunit = apb_coreunit_cached;
1887*4882a593Smuzhiyun return TRUE;
1888*4882a593Smuzhiyun }
1889*4882a593Smuzhiyun
1890*4882a593Smuzhiyun core_base = cores_info->coresba[coreidx];
1891*4882a593Smuzhiyun core_end = core_base + cores_info->coresba_size[coreidx];
1892*4882a593Smuzhiyun
1893*4882a593Smuzhiyun for (i = 0; i < sii->numcores; i++) {
1894*4882a593Smuzhiyun if (cores_info->coreid[i] == APB_BRIDGE_ID) {
1895*4882a593Smuzhiyun uint32 apb_base;
1896*4882a593Smuzhiyun uint32 apb_end;
1897*4882a593Smuzhiyun
1898*4882a593Smuzhiyun apb_base = cores_info->coresba[i];
1899*4882a593Smuzhiyun apb_end = apb_base + cores_info->coresba_size[i];
1900*4882a593Smuzhiyun
1901*4882a593Smuzhiyun if ((core_base >= apb_base) &&
1902*4882a593Smuzhiyun (core_end <= apb_end)) {
1903*4882a593Smuzhiyun /* Current core is attached to this APB bridge */
1904*4882a593Smuzhiyun *apb_id = apb_id_cached = APB_BRIDGE_ID;
1905*4882a593Smuzhiyun *apb_coreunit = apb_coreunit_cached = tmp_coreunit;
1906*4882a593Smuzhiyun coreidx_cached = coreidx;
1907*4882a593Smuzhiyun return TRUE;
1908*4882a593Smuzhiyun }
1909*4882a593Smuzhiyun /* Increment the coreunit */
1910*4882a593Smuzhiyun tmp_coreunit++;
1911*4882a593Smuzhiyun }
1912*4882a593Smuzhiyun }
1913*4882a593Smuzhiyun
1914*4882a593Smuzhiyun return FALSE;
1915*4882a593Smuzhiyun }
1916*4882a593Smuzhiyun
1917*4882a593Smuzhiyun uint32
ai_clear_backplane_to_fast(si_t * sih,void * addr)1918*4882a593Smuzhiyun ai_clear_backplane_to_fast(si_t *sih, void *addr)
1919*4882a593Smuzhiyun {
1920*4882a593Smuzhiyun const si_info_t *sii = SI_INFO(sih);
1921*4882a593Smuzhiyun volatile const void *curmap = sii->curmap;
1922*4882a593Smuzhiyun bool core_reg = FALSE;
1923*4882a593Smuzhiyun
1924*4882a593Smuzhiyun /* Use fast path only for core register access */
1925*4882a593Smuzhiyun if (((uintptr)addr >= (uintptr)curmap) &&
1926*4882a593Smuzhiyun ((uintptr)addr < ((uintptr)curmap + SI_CORE_SIZE))) {
1927*4882a593Smuzhiyun /* address being accessed is within current core reg map */
1928*4882a593Smuzhiyun core_reg = TRUE;
1929*4882a593Smuzhiyun }
1930*4882a593Smuzhiyun
1931*4882a593Smuzhiyun if (core_reg) {
1932*4882a593Smuzhiyun uint32 apb_id, apb_coreunit;
1933*4882a593Smuzhiyun
1934*4882a593Smuzhiyun if (ai_get_apb_bridge(sih, si_coreidx(&sii->pub),
1935*4882a593Smuzhiyun &apb_id, &apb_coreunit) == TRUE) {
1936*4882a593Smuzhiyun /* Found the APB bridge corresponding to current core,
1937*4882a593Smuzhiyun * Check for bus errors in APB wrapper
1938*4882a593Smuzhiyun */
1939*4882a593Smuzhiyun return ai_clear_backplane_to_per_core(sih,
1940*4882a593Smuzhiyun apb_id, apb_coreunit, NULL);
1941*4882a593Smuzhiyun }
1942*4882a593Smuzhiyun }
1943*4882a593Smuzhiyun
1944*4882a593Smuzhiyun /* Default is to poll for errors on all slave wrappers */
1945*4882a593Smuzhiyun return si_clear_backplane_to(sih);
1946*4882a593Smuzhiyun }
1947*4882a593Smuzhiyun #endif /* AXI_TIMEOUTS_NIC */
1948*4882a593Smuzhiyun
1949*4882a593Smuzhiyun #if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC)
1950*4882a593Smuzhiyun static bool g_disable_backplane_logs = FALSE;
1951*4882a593Smuzhiyun
1952*4882a593Smuzhiyun static uint32 last_axi_error = AXI_WRAP_STS_NONE;
1953*4882a593Smuzhiyun static uint32 last_axi_error_log_status = 0;
1954*4882a593Smuzhiyun static uint32 last_axi_error_core = 0;
1955*4882a593Smuzhiyun static uint32 last_axi_error_wrap = 0;
1956*4882a593Smuzhiyun static uint32 last_axi_errlog_lo = 0;
1957*4882a593Smuzhiyun static uint32 last_axi_errlog_hi = 0;
1958*4882a593Smuzhiyun static uint32 last_axi_errlog_id = 0;
1959*4882a593Smuzhiyun
1960*4882a593Smuzhiyun /*
1961*4882a593Smuzhiyun * API to clear the back plane timeout per core.
1962*4882a593Smuzhiyun * Caller may pass optional wrapper address. If present this will be used as
1963*4882a593Smuzhiyun * the wrapper base address. If wrapper base address is provided then caller
1964*4882a593Smuzhiyun * must provide the coreid also.
1965*4882a593Smuzhiyun * If both coreid and wrapper is zero, then err status of current bridge
1966*4882a593Smuzhiyun * will be verified.
1967*4882a593Smuzhiyun */
1968*4882a593Smuzhiyun uint32
BCMPOSTTRAPFN(ai_clear_backplane_to_per_core)1969*4882a593Smuzhiyun BCMPOSTTRAPFN(ai_clear_backplane_to_per_core)(si_t *sih, uint coreid, uint coreunit, void *wrap)
1970*4882a593Smuzhiyun {
1971*4882a593Smuzhiyun int ret = AXI_WRAP_STS_NONE;
1972*4882a593Smuzhiyun aidmp_t *ai = NULL;
1973*4882a593Smuzhiyun uint32 errlog_status = 0;
1974*4882a593Smuzhiyun const si_info_t *sii = SI_INFO(sih);
1975*4882a593Smuzhiyun uint32 errlog_lo = 0, errlog_hi = 0, errlog_id = 0, errlog_flags = 0;
1976*4882a593Smuzhiyun uint32 current_coreidx = si_coreidx(sih);
1977*4882a593Smuzhiyun uint32 target_coreidx = si_findcoreidx(sih, coreid, coreunit);
1978*4882a593Smuzhiyun
1979*4882a593Smuzhiyun #if defined(AXI_TIMEOUTS_NIC)
1980*4882a593Smuzhiyun si_axi_error_t * axi_error = sih->err_info ?
1981*4882a593Smuzhiyun &sih->err_info->axi_error[sih->err_info->count] : NULL;
1982*4882a593Smuzhiyun #endif /* AXI_TIMEOUTS_NIC */
1983*4882a593Smuzhiyun bool restore_core = FALSE;
1984*4882a593Smuzhiyun
1985*4882a593Smuzhiyun if ((sii->axi_num_wrappers == 0) ||
1986*4882a593Smuzhiyun #ifdef AXI_TIMEOUTS_NIC
1987*4882a593Smuzhiyun (!PCIE(sii)) ||
1988*4882a593Smuzhiyun #endif /* AXI_TIMEOUTS_NIC */
1989*4882a593Smuzhiyun FALSE) {
1990*4882a593Smuzhiyun SI_VMSG(("ai_clear_backplane_to_per_core, axi_num_wrappers:%d, Is_PCIE:%d,"
1991*4882a593Smuzhiyun " BUS_TYPE:%d, ID:%x\n",
1992*4882a593Smuzhiyun sii->axi_num_wrappers, PCIE(sii),
1993*4882a593Smuzhiyun BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
1994*4882a593Smuzhiyun return AXI_WRAP_STS_NONE;
1995*4882a593Smuzhiyun }
1996*4882a593Smuzhiyun
1997*4882a593Smuzhiyun if (wrap != NULL) {
1998*4882a593Smuzhiyun ai = (aidmp_t *)wrap;
1999*4882a593Smuzhiyun } else if (coreid && (target_coreidx != current_coreidx)) {
2000*4882a593Smuzhiyun
2001*4882a593Smuzhiyun if (ai_setcoreidx(sih, target_coreidx) == NULL) {
2002*4882a593Smuzhiyun /* Unable to set the core */
2003*4882a593Smuzhiyun SI_PRINT(("Set Code Failed: coreid:%x, unit:%d, target_coreidx:%d\n",
2004*4882a593Smuzhiyun coreid, coreunit, target_coreidx));
2005*4882a593Smuzhiyun errlog_lo = target_coreidx;
2006*4882a593Smuzhiyun ret = AXI_WRAP_STS_SET_CORE_FAIL;
2007*4882a593Smuzhiyun goto end;
2008*4882a593Smuzhiyun }
2009*4882a593Smuzhiyun
2010*4882a593Smuzhiyun restore_core = TRUE;
2011*4882a593Smuzhiyun ai = (aidmp_t *)si_wrapperregs(sih);
2012*4882a593Smuzhiyun } else {
2013*4882a593Smuzhiyun /* Read error status of current wrapper */
2014*4882a593Smuzhiyun ai = (aidmp_t *)si_wrapperregs(sih);
2015*4882a593Smuzhiyun
2016*4882a593Smuzhiyun /* Update CoreID to current Code ID */
2017*4882a593Smuzhiyun coreid = si_coreid(sih);
2018*4882a593Smuzhiyun }
2019*4882a593Smuzhiyun
2020*4882a593Smuzhiyun /* read error log status */
2021*4882a593Smuzhiyun errlog_status = R_REG(sii->osh, &ai->errlogstatus);
2022*4882a593Smuzhiyun
2023*4882a593Smuzhiyun if (errlog_status == ID32_INVALID) {
2024*4882a593Smuzhiyun /* Do not try to peek further */
2025*4882a593Smuzhiyun SI_PRINT(("ai_clear_backplane_to_per_core, errlogstatus:%x - Slave Wrapper:%x\n",
2026*4882a593Smuzhiyun errlog_status, coreid));
2027*4882a593Smuzhiyun ret = AXI_WRAP_STS_WRAP_RD_ERR;
2028*4882a593Smuzhiyun errlog_lo = (uint32)(uintptr)&ai->errlogstatus;
2029*4882a593Smuzhiyun goto end;
2030*4882a593Smuzhiyun }
2031*4882a593Smuzhiyun
2032*4882a593Smuzhiyun if ((errlog_status & AIELS_ERROR_MASK) != 0) {
2033*4882a593Smuzhiyun uint32 tmp;
2034*4882a593Smuzhiyun uint32 count = 0;
2035*4882a593Smuzhiyun /* set ErrDone to clear the condition */
2036*4882a593Smuzhiyun W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK);
2037*4882a593Smuzhiyun
2038*4882a593Smuzhiyun /* SPINWAIT on errlogstatus timeout status bits */
2039*4882a593Smuzhiyun while ((tmp = R_REG(sii->osh, &ai->errlogstatus)) & AIELS_ERROR_MASK) {
2040*4882a593Smuzhiyun
2041*4882a593Smuzhiyun if (tmp == ID32_INVALID) {
2042*4882a593Smuzhiyun SI_PRINT(("ai_clear_backplane_to_per_core: prev errlogstatus:%x,"
2043*4882a593Smuzhiyun " errlogstatus:%x\n",
2044*4882a593Smuzhiyun errlog_status, tmp));
2045*4882a593Smuzhiyun ret = AXI_WRAP_STS_WRAP_RD_ERR;
2046*4882a593Smuzhiyun errlog_lo = (uint32)(uintptr)&ai->errlogstatus;
2047*4882a593Smuzhiyun goto end;
2048*4882a593Smuzhiyun }
2049*4882a593Smuzhiyun /*
2050*4882a593Smuzhiyun * Clear again, to avoid getting stuck in the loop, if a new error
2051*4882a593Smuzhiyun * is logged after we cleared the first timeout
2052*4882a593Smuzhiyun */
2053*4882a593Smuzhiyun W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK);
2054*4882a593Smuzhiyun
2055*4882a593Smuzhiyun count++;
2056*4882a593Smuzhiyun OSL_DELAY(10);
2057*4882a593Smuzhiyun if ((10 * count) > AI_REG_READ_TIMEOUT) {
2058*4882a593Smuzhiyun errlog_status = tmp;
2059*4882a593Smuzhiyun break;
2060*4882a593Smuzhiyun }
2061*4882a593Smuzhiyun }
2062*4882a593Smuzhiyun
2063*4882a593Smuzhiyun errlog_lo = R_REG(sii->osh, &ai->errlogaddrlo);
2064*4882a593Smuzhiyun errlog_hi = R_REG(sii->osh, &ai->errlogaddrhi);
2065*4882a593Smuzhiyun errlog_id = R_REG(sii->osh, &ai->errlogid);
2066*4882a593Smuzhiyun errlog_flags = R_REG(sii->osh, &ai->errlogflags);
2067*4882a593Smuzhiyun
2068*4882a593Smuzhiyun /* we are already in the error path, so OK to check for the slave error */
2069*4882a593Smuzhiyun if (ai_ignore_errlog(sii, ai, errlog_lo, errlog_hi, errlog_id,
2070*4882a593Smuzhiyun errlog_status)) {
2071*4882a593Smuzhiyun si_ignore_errlog_cnt++;
2072*4882a593Smuzhiyun goto end;
2073*4882a593Smuzhiyun }
2074*4882a593Smuzhiyun
2075*4882a593Smuzhiyun /* only reset APB Bridge on timeout (not slave error, or dec error) */
2076*4882a593Smuzhiyun switch (errlog_status & AIELS_ERROR_MASK) {
2077*4882a593Smuzhiyun case AIELS_SLAVE_ERR:
2078*4882a593Smuzhiyun SI_PRINT(("AXI slave error\n"));
2079*4882a593Smuzhiyun ret |= AXI_WRAP_STS_SLAVE_ERR;
2080*4882a593Smuzhiyun break;
2081*4882a593Smuzhiyun
2082*4882a593Smuzhiyun case AIELS_TIMEOUT:
2083*4882a593Smuzhiyun ai_reset_axi_to(sii, ai);
2084*4882a593Smuzhiyun ret |= AXI_WRAP_STS_TIMEOUT;
2085*4882a593Smuzhiyun break;
2086*4882a593Smuzhiyun
2087*4882a593Smuzhiyun case AIELS_DECODE:
2088*4882a593Smuzhiyun SI_PRINT(("AXI decode error\n"));
2089*4882a593Smuzhiyun #ifdef USE_HOSTMEM
2090*4882a593Smuzhiyun /* Ignore known cases of CR4 prefetch abort bugs */
2091*4882a593Smuzhiyun if ((errlog_id & (BCM_AXI_ID_MASK | BCM_AXI_ACCESS_TYPE_MASK)) !=
2092*4882a593Smuzhiyun (BCM43xx_AXI_ACCESS_TYPE_PREFETCH | BCM43xx_CR4_AXI_ID))
2093*4882a593Smuzhiyun #endif
2094*4882a593Smuzhiyun {
2095*4882a593Smuzhiyun ret |= AXI_WRAP_STS_DECODE_ERR;
2096*4882a593Smuzhiyun }
2097*4882a593Smuzhiyun break;
2098*4882a593Smuzhiyun default:
2099*4882a593Smuzhiyun ASSERT(0); /* should be impossible */
2100*4882a593Smuzhiyun }
2101*4882a593Smuzhiyun
2102*4882a593Smuzhiyun if (errlog_status & AIELS_MULTIPLE_ERRORS) {
2103*4882a593Smuzhiyun SI_PRINT(("Multiple AXI Errors\n"));
2104*4882a593Smuzhiyun /* Set multiple errors bit only if actual error is not ignored */
2105*4882a593Smuzhiyun if (ret) {
2106*4882a593Smuzhiyun ret |= AXI_WRAP_STS_MULTIPLE_ERRORS;
2107*4882a593Smuzhiyun }
2108*4882a593Smuzhiyun }
2109*4882a593Smuzhiyun
2110*4882a593Smuzhiyun SI_PRINT(("\tCoreID: %x\n", coreid));
2111*4882a593Smuzhiyun SI_PRINT(("\t errlog: lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x"
2112*4882a593Smuzhiyun ", status 0x%08x\n",
2113*4882a593Smuzhiyun errlog_lo, errlog_hi, errlog_id, errlog_flags,
2114*4882a593Smuzhiyun errlog_status));
2115*4882a593Smuzhiyun }
2116*4882a593Smuzhiyun
2117*4882a593Smuzhiyun end:
2118*4882a593Smuzhiyun if (ret != AXI_WRAP_STS_NONE) {
2119*4882a593Smuzhiyun last_axi_error = ret;
2120*4882a593Smuzhiyun last_axi_error_log_status = errlog_status;
2121*4882a593Smuzhiyun last_axi_error_core = coreid;
2122*4882a593Smuzhiyun last_axi_error_wrap = (uint32)ai;
2123*4882a593Smuzhiyun last_axi_errlog_lo = errlog_lo;
2124*4882a593Smuzhiyun last_axi_errlog_hi = errlog_hi;
2125*4882a593Smuzhiyun last_axi_errlog_id = errlog_id;
2126*4882a593Smuzhiyun }
2127*4882a593Smuzhiyun
2128*4882a593Smuzhiyun #if defined(AXI_TIMEOUTS_NIC)
2129*4882a593Smuzhiyun if (axi_error && (ret != AXI_WRAP_STS_NONE)) {
2130*4882a593Smuzhiyun axi_error->error = ret;
2131*4882a593Smuzhiyun axi_error->coreid = coreid;
2132*4882a593Smuzhiyun axi_error->errlog_lo = errlog_lo;
2133*4882a593Smuzhiyun axi_error->errlog_hi = errlog_hi;
2134*4882a593Smuzhiyun axi_error->errlog_id = errlog_id;
2135*4882a593Smuzhiyun axi_error->errlog_flags = errlog_flags;
2136*4882a593Smuzhiyun axi_error->errlog_status = errlog_status;
2137*4882a593Smuzhiyun sih->err_info->count++;
2138*4882a593Smuzhiyun
2139*4882a593Smuzhiyun if (sih->err_info->count == SI_MAX_ERRLOG_SIZE) {
2140*4882a593Smuzhiyun sih->err_info->count = SI_MAX_ERRLOG_SIZE - 1;
2141*4882a593Smuzhiyun SI_PRINT(("AXI Error log overflow\n"));
2142*4882a593Smuzhiyun }
2143*4882a593Smuzhiyun }
2144*4882a593Smuzhiyun #endif /* AXI_TIMEOUTS_NIC */
2145*4882a593Smuzhiyun
2146*4882a593Smuzhiyun if (restore_core) {
2147*4882a593Smuzhiyun if (ai_setcoreidx(sih, current_coreidx) == NULL) {
2148*4882a593Smuzhiyun /* Unable to set the core */
2149*4882a593Smuzhiyun return ID32_INVALID;
2150*4882a593Smuzhiyun }
2151*4882a593Smuzhiyun }
2152*4882a593Smuzhiyun
2153*4882a593Smuzhiyun return ret;
2154*4882a593Smuzhiyun }
2155*4882a593Smuzhiyun
2156*4882a593Smuzhiyun /* reset AXI timeout */
2157*4882a593Smuzhiyun static void
BCMPOSTTRAPFN(ai_reset_axi_to)2158*4882a593Smuzhiyun BCMPOSTTRAPFN(ai_reset_axi_to)(const si_info_t *sii, aidmp_t *ai)
2159*4882a593Smuzhiyun {
2160*4882a593Smuzhiyun /* reset APB Bridge */
2161*4882a593Smuzhiyun OR_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
2162*4882a593Smuzhiyun /* sync write */
2163*4882a593Smuzhiyun (void)R_REG(sii->osh, &ai->resetctrl);
2164*4882a593Smuzhiyun /* clear Reset bit */
2165*4882a593Smuzhiyun AND_REG(sii->osh, &ai->resetctrl, ~(AIRC_RESET));
2166*4882a593Smuzhiyun /* sync write */
2167*4882a593Smuzhiyun (void)R_REG(sii->osh, &ai->resetctrl);
2168*4882a593Smuzhiyun SI_PRINT(("AXI timeout\n"));
2169*4882a593Smuzhiyun if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) {
2170*4882a593Smuzhiyun SI_PRINT(("reset failed on wrapper %p\n", ai));
2171*4882a593Smuzhiyun g_disable_backplane_logs = TRUE;
2172*4882a593Smuzhiyun }
2173*4882a593Smuzhiyun }
2174*4882a593Smuzhiyun
2175*4882a593Smuzhiyun void
BCMPOSTTRAPFN(ai_wrapper_get_last_error)2176*4882a593Smuzhiyun BCMPOSTTRAPFN(ai_wrapper_get_last_error)(const si_t *sih, uint32 *error_status, uint32 *core,
2177*4882a593Smuzhiyun uint32 *lo, uint32 *hi, uint32 *id)
2178*4882a593Smuzhiyun {
2179*4882a593Smuzhiyun *error_status = last_axi_error_log_status;
2180*4882a593Smuzhiyun *core = last_axi_error_core;
2181*4882a593Smuzhiyun *lo = last_axi_errlog_lo;
2182*4882a593Smuzhiyun *hi = last_axi_errlog_hi;
2183*4882a593Smuzhiyun *id = last_axi_errlog_id;
2184*4882a593Smuzhiyun }
2185*4882a593Smuzhiyun
2186*4882a593Smuzhiyun /* Function to check whether AXI timeout has been registered on a core */
2187*4882a593Smuzhiyun uint32
ai_get_axi_timeout_reg(void)2188*4882a593Smuzhiyun ai_get_axi_timeout_reg(void)
2189*4882a593Smuzhiyun {
2190*4882a593Smuzhiyun return (GOODREGS(last_axi_errlog_lo) ? last_axi_errlog_lo : 0);
2191*4882a593Smuzhiyun }
2192*4882a593Smuzhiyun #endif /* AXI_TIMEOUTS || AXI_TIMEOUTS_NIC */
2193*4882a593Smuzhiyun
2194*4882a593Smuzhiyun uint32
BCMPOSTTRAPFN(ai_findcoreidx_by_axiid)2195*4882a593Smuzhiyun BCMPOSTTRAPFN(ai_findcoreidx_by_axiid)(const si_t *sih, uint32 axiid)
2196*4882a593Smuzhiyun {
2197*4882a593Smuzhiyun uint coreid = 0;
2198*4882a593Smuzhiyun uint coreunit = 0;
2199*4882a593Smuzhiyun const axi_to_coreidx_t *axi2coreidx = NULL;
2200*4882a593Smuzhiyun switch (CHIPID(sih->chip)) {
2201*4882a593Smuzhiyun case BCM4369_CHIP_GRPID:
2202*4882a593Smuzhiyun axi2coreidx = axi2coreidx_4369;
2203*4882a593Smuzhiyun break;
2204*4882a593Smuzhiyun default:
2205*4882a593Smuzhiyun SI_PRINT(("Chipid mapping not found\n"));
2206*4882a593Smuzhiyun break;
2207*4882a593Smuzhiyun }
2208*4882a593Smuzhiyun
2209*4882a593Smuzhiyun if (!axi2coreidx)
2210*4882a593Smuzhiyun return (BADIDX);
2211*4882a593Smuzhiyun
2212*4882a593Smuzhiyun coreid = axi2coreidx[axiid].coreid;
2213*4882a593Smuzhiyun coreunit = axi2coreidx[axiid].coreunit;
2214*4882a593Smuzhiyun
2215*4882a593Smuzhiyun return si_findcoreidx(sih, coreid, coreunit);
2216*4882a593Smuzhiyun
2217*4882a593Smuzhiyun }
2218*4882a593Smuzhiyun
2219*4882a593Smuzhiyun /*
2220*4882a593Smuzhiyun * This API polls all slave wrappers for errors and returns bit map of
2221*4882a593Smuzhiyun * all reported errors.
2222*4882a593Smuzhiyun * return - bit map of
2223*4882a593Smuzhiyun * AXI_WRAP_STS_NONE
2224*4882a593Smuzhiyun * AXI_WRAP_STS_TIMEOUT
2225*4882a593Smuzhiyun * AXI_WRAP_STS_SLAVE_ERR
2226*4882a593Smuzhiyun * AXI_WRAP_STS_DECODE_ERR
2227*4882a593Smuzhiyun * AXI_WRAP_STS_PCI_RD_ERR
2228*4882a593Smuzhiyun * AXI_WRAP_STS_WRAP_RD_ERR
2229*4882a593Smuzhiyun * AXI_WRAP_STS_SET_CORE_FAIL
2230*4882a593Smuzhiyun * On timeout detection, correspondign bridge will be reset to
2231*4882a593Smuzhiyun * unblock the bus.
2232*4882a593Smuzhiyun * Error reported in each wrapper can be retrieved using the API
2233*4882a593Smuzhiyun * si_get_axi_errlog_info()
2234*4882a593Smuzhiyun */
2235*4882a593Smuzhiyun uint32
BCMPOSTTRAPFN(ai_clear_backplane_to)2236*4882a593Smuzhiyun BCMPOSTTRAPFN(ai_clear_backplane_to)(si_t *sih)
2237*4882a593Smuzhiyun {
2238*4882a593Smuzhiyun uint32 ret = 0;
2239*4882a593Smuzhiyun #if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC)
2240*4882a593Smuzhiyun const si_info_t *sii = SI_INFO(sih);
2241*4882a593Smuzhiyun aidmp_t *ai;
2242*4882a593Smuzhiyun uint32 i;
2243*4882a593Smuzhiyun axi_wrapper_t * axi_wrapper = sii->axi_wrapper;
2244*4882a593Smuzhiyun
2245*4882a593Smuzhiyun #ifdef AXI_TIMEOUTS_NIC
2246*4882a593Smuzhiyun uint32 prev_value = 0;
2247*4882a593Smuzhiyun osl_t *osh = sii->osh;
2248*4882a593Smuzhiyun uint32 cfg_reg = 0;
2249*4882a593Smuzhiyun uint32 offset = 0;
2250*4882a593Smuzhiyun
2251*4882a593Smuzhiyun if ((sii->axi_num_wrappers == 0) || (!PCIE(sii)))
2252*4882a593Smuzhiyun #else
2253*4882a593Smuzhiyun if (sii->axi_num_wrappers == 0)
2254*4882a593Smuzhiyun #endif
2255*4882a593Smuzhiyun {
2256*4882a593Smuzhiyun SI_VMSG(("ai_clear_backplane_to, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d,"
2257*4882a593Smuzhiyun " ID:%x\n",
2258*4882a593Smuzhiyun sii->axi_num_wrappers, PCIE(sii),
2259*4882a593Smuzhiyun BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
2260*4882a593Smuzhiyun return AXI_WRAP_STS_NONE;
2261*4882a593Smuzhiyun }
2262*4882a593Smuzhiyun
2263*4882a593Smuzhiyun #ifdef AXI_TIMEOUTS_NIC
2264*4882a593Smuzhiyun /* Save and restore wrapper access window */
2265*4882a593Smuzhiyun if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
2266*4882a593Smuzhiyun if (PCIE_GEN1(sii)) {
2267*4882a593Smuzhiyun cfg_reg = PCI_BAR0_WIN2;
2268*4882a593Smuzhiyun offset = PCI_BAR0_WIN2_OFFSET;
2269*4882a593Smuzhiyun } else if (PCIE_GEN2(sii)) {
2270*4882a593Smuzhiyun cfg_reg = PCIE2_BAR0_CORE2_WIN2;
2271*4882a593Smuzhiyun offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
2272*4882a593Smuzhiyun }
2273*4882a593Smuzhiyun else {
2274*4882a593Smuzhiyun ASSERT(!"!PCIE_GEN1 && !PCIE_GEN2");
2275*4882a593Smuzhiyun }
2276*4882a593Smuzhiyun
2277*4882a593Smuzhiyun prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4);
2278*4882a593Smuzhiyun
2279*4882a593Smuzhiyun if (prev_value == ID32_INVALID) {
2280*4882a593Smuzhiyun si_axi_error_t * axi_error =
2281*4882a593Smuzhiyun sih->err_info ?
2282*4882a593Smuzhiyun &sih->err_info->axi_error[sih->err_info->count] :
2283*4882a593Smuzhiyun NULL;
2284*4882a593Smuzhiyun
2285*4882a593Smuzhiyun SI_PRINT(("ai_clear_backplane_to, PCI_BAR0_WIN2 - %x\n", prev_value));
2286*4882a593Smuzhiyun if (axi_error) {
2287*4882a593Smuzhiyun axi_error->error = ret = AXI_WRAP_STS_PCI_RD_ERR;
2288*4882a593Smuzhiyun axi_error->errlog_lo = cfg_reg;
2289*4882a593Smuzhiyun sih->err_info->count++;
2290*4882a593Smuzhiyun
2291*4882a593Smuzhiyun if (sih->err_info->count == SI_MAX_ERRLOG_SIZE) {
2292*4882a593Smuzhiyun sih->err_info->count = SI_MAX_ERRLOG_SIZE - 1;
2293*4882a593Smuzhiyun SI_PRINT(("AXI Error log overflow\n"));
2294*4882a593Smuzhiyun }
2295*4882a593Smuzhiyun }
2296*4882a593Smuzhiyun
2297*4882a593Smuzhiyun return ret;
2298*4882a593Smuzhiyun }
2299*4882a593Smuzhiyun }
2300*4882a593Smuzhiyun #endif /* AXI_TIMEOUTS_NIC */
2301*4882a593Smuzhiyun
2302*4882a593Smuzhiyun for (i = 0; i < sii->axi_num_wrappers; ++i) {
2303*4882a593Smuzhiyun uint32 tmp;
2304*4882a593Smuzhiyun
2305*4882a593Smuzhiyun if (axi_wrapper[i].wrapper_type != AI_SLAVE_WRAPPER) {
2306*4882a593Smuzhiyun continue;
2307*4882a593Smuzhiyun }
2308*4882a593Smuzhiyun
2309*4882a593Smuzhiyun #ifdef AXI_TIMEOUTS_NIC
2310*4882a593Smuzhiyun if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
2311*4882a593Smuzhiyun /* Set BAR0_CORE2_WIN2 to bridge wapper base address */
2312*4882a593Smuzhiyun OSL_PCI_WRITE_CONFIG(osh,
2313*4882a593Smuzhiyun cfg_reg, 4, axi_wrapper[i].wrapper_addr);
2314*4882a593Smuzhiyun
2315*4882a593Smuzhiyun /* set AI to BAR0 + Offset corresponding to Gen1 or gen2 */
2316*4882a593Smuzhiyun ai = (aidmp_t *) (DISCARD_QUAL(sii->curmap, uint8) + offset);
2317*4882a593Smuzhiyun }
2318*4882a593Smuzhiyun else
2319*4882a593Smuzhiyun #endif /* AXI_TIMEOUTS_NIC */
2320*4882a593Smuzhiyun {
2321*4882a593Smuzhiyun ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr;
2322*4882a593Smuzhiyun }
2323*4882a593Smuzhiyun
2324*4882a593Smuzhiyun tmp = ai_clear_backplane_to_per_core(sih, axi_wrapper[i].cid, 0,
2325*4882a593Smuzhiyun DISCARD_QUAL(ai, void));
2326*4882a593Smuzhiyun
2327*4882a593Smuzhiyun ret |= tmp;
2328*4882a593Smuzhiyun }
2329*4882a593Smuzhiyun
2330*4882a593Smuzhiyun #ifdef AXI_TIMEOUTS_NIC
2331*4882a593Smuzhiyun /* Restore the initial wrapper space */
2332*4882a593Smuzhiyun if (prev_value) {
2333*4882a593Smuzhiyun OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value);
2334*4882a593Smuzhiyun }
2335*4882a593Smuzhiyun #endif /* AXI_TIMEOUTS_NIC */
2336*4882a593Smuzhiyun
2337*4882a593Smuzhiyun #endif /* AXI_TIMEOUTS || AXI_TIMEOUTS_NIC */
2338*4882a593Smuzhiyun
2339*4882a593Smuzhiyun return ret;
2340*4882a593Smuzhiyun }
2341*4882a593Smuzhiyun
2342*4882a593Smuzhiyun uint
ai_num_slaveports(const si_t * sih,uint coreidx)2343*4882a593Smuzhiyun ai_num_slaveports(const si_t *sih, uint coreidx)
2344*4882a593Smuzhiyun {
2345*4882a593Smuzhiyun const si_info_t *sii = SI_INFO(sih);
2346*4882a593Smuzhiyun const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
2347*4882a593Smuzhiyun uint32 cib;
2348*4882a593Smuzhiyun
2349*4882a593Smuzhiyun cib = cores_info->cib[coreidx];
2350*4882a593Smuzhiyun return ((cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT);
2351*4882a593Smuzhiyun }
2352*4882a593Smuzhiyun
2353*4882a593Smuzhiyun #ifdef UART_TRAP_DBG
2354*4882a593Smuzhiyun void
ai_dump_APB_Bridge_registers(const si_t * sih)2355*4882a593Smuzhiyun ai_dump_APB_Bridge_registers(const si_t *sih)
2356*4882a593Smuzhiyun {
2357*4882a593Smuzhiyun aidmp_t *ai;
2358*4882a593Smuzhiyun const si_info_t *sii = SI_INFO(sih);
2359*4882a593Smuzhiyun
2360*4882a593Smuzhiyun ai = (aidmp_t *)sii->br_wrapba[0];
2361*4882a593Smuzhiyun printf("APB Bridge 0\n");
2362*4882a593Smuzhiyun printf("lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x",
2363*4882a593Smuzhiyun R_REG(sii->osh, &ai->errlogaddrlo),
2364*4882a593Smuzhiyun R_REG(sii->osh, &ai->errlogaddrhi),
2365*4882a593Smuzhiyun R_REG(sii->osh, &ai->errlogid),
2366*4882a593Smuzhiyun R_REG(sii->osh, &ai->errlogflags));
2367*4882a593Smuzhiyun printf("\n status 0x%08x\n", R_REG(sii->osh, &ai->errlogstatus));
2368*4882a593Smuzhiyun }
2369*4882a593Smuzhiyun #endif /* UART_TRAP_DBG */
2370*4882a593Smuzhiyun
2371*4882a593Smuzhiyun void
ai_force_clocks(const si_t * sih,uint clock_state)2372*4882a593Smuzhiyun ai_force_clocks(const si_t *sih, uint clock_state)
2373*4882a593Smuzhiyun {
2374*4882a593Smuzhiyun const si_info_t *sii = SI_INFO(sih);
2375*4882a593Smuzhiyun aidmp_t *ai, *ai_sec = NULL;
2376*4882a593Smuzhiyun volatile uint32 dummy;
2377*4882a593Smuzhiyun uint32 ioctrl;
2378*4882a593Smuzhiyun const si_cores_info_t *cores_info = (const si_cores_info_t *)sii->cores_info;
2379*4882a593Smuzhiyun
2380*4882a593Smuzhiyun ASSERT(GOODREGS(sii->curwrap));
2381*4882a593Smuzhiyun ai = sii->curwrap;
2382*4882a593Smuzhiyun if (cores_info->wrapba2[sii->curidx])
2383*4882a593Smuzhiyun ai_sec = REG_MAP(cores_info->wrapba2[sii->curidx], SI_CORE_SIZE);
2384*4882a593Smuzhiyun
2385*4882a593Smuzhiyun /* ensure there are no pending backplane operations */
2386*4882a593Smuzhiyun SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
2387*4882a593Smuzhiyun
2388*4882a593Smuzhiyun if (clock_state == FORCE_CLK_ON) {
2389*4882a593Smuzhiyun ioctrl = R_REG(sii->osh, &ai->ioctrl);
2390*4882a593Smuzhiyun W_REG(sii->osh, &ai->ioctrl, (ioctrl | SICF_FGC));
2391*4882a593Smuzhiyun dummy = R_REG(sii->osh, &ai->ioctrl);
2392*4882a593Smuzhiyun BCM_REFERENCE(dummy);
2393*4882a593Smuzhiyun if (ai_sec) {
2394*4882a593Smuzhiyun ioctrl = R_REG(sii->osh, &ai_sec->ioctrl);
2395*4882a593Smuzhiyun W_REG(sii->osh, &ai_sec->ioctrl, (ioctrl | SICF_FGC));
2396*4882a593Smuzhiyun dummy = R_REG(sii->osh, &ai_sec->ioctrl);
2397*4882a593Smuzhiyun BCM_REFERENCE(dummy);
2398*4882a593Smuzhiyun }
2399*4882a593Smuzhiyun } else {
2400*4882a593Smuzhiyun ioctrl = R_REG(sii->osh, &ai->ioctrl);
2401*4882a593Smuzhiyun W_REG(sii->osh, &ai->ioctrl, (ioctrl & (~SICF_FGC)));
2402*4882a593Smuzhiyun dummy = R_REG(sii->osh, &ai->ioctrl);
2403*4882a593Smuzhiyun BCM_REFERENCE(dummy);
2404*4882a593Smuzhiyun if (ai_sec) {
2405*4882a593Smuzhiyun ioctrl = R_REG(sii->osh, &ai_sec->ioctrl);
2406*4882a593Smuzhiyun W_REG(sii->osh, &ai_sec->ioctrl, (ioctrl & (~SICF_FGC)));
2407*4882a593Smuzhiyun dummy = R_REG(sii->osh, &ai_sec->ioctrl);
2408*4882a593Smuzhiyun BCM_REFERENCE(dummy);
2409*4882a593Smuzhiyun }
2410*4882a593Smuzhiyun }
2411*4882a593Smuzhiyun /* ensure there are no pending backplane operations */
2412*4882a593Smuzhiyun SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
2413*4882a593Smuzhiyun }
2414*4882a593Smuzhiyun
2415*4882a593Smuzhiyun #ifdef DONGLEBUILD
2416*4882a593Smuzhiyun /*
2417*4882a593Smuzhiyun * this is not declared as static const, although that is the right thing to do
2418*4882a593Smuzhiyun * reason being if declared as static const, compile/link process would that in
2419*4882a593Smuzhiyun * read only section...
2420*4882a593Smuzhiyun * currently this code/array is used to identify the registers which are dumped
2421*4882a593Smuzhiyun * during trap processing
2422*4882a593Smuzhiyun * and usually for the trap buffer, .rodata buffer is reused, so for now just static
2423*4882a593Smuzhiyun */
2424*4882a593Smuzhiyun static uint32 BCMPOST_TRAP_RODATA(wrapper_offsets_to_dump)[] = {
2425*4882a593Smuzhiyun OFFSETOF(aidmp_t, ioctrlset),
2426*4882a593Smuzhiyun OFFSETOF(aidmp_t, ioctrlclear),
2427*4882a593Smuzhiyun OFFSETOF(aidmp_t, ioctrl),
2428*4882a593Smuzhiyun OFFSETOF(aidmp_t, iostatus),
2429*4882a593Smuzhiyun OFFSETOF(aidmp_t, ioctrlwidth),
2430*4882a593Smuzhiyun OFFSETOF(aidmp_t, iostatuswidth),
2431*4882a593Smuzhiyun OFFSETOF(aidmp_t, resetctrl),
2432*4882a593Smuzhiyun OFFSETOF(aidmp_t, resetstatus),
2433*4882a593Smuzhiyun OFFSETOF(aidmp_t, resetreadid),
2434*4882a593Smuzhiyun OFFSETOF(aidmp_t, resetwriteid),
2435*4882a593Smuzhiyun OFFSETOF(aidmp_t, errlogctrl),
2436*4882a593Smuzhiyun OFFSETOF(aidmp_t, errlogdone),
2437*4882a593Smuzhiyun OFFSETOF(aidmp_t, errlogstatus),
2438*4882a593Smuzhiyun OFFSETOF(aidmp_t, errlogaddrlo),
2439*4882a593Smuzhiyun OFFSETOF(aidmp_t, errlogaddrhi),
2440*4882a593Smuzhiyun OFFSETOF(aidmp_t, errlogid),
2441*4882a593Smuzhiyun OFFSETOF(aidmp_t, errloguser),
2442*4882a593Smuzhiyun OFFSETOF(aidmp_t, errlogflags),
2443*4882a593Smuzhiyun OFFSETOF(aidmp_t, intstatus),
2444*4882a593Smuzhiyun OFFSETOF(aidmp_t, config),
2445*4882a593Smuzhiyun OFFSETOF(aidmp_t, itipoobaout),
2446*4882a593Smuzhiyun OFFSETOF(aidmp_t, itipoobbout),
2447*4882a593Smuzhiyun OFFSETOF(aidmp_t, itipoobcout),
2448*4882a593Smuzhiyun OFFSETOF(aidmp_t, itipoobdout)};
2449*4882a593Smuzhiyun
2450*4882a593Smuzhiyun #ifdef ETD
2451*4882a593Smuzhiyun
2452*4882a593Smuzhiyun /* This is used for dumping wrapper registers for etd when axierror happens.
2453*4882a593Smuzhiyun * This should match with the structure hnd_ext_trap_bp_err_t
2454*4882a593Smuzhiyun */
2455*4882a593Smuzhiyun static uint32 BCMPOST_TRAP_RODATA(etd_wrapper_offsets_axierr)[] = {
2456*4882a593Smuzhiyun OFFSETOF(aidmp_t, ioctrl),
2457*4882a593Smuzhiyun OFFSETOF(aidmp_t, iostatus),
2458*4882a593Smuzhiyun OFFSETOF(aidmp_t, resetctrl),
2459*4882a593Smuzhiyun OFFSETOF(aidmp_t, resetstatus),
2460*4882a593Smuzhiyun OFFSETOF(aidmp_t, resetreadid),
2461*4882a593Smuzhiyun OFFSETOF(aidmp_t, resetwriteid),
2462*4882a593Smuzhiyun OFFSETOF(aidmp_t, errlogctrl),
2463*4882a593Smuzhiyun OFFSETOF(aidmp_t, errlogdone),
2464*4882a593Smuzhiyun OFFSETOF(aidmp_t, errlogstatus),
2465*4882a593Smuzhiyun OFFSETOF(aidmp_t, errlogaddrlo),
2466*4882a593Smuzhiyun OFFSETOF(aidmp_t, errlogaddrhi),
2467*4882a593Smuzhiyun OFFSETOF(aidmp_t, errlogid),
2468*4882a593Smuzhiyun OFFSETOF(aidmp_t, errloguser),
2469*4882a593Smuzhiyun OFFSETOF(aidmp_t, errlogflags),
2470*4882a593Smuzhiyun OFFSETOF(aidmp_t, itipoobaout),
2471*4882a593Smuzhiyun OFFSETOF(aidmp_t, itipoobbout),
2472*4882a593Smuzhiyun OFFSETOF(aidmp_t, itipoobcout),
2473*4882a593Smuzhiyun OFFSETOF(aidmp_t, itipoobdout)};
2474*4882a593Smuzhiyun #endif /* ETD */
2475*4882a593Smuzhiyun
2476*4882a593Smuzhiyun /* wrapper function to access the global array wrapper_offsets_to_dump */
2477*4882a593Smuzhiyun static uint32
BCMRAMFN(ai_get_sizeof_wrapper_offsets_to_dump)2478*4882a593Smuzhiyun BCMRAMFN(ai_get_sizeof_wrapper_offsets_to_dump)(void)
2479*4882a593Smuzhiyun {
2480*4882a593Smuzhiyun return (sizeof(wrapper_offsets_to_dump));
2481*4882a593Smuzhiyun }
2482*4882a593Smuzhiyun
2483*4882a593Smuzhiyun static uint32
BCMPOSTTRAPRAMFN(ai_get_wrapper_base_addr)2484*4882a593Smuzhiyun BCMPOSTTRAPRAMFN(ai_get_wrapper_base_addr)(uint32 **offset)
2485*4882a593Smuzhiyun {
2486*4882a593Smuzhiyun uint32 arr_size = ARRAYSIZE(wrapper_offsets_to_dump);
2487*4882a593Smuzhiyun
2488*4882a593Smuzhiyun *offset = &wrapper_offsets_to_dump[0];
2489*4882a593Smuzhiyun return arr_size;
2490*4882a593Smuzhiyun }
2491*4882a593Smuzhiyun
2492*4882a593Smuzhiyun uint32
BCMATTACHFN(ai_wrapper_dump_buf_size)2493*4882a593Smuzhiyun BCMATTACHFN(ai_wrapper_dump_buf_size)(const si_t *sih)
2494*4882a593Smuzhiyun {
2495*4882a593Smuzhiyun uint32 buf_size = 0;
2496*4882a593Smuzhiyun uint32 wrapper_count = 0;
2497*4882a593Smuzhiyun const si_info_t *sii = SI_INFO(sih);
2498*4882a593Smuzhiyun
2499*4882a593Smuzhiyun wrapper_count = sii->axi_num_wrappers;
2500*4882a593Smuzhiyun if (wrapper_count == 0)
2501*4882a593Smuzhiyun return 0;
2502*4882a593Smuzhiyun
2503*4882a593Smuzhiyun /* cnt indicates how many registers, tag_id 0 will say these are address/value */
2504*4882a593Smuzhiyun /* address/value pairs */
2505*4882a593Smuzhiyun buf_size += 2 * (ai_get_sizeof_wrapper_offsets_to_dump() * wrapper_count);
2506*4882a593Smuzhiyun
2507*4882a593Smuzhiyun return buf_size;
2508*4882a593Smuzhiyun }
2509*4882a593Smuzhiyun
2510*4882a593Smuzhiyun static uint32*
BCMPOSTTRAPFN(ai_wrapper_dump_binary_one)2511*4882a593Smuzhiyun BCMPOSTTRAPFN(ai_wrapper_dump_binary_one)(const si_info_t *sii, uint32 *p32, uint32 wrap_ba)
2512*4882a593Smuzhiyun {
2513*4882a593Smuzhiyun uint i;
2514*4882a593Smuzhiyun uint32 *addr;
2515*4882a593Smuzhiyun uint32 arr_size;
2516*4882a593Smuzhiyun uint32 *offset_base;
2517*4882a593Smuzhiyun
2518*4882a593Smuzhiyun arr_size = ai_get_wrapper_base_addr(&offset_base);
2519*4882a593Smuzhiyun
2520*4882a593Smuzhiyun for (i = 0; i < arr_size; i++) {
2521*4882a593Smuzhiyun addr = (uint32 *)(wrap_ba + *(offset_base + i));
2522*4882a593Smuzhiyun *p32++ = (uint32)addr;
2523*4882a593Smuzhiyun *p32++ = R_REG(sii->osh, addr);
2524*4882a593Smuzhiyun }
2525*4882a593Smuzhiyun return p32;
2526*4882a593Smuzhiyun }
2527*4882a593Smuzhiyun
2528*4882a593Smuzhiyun #if defined(ETD)
2529*4882a593Smuzhiyun static uint32
BCMPOSTTRAPRAMFN(ai_get_wrapper_base_addr_etd_axierr)2530*4882a593Smuzhiyun BCMPOSTTRAPRAMFN(ai_get_wrapper_base_addr_etd_axierr)(uint32 **offset)
2531*4882a593Smuzhiyun {
2532*4882a593Smuzhiyun uint32 arr_size = ARRAYSIZE(etd_wrapper_offsets_axierr);
2533*4882a593Smuzhiyun
2534*4882a593Smuzhiyun *offset = &etd_wrapper_offsets_axierr[0];
2535*4882a593Smuzhiyun return arr_size;
2536*4882a593Smuzhiyun }
2537*4882a593Smuzhiyun
2538*4882a593Smuzhiyun uint32
BCMPOSTTRAPFN(ai_wrapper_dump_last_timeout)2539*4882a593Smuzhiyun BCMPOSTTRAPFN(ai_wrapper_dump_last_timeout)(const si_t *sih, uint32 *error, uint32 *core,
2540*4882a593Smuzhiyun uint32 *ba, uchar *p)
2541*4882a593Smuzhiyun {
2542*4882a593Smuzhiyun #if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC)
2543*4882a593Smuzhiyun uint32 *p32;
2544*4882a593Smuzhiyun uint32 wrap_ba = last_axi_error_wrap;
2545*4882a593Smuzhiyun uint i;
2546*4882a593Smuzhiyun uint32 *addr;
2547*4882a593Smuzhiyun
2548*4882a593Smuzhiyun const si_info_t *sii = SI_INFO(sih);
2549*4882a593Smuzhiyun
2550*4882a593Smuzhiyun if (last_axi_error != AXI_WRAP_STS_NONE)
2551*4882a593Smuzhiyun {
2552*4882a593Smuzhiyun if (wrap_ba)
2553*4882a593Smuzhiyun {
2554*4882a593Smuzhiyun p32 = (uint32 *)p;
2555*4882a593Smuzhiyun uint32 arr_size;
2556*4882a593Smuzhiyun uint32 *offset_base;
2557*4882a593Smuzhiyun
2558*4882a593Smuzhiyun arr_size = ai_get_wrapper_base_addr_etd_axierr(&offset_base);
2559*4882a593Smuzhiyun for (i = 0; i < arr_size; i++) {
2560*4882a593Smuzhiyun addr = (uint32 *)(wrap_ba + *(offset_base + i));
2561*4882a593Smuzhiyun *p32++ = R_REG(sii->osh, addr);
2562*4882a593Smuzhiyun }
2563*4882a593Smuzhiyun }
2564*4882a593Smuzhiyun *error = last_axi_error;
2565*4882a593Smuzhiyun *core = last_axi_error_core;
2566*4882a593Smuzhiyun *ba = wrap_ba;
2567*4882a593Smuzhiyun }
2568*4882a593Smuzhiyun #else
2569*4882a593Smuzhiyun *error = 0;
2570*4882a593Smuzhiyun *core = 0;
2571*4882a593Smuzhiyun *ba = 0;
2572*4882a593Smuzhiyun #endif /* AXI_TIMEOUTS || AXI_TIMEOUTS_NIC */
2573*4882a593Smuzhiyun return 0;
2574*4882a593Smuzhiyun }
2575*4882a593Smuzhiyun #endif /* ETD */
2576*4882a593Smuzhiyun
2577*4882a593Smuzhiyun uint32
BCMPOSTTRAPFN(ai_wrapper_dump_binary)2578*4882a593Smuzhiyun BCMPOSTTRAPFN(ai_wrapper_dump_binary)(const si_t *sih, uchar *p)
2579*4882a593Smuzhiyun {
2580*4882a593Smuzhiyun uint32 *p32 = (uint32 *)p;
2581*4882a593Smuzhiyun uint32 i;
2582*4882a593Smuzhiyun const si_info_t *sii = SI_INFO(sih);
2583*4882a593Smuzhiyun
2584*4882a593Smuzhiyun for (i = 0; i < sii->axi_num_wrappers; i++) {
2585*4882a593Smuzhiyun p32 = ai_wrapper_dump_binary_one(sii, p32, sii->axi_wrapper[i].wrapper_addr);
2586*4882a593Smuzhiyun }
2587*4882a593Smuzhiyun return 0;
2588*4882a593Smuzhiyun }
2589*4882a593Smuzhiyun
2590*4882a593Smuzhiyun bool
BCMPOSTTRAPFN(ai_check_enable_backplane_log)2591*4882a593Smuzhiyun BCMPOSTTRAPFN(ai_check_enable_backplane_log)(const si_t *sih)
2592*4882a593Smuzhiyun {
2593*4882a593Smuzhiyun #if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC)
2594*4882a593Smuzhiyun if (g_disable_backplane_logs) {
2595*4882a593Smuzhiyun return FALSE;
2596*4882a593Smuzhiyun }
2597*4882a593Smuzhiyun else {
2598*4882a593Smuzhiyun return TRUE;
2599*4882a593Smuzhiyun }
2600*4882a593Smuzhiyun #else /* (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC) */
2601*4882a593Smuzhiyun return FALSE;
2602*4882a593Smuzhiyun #endif /* (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC) */
2603*4882a593Smuzhiyun }
2604*4882a593Smuzhiyun #endif /* DONGLEBUILD */
2605