1 /*
2 * Misc utility routines for accessing chip-specific features
3 * of the SiliconBackplane-based Broadcom chips.
4 *
5 * Portions of this code are copyright (c) 2022 Cypress Semiconductor Corporation
6 *
7 * Copyright (C) 1999-2017, Broadcom Corporation
8 *
9 * Unless you and Broadcom execute a separate written software license
10 * agreement governing use of this software, this software is licensed to you
11 * under the terms of the GNU General Public License version 2 (the "GPL"),
12 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
13 * following added to such license:
14 *
15 * As a special exception, the copyright holders of this software give you
16 * permission to link this software with independent modules, and to copy and
17 * distribute the resulting executable under terms of your choice, provided that
18 * you also meet, for each linked independent module, the terms and conditions of
19 * the license of that module. An independent module is a module which is not
20 * derived from this software. The special exception does not apply to any
21 * modifications of the software.
22 *
23 * Notwithstanding the above, under no circumstances may you combine this
24 * software in any way with any other Broadcom software provided under a license
25 * other than the GPL, without Broadcom's express prior written consent.
26 *
27 *
28 * <<Broadcom-WL-IPTag/Open:>>
29 *
30 * $Id: aiutils.c 701122 2017-05-23 19:32:45Z $
31 */
32 #include <bcm_cfg.h>
33 #include <typedefs.h>
34 #include <bcmdefs.h>
35 #include <osl.h>
36 #include <bcmutils.h>
37 #include <siutils.h>
38 #include <hndsoc.h>
39 #include <sbchipc.h>
40 #include <pcicfg.h>
41
42 #include "siutils_priv.h"
43 #include <bcmdevs.h>
44
45 #define BCM53573_DMP() (0)
46 #define BCM4707_DMP() (0)
47 #define PMU_DMP() (0)
48 #define GCI_DMP() (0)
49
50 #if defined(BCM_BACKPLANE_TIMEOUT)
51 static bool ai_get_apb_bridge(si_t *sih, uint32 coreidx, uint32 *apb_id, uint32 *apb_coreunit);
52 #endif /* BCM_BACKPLANE_TIMEOUT */
53
54 #if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
55 static void ai_reset_axi_to(si_info_t *sii, aidmp_t *ai);
56 #endif /* defined (AXI_TIMEOUTS) || defined (BCM_BACKPLANE_TIMEOUT) */
57
58 /* EROM parsing */
59
60 #ifdef BCMQT
61 #define SPINWAIT_TIME_US 3000
62 #else
63 #define SPINWAIT_TIME_US 300
64 #endif /* BCMQT */
65
66 static uint32
67 get_erom_ent(si_t *sih, uint32 **eromptr, uint32 mask, uint32 match)
68 {
69 uint32 ent;
70 uint inv = 0, nom = 0;
71 uint32 size = 0;
72
73 while (TRUE) {
74 ent = R_REG(si_osh(sih), *eromptr);
75 (*eromptr)++;
76
77 if (mask == 0)
78 break;
79
80 if ((ent & ER_VALID) == 0) {
81 inv++;
82 continue;
83 }
84
85 if (ent == (ER_END | ER_VALID))
86 break;
87
88 if ((ent & mask) == match)
89 break;
90
91 /* escape condition: bail out if we scan past the maximum EROM size without a match */
92 size += sizeof(*eromptr);
93 if (size >= ER_SZ_MAX) {
94 SI_ERROR(("Failed to find end of EROM marker\n"));
95 break;
96 }
97
98 nom++;
99 }
100
101 SI_VMSG(("%s: Returning ent 0x%08x\n", __FUNCTION__, ent));
102 if (inv + nom) {
103 SI_VMSG((" after %d invalid and %d non-matching entries\n", inv, nom));
104 }
105 return ent;
106 }
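
/* Illustrative sketch (compiled out): how ai_scan() below drives
 * get_erom_ent(). A (mask, match) pair of (ER_TAG, ER_CI) returns the next
 * component-identifier (CIA) word; a mask of 0 returns the next word
 * unconditionally. The field masks come from the backplane headers included
 * above; this helper is not part of the driver.
 */
#if 0
static void
erom_walk_example(si_t *sih, uint32 *eromptr)
{
	uint32 cia, cib;

	cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
	if (cia == (ER_END | ER_VALID))
		return;	/* end-of-EROM marker */

	/* a CIB word always follows a CIA word */
	cib = get_erom_ent(sih, &eromptr, 0, 0);
	SI_VMSG(("core 0x%04x rev %d\n",
		(cia & CIA_CID_MASK) >> CIA_CID_SHIFT,
		(cib & CIB_REV_MASK) >> CIB_REV_SHIFT));
}
#endif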
107
108 static uint32
109 get_asd(si_t *sih, uint32 **eromptr, uint sp, uint ad, uint st, uint32 *addrl, uint32 *addrh,
110 uint32 *sizel, uint32 *sizeh)
111 {
112 uint32 asd, sz, szd;
113
114 BCM_REFERENCE(ad);
115
116 asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
117 if (((asd & ER_TAG1) != ER_ADD) ||
118 (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
119 ((asd & AD_ST_MASK) != st)) {
120 /* This is not what we want, "push" it back */
121 (*eromptr)--;
122 return 0;
123 }
124 *addrl = asd & AD_ADDR_MASK;
125 if (asd & AD_AG32)
126 *addrh = get_erom_ent(sih, eromptr, 0, 0);
127 else
128 *addrh = 0;
129 *sizeh = 0;
130 sz = asd & AD_SZ_MASK;
131 if (sz == AD_SZ_SZD) {
132 szd = get_erom_ent(sih, eromptr, 0, 0);
133 *sizel = szd & SD_SZ_MASK;
134 if (szd & SD_SG32)
135 *sizeh = get_erom_ent(sih, eromptr, 0, 0);
136 } else
137 *sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);
138
139 SI_VMSG((" SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
140 sp, ad, st, *sizeh, *sizel, *addrh, *addrl));
141
142 return asd;
143 }
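
/* Hedged usage sketch (compiled out): fetching a component's primary register
 * space, i.e. slave port 0 / address descriptor 0 of type AD_ST_SLAVE.
 * get_asd() returns 0 and rewinds the EROM pointer when the next entry is not
 * the requested descriptor, which is how ai_scan() below probes for bridges.
 */
#if 0
static bool
asd_example(si_t *sih, uint32 **eromptr)
{
	uint32 asd, addrl, addrh, sizel, sizeh;

	asd = get_asd(sih, eromptr, 0, 0, AD_ST_SLAVE,
		&addrl, &addrh, &sizel, &sizeh);
	if (asd == 0)
		return FALSE;	/* next entry was not a slave ASD */
	SI_VMSG(("regs at 0x%08x_%08x, size 0x%x\n", addrh, addrl, sizel));
	return TRUE;
}
#endif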
144
145 /* Parse the enumeration rom to identify all cores
146 * Erom content format can be found in:
147 * http://hwnbu-twiki.broadcom.com/twiki/pub/Mwgroup/ArmDocumentation/SystemDiscovery.pdf
148 */
149 void
150 ai_scan(si_t *sih, void *regs, uint32 erombase, uint devid)
151 {
152 si_info_t *sii = SI_INFO(sih);
153 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
154 uint32 *eromptr, *eromlim;
155 axi_wrapper_t * axi_wrapper = sii->axi_wrapper;
156
157 BCM_REFERENCE(devid);
158
159 switch (BUSTYPE(sih->bustype)) {
160 case SI_BUS:
161 eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
162 break;
163
164 case PCI_BUS:
165 /* Set wrappers address */
166 sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE);
167
168 /* Now point the window at the erom */
169 OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase);
170 eromptr = regs;
171 break;
172
173 #ifdef BCMSDIO
174 case SPI_BUS:
175 case SDIO_BUS:
176 eromptr = (uint32 *)(uintptr)erombase;
177 break;
178 #endif /* BCMSDIO */
179
180 case PCMCIA_BUS:
181 default:
182 SI_ERROR(("Don't know how to do AXI enumertion on bus %d\n", sih->bustype));
183 ASSERT(0);
184 return;
185 }
186 eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
187 sii->axi_num_wrappers = 0;
188
189 SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n",
190 OSL_OBFUSCATE_BUF(regs), erombase,
191 OSL_OBFUSCATE_BUF(eromptr), OSL_OBFUSCATE_BUF(eromlim)));
192 while (eromptr < eromlim) {
193 uint32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
194 uint32 mpd, asd, addrl, addrh, sizel, sizeh;
195 uint i, j, idx;
196 bool br;
197
198 br = FALSE;
199
200 /* Grok a component */
201 cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
202 if (cia == (ER_END | ER_VALID)) {
203 SI_VMSG(("Found END of erom after %d cores\n", sii->numcores));
204 return;
205 }
206
207 cib = get_erom_ent(sih, &eromptr, 0, 0);
208
209 if ((cib & ER_TAG) != ER_CI) {
210 SI_ERROR(("CIA not followed by CIB\n"));
211 goto error;
212 }
213
214 cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
215 mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
216 crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
217 nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
218 nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
219 nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
220 nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
221
222 #ifdef BCMDBG_SI
223 SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, "
224 "nsw = %d, nmp = %d & nsp = %d\n",
225 mfg, cid, crev, OSL_OBFUSCATE_BUF(eromptr - 1), nmw, nsw, nmp, nsp));
226 #else
227 BCM_REFERENCE(crev);
228 #endif // endif
229
230 if (BCM4347_CHIP(sih->chip)) {
231 /* 4347 has more entries for the ARM core.
232 * This should apply to all chips but crashes on router platforms,
233 * so this is a temporary fix pending further analysis.
234 */
235 if (nsp == 0)
236 continue;
237 } else
238 {
239 /* Include Default slave wrapper for timeout monitoring */
240 if ((nsp == 0) ||
241 #if !defined(AXI_TIMEOUTS) && !defined(BCM_BACKPLANE_TIMEOUT)
242 ((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) ||
243 #else
244 ((CHIPTYPE(sii->pub.socitype) == SOCI_NAI) &&
245 (mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) ||
246 #endif /* !defined(AXI_TIMEOUTS) && !defined(BCM_BACKPLANE_TIMEOUT) */
247 FALSE) {
248 continue;
249 }
250 }
251
252 if ((nmw + nsw) == 0) {
253 /* A component which is not a core */
254 if (cid == OOB_ROUTER_CORE_ID) {
255 asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
256 &addrl, &addrh, &sizel, &sizeh);
257 if (asd != 0) {
258 if ((sii->oob_router != 0) && (sii->oob_router != addrl)) {
259 sii->oob_router1 = addrl;
260 } else {
261 sii->oob_router = addrl;
262 }
263 }
264 }
265 if (cid != NS_CCB_CORE_ID &&
266 cid != PMU_CORE_ID && cid != GCI_CORE_ID && cid != SR_CORE_ID &&
267 cid != HUB_CORE_ID && cid != HND_OOBR_CORE_ID)
268 continue;
269 }
270
271 idx = sii->numcores;
272
273 cores_info->cia[idx] = cia;
274 cores_info->cib[idx] = cib;
275 cores_info->coreid[idx] = cid;
276
277 for (i = 0; i < nmp; i++) {
278 mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
279 if ((mpd & ER_TAG) != ER_MP) {
280 SI_ERROR(("Not enough MP entries for component 0x%x\n", cid));
281 goto error;
282 }
283 SI_VMSG((" Master port %d, mp: %d id: %d\n", i,
284 (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT,
285 (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT));
286 }
287
288 /* First Slave Address Descriptor should be port 0:
289 * the main register space for the core
290 */
291 asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
292 if (asd == 0) {
293 do {
294 /* Try again to see if it is a bridge */
295 asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
296 &sizel, &sizeh);
297 if (asd != 0)
298 br = TRUE;
299 else {
300 if (br == TRUE) {
301 break;
302 }
303 else if ((addrh != 0) || (sizeh != 0) ||
304 (sizel != SI_CORE_SIZE)) {
305 SI_ERROR(("addrh = 0x%x\t sizeh = 0x%x\t size1 ="
306 "0x%x\n", addrh, sizeh, sizel));
307 SI_ERROR(("First Slave ASD for"
308 "core 0x%04x malformed "
309 "(0x%08x)\n", cid, asd));
310 goto error;
311 }
312 }
313 } while (1);
314 }
315 cores_info->coresba[idx] = addrl;
316 cores_info->coresba_size[idx] = sizel;
317 /* Get any more ASDs in first port */
318 j = 1;
319 do {
320 asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
321 &sizel, &sizeh);
322 if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
323 cores_info->coresba2[idx] = addrl;
324 cores_info->coresba2_size[idx] = sizel;
325 }
326 j++;
327 } while (asd != 0);
328
329 /* Go through the ASDs for other slave ports */
330 for (i = 1; i < nsp; i++) {
331 j = 0;
332 do {
333 asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
334 &sizel, &sizeh);
335 /* To get the first base address of second slave port */
336 if ((asd != 0) && (i == 1) && (j == 0)) {
337 cores_info->csp2ba[idx] = addrl;
338 cores_info->csp2ba_size[idx] = sizel;
339 }
340 if (asd == 0)
341 break;
342 j++;
343 } while (1);
344 if (j == 0) {
345 SI_ERROR((" SP %d has no address descriptors\n", i));
346 goto error;
347 }
348 }
349
350 /* Now get master wrappers */
351 for (i = 0; i < nmw; i++) {
352 asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl, &addrh,
353 &sizel, &sizeh);
354 if (asd == 0) {
355 SI_ERROR(("Missing descriptor for MW %d\n", i));
356 goto error;
357 }
358 if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
359 SI_ERROR(("Master wrapper %d is not 4KB\n", i));
360 goto error;
361 }
362 if (i == 0) {
363 cores_info->wrapba[idx] = addrl;
364 } else if (i == 1) {
365 cores_info->wrapba2[idx] = addrl;
366 } else if (i == 2) {
367 cores_info->wrapba3[idx] = addrl;
368 }
369
370 if (axi_wrapper &&
371 (sii->axi_num_wrappers < SI_MAX_AXI_WRAPPERS)) {
372 axi_wrapper[sii->axi_num_wrappers].mfg = mfg;
373 axi_wrapper[sii->axi_num_wrappers].cid = cid;
374 axi_wrapper[sii->axi_num_wrappers].rev = crev;
375 axi_wrapper[sii->axi_num_wrappers].wrapper_type = AI_MASTER_WRAPPER;
376 axi_wrapper[sii->axi_num_wrappers].wrapper_addr = addrl;
377 sii->axi_num_wrappers++;
378 SI_VMSG(("MASTER WRAPPER: %d, mfg:%x, cid:%x,"
379 "rev:%x, addr:%x, size:%x\n",
380 sii->axi_num_wrappers, mfg, cid, crev, addrl, sizel));
381 }
382 }
383
384 /* And finally slave wrappers */
385 for (i = 0; i < nsw; i++) {
386 uint fwp = (nsp == 1) ? 0 : 1;
387 asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, &addrl, &addrh,
388 &sizel, &sizeh);
389
390 /* cache APB bridge wrapper address for set/clear timeout */
391 if ((mfg == MFGID_ARM) && (cid == APB_BRIDGE_ID)) {
392 ASSERT(sii->num_br < SI_MAXBR);
393 sii->br_wrapba[sii->num_br++] = addrl;
394 }
395
396 if (asd == 0) {
397 SI_ERROR(("Missing descriptor for SW %d\n", i));
398 goto error;
399 }
400 if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
401 SI_ERROR(("Slave wrapper %d is not 4KB\n", i));
402 goto error;
403 }
404 if ((nmw == 0) && (i == 0)) {
405 cores_info->wrapba[idx] = addrl;
406 } else if ((nmw == 0) && (i == 1)) {
407 cores_info->wrapba2[idx] = addrl;
408 } else if ((nmw == 0) && (i == 2)) {
409 cores_info->wrapba3[idx] = addrl;
410 }
411
412 /* Include all slave wrappers to the list to
413 * enable and monitor watchdog timeouts
414 */
415
416 if (axi_wrapper &&
417 (sii->axi_num_wrappers < SI_MAX_AXI_WRAPPERS)) {
418 axi_wrapper[sii->axi_num_wrappers].mfg = mfg;
419 axi_wrapper[sii->axi_num_wrappers].cid = cid;
420 axi_wrapper[sii->axi_num_wrappers].rev = crev;
421 axi_wrapper[sii->axi_num_wrappers].wrapper_type = AI_SLAVE_WRAPPER;
422
423 /* Software WAR, as discussed with the hardware team, to ensure the proper
424 * slave wrapper base address is set for the 4364 chip ID.
425 * The reported address is 0x1810c000; correct it to 0x1810e000.
426 * This ensures the AXI default slave wrapper is registered along with the
427 * other slave wrapper cores, which is useful when generating trap info
428 * after a write to an invalid core / wrapper register.
429 */
430
431 if ((CHIPID(sih->chip) == BCM4364_CHIP_ID) &&
432 (cid == DEF_AI_COMP)) {
433 axi_wrapper[sii->axi_num_wrappers].wrapper_addr =
434 0x1810e000;
435 } else {
436 axi_wrapper[sii->axi_num_wrappers].wrapper_addr = addrl;
437 }
438
439 sii->axi_num_wrappers++;
440
441 SI_VMSG(("SLAVE WRAPPER: %d, mfg:%x, cid:%x,"
442 "rev:%x, addr:%x, size:%x\n",
443 sii->axi_num_wrappers, mfg, cid, crev, addrl, sizel));
444 }
445 }
446
447 #ifndef BCM_BACKPLANE_TIMEOUT
448 /* Don't record bridges */
449 if (br)
450 continue;
451 #endif // endif
452
453 /* Done with core */
454 sii->numcores++;
455 }
456
457 SI_ERROR(("Reached end of erom without finding END"));
458
459 error:
460 sii->numcores = 0;
461 return;
462 }
463
464 #define AI_SETCOREIDX_MAPSIZE(coreid) \
465 (((coreid) == NS_CCB_CORE_ID) ? 15 * SI_CORE_SIZE : SI_CORE_SIZE)
466
467 /* This function changes the logical "focus" to the indicated core.
468 * Returns the virtual address of the newly selected core's registers.
469 */
470 static volatile void *
471 _ai_setcoreidx(si_t *sih, uint coreidx, uint use_wrapn)
472 {
473 si_info_t *sii = SI_INFO(sih);
474 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
475 uint32 addr, wrap, wrap2, wrap3;
476 volatile void *regs;
477
478 if (coreidx >= MIN(sii->numcores, SI_MAXCORES))
479 return (NULL);
480
481 addr = cores_info->coresba[coreidx];
482 wrap = cores_info->wrapba[coreidx];
483 wrap2 = cores_info->wrapba2[coreidx];
484 wrap3 = cores_info->wrapba3[coreidx];
485
486 #ifdef BCM_BACKPLANE_TIMEOUT
487 /* No need to disable interrupts while entering/exiting APB bridge core */
488 if ((cores_info->coreid[coreidx] != APB_BRIDGE_CORE_ID) &&
489 (cores_info->coreid[sii->curidx] != APB_BRIDGE_CORE_ID))
490 #endif /* BCM_BACKPLANE_TIMEOUT */
491 {
492 /*
493 * If the user has provided an interrupt mask enabled function,
494 * then assert interrupts are disabled before switching the core.
495 */
496 ASSERT((sii->intrsenabled_fn == NULL) ||
497 !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
498 }
499
500 switch (BUSTYPE(sih->bustype)) {
501 case SI_BUS:
502 /* map new one */
503 if (!cores_info->regs[coreidx]) {
504 cores_info->regs[coreidx] = REG_MAP(addr,
505 AI_SETCOREIDX_MAPSIZE(cores_info->coreid[coreidx]));
506 ASSERT(GOODREGS(cores_info->regs[coreidx]));
507 }
508 sii->curmap = regs = cores_info->regs[coreidx];
509 if (!cores_info->wrappers[coreidx] && (wrap != 0)) {
510 cores_info->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
511 ASSERT(GOODREGS(cores_info->wrappers[coreidx]));
512 }
513 if (!cores_info->wrappers2[coreidx] && (wrap2 != 0)) {
514 cores_info->wrappers2[coreidx] = REG_MAP(wrap2, SI_CORE_SIZE);
515 ASSERT(GOODREGS(cores_info->wrappers2[coreidx]));
516 }
517 if (!cores_info->wrappers3[coreidx] && (wrap3 != 0)) {
518 cores_info->wrappers3[coreidx] = REG_MAP(wrap3, SI_CORE_SIZE);
519 ASSERT(GOODREGS(cores_info->wrappers3[coreidx]));
520 }
521
522 if (use_wrapn == 2) {
523 sii->curwrap = cores_info->wrappers3[coreidx];
524 } else if (use_wrapn == 1) {
525 sii->curwrap = cores_info->wrappers2[coreidx];
526 } else {
527 sii->curwrap = cores_info->wrappers[coreidx];
528 }
529 break;
530
531 case PCI_BUS:
532 #ifdef BCM_BACKPLANE_TIMEOUT
533 /* No need to set the BAR0 if the core is the APB bridge.
534 * This saves two PCI config writes while checking for errlog.
535 */
536 if (cores_info->coreid[coreidx] != APB_BRIDGE_CORE_ID)
537 #endif /* BCM_BACKPLANE_TIMEOUT */
538 {
539 /* point bar0 window */
540 OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, addr);
541 }
542
543 regs = sii->curmap;
544 /* point bar0 2nd 4KB window to the primary wrapper */
545 if (use_wrapn)
546 wrap = wrap2;
547 if (PCIE_GEN2(sii))
548 OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_WIN2, 4, wrap);
549 else
550 OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN2, 4, wrap);
551 break;
552
553 #ifdef BCMSDIO
554 case SPI_BUS:
555 case SDIO_BUS:
556 sii->curmap = regs = (void *)((uintptr)addr);
557 if (use_wrapn)
558 sii->curwrap = (void *)((uintptr)wrap2);
559 else
560 sii->curwrap = (void *)((uintptr)wrap);
561 break;
562 #endif /* BCMSDIO */
563
564 case PCMCIA_BUS:
565 default:
566 ASSERT(0);
567 regs = NULL;
568 break;
569 }
570
571 sii->curmap = regs;
572 sii->curidx = coreidx;
573
574 return regs;
575 }
576
577 volatile void *
578 ai_setcoreidx(si_t *sih, uint coreidx)
579 {
580 return _ai_setcoreidx(sih, coreidx, 0);
581 }
582
583 volatile void *
584 ai_setcoreidx_2ndwrap(si_t *sih, uint coreidx)
585 {
586 return _ai_setcoreidx(sih, coreidx, 1);
587 }
588
589 volatile void *
590 ai_setcoreidx_3rdwrap(si_t *sih, uint coreidx)
591 {
592 return _ai_setcoreidx(sih, coreidx, 2);
593 }
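
/* Usage note (illustrative, compiled out): a typical focus switch saves the
 * current index, selects the target core and restores the original index
 * afterwards, mirroring what ai_corereg() does below on its slow path.
 * 'targetidx' is a hypothetical core index.
 */
#if 0
static void
setcoreidx_example(si_t *sih, uint targetidx)
{
	uint origidx = si_coreidx(sih);
	volatile void *regs = ai_setcoreidx(sih, targetidx);

	if (regs != NULL) {
		/* ... access the selected core's registers here ... */
	}
	ai_setcoreidx(sih, origidx);	/* restore focus */
}
#endif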
594
595 void
596 ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size)
597 {
598 si_info_t *sii = SI_INFO(sih);
599 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
600 chipcregs_t *cc = NULL;
601 uint32 erombase, *eromptr, *eromlim;
602 uint i, j, cidx;
603 uint32 cia, cib, nmp, nsp;
604 uint32 asd, addrl, addrh, sizel, sizeh;
605
606 for (i = 0; i < sii->numcores; i++) {
607 if (cores_info->coreid[i] == CC_CORE_ID) {
608 cc = (chipcregs_t *)cores_info->regs[i];
609 break;
610 }
611 }
612 if (cc == NULL)
613 goto error;
614
615 erombase = R_REG(sii->osh, &cc->eromptr);
616 eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
617 eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
618
619 cidx = sii->curidx;
620 cia = cores_info->cia[cidx];
621 cib = cores_info->cib[cidx];
622
623 nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
624 nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
625
626 /* scan for cores */
627 while (eromptr < eromlim) {
628 if ((get_erom_ent(sih, &eromptr, ER_TAG, ER_CI) == cia) &&
629 (get_erom_ent(sih, &eromptr, 0, 0) == cib)) {
630 break;
631 }
632 }
633
634 /* skip master ports */
635 for (i = 0; i < nmp; i++)
636 get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
637
638 /* Skip ASDs in port 0 */
639 asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
640 if (asd == 0) {
641 /* Try again to see if it is a bridge */
642 asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
643 &sizel, &sizeh);
644 }
645
646 j = 1;
647 do {
648 asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
649 &sizel, &sizeh);
650 j++;
651 } while (asd != 0);
652
653 /* Go through the ASDs for other slave ports */
654 for (i = 1; i < nsp; i++) {
655 j = 0;
656 do {
657 asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
658 &sizel, &sizeh);
659 if (asd == 0)
660 break;
661
662 if (!asidx--) {
663 *addr = addrl;
664 *size = sizel;
665 return;
666 }
667 j++;
668 } while (1);
669
670 if (j == 0) {
671 SI_ERROR((" SP %d has no address descriptors\n", i));
672 break;
673 }
674 }
675
676 error:
677 *size = 0;
678 return;
679 }
680
681 /* Return the number of address spaces in current core */
682 int
683 ai_numaddrspaces(si_t *sih)
684 {
685
686 BCM_REFERENCE(sih);
687
688 return 2;
689 }
690
691 /* Return the address of the nth address space in the current core
692 * Arguments:
693 * sih : Pointer to struct si_t
694 * spidx : slave port index
695 * baidx : base address index
696 */
697 uint32
698 ai_addrspace(si_t *sih, uint spidx, uint baidx)
699 {
700 si_info_t *sii = SI_INFO(sih);
701 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
702 uint cidx;
703
704 cidx = sii->curidx;
705
706 if (spidx == CORE_SLAVE_PORT_0) {
707 if (baidx == CORE_BASE_ADDR_0)
708 return cores_info->coresba[cidx];
709 else if (baidx == CORE_BASE_ADDR_1)
710 return cores_info->coresba2[cidx];
711 }
712 else if (spidx == CORE_SLAVE_PORT_1) {
713 if (baidx == CORE_BASE_ADDR_0)
714 return cores_info->csp2ba[cidx];
715 }
716
717 SI_ERROR(("%s: Need to parse the erom again to find %d base addr in %d slave port\n",
718 __FUNCTION__, baidx, spidx));
719
720 return 0;
721
722 }
723
724 /* Return the size of the nth address space in the current core
725 * Arguments:
726 * sih : Pointer to struct si_t
727 * spidx : slave port index
728 * baidx : base address index
729 */
730 uint32
731 ai_addrspacesize(si_t *sih, uint spidx, uint baidx)
732 {
733 si_info_t *sii = SI_INFO(sih);
734 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
735 uint cidx;
736
737 cidx = sii->curidx;
738 if (spidx == CORE_SLAVE_PORT_0) {
739 if (baidx == CORE_BASE_ADDR_0)
740 return cores_info->coresba_size[cidx];
741 else if (baidx == CORE_BASE_ADDR_1)
742 return cores_info->coresba2_size[cidx];
743 }
744 else if (spidx == CORE_SLAVE_PORT_1) {
745 if (baidx == CORE_BASE_ADDR_0)
746 return cores_info->csp2ba_size[cidx];
747 }
748
749 SI_ERROR(("%s: Need to parse the erom again to find %d base addr in %d slave port\n",
750 __FUNCTION__, baidx, spidx));
751
752 return 0;
753 }
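
/* Illustrative pairing (compiled out): ai_addrspace() and ai_addrspacesize()
 * take the same indices, so a base address and its size are normally queried
 * together. This assumes the current core exposes a second base address on
 * slave port 0.
 */
#if 0
static void
addrspace_example(si_t *sih)
{
	uint32 base = ai_addrspace(sih, CORE_SLAVE_PORT_0, CORE_BASE_ADDR_1);
	uint32 size = ai_addrspacesize(sih, CORE_SLAVE_PORT_0, CORE_BASE_ADDR_1);

	if ((base != 0) && (size != 0))
		SI_VMSG(("second base 0x%08x, 0x%x bytes\n", base, size));
}
#endif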
754
755 uint
756 ai_flag(si_t *sih)
757 {
758 si_info_t *sii = SI_INFO(sih);
759 aidmp_t *ai;
760
761 if (BCM4707_DMP()) {
762 SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n",
763 __FUNCTION__));
764 return sii->curidx;
765 }
766 if (BCM53573_DMP()) {
767 SI_ERROR(("%s: Attempting to read DMP registers on 53573\n", __FUNCTION__));
768 return sii->curidx;
769 }
770 if (PMU_DMP()) {
771 uint idx, flag;
772 idx = sii->curidx;
773 ai_setcoreidx(sih, SI_CC_IDX);
774 flag = ai_flag_alt(sih);
775 ai_setcoreidx(sih, idx);
776 return flag;
777 }
778
779 ai = sii->curwrap;
780 ASSERT(ai != NULL);
781
782 return (R_REG(sii->osh, &ai->oobselouta30) & 0x1f);
783 }
784
785 uint
786 ai_flag_alt(si_t *sih)
787 {
788 si_info_t *sii = SI_INFO(sih);
789 aidmp_t *ai;
790
791 if (BCM4707_DMP()) {
792 SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n",
793 __FUNCTION__));
794 return sii->curidx;
795 }
796
797 ai = sii->curwrap;
798
799 return ((R_REG(sii->osh, &ai->oobselouta30) >> AI_OOBSEL_1_SHIFT) & AI_OOBSEL_MASK);
800 }
801
802 void
803 ai_setint(si_t *sih, int siflag)
804 {
805 BCM_REFERENCE(sih);
806 BCM_REFERENCE(siflag);
807
808 }
809
810 uint
811 ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val)
812 {
813 si_info_t *sii = SI_INFO(sih);
814 uint32 *addr = (uint32 *) ((uchar *)(sii->curwrap) + offset);
815
816 if (mask || val) {
817 uint32 w = R_REG(sii->osh, addr);
818 w &= ~mask;
819 w |= val;
820 W_REG(sii->osh, addr, w);
821 }
822 return (R_REG(sii->osh, addr));
823 }
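
/* Hedged example (compiled out): with mask == 0 and val == 0, ai_wrap_reg()
 * is a pure read of the wrapper register at 'offset'; otherwise it is a
 * read-modify-write. OFFSETOF here is assumed to be the field-offset macro
 * used elsewhere in this codebase.
 */
#if 0
static void
wrap_reg_example(si_t *sih)
{
	uint32 ioctrl = ai_wrap_reg(sih, OFFSETOF(aidmp_t, ioctrl), 0, 0); /* read */

	BCM_REFERENCE(ioctrl);
	(void)ai_wrap_reg(sih, OFFSETOF(aidmp_t, ioctrl), SICF_FGC, 0); /* clear FGC */
}
#endif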
824
825 uint
826 ai_corevendor(si_t *sih)
827 {
828 si_info_t *sii = SI_INFO(sih);
829 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
830 uint32 cia;
831
832 cia = cores_info->cia[sii->curidx];
833 return ((cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT);
834 }
835
836 uint
837 ai_corerev(si_t *sih)
838 {
839 si_info_t *sii = SI_INFO(sih);
840 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
841 uint32 cib;
842
843 cib = cores_info->cib[sii->curidx];
844 return ((cib & CIB_REV_MASK) >> CIB_REV_SHIFT);
845 }
846
847 uint
848 ai_corerev_minor(si_t *sih)
849 {
850 return (ai_core_sflags(sih, 0, 0) >> SISF_MINORREV_D11_SHIFT) &
851 SISF_MINORREV_D11_MASK;
852 }
853
854 bool
855 ai_iscoreup(si_t *sih)
856 {
857 si_info_t *sii = SI_INFO(sih);
858 aidmp_t *ai;
859
860 ai = sii->curwrap;
861
862 return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) == SICF_CLOCK_EN) &&
863 ((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0));
864 }
865
866 /*
867 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
868 * switch back to the original core, and return the new value.
869 *
870 * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
871 *
872 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
873 * and (on newer pci cores) chipcommon registers.
874 */
875 uint
876 ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
877 {
878 uint origidx = 0;
879 volatile uint32 *r = NULL;
880 uint w;
881 uint intr_val = 0;
882 bool fast = FALSE;
883 si_info_t *sii = SI_INFO(sih);
884 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
885
886 ASSERT(GOODIDX(coreidx));
887 ASSERT(regoff < SI_CORE_SIZE);
888 ASSERT((val & ~mask) == 0);
889
890 if (coreidx >= SI_MAXCORES)
891 return 0;
892
893 if (BUSTYPE(sih->bustype) == SI_BUS) {
894 /* If internal bus, we can always get at everything */
895 fast = TRUE;
896 /* map if does not exist */
897 if (!cores_info->regs[coreidx]) {
898 cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
899 SI_CORE_SIZE);
900 ASSERT(GOODREGS(cores_info->regs[coreidx]));
901 }
902 r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
903 } else if (BUSTYPE(sih->bustype) == PCI_BUS) {
904 /* If pci/pcie, we can get at the pci/pcie regs and, on newer cores, at chipc */
905
906 if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
907 /* Chipc registers are mapped at 12KB */
908
909 fast = TRUE;
910 r = (volatile uint32 *)((volatile char *)sii->curmap +
911 PCI_16KB0_CCREGS_OFFSET + regoff);
912 } else if (sii->pub.buscoreidx == coreidx) {
913 /* pci registers are either in the last 2KB of an 8KB window
914 * or, in pcie and pci rev 13, at 8KB
915 */
916 fast = TRUE;
917 if (SI_FAST(sii))
918 r = (volatile uint32 *)((volatile char *)sii->curmap +
919 PCI_16KB0_PCIREGS_OFFSET + regoff);
920 else
921 r = (volatile uint32 *)((volatile char *)sii->curmap +
922 ((regoff >= SBCONFIGOFF) ?
923 PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
924 regoff);
925 }
926 }
927
928 if (!fast) {
929 INTR_OFF(sii, intr_val);
930
931 /* save current core index */
932 origidx = si_coreidx(&sii->pub);
933
934 /* switch core */
935 r = (volatile uint32*) ((volatile uchar*) ai_setcoreidx(&sii->pub, coreidx) +
936 regoff);
937 }
938 ASSERT(r != NULL);
939
940 /* mask and set */
941 if (mask || val) {
942 w = (R_REG(sii->osh, r) & ~mask) | val;
943 W_REG(sii->osh, r, w);
944 }
945
946 /* readback */
947 w = R_REG(sii->osh, r);
948
949 if (!fast) {
950 /* restore core index */
951 if (origidx != coreidx)
952 ai_setcoreidx(&sii->pub, origidx);
953
954 INTR_RESTORE(sii, intr_val);
955 }
956
957 return (w);
958 }
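
/* Usage sketch (compiled out): mask == val == 0 is a plain read of the
 * register at 'regoff'; a non-zero mask performs a masked write and returns
 * the value read back afterwards. Reading the chipcommon chipid register is
 * only an illustration.
 */
#if 0
static uint32
corereg_read_example(si_t *sih)
{
	return ai_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, chipid), 0, 0);
}
#endif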
959
960 /*
961 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
962 * switch back to the original core, and return the new value.
963 *
964 * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
965 *
966 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
967 * and (on newer pci cores) chipcommon registers.
968 */
969 uint
970 ai_corereg_writeonly(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
971 {
972 uint origidx = 0;
973 volatile uint32 *r = NULL;
974 uint w = 0;
975 uint intr_val = 0;
976 bool fast = FALSE;
977 si_info_t *sii = SI_INFO(sih);
978 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
979
980 ASSERT(GOODIDX(coreidx));
981 ASSERT(regoff < SI_CORE_SIZE);
982 ASSERT((val & ~mask) == 0);
983
984 if (coreidx >= SI_MAXCORES)
985 return 0;
986
987 if (BUSTYPE(sih->bustype) == SI_BUS) {
988 /* If internal bus, we can always get at everything */
989 fast = TRUE;
990 /* map if does not exist */
991 if (!cores_info->regs[coreidx]) {
992 cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
993 SI_CORE_SIZE);
994 ASSERT(GOODREGS(cores_info->regs[coreidx]));
995 }
996 r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
997 } else if (BUSTYPE(sih->bustype) == PCI_BUS) {
998 /* If pci/pcie, we can get at the pci/pcie regs and, on newer cores, at chipc */
999
1000 if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
1001 /* Chipc registers are mapped at 12KB */
1002
1003 fast = TRUE;
1004 r = (volatile uint32 *)((volatile char *)sii->curmap +
1005 PCI_16KB0_CCREGS_OFFSET + regoff);
1006 } else if (sii->pub.buscoreidx == coreidx) {
1007 /* pci registers are either in the last 2KB of an 8KB window
1008 * or, in pcie and pci rev 13, at 8KB
1009 */
1010 fast = TRUE;
1011 if (SI_FAST(sii))
1012 r = (volatile uint32 *)((volatile char *)sii->curmap +
1013 PCI_16KB0_PCIREGS_OFFSET + regoff);
1014 else
1015 r = (volatile uint32 *)((volatile char *)sii->curmap +
1016 ((regoff >= SBCONFIGOFF) ?
1017 PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
1018 regoff);
1019 }
1020 }
1021
1022 if (!fast) {
1023 INTR_OFF(sii, intr_val);
1024
1025 /* save current core index */
1026 origidx = si_coreidx(&sii->pub);
1027
1028 /* switch core */
1029 r = (volatile uint32*) ((volatile uchar*) ai_setcoreidx(&sii->pub, coreidx) +
1030 regoff);
1031 }
1032 ASSERT(r != NULL);
1033
1034 /* mask and set */
1035 if (mask || val) {
1036 w = (R_REG(sii->osh, r) & ~mask) | val;
1037 W_REG(sii->osh, r, w);
1038 }
1039
1040 if (!fast) {
1041 /* restore core index */
1042 if (origidx != coreidx)
1043 ai_setcoreidx(&sii->pub, origidx);
1044
1045 INTR_RESTORE(sii, intr_val);
1046 }
1047
1048 return (w);
1049 }
1050
1051 /*
1052 * If there is no need for fiddling with interrupts or core switches (typically silicon
1053 * backplane registers, pci registers and chipcommon registers), this function
1054 * maps the register offset on this core to an address that can be used for
1055 * W_REG/R_REG directly.
1056 *
1057 * For accessing registers that would need a core switch, this function will return
1058 * NULL.
1059 */
1060 volatile uint32 *
1061 ai_corereg_addr(si_t *sih, uint coreidx, uint regoff)
1062 {
1063 volatile uint32 *r = NULL;
1064 bool fast = FALSE;
1065 si_info_t *sii = SI_INFO(sih);
1066 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
1067
1068 ASSERT(GOODIDX(coreidx));
1069 ASSERT(regoff < SI_CORE_SIZE);
1070
1071 if (coreidx >= SI_MAXCORES)
1072 return 0;
1073
1074 if (BUSTYPE(sih->bustype) == SI_BUS) {
1075 /* If internal bus, we can always get at everything */
1076 fast = TRUE;
1077 /* map if does not exist */
1078 if (!cores_info->regs[coreidx]) {
1079 cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
1080 SI_CORE_SIZE);
1081 ASSERT(GOODREGS(cores_info->regs[coreidx]));
1082 }
1083 r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
1084 } else if (BUSTYPE(sih->bustype) == PCI_BUS) {
1085 /* If pci/pcie, we can get at the pci/pcie regs and, on newer cores, at chipc */
1086
1087 if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
1088 /* Chipc registers are mapped at 12KB */
1089
1090 fast = TRUE;
1091 r = (volatile uint32 *)((volatile char *)sii->curmap +
1092 PCI_16KB0_CCREGS_OFFSET + regoff);
1093 } else if (sii->pub.buscoreidx == coreidx) {
1094 /* pci registers are either in the last 2KB of an 8KB window
1095 * or, in pcie and pci rev 13, at 8KB
1096 */
1097 fast = TRUE;
1098 if (SI_FAST(sii))
1099 r = (volatile uint32 *)((volatile char *)sii->curmap +
1100 PCI_16KB0_PCIREGS_OFFSET + regoff);
1101 else
1102 r = (volatile uint32 *)((volatile char *)sii->curmap +
1103 ((regoff >= SBCONFIGOFF) ?
1104 PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
1105 regoff);
1106 }
1107 }
1108
1109 if (!fast) {
1110 ASSERT(sii->curidx == coreidx);
1111 r = (volatile uint32*) ((volatile uchar*)sii->curmap + regoff);
1112 }
1113
1114 return (r);
1115 }
1116
1117 void
1118 ai_core_disable(si_t *sih, uint32 bits)
1119 {
1120 si_info_t *sii = SI_INFO(sih);
1121 volatile uint32 dummy;
1122 uint32 status;
1123 aidmp_t *ai;
1124
1125 ASSERT(GOODREGS(sii->curwrap));
1126 ai = sii->curwrap;
1127
1128 /* if core is already in reset, just return */
1129 if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) {
1130 return;
1131 }
1132
1133 /* ensure there are no pending backplane operations */
1134 SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
1135
1136 /* if pending backplane ops still, try waiting longer */
1137 if (status != 0) {
1138 /* 300usecs was sufficient to allow backplane ops to clear for big hammer */
1139 /* during driver load we may need more time */
1140 SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 10000);
1141 /* if still pending ops, continue on and try disable anyway */
1142 /* this is in big hammer path, so don't call wl_reinit in this case... */
1143 }
1144
1145 W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
1146 dummy = R_REG(sii->osh, &ai->resetctrl);
1147 BCM_REFERENCE(dummy);
1148 OSL_DELAY(1);
1149
1150 W_REG(sii->osh, &ai->ioctrl, bits);
1151 dummy = R_REG(sii->osh, &ai->ioctrl);
1152 BCM_REFERENCE(dummy);
1153 OSL_DELAY(10);
1154 }
1155
1156 /* reset and re-enable a core
1157 * inputs:
1158 * bits - core specific bits that are set during and after reset sequence
1159 * resetbits - core specific bits that are set only during reset sequence
1160 */
1161 static void
1162 _ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
1163 {
1164 si_info_t *sii = SI_INFO(sih);
1165 #if defined(UCM_CORRUPTION_WAR)
1166 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
1167 #endif // endif
1168 aidmp_t *ai;
1169 volatile uint32 dummy;
1170 uint loop_counter = 10;
1171
1172 ASSERT(GOODREGS(sii->curwrap));
1173 ai = sii->curwrap;
1174
1175 /* ensure there are no pending backplane operations */
1176 SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), SPINWAIT_TIME_US);
1177
1178 /* put core into reset state */
1179 W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
1180 OSL_DELAY(10);
1181
1182 /* ensure there are no pending backplane operations */
1183 SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), SPINWAIT_TIME_US);
1184
1185 W_REG(sii->osh, &ai->ioctrl, (bits | resetbits | SICF_FGC | SICF_CLOCK_EN));
1186 dummy = R_REG(sii->osh, &ai->ioctrl);
1187 BCM_REFERENCE(dummy);
1188 #ifdef UCM_CORRUPTION_WAR
1189 if (cores_info->coreid[sii->curidx] == D11_CORE_ID) {
1190 /* Reset FGC */
1191 OSL_DELAY(1);
1192 W_REG(sii->osh, &ai->ioctrl, (dummy & (~SICF_FGC)));
1193 }
1194 #endif /* UCM_CORRUPTION_WAR */
1195 /* ensure there are no pending backplane operations */
1196 SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), SPINWAIT_TIME_US);
1197
1198 while (R_REG(sii->osh, &ai->resetctrl) != 0 && --loop_counter != 0) {
1199 /* ensure there are no pending backplane operations */
1200 SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), SPINWAIT_TIME_US);
1201
1202 /* take core out of reset */
1203 W_REG(sii->osh, &ai->resetctrl, 0);
1204
1205 /* ensure there are no pending backplane operations */
1206 SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), SPINWAIT_TIME_US);
1207 }
1208
1209 #ifdef UCM_CORRUPTION_WAR
1210 /* Pulse FGC after lifting Reset */
1211 W_REG(sii->osh, &ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
1212 #else
1213 W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN));
1214 #endif /* UCM_CORRUPTION_WAR */
1215 dummy = R_REG(sii->osh, &ai->ioctrl);
1216 BCM_REFERENCE(dummy);
1217 #ifdef UCM_CORRUPTION_WAR
1218 if (cores_info->coreid[sii->curidx] == D11_CORE_ID) {
1219 /* Reset FGC */
1220 OSL_DELAY(1);
1221 W_REG(sii->osh, &ai->ioctrl, (dummy & (~SICF_FGC)));
1222 }
1223 #endif /* UCM_CORRUPTION_WAR */
1224 OSL_DELAY(1);
1225
1226 }
1227
1228 void
1229 ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
1230 {
1231 si_info_t *sii = SI_INFO(sih);
1232 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
1233 uint idx = sii->curidx;
1234
1235 if (cores_info->wrapba3[idx] != 0) {
1236 ai_setcoreidx_3rdwrap(sih, idx);
1237 _ai_core_reset(sih, bits, resetbits);
1238 ai_setcoreidx(sih, idx);
1239 }
1240
1241 if (cores_info->wrapba2[idx] != 0) {
1242 ai_setcoreidx_2ndwrap(sih, idx);
1243 _ai_core_reset(sih, bits, resetbits);
1244 ai_setcoreidx(sih, idx);
1245 }
1246
1247 _ai_core_reset(sih, bits, resetbits);
1248 }
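
/* Illustrative call (compiled out): a plain reset with no core-specific
 * control bits leaves only SICF_CLOCK_EN set once the sequence completes,
 * after which the core should report up.
 */
#if 0
static void
core_reset_example(si_t *sih)
{
	ai_core_reset(sih, 0, 0);
	ASSERT(ai_iscoreup(sih));
}
#endif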
1249
1250 void
1251 ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
1252 {
1253 si_info_t *sii = SI_INFO(sih);
1254 aidmp_t *ai;
1255 uint32 w;
1256
1257 if (BCM4707_DMP()) {
1258 SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
1259 __FUNCTION__));
1260 return;
1261 }
1262 if (PMU_DMP()) {
1263 SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
1264 __FUNCTION__));
1265 return;
1266 }
1267
1268 ASSERT(GOODREGS(sii->curwrap));
1269 ai = sii->curwrap;
1270
1271 ASSERT((val & ~mask) == 0);
1272
1273 if (mask || val) {
1274 w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
1275 W_REG(sii->osh, &ai->ioctrl, w);
1276 }
1277 }
1278
1279 uint32
1280 ai_core_cflags(si_t *sih, uint32 mask, uint32 val)
1281 {
1282 si_info_t *sii = SI_INFO(sih);
1283 aidmp_t *ai;
1284 uint32 w;
1285
1286 if (BCM4707_DMP()) {
1287 SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
1288 __FUNCTION__));
1289 return 0;
1290 }
1291
1292 if (PMU_DMP()) {
1293 SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
1294 __FUNCTION__));
1295 return 0;
1296 }
1297 ASSERT(GOODREGS(sii->curwrap));
1298 ai = sii->curwrap;
1299
1300 ASSERT((val & ~mask) == 0);
1301
1302 if (mask || val) {
1303 w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
1304 W_REG(sii->osh, &ai->ioctrl, w);
1305 }
1306
1307 return R_REG(sii->osh, &ai->ioctrl);
1308 }
1309
1310 uint32
1311 ai_core_sflags(si_t *sih, uint32 mask, uint32 val)
1312 {
1313 si_info_t *sii = SI_INFO(sih);
1314 aidmp_t *ai;
1315 uint32 w;
1316
1317 if (BCM4707_DMP()) {
1318 SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
1319 __FUNCTION__));
1320 return 0;
1321 }
1322 if (PMU_DMP()) {
1323 SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
1324 __FUNCTION__));
1325 return 0;
1326 }
1327
1328 ASSERT(GOODREGS(sii->curwrap));
1329 ai = sii->curwrap;
1330
1331 ASSERT((val & ~mask) == 0);
1332 ASSERT((mask & ~SISF_CORE_BITS) == 0);
1333
1334 if (mask || val) {
1335 w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val);
1336 W_REG(sii->osh, &ai->iostatus, w);
1337 }
1338
1339 return R_REG(sii->osh, &ai->iostatus);
1340 }
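
/* Hedged example (compiled out): pulsing force-gated-clock through the
 * ioctrl read-modify-write helper above.
 */
#if 0
static void
cflags_example(si_t *sih)
{
	(void)ai_core_cflags(sih, SICF_FGC, SICF_FGC);	/* set FGC */
	OSL_DELAY(1);
	(void)ai_core_cflags(sih, SICF_FGC, 0);		/* clear FGC */
}
#endif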
1341
1342 #if defined(BCMDBG_PHYDUMP)
1343 /* print interesting aidmp registers */
1344 void
1345 ai_dumpregs(si_t *sih, struct bcmstrbuf *b)
1346 {
1347 si_info_t *sii = SI_INFO(sih);
1348 osl_t *osh;
1349 aidmp_t *ai;
1350 uint i;
1351 uint32 prev_value = 0;
1352 axi_wrapper_t * axi_wrapper = sii->axi_wrapper;
1353 uint32 cfg_reg = 0;
1354 uint bar0_win_offset = 0;
1355
1356 osh = sii->osh;
1357
1358 /* Save and restore wrapper access window */
1359 if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
1360 if (PCIE_GEN2(sii)) {
1361 cfg_reg = PCIE2_BAR0_CORE2_WIN2;
1362 bar0_win_offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
1363 } else {
1364 cfg_reg = PCI_BAR0_WIN2;
1365 bar0_win_offset = PCI_BAR0_WIN2_OFFSET;
1366 }
1367
1368 prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4);
1369
1370 if (prev_value == ID32_INVALID) {
1371 SI_PRINT(("%s, PCI_BAR0_WIN2 - %x\n", __FUNCTION__, prev_value));
1372 return;
1373 }
1374 }
1375
1376 bcm_bprintf(b, "ChipNum:%x, ChipRev;%x, BusType:%x, BoardType:%x, BoardVendor:%x\n\n",
1377 sih->chip, sih->chiprev, sih->bustype, sih->boardtype, sih->boardvendor);
1378
1379 for (i = 0; i < sii->axi_num_wrappers; i++) {
1380
1381 if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
1382 /* Set BAR0 window to bridge wrapper base address */
1383 OSL_PCI_WRITE_CONFIG(osh,
1384 cfg_reg, 4, axi_wrapper[i].wrapper_addr);
1385
1386 ai = (aidmp_t *) ((volatile uint8*)sii->curmap + bar0_win_offset);
1387 } else {
1388 ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr;
1389 }
1390
1391 bcm_bprintf(b, "core 0x%x: core_rev:%d, %s_WR ADDR:%x \n", axi_wrapper[i].cid,
1392 axi_wrapper[i].rev,
1393 axi_wrapper[i].wrapper_type == AI_SLAVE_WRAPPER ? "SLAVE" : "MASTER",
1394 axi_wrapper[i].wrapper_addr);
1395
1396 /* BCM4707_DMP() */
1397 if (BCM4707_CHIP(CHIPID(sih->chip)) &&
1398 (axi_wrapper[i].cid == NS_CCB_CORE_ID)) {
1399 bcm_bprintf(b, "Skipping chipcommonb in 4707\n");
1400 continue;
1401 }
1402
1403 bcm_bprintf(b, "ioctrlset 0x%x ioctrlclear 0x%x ioctrl 0x%x iostatus 0x%x "
1404 "ioctrlwidth 0x%x iostatuswidth 0x%x\n"
1405 "resetctrl 0x%x resetstatus 0x%x resetreadid 0x%x resetwriteid 0x%x\n"
1406 "errlogctrl 0x%x errlogdone 0x%x errlogstatus 0x%x "
1407 "errlogaddrlo 0x%x errlogaddrhi 0x%x\n"
1408 "errlogid 0x%x errloguser 0x%x errlogflags 0x%x\n"
1409 "intstatus 0x%x config 0x%x itcr 0x%x\n\n",
1410 R_REG(osh, &ai->ioctrlset),
1411 R_REG(osh, &ai->ioctrlclear),
1412 R_REG(osh, &ai->ioctrl),
1413 R_REG(osh, &ai->iostatus),
1414 R_REG(osh, &ai->ioctrlwidth),
1415 R_REG(osh, &ai->iostatuswidth),
1416 R_REG(osh, &ai->resetctrl),
1417 R_REG(osh, &ai->resetstatus),
1418 R_REG(osh, &ai->resetreadid),
1419 R_REG(osh, &ai->resetwriteid),
1420 R_REG(osh, &ai->errlogctrl),
1421 R_REG(osh, &ai->errlogdone),
1422 R_REG(osh, &ai->errlogstatus),
1423 R_REG(osh, &ai->errlogaddrlo),
1424 R_REG(osh, &ai->errlogaddrhi),
1425 R_REG(osh, &ai->errlogid),
1426 R_REG(osh, &ai->errloguser),
1427 R_REG(osh, &ai->errlogflags),
1428 R_REG(osh, &ai->intstatus),
1429 R_REG(osh, &ai->config),
1430 R_REG(osh, &ai->itcr));
1431 }
1432
1433 /* Restore the initial wrapper space */
1434 if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
1435 if (prev_value && cfg_reg) {
1436 OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value);
1437 }
1438 }
1439 }
1440 #endif // endif
1441
1442 void
1443 ai_update_backplane_timeouts(si_t *sih, bool enable, uint32 timeout_exp, uint32 cid)
1444 {
1445 #if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
1446 si_info_t *sii = SI_INFO(sih);
1447 aidmp_t *ai;
1448 uint32 i;
1449 axi_wrapper_t * axi_wrapper = sii->axi_wrapper;
1450 uint32 errlogctrl = (enable << AIELC_TO_ENAB_SHIFT) |
1451 ((timeout_exp << AIELC_TO_EXP_SHIFT) & AIELC_TO_EXP_MASK);
1452
1453 #ifdef BCM_BACKPLANE_TIMEOUT
1454 uint32 prev_value = 0;
1455 osl_t *osh = sii->osh;
1456 uint32 cfg_reg = 0;
1457 uint32 offset = 0;
1458 #endif /* BCM_BACKPLANE_TIMEOUT */
1459
1460 if ((sii->axi_num_wrappers == 0) ||
1461 #ifdef BCM_BACKPLANE_TIMEOUT
1462 (!PCIE(sii)) ||
1463 #endif /* BCM_BACKPLANE_TIMEOUT */
1464 FALSE) {
1465 SI_VMSG((" %s, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d, ID:%x\n",
1466 __FUNCTION__, sii->axi_num_wrappers, PCIE(sii),
1467 BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
1468 return;
1469 }
1470
1471 #ifdef BCM_BACKPLANE_TIMEOUT
1472 /* Save and restore the wrapper access window */
1473 if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
1474 if (PCIE_GEN1(sii)) {
1475 cfg_reg = PCI_BAR0_WIN2;
1476 offset = PCI_BAR0_WIN2_OFFSET;
1477 } else if (PCIE_GEN2(sii)) {
1478 cfg_reg = PCIE2_BAR0_CORE2_WIN2;
1479 offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
1480 }
1481 else {
1482 ASSERT(!"!PCIE_GEN1 && !PCIE_GEN2");
1483 }
1484
1485 prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4);
1486 if (prev_value == ID32_INVALID) {
1487 SI_PRINT(("%s, PCI_BAR0_WIN2 - %x\n", __FUNCTION__, prev_value));
1488 return;
1489 }
1490 }
1491 #endif /* BCM_BACKPLANE_TIMEOUT */
1492
1493 for (i = 0; i < sii->axi_num_wrappers; ++i) {
1494
1495 if (axi_wrapper[i].wrapper_type != AI_SLAVE_WRAPPER) {
1496 SI_VMSG(("SKIP ENABLE BPT: MFG:%x, CID:%x, ADDR:%x\n",
1497 axi_wrapper[i].mfg,
1498 axi_wrapper[i].cid,
1499 axi_wrapper[i].wrapper_addr));
1500 continue;
1501 }
1502
1503 /* Update only given core if requested */
1504 if ((cid != 0) && (axi_wrapper[i].cid != cid)) {
1505 continue;
1506 }
1507
1508 #ifdef BCM_BACKPLANE_TIMEOUT
1509 if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
1510 /* Set BAR0_CORE2_WIN2 to bridge wrapper base address */
1511 OSL_PCI_WRITE_CONFIG(osh,
1512 cfg_reg, 4, axi_wrapper[i].wrapper_addr);
1513
1514 /* set AI to BAR0 + Offset corresponding to Gen1 or gen2 */
1515 ai = (aidmp_t *) (DISCARD_QUAL(sii->curmap, uint8) + offset);
1516 }
1517 else
1518 #endif /* BCM_BACKPLANE_TIMEOUT */
1519 {
1520 ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr;
1521 }
1522
1523 W_REG(sii->osh, &ai->errlogctrl, errlogctrl);
1524
1525 SI_VMSG(("ENABLED BPT: MFG:%x, CID:%x, ADDR:%x, ERR_CTRL:%x\n",
1526 axi_wrapper[i].mfg,
1527 axi_wrapper[i].cid,
1528 axi_wrapper[i].wrapper_addr,
1529 R_REG(sii->osh, &ai->errlogctrl)));
1530 }
1531
1532 #ifdef BCM_BACKPLANE_TIMEOUT
1533 /* Restore the initial wrapper space */
1534 if (prev_value) {
1535 OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value);
1536 }
1537 #endif /* BCM_BACKPLANE_TIMEOUT */
1538
1539 #endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */
1540 }
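
/* Illustrative calls (compiled out): cid == 0 updates every slave wrapper,
 * while a non-zero cid restricts the update to wrappers with that core id.
 * The exponent 9 is an arbitrary example value for the AIELC_TO_EXP_MASK
 * field, not a recommended setting.
 */
#if 0
static void
bpt_example(si_t *sih)
{
	ai_update_backplane_timeouts(sih, TRUE, 9, 0);	/* enable on all slave wrappers */
	ai_update_backplane_timeouts(sih, FALSE, 0, D11_CORE_ID); /* disable on d11 only */
}
#endif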
1541
1542 #if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
1543
1544 /* slave error is ignored, so account for those cases */
1545 static uint32 si_ignore_errlog_cnt = 0;
1546
1547 static bool
1548 ai_ignore_errlog(si_info_t *sii, aidmp_t *ai,
1549 uint32 lo_addr, uint32 hi_addr, uint32 err_axi_id, uint32 errsts)
1550 {
1551 uint32 axi_id;
1552 #ifdef BCMPCIE_BTLOG
1553 uint32 axi_id2 = BCM4347_UNUSED_AXI_ID;
1554 #endif /* BCMPCIE_BTLOG */
1555 uint32 ignore_errsts = AIELS_SLAVE_ERR;
1556 uint32 ignore_hi = BT_CC_SPROM_BADREG_HI;
1557 uint32 ignore_lo = BT_CC_SPROM_BADREG_LO;
1558 uint32 ignore_size = BT_CC_SPROM_BADREG_SIZE;
1559
1560 /* ignore the BT slave errors if the errlog is to chipcommon addr 0x190 */
1561 switch (CHIPID(sii->pub.chip)) {
1562 case BCM4350_CHIP_ID:
1563 axi_id = BCM4350_BT_AXI_ID;
1564 break;
1565 case BCM4345_CHIP_ID:
1566 axi_id = BCM4345_BT_AXI_ID;
1567 break;
1568 case BCM4349_CHIP_GRPID:
1569 axi_id = BCM4349_BT_AXI_ID;
1570 break;
1571 case BCM4364_CHIP_ID:
1572 case BCM4373_CHIP_ID:
1573 axi_id = BCM4364_BT_AXI_ID;
1574 break;
1575 #ifdef BCMPCIE_BTLOG
1576 case BCM4347_CHIP_ID:
1577 case BCM4357_CHIP_ID:
1578 axi_id = BCM4347_CC_AXI_ID;
1579 axi_id2 = BCM4347_PCIE_AXI_ID;
1580 ignore_errsts = AIELS_TIMEOUT;
1581 ignore_hi = BCM4347_BT_ADDR_HI;
1582 ignore_lo = BCM4347_BT_ADDR_LO;
1583 ignore_size = BCM4347_BT_SIZE;
1584 break;
1585 #endif /* BCMPCIE_BTLOG */
1586
1587 default:
1588 return FALSE;
1589 }
1590
1591 /* AXI ID check */
1592 err_axi_id &= AI_ERRLOGID_AXI_ID_MASK;
1593 if (!(err_axi_id == axi_id ||
1594 #ifdef BCMPCIE_BTLOG
1595 (axi_id2 != BCM4347_UNUSED_AXI_ID && err_axi_id == axi_id2)))
1596 #else
1597 FALSE))
1598 #endif /* BCMPCIE_BTLOG */
1599 return FALSE;
1600
1601 /* slave errors */
1602 if ((errsts & AIELS_TIMEOUT_MASK) != ignore_errsts)
1603 return FALSE;
1604
1605 /* address range check */
1606 if ((hi_addr != ignore_hi) ||
1607 (lo_addr < ignore_lo) || (lo_addr >= (ignore_lo + ignore_size)))
1608 return FALSE;
1609
1610 #ifdef BCMPCIE_BTLOG
1611 if (ignore_errsts == AIELS_TIMEOUT) {
1612 /* reset AXI timeout */
1613 ai_reset_axi_to(sii, ai);
1614 }
1615 #endif /* BCMPCIE_BTLOG */
1616
1617 return TRUE;
1618 }
1619 #endif /* defined (AXI_TIMEOUTS) || defined (BCM_BACKPLANE_TIMEOUT) */
1620
1621 #ifdef BCM_BACKPLANE_TIMEOUT
1622
1623 /* Function to return the APB bridge details corresponding to the core */
1624 static bool
1625 ai_get_apb_bridge(si_t *sih, uint32 coreidx, uint32 *apb_id, uint32 *apb_coreunit)
1626 {
1627 uint i;
1628 uint32 core_base, core_end;
1629 si_info_t *sii = SI_INFO(sih);
1630 static uint32 coreidx_cached = 0, apb_id_cached = 0, apb_coreunit_cached = 0;
1631 uint32 tmp_coreunit = 0;
1632 si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
1633
1634 if (coreidx >= MIN(sii->numcores, SI_MAXCORES))
1635 return FALSE;
1636
1637 /* Most of the time the APB bridge query will be for the d11 core.
1638 * Cache the last lookup and return it on a hit rather than iterating the table.
1639 */
1640 if (coreidx_cached == coreidx) {
1641 *apb_id = apb_id_cached;
1642 *apb_coreunit = apb_coreunit_cached;
1643 return TRUE;
1644 }
1645
1646 core_base = cores_info->coresba[coreidx];
1647 core_end = core_base + cores_info->coresba_size[coreidx];
1648
1649 for (i = 0; i < sii->numcores; i++) {
1650 if (cores_info->coreid[i] == APB_BRIDGE_ID) {
1651 uint32 apb_base;
1652 uint32 apb_end;
1653
1654 apb_base = cores_info->coresba[i];
1655 apb_end = apb_base + cores_info->coresba_size[i];
1656
1657 if ((core_base >= apb_base) &&
1658 (core_end <= apb_end)) {
1659 /* Current core is attached to this APB bridge */
1660 *apb_id = apb_id_cached = APB_BRIDGE_ID;
1661 *apb_coreunit = apb_coreunit_cached = tmp_coreunit;
1662 coreidx_cached = coreidx;
1663 return TRUE;
1664 }
1665 /* Increment the coreunit */
1666 tmp_coreunit++;
1667 }
1668 }
1669
1670 return FALSE;
1671 }
1672
1673 uint32
1674 ai_clear_backplane_to_fast(si_t *sih, void *addr)
1675 {
1676 si_info_t *sii = SI_INFO(sih);
1677 volatile void *curmap = sii->curmap;
1678 bool core_reg = FALSE;
1679
1680 /* Use fast path only for core register access */
1681 if (((uintptr)addr >= (uintptr)curmap) &&
1682 ((uintptr)addr < ((uintptr)curmap + SI_CORE_SIZE))) {
1683 /* address being accessed is within current core reg map */
1684 core_reg = TRUE;
1685 }
1686
1687 if (core_reg) {
1688 uint32 apb_id, apb_coreunit;
1689
1690 if (ai_get_apb_bridge(sih, si_coreidx(&sii->pub),
1691 &apb_id, &apb_coreuinit) == TRUE) {
1692 /* Found the APB bridge corresponding to current core,
1693 * Check for bus errors in APB wrapper
1694 */
1695 return ai_clear_backplane_to_per_core(sih,
1696 apb_id, apb_coreunit, NULL);
1697 }
1698 }
1699
1700 /* Default is to poll for errors on all slave wrappers */
1701 return si_clear_backplane_to(sih);
1702 }
1703 #endif /* BCM_BACKPLANE_TIMEOUT */
1704
1705 #if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
1706 static bool g_disable_backplane_logs = FALSE;
1707
1708 #if defined(ETD)
1709 static uint32 last_axi_error = AXI_WRAP_STS_NONE;
1710 static uint32 last_axi_error_core = 0;
1711 static uint32 last_axi_error_wrap = 0;
1712 #endif /* ETD */
1713
1714 /*
1715 * API to clear the backplane timeout per core.
1716 * The caller may pass an optional wrapper address. If present, this is used
1717 * as the wrapper base address; in that case the caller must also provide
1718 * the coreid.
1719 * If both coreid and wrapper are zero, the error status of the current
1720 * bridge is verified.
1721 */
1722 uint32
1723 ai_clear_backplane_to_per_core(si_t *sih, uint coreid, uint coreunit, void *wrap)
1724 {
1725 int ret = AXI_WRAP_STS_NONE;
1726 aidmp_t *ai = NULL;
1727 uint32 errlog_status = 0;
1728 si_info_t *sii = SI_INFO(sih);
1729 uint32 errlog_lo = 0, errlog_hi = 0, errlog_id = 0, errlog_flags = 0;
1730 uint32 current_coreidx = si_coreidx(sih);
1731 uint32 target_coreidx = si_findcoreidx(sih, coreid, coreunit);
1732
1733 #if defined(BCM_BACKPLANE_TIMEOUT)
1734 si_axi_error_t * axi_error = sih->err_info ?
1735 &sih->err_info->axi_error[sih->err_info->count] : NULL;
1736 #endif /* BCM_BACKPLANE_TIMEOUT */
1737 bool restore_core = FALSE;
1738
1739 if ((sii->axi_num_wrappers == 0) ||
1740 #ifdef BCM_BACKPLANE_TIMEOUT
1741 (!PCIE(sii)) ||
1742 #endif /* BCM_BACKPLANE_TIMEOUT */
1743 FALSE) {
1744 SI_VMSG((" %s, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d, ID:%x\n",
1745 __FUNCTION__, sii->axi_num_wrappers, PCIE(sii),
1746 BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
1747 return AXI_WRAP_STS_NONE;
1748 }
1749
1750 if (wrap != NULL) {
1751 ai = (aidmp_t *)wrap;
1752 } else if (coreid && (target_coreidx != current_coreidx)) {
1753
1754 if (ai_setcoreidx(sih, target_coreidx) == NULL) {
1755 /* Unable to set the core */
1756 SI_PRINT(("Set Code Failed: coreid:%x, unit:%d, target_coreidx:%d\n",
1757 coreid, coreunit, target_coreidx));
1758 errlog_lo = target_coreidx;
1759 ret = AXI_WRAP_STS_SET_CORE_FAIL;
1760 goto end;
1761 }
1762
1763 restore_core = TRUE;
1764 ai = (aidmp_t *)si_wrapperregs(sih);
1765 } else {
1766 /* Read error status of current wrapper */
1767 ai = (aidmp_t *)si_wrapperregs(sih);
1768
1769 /* Update coreid to the current core's ID */
1770 coreid = si_coreid(sih);
1771 }
1772
1773 /* read error log status */
1774 errlog_status = R_REG(sii->osh, &ai->errlogstatus);
1775
1776 if (errlog_status == ID32_INVALID) {
1777 /* Do not try to peek further */
1778 SI_PRINT(("%s, errlogstatus:%x - Slave Wrapper:%x\n",
1779 __FUNCTION__, errlog_status, coreid));
1780 ret = AXI_WRAP_STS_WRAP_RD_ERR;
1781 errlog_lo = (uint32)(uintptr)&ai->errlogstatus;
1782 goto end;
1783 }
1784
1785 if ((errlog_status & AIELS_TIMEOUT_MASK) != 0) {
1786 uint32 tmp;
1787 uint32 count = 0;
1788 /* set ErrDone to clear the condition */
1789 W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK);
1790
1791 /* SPINWAIT on errlogstatus timeout status bits */
1792 while ((tmp = R_REG(sii->osh, &ai->errlogstatus)) & AIELS_TIMEOUT_MASK) {
1793
1794 if (tmp == ID32_INVALID) {
1795 SI_PRINT(("%s: prev errlogstatus:%x, errlogstatus:%x\n",
1796 __FUNCTION__, errlog_status, tmp));
1797 ret = AXI_WRAP_STS_WRAP_RD_ERR;
1798 errlog_lo = (uint32)(uintptr)&ai->errlogstatus;
1799 goto end;
1800 }
1801 /*
1802 * Clear again, to avoid getting stuck in the loop, if a new error
1803 * is logged after we cleared the first timeout
1804 */
1805 W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK);
1806
1807 count++;
1808 OSL_DELAY(10);
1809 if ((10 * count) > AI_REG_READ_TIMEOUT) {
1810 errlog_status = tmp;
1811 break;
1812 }
1813 }
1814
1815 errlog_lo = R_REG(sii->osh, &ai->errlogaddrlo);
1816 errlog_hi = R_REG(sii->osh, &ai->errlogaddrhi);
1817 errlog_id = R_REG(sii->osh, &ai->errlogid);
1818 errlog_flags = R_REG(sii->osh, &ai->errlogflags);
1819
1820 /* we are already in the error path, so OK to check for the slave error */
1821 if (ai_ignore_errlog(sii, ai, errlog_lo, errlog_hi, errlog_id,
1822 errlog_status)) {
1823 si_ignore_errlog_cnt++;
1824 goto end;
1825 }
1826
1827 /* only reset APB Bridge on timeout (not slave error, or dec error) */
1828 switch (errlog_status & AIELS_TIMEOUT_MASK) {
1829 case AIELS_SLAVE_ERR:
1830 SI_PRINT(("AXI slave error\n"));
1831 ret = AXI_WRAP_STS_SLAVE_ERR;
1832 break;
1833
1834 case AIELS_TIMEOUT:
1835 ai_reset_axi_to(sii, ai);
1836 ret = AXI_WRAP_STS_TIMEOUT;
1837 break;
1838
1839 case AIELS_DECODE:
1840 SI_PRINT(("AXI decode error\n"));
1841 ret = AXI_WRAP_STS_DECODE_ERR;
1842 break;
1843 default:
1844 ASSERT(0); /* should be impossible */
1845 }
1846
1847 SI_PRINT(("\tCoreID: %x\n", coreid));
1848 SI_PRINT(("\t errlog: lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x"
1849 ", status 0x%08x\n",
1850 errlog_lo, errlog_hi, errlog_id, errlog_flags,
1851 errlog_status));
1852 }
1853
1854 end:
1855 #if defined(ETD)
1856 if (ret != AXI_WRAP_STS_NONE) {
1857 last_axi_error = ret;
1858 last_axi_error_core = coreid;
		last_axi_error_wrap = (uint32)(uintptr)ai;
	}
#endif /* ETD */

#if defined(BCM_BACKPLANE_TIMEOUT)
	if (axi_error && (ret != AXI_WRAP_STS_NONE)) {
		axi_error->error = ret;
		axi_error->coreid = coreid;
		axi_error->errlog_lo = errlog_lo;
		axi_error->errlog_hi = errlog_hi;
		axi_error->errlog_id = errlog_id;
		axi_error->errlog_flags = errlog_flags;
		axi_error->errlog_status = errlog_status;
		sih->err_info->count++;

		if (sih->err_info->count == SI_MAX_ERRLOG_SIZE) {
			sih->err_info->count = SI_MAX_ERRLOG_SIZE - 1;
			SI_PRINT(("AXI Error log overflow\n"));
		}
	}
#endif /* BCM_BACKPLANE_TIMEOUT */

	if (restore_core) {
		if (ai_setcoreidx(sih, current_coreidx) == NULL) {
			/* Unable to set the core */
			return ID32_INVALID;
		}
	}

	return ret;
}

/* Reset the APB bridge to recover from an AXI timeout and unblock the backplane */
static void
ai_reset_axi_to(si_info_t *sii, aidmp_t *ai)
{
	/* reset APB Bridge */
	OR_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
	/* sync write */
	(void)R_REG(sii->osh, &ai->resetctrl);
	/* clear Reset bit */
	AND_REG(sii->osh, &ai->resetctrl, ~(AIRC_RESET));
	/* sync write */
	(void)R_REG(sii->osh, &ai->resetctrl);
	SI_PRINT(("AXI timeout\n"));
	if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) {
		SI_PRINT(("reset failed on wrapper %p\n", ai));
		g_disable_backplane_logs = TRUE;
	}
}
#endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */

/*
 * This API polls all slave wrappers for errors and returns a bit map of
 * all reported errors.
 * return - bit map of
 *	AXI_WRAP_STS_NONE
 *	AXI_WRAP_STS_TIMEOUT
 *	AXI_WRAP_STS_SLAVE_ERR
 *	AXI_WRAP_STS_DECODE_ERR
 *	AXI_WRAP_STS_PCI_RD_ERR
 *	AXI_WRAP_STS_WRAP_RD_ERR
 *	AXI_WRAP_STS_SET_CORE_FAIL
 * On timeout detection, the corresponding bridge will be reset to
 * unblock the bus.
 * The error reported by each wrapper can be retrieved using the API
 * si_get_axi_errlog_info()
 */
uint32
ai_clear_backplane_to(si_t *sih)
{
	uint32 ret = 0;
#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)

	si_info_t *sii = SI_INFO(sih);
	aidmp_t *ai;
	uint32 i;
	axi_wrapper_t *axi_wrapper = sii->axi_wrapper;

#ifdef BCM_BACKPLANE_TIMEOUT
	uint32 prev_value = 0;
	osl_t *osh = sii->osh;
	uint32 cfg_reg = 0;
	uint32 offset = 0;

	if ((sii->axi_num_wrappers == 0) || (!PCIE(sii)))
#else
	if (sii->axi_num_wrappers == 0)
#endif /* BCM_BACKPLANE_TIMEOUT */
	{
		SI_VMSG((" %s, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d, ID:%x\n",
			__FUNCTION__, sii->axi_num_wrappers, PCIE(sii),
			BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
		return AXI_WRAP_STS_NONE;
	}

#ifdef BCM_BACKPLANE_TIMEOUT
	/* Save the wrapper access window so it can be restored once all wrappers are polled */
	if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
		if (PCIE_GEN1(sii)) {
			cfg_reg = PCI_BAR0_WIN2;
			offset = PCI_BAR0_WIN2_OFFSET;
		} else if (PCIE_GEN2(sii)) {
			cfg_reg = PCIE2_BAR0_CORE2_WIN2;
			offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
		} else {
			ASSERT(!"!PCIE_GEN1 && !PCIE_GEN2");
		}

		prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4);

		if (prev_value == ID32_INVALID) {
			si_axi_error_t *axi_error =
				sih->err_info ?
				&sih->err_info->axi_error[sih->err_info->count] :
				NULL;

			SI_PRINT(("%s, PCI_BAR0_WIN2 - %x\n", __FUNCTION__, prev_value));
			if (axi_error) {
				axi_error->error = ret = AXI_WRAP_STS_PCI_RD_ERR;
				axi_error->errlog_lo = cfg_reg;
				sih->err_info->count++;

				if (sih->err_info->count == SI_MAX_ERRLOG_SIZE) {
					sih->err_info->count = SI_MAX_ERRLOG_SIZE - 1;
					SI_PRINT(("AXI Error log overflow\n"));
				}
			}

			return ret;
		}
	}
#endif /* BCM_BACKPLANE_TIMEOUT */

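	/* poll each slave wrapper and accumulate the reported error bits */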
	for (i = 0; i < sii->axi_num_wrappers; ++i) {
		uint32 tmp;

		if (axi_wrapper[i].wrapper_type != AI_SLAVE_WRAPPER) {
			continue;
		}

#ifdef BCM_BACKPLANE_TIMEOUT
		if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
			/* Point the saved config window at this wrapper's base address */
			OSL_PCI_WRITE_CONFIG(osh,
				cfg_reg, 4, axi_wrapper[i].wrapper_addr);

			/* set AI to BAR0 + offset corresponding to Gen1 or Gen2 */
			ai = (aidmp_t *)(DISCARD_QUAL(sii->curmap, uint8) + offset);
		}
		else
#endif /* BCM_BACKPLANE_TIMEOUT */
		{
			ai = (aidmp_t *)(uintptr)axi_wrapper[i].wrapper_addr;
		}

		tmp = ai_clear_backplane_to_per_core(sih, axi_wrapper[i].cid, 0,
			DISCARD_QUAL(ai, void));

		ret |= tmp;
	}

#ifdef BCM_BACKPLANE_TIMEOUT
	/* Restore the initial wrapper space */
	if (prev_value) {
		OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value);
	}
#endif /* BCM_BACKPLANE_TIMEOUT */

#endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */

	return ret;
}
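
/*
 * Illustrative usage sketch (kept under #if 0, never compiled): a
 * hypothetical caller in a bus-error or watchdog path could poll all
 * slave wrappers and act on the aggregated status bits. The function
 * name below is an assumption for the example only.
 */
#if 0
static void
example_check_backplane(si_t *sih)
{
	uint32 sts = ai_clear_backplane_to(sih);

	if (sts & AXI_WRAP_STS_TIMEOUT) {
		/* the offending APB bridge was already reset to unblock the bus */
	}
	if (sts != AXI_WRAP_STS_NONE) {
		/* per-wrapper details can be retrieved via si_get_axi_errlog_info() */
	}
}
#endif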

uint
ai_num_slaveports(si_t *sih, uint coreidx)
{
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
	uint32 cib;

	cib = cores_info->cib[coreidx];
	return ((cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT);
}
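
/*
 * Example (illustrative): the slave-port count is the NSP field of the
 * core's EROM CIB descriptor, so for a core whose CIB encodes NSP == 2,
 * ai_num_slaveports(sih, coreidx) returns 2. The field position is given
 * by CIB_NSP_MASK/CIB_NSP_SHIFT in the SoC headers.
 */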

#ifdef UART_TRAP_DBG
void
ai_dump_APB_Bridge_registers(si_t *sih)
{
	aidmp_t *ai;
	si_info_t *sii = SI_INFO(sih);

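	/* dump the error log registers of the first APB bridge wrapper */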
	ai = (aidmp_t *)sii->br_wrapba[0];
	printf("APB Bridge 0\n");
	printf("lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x",
		R_REG(sii->osh, &ai->errlogaddrlo),
		R_REG(sii->osh, &ai->errlogaddrhi),
		R_REG(sii->osh, &ai->errlogid),
		R_REG(sii->osh, &ai->errlogflags));
	printf("\n status 0x%08x\n", R_REG(sii->osh, &ai->errlogstatus));
}
#endif /* UART_TRAP_DBG */

void
ai_force_clocks(si_t *sih, uint clock_state)
{
	si_info_t *sii = SI_INFO(sih);
	aidmp_t *ai, *ai_sec = NULL;
	volatile uint32 dummy;
	uint32 ioctrl;
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;
	if (cores_info->wrapba2[sii->curidx])
		ai_sec = REG_MAP(cores_info->wrapba2[sii->curidx], SI_CORE_SIZE);

	/* ensure there are no pending backplane operations */
	SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);

	if (clock_state == FORCE_CLK_ON) {
		ioctrl = R_REG(sii->osh, &ai->ioctrl);
		W_REG(sii->osh, &ai->ioctrl, (ioctrl | SICF_FGC));
		dummy = R_REG(sii->osh, &ai->ioctrl);
		BCM_REFERENCE(dummy);
		if (ai_sec) {
			ioctrl = R_REG(sii->osh, &ai_sec->ioctrl);
			W_REG(sii->osh, &ai_sec->ioctrl, (ioctrl | SICF_FGC));
			dummy = R_REG(sii->osh, &ai_sec->ioctrl);
			BCM_REFERENCE(dummy);
		}
	} else {
		ioctrl = R_REG(sii->osh, &ai->ioctrl);
		W_REG(sii->osh, &ai->ioctrl, (ioctrl & ~SICF_FGC));
		dummy = R_REG(sii->osh, &ai->ioctrl);
		BCM_REFERENCE(dummy);
		if (ai_sec) {
			ioctrl = R_REG(sii->osh, &ai_sec->ioctrl);
			W_REG(sii->osh, &ai_sec->ioctrl, (ioctrl & ~SICF_FGC));
			dummy = R_REG(sii->osh, &ai_sec->ioctrl);
			BCM_REFERENCE(dummy);
		}
	}
	/* ensure there are no pending backplane operations */
	SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
}
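
/*
 * Illustrative usage sketch (kept under #if 0, never compiled): bracket a
 * register access with forced-gated-clock on/off. FORCE_CLK_OFF is assumed
 * to be the complementary state value defined alongside FORCE_CLK_ON.
 */
#if 0
	ai_force_clocks(sih, FORCE_CLK_ON);	/* sets SICF_FGC in ioctrl */
	/* ... access core registers that require running clocks ... */
	ai_force_clocks(sih, FORCE_CLK_OFF);	/* clears SICF_FGC */
#endif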