/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Misc utility routines for accessing chip-specific features
 * of the SiliconBackplane-based Broadcom chips.
 *
 * Copyright (C) 1999-2017, Broadcom Corporation
 *
 *      Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 *      As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module.  An independent module is a module which is not
 * derived from this software.  The special exception does not apply to any
 * modifications of the software.
 *
 *      Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: aiutils.c 625027 2016-03-15 08:20:18Z $
 */
#include <bcm_cfg.h>
#include <typedefs.h>
#include <bcmdefs.h>
#include <osl.h>
#include <bcmutils.h>
#include <siutils.h>
#include <hndsoc.h>
#include <sbchipc.h>
#include <pcicfg.h>

#include "siutils_priv.h"
#include <bcmdevs.h>

#define BCM5357_DMP() (0)
#define BCM53573_DMP() (0)
#define BCM4707_DMP() (0)
#define PMU_DMP() (0)
#define GCI_DMP() (0)
#define remap_coreid(sih, coreid)	(coreid)
#define remap_corerev(sih, corerev)	(corerev)

/* EROM parsing */

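/*
 * The EROM is a table of 32-bit descriptors, each tagged in its low bits:
 * a component identifier pair (CIA/CIB), a master port descriptor (MPD),
 * an address space descriptor (ASD), or the end marker (ER_END).
 * get_erom_ent() below returns the next entry for which
 * (ent & mask) == match, skipping invalid and non-matching entries.
 */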
static uint32
get_erom_ent(si_t *sih, uint32 **eromptr, uint32 mask, uint32 match)
{
	uint32 ent;
	uint inv = 0, nom = 0;
	uint32 size = 0;

	while (TRUE) {
		ent = R_REG(si_osh(sih), *eromptr);
		(*eromptr)++;

		if (mask == 0)
			break;

		if ((ent & ER_VALID) == 0) {
			inv++;
			continue;
		}

		if (ent == (ER_END | ER_VALID))
			break;

		if ((ent & mask) == match)
			break;

		/* escape if we scan past the maximum EROM size without
		 * finding the end marker (i.e. the EROM holds invalid values)
		 */
		size += sizeof(*eromptr);
		if (size >= ER_SZ_MAX) {
			SI_ERROR(("Failed to find end of EROM marker\n"));
			break;
		}

		nom++;
	}

	SI_VMSG(("%s: Returning ent 0x%08x\n", __FUNCTION__, ent));
	if (inv + nom) {
		SI_VMSG(("  after %d invalid and %d non-matching entries\n", inv, nom));
	}
	return ent;
}

static uint32
get_asd(si_t *sih, uint32 **eromptr, uint sp, uint ad, uint st, uint32 *addrl, uint32 *addrh,
	uint32 *sizel, uint32 *sizeh)
{
	uint32 asd, sz, szd;

	BCM_REFERENCE(ad);

	asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
	if (((asd & ER_TAG1) != ER_ADD) ||
	    (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
	    ((asd & AD_ST_MASK) != st)) {
		/* This is not what we want, "push" it back */
		(*eromptr)--;
		return 0;
	}
	*addrl = asd & AD_ADDR_MASK;
	if (asd & AD_AG32)
		*addrh = get_erom_ent(sih, eromptr, 0, 0);
	else
		*addrh = 0;
	*sizeh = 0;
	sz = asd & AD_SZ_MASK;
	if (sz == AD_SZ_SZD) {
		szd = get_erom_ent(sih, eromptr, 0, 0);
		*sizel = szd & SD_SZ_MASK;
		if (szd & SD_SG32)
			*sizeh = get_erom_ent(sih, eromptr, 0, 0);
	} else
		*sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);

	SI_VMSG(("  SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
	        sp, ad, st, *sizeh, *sizel, *addrh, *addrl));

	return asd;
}

/* parse the enumeration rom to identify all cores */
void
ai_scan(si_t *sih, void *regs, uint devid)
{
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
	chipcregs_t *cc = (chipcregs_t *)regs;
	uint32 erombase, *eromptr, *eromlim;
	axi_wrapper_t * axi_wrapper = sii->axi_wrapper;

	BCM_REFERENCE(devid);

	erombase = R_REG(sii->osh, &cc->eromptr);

	switch (BUSTYPE(sih->bustype)) {
	case SI_BUS:
		eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
		break;

	case PCI_BUS:
		/* Set wrappers address */
		sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE);

		/* Now point the window at the erom */
		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase);
		eromptr = regs;
		break;

#ifdef BCMSDIO
	case SPI_BUS:
	case SDIO_BUS:
		eromptr = (uint32 *)(uintptr)erombase;
		break;
#endif	/* BCMSDIO */

	case PCMCIA_BUS:
	default:
		SI_ERROR(("Don't know how to do AXI enumeration on bus %d\n", sih->bustype));
		ASSERT(0);
		return;
	}
	eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
	sii->axi_num_wrappers = 0;

	SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n",
	         OSL_OBFUSCATE_BUF(regs), erombase,
		OSL_OBFUSCATE_BUF(eromptr), OSL_OBFUSCATE_BUF(eromlim)));
	while (eromptr < eromlim) {
		uint32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
		uint32 mpd, asd, addrl, addrh, sizel, sizeh;
		uint i, j, idx;
		bool br;

		br = FALSE;

		/* Grok a component */
		cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
		if (cia == (ER_END | ER_VALID)) {
			SI_VMSG(("Found END of erom after %d cores\n", sii->numcores));
			return;
		}

		cib = get_erom_ent(sih, &eromptr, 0, 0);

		if ((cib & ER_TAG) != ER_CI) {
			SI_ERROR(("CIA not followed by CIB\n"));
			goto error;
		}

		cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
		mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
		crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
		nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
		nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
		nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
		nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;

#ifdef BCMDBG_SI
		SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, "
		         "nsw = %d, nmp = %d & nsp = %d\n",
		         mfg, cid, crev, OSL_OBFUSCATE_BUF(eromptr - 1), nmw, nsw, nmp, nsp));
#else
		BCM_REFERENCE(crev);
#endif

		if (CHIPID(sih->chip) == BCM4347_CHIP_ID) {
			/* 4347 has more entries for the ARM core.
			 * This should apply to all chips, but it crashes on router
			 * platforms; this is a temporary fix pending further analysis.
			 */
			if (nsp == 0)
				continue;
		} else {
			/* Include Default slave wrapper for timeout monitoring */
			if ((nsp == 0) ||
#if !defined(AXI_TIMEOUTS) && !defined(BCM_BACKPLANE_TIMEOUT)
				((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) ||
#endif /* !defined(AXI_TIMEOUTS) && !defined(BCM_BACKPLANE_TIMEOUT) */
				FALSE) {
				continue;
			}
		}

		if ((nmw + nsw == 0)) {
			/* A component which is not a core */
			if (cid == OOB_ROUTER_CORE_ID) {
				asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
					&addrl, &addrh, &sizel, &sizeh);
				if (asd != 0) {
					sii->oob_router = addrl;
				}
			}
			if (cid != GMAC_COMMON_4706_CORE_ID && cid != NS_CCB_CORE_ID &&
				cid != PMU_CORE_ID && cid != GCI_CORE_ID)
				continue;
		}

		idx = sii->numcores;

		cores_info->cia[idx] = cia;
		cores_info->cib[idx] = cib;
		cores_info->coreid[idx] = remap_coreid(sih, cid);

		for (i = 0; i < nmp; i++) {
			mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
			if ((mpd & ER_TAG) != ER_MP) {
				SI_ERROR(("Not enough MP entries for component 0x%x\n", cid));
				goto error;
			}
			SI_VMSG(("  Master port %d, mp: %d id: %d\n", i,
			         (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT,
			         (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT));
		}

		/* First Slave Address Descriptor should be port 0:
		 * the main register space for the core
		 */
		asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
		if (asd == 0) {
			do {
				/* Try again to see if it is a bridge */
				asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
				              &sizel, &sizeh);
				if (asd != 0)
					br = TRUE;
				else {
					if (br == TRUE) {
						break;
					} else if ((addrh != 0) || (sizeh != 0) ||
						(sizel != SI_CORE_SIZE)) {
						SI_ERROR(("addrh = 0x%x\t sizeh = 0x%x\t sizel = "
							"0x%x\n", addrh, sizeh, sizel));
						SI_ERROR(("First Slave ASD for "
							"core 0x%04x malformed "
							"(0x%08x)\n", cid, asd));
						goto error;
					}
				}
			} while (1);
		}
		cores_info->coresba[idx] = addrl;
		cores_info->coresba_size[idx] = sizel;
		/* Get any more ASDs in port 0 */
		j = 1;
		do {
			asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
			              &sizel, &sizeh);
			if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
				cores_info->coresba2[idx] = addrl;
				cores_info->coresba2_size[idx] = sizel;
			}
			j++;
		} while (asd != 0);

		/* Go through the ASDs for other slave ports */
		for (i = 1; i < nsp; i++) {
			j = 0;
			do {
				asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
				              &sizel, &sizeh);

				if (asd == 0)
					break;
				j++;
			} while (1);
			if (j == 0) {
				SI_ERROR((" SP %d has no address descriptors\n", i));
				goto error;
			}
		}

		/* Now get master wrappers */
		for (i = 0; i < nmw; i++) {
			asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl, &addrh,
			              &sizel, &sizeh);
			if (asd == 0) {
				SI_ERROR(("Missing descriptor for MW %d\n", i));
				goto error;
			}
			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
				SI_ERROR(("Master wrapper %d is not 4KB\n", i));
				goto error;
			}
			if (i == 0)
				cores_info->wrapba[idx] = addrl;
			else if (i == 1)
				cores_info->wrapba2[idx] = addrl;

			ASSERT(sii->axi_num_wrappers < SI_MAX_AXI_WRAPPERS);
			axi_wrapper[sii->axi_num_wrappers].mfg = mfg;
			axi_wrapper[sii->axi_num_wrappers].cid = cid;
			axi_wrapper[sii->axi_num_wrappers].rev = crev;
			axi_wrapper[sii->axi_num_wrappers].wrapper_type = AI_MASTER_WRAPPER;
			axi_wrapper[sii->axi_num_wrappers].wrapper_addr = addrl;
			sii->axi_num_wrappers++;
			SI_VMSG(("MASTER WRAPPER: %d, mfg:%x, cid:%x, rev:%x, addr:%x, size:%x\n",
					sii->axi_num_wrappers, mfg, cid, crev, addrl, sizel));
		}

		/* And finally slave wrappers */
		for (i = 0; i < nsw; i++) {
			uint fwp = (nsp == 1) ? 0 : 1;
			asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, &addrl, &addrh,
			              &sizel, &sizeh);

			/* cache APB bridge wrapper address for set/clear timeout */
			if ((mfg == MFGID_ARM) && (cid == APB_BRIDGE_ID)) {
				ASSERT(sii->num_br < SI_MAXBR);
				sii->br_wrapba[sii->num_br++] = addrl;
			}

			if (asd == 0) {
				SI_ERROR(("Missing descriptor for SW %d\n", i));
				goto error;
			}
			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
				SI_ERROR(("Slave wrapper %d is not 4KB\n", i));
				goto error;
			}
			if ((nmw == 0) && (i == 0))
				cores_info->wrapba[idx] = addrl;
			else if ((nmw == 0) && (i == 1))
				cores_info->wrapba2[idx] = addrl;

			/* Include all slave wrappers to the list to
			 * enable and monitor watchdog timeouts
			 */
			ASSERT(sii->axi_num_wrappers < SI_MAX_AXI_WRAPPERS);
			axi_wrapper[sii->axi_num_wrappers].mfg = mfg;
			axi_wrapper[sii->axi_num_wrappers].cid = cid;
			axi_wrapper[sii->axi_num_wrappers].rev = crev;
			axi_wrapper[sii->axi_num_wrappers].wrapper_type = AI_SLAVE_WRAPPER;
			axi_wrapper[sii->axi_num_wrappers].wrapper_addr = addrl;
			sii->axi_num_wrappers++;

			SI_VMSG(("SLAVE WRAPPER: %d,  mfg:%x, cid:%x, rev:%x, addr:%x, size:%x\n",
				sii->axi_num_wrappers,  mfg, cid, crev, addrl, sizel));
		}

#ifndef BCM_BACKPLANE_TIMEOUT
		/* Don't record bridges */
		if (br)
			continue;
#endif

		/* Done with core */
		sii->numcores++;
	}

	SI_ERROR(("Reached end of erom without finding END\n"));

error:
	sii->numcores = 0;
	return;
}
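
/*
 * Usage sketch (hypothetical; mirrors how an attach path would drive the
 * scan). ai_scan() must be handed the chipcommon register window so the
 * core table is populated before the first ai_setcoreidx() call:
 *
 *	si_info_t *sii = SI_INFO(sih);
 *	ai_scan(&sii->pub, (void *)cc_regs, devid);
 *	if (sii->numcores == 0)
 *		return BCME_ERROR;	(EROM parse failed; numcores was reset)
 *
 * Here cc_regs and devid stand in for the caller's chipcommon mapping and
 * device id.
 */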

#define AI_SETCOREIDX_MAPSIZE(coreid) \
	(((coreid) == NS_CCB_CORE_ID) ? 15 * SI_CORE_SIZE : SI_CORE_SIZE)

/* This function changes the logical "focus" to the indicated core.
 * Return the current core's virtual address.
 */
static volatile void *
_ai_setcoreidx(si_t *sih, uint coreidx, uint use_wrap2)
{
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
	uint32 addr, wrap, wrap2;
	volatile void *regs;

	if (coreidx >= MIN(sii->numcores, SI_MAXCORES))
		return (NULL);

	addr = cores_info->coresba[coreidx];
	wrap = cores_info->wrapba[coreidx];
	wrap2 = cores_info->wrapba2[coreidx];

#ifdef BCM_BACKPLANE_TIMEOUT
	/* No need to disable interrupts while entering/exiting APB bridge core */
	if ((cores_info->coreid[coreidx] != APB_BRIDGE_CORE_ID) &&
		(cores_info->coreid[sii->curidx] != APB_BRIDGE_CORE_ID))
#endif /* BCM_BACKPLANE_TIMEOUT */
	{
		/*
		 * If the user has provided an interrupt mask enabled function,
		 * then assert interrupts are disabled before switching the core.
		 */
		ASSERT((sii->intrsenabled_fn == NULL) ||
			!(*(sii)->intrsenabled_fn)((sii)->intr_arg));
	}

	switch (BUSTYPE(sih->bustype)) {
	case SI_BUS:
		/* map new one */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = REG_MAP(addr,
				AI_SETCOREIDX_MAPSIZE(cores_info->coreid[coreidx]));
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		sii->curmap = regs = cores_info->regs[coreidx];
		if (!cores_info->wrappers[coreidx] && (wrap != 0)) {
			cores_info->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->wrappers[coreidx]));
		}
		if (!cores_info->wrappers2[coreidx] && (wrap2 != 0)) {
			cores_info->wrappers2[coreidx] = REG_MAP(wrap2, SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->wrappers2[coreidx]));
		}
		if (use_wrap2)
			sii->curwrap = cores_info->wrappers2[coreidx];
		else
			sii->curwrap = cores_info->wrappers[coreidx];
		break;

	case PCI_BUS:
#ifdef BCM_BACKPLANE_TIMEOUT
		/* No need to set the BAR0 window if the core is the APB bridge.
		 * This saves two PCI config writes while checking for errlog.
		 */
		if (cores_info->coreid[coreidx] != APB_BRIDGE_CORE_ID)
#endif /* BCM_BACKPLANE_TIMEOUT */
		{
			/* point bar0 window */
			OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, addr);
		}

		regs = sii->curmap;
		/* point bar0 2nd 4KB window to the primary wrapper */
		if (use_wrap2)
			wrap = wrap2;
		if (PCIE_GEN2(sii))
			OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_WIN2, 4, wrap);
		else
			OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN2, 4, wrap);
		break;

#ifdef BCMSDIO
	case SPI_BUS:
	case SDIO_BUS:
		sii->curmap = regs = (void *)((uintptr)addr);
		if (use_wrap2)
			sii->curwrap = (void *)((uintptr)wrap2);
		else
			sii->curwrap = (void *)((uintptr)wrap);
		break;
#endif	/* BCMSDIO */

	case PCMCIA_BUS:
	default:
		ASSERT(0);
		regs = NULL;
		break;
	}

	sii->curmap = regs;
	sii->curidx = coreidx;

	return regs;
}

volatile void *
ai_setcoreidx(si_t *sih, uint coreidx)
{
	return _ai_setcoreidx(sih, coreidx, 0);
}

volatile void *
ai_setcoreidx_2ndwrap(si_t *sih, uint coreidx)
{
	return _ai_setcoreidx(sih, coreidx, 1);
}
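
/*
 * A common caller idiom (sketch): save the current index, focus another
 * core, access it, then restore focus:
 *
 *	uint origidx = si_coreidx(sih);
 *	volatile void *regs = ai_setcoreidx(sih, coreidx);
 *	... access the core through regs ...
 *	ai_setcoreidx(sih, origidx);
 *
 * ai_flag() below uses exactly this pattern around its ai_flag_alt() call.
 */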

void
ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size)
{
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
	chipcregs_t *cc = NULL;
	uint32 erombase, *eromptr, *eromlim;
	uint i, j, cidx;
	uint32 cia, cib, nmp, nsp;
	uint32 asd, addrl, addrh, sizel, sizeh;

	for (i = 0; i < sii->numcores; i++) {
		if (cores_info->coreid[i] == CC_CORE_ID) {
			cc = (chipcregs_t *)cores_info->regs[i];
			break;
		}
	}
	if (cc == NULL)
		goto error;

	erombase = R_REG(sii->osh, &cc->eromptr);
	eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
	eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));

	cidx = sii->curidx;
	cia = cores_info->cia[cidx];
	cib = cores_info->cib[cidx];

	nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
	nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;

	/* scan for cores */
	while (eromptr < eromlim) {
		if ((get_erom_ent(sih, &eromptr, ER_TAG, ER_CI) == cia) &&
			(get_erom_ent(sih, &eromptr, 0, 0) == cib)) {
			break;
		}
	}

	/* skip master ports */
	for (i = 0; i < nmp; i++)
		get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);

	/* Skip ASDs in port 0 */
	asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
	if (asd == 0) {
		/* Try again to see if it is a bridge */
		asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
		              &sizel, &sizeh);
	}

	j = 1;
	do {
		asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
		              &sizel, &sizeh);
		j++;
	} while (asd != 0);

	/* Go through the ASDs for other slave ports */
	for (i = 1; i < nsp; i++) {
		j = 0;
		do {
			asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
				&sizel, &sizeh);
			if (asd == 0)
				break;

			if (!asidx--) {
				*addr = addrl;
				*size = sizel;
				return;
			}
			j++;
		} while (1);

		if (j == 0) {
			SI_ERROR((" SP %d has no address descriptors\n", i));
			break;
		}
	}

error:
	*size = 0;
	return;
}

/* Return the number of address spaces in current core */
int
ai_numaddrspaces(si_t *sih)
{
	BCM_REFERENCE(sih);

	return 2;
}

/* Return the address of the nth address space in the current core */
uint32
ai_addrspace(si_t *sih, uint asidx)
{
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
	uint cidx;

	cidx = sii->curidx;

	if (asidx == 0)
		return cores_info->coresba[cidx];
	else if (asidx == 1)
		return cores_info->coresba2[cidx];
	else {
		SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
		          __FUNCTION__, asidx));
		return 0;
	}
}

/* Return the size of the nth address space in the current core */
uint32
ai_addrspacesize(si_t *sih, uint asidx)
{
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
	uint cidx;

	cidx = sii->curidx;

	if (asidx == 0)
		return cores_info->coresba_size[cidx];
	else if (asidx == 1)
		return cores_info->coresba2_size[cidx];
	else {
		SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
		          __FUNCTION__, asidx));
		return 0;
	}
}

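/* Return the out-of-band flag number the current core drives, taken from
 * the low bits of the wrapper's oobselouta30 register. DMP register spaces
 * known to be unreadable on specific chips are skipped with an error.
 */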
uint
ai_flag(si_t *sih)
{
	si_info_t *sii = SI_INFO(sih);
	aidmp_t *ai;

	if (BCM5357_DMP()) {
		SI_ERROR(("%s: Attempting to read USB20H DMP registers on 5357b0\n", __FUNCTION__));
		return sii->curidx;
	}
	if (BCM4707_DMP()) {
		SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n",
			__FUNCTION__));
		return sii->curidx;
	}
	if (BCM53573_DMP()) {
		SI_ERROR(("%s: Attempting to read DMP registers on 53573\n", __FUNCTION__));
		return sii->curidx;
	}
#ifdef REROUTE_OOBINT
	if (PMU_DMP()) {
		SI_ERROR(("%s: Attempting to read PMU DMP registers\n",
			__FUNCTION__));
		return PMU_OOB_BIT;
	}
#else
	if (PMU_DMP()) {
		uint idx, flag;
		idx = sii->curidx;
		ai_setcoreidx(sih, SI_CC_IDX);
		flag = ai_flag_alt(sih);
		ai_setcoreidx(sih, idx);
		return flag;
	}
#endif /* REROUTE_OOBINT */

	ai = sii->curwrap;
	ASSERT(ai != NULL);

	return (R_REG(sii->osh, &ai->oobselouta30) & 0x1f);
}

uint
ai_flag_alt(si_t *sih)
{
	si_info_t *sii = SI_INFO(sih);
	aidmp_t *ai;

	if (BCM5357_DMP()) {
		SI_ERROR(("%s: Attempting to read USB20H DMP registers on 5357b0\n", __FUNCTION__));
		return sii->curidx;
	}
	if (BCM4707_DMP()) {
		SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n",
			__FUNCTION__));
		return sii->curidx;
	}
#ifdef REROUTE_OOBINT
	if (PMU_DMP()) {
		SI_ERROR(("%s: Attempting to read PMU DMP registers\n",
			__FUNCTION__));
		return PMU_OOB_BIT;
	}
#endif /* REROUTE_OOBINT */

	ai = sii->curwrap;

	return ((R_REG(sii->osh, &ai->oobselouta30) >> AI_OOBSEL_1_SHIFT) & AI_OOBSEL_MASK);
}

void
ai_setint(si_t *sih, int siflag)
{
	BCM_REFERENCE(sih);
	BCM_REFERENCE(siflag);
}

uint
ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val)
{
	si_info_t *sii = SI_INFO(sih);
	uint32 *map = (uint32 *) sii->curwrap;

	if (mask || val) {
		uint32 w = R_REG(sii->osh, map+(offset/4));
		w &= ~mask;
		w |= val;
		W_REG(sii->osh, map+(offset/4), w);
	}

	return (R_REG(sii->osh, map+(offset/4)));
}

uint
ai_corevendor(si_t *sih)
{
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
	uint32 cia;

	cia = cores_info->cia[sii->curidx];
	return ((cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT);
}

uint
ai_corerev(si_t *sih)
{
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
	uint32 cib;

	cib = cores_info->cib[sii->curidx];
	return remap_corerev(sih, (cib & CIB_REV_MASK) >> CIB_REV_SHIFT);
}

bool
ai_iscoreup(si_t *sih)
{
	si_info_t *sii = SI_INFO(sih);
	aidmp_t *ai;

	ai = sii->curwrap;

	return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) == SICF_CLOCK_EN) &&
	        ((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0));
}

/*
 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
 * switch back to the original core, and return the new value.
 *
 * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
 *
 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
 * and (on newer pci cores) chipcommon registers.
 */
uint
ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
{
	uint origidx = 0;
	volatile uint32 *r = NULL;
	uint w;
	uint intr_val = 0;
	bool fast = FALSE;
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

	ASSERT(GOODIDX(coreidx));
	ASSERT(regoff < SI_CORE_SIZE);
	ASSERT((val & ~mask) == 0);

	if (coreidx >= SI_MAXCORES)
		return 0;

	if (BUSTYPE(sih->bustype) == SI_BUS) {
		/* If internal bus, we can always get at everything */
		fast = TRUE;
		/* map if does not exist */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
			                            SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
	} else if (BUSTYPE(sih->bustype) == PCI_BUS) {
		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */

		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
			/* Chipc registers are mapped at 12KB */

			fast = TRUE;
			r = (volatile uint32 *)((volatile char *)sii->curmap +
			               PCI_16KB0_CCREGS_OFFSET + regoff);
		} else if (sii->pub.buscoreidx == coreidx) {
			/* pci registers are either in the last 2KB of an 8KB window
			 * or, in pcie and pci rev 13, at 8KB
			 */
			fast = TRUE;
			if (SI_FAST(sii))
				r = (volatile uint32 *)((volatile char *)sii->curmap +
				               PCI_16KB0_PCIREGS_OFFSET + regoff);
			else
				r = (volatile uint32 *)((volatile char *)sii->curmap +
				               ((regoff >= SBCONFIGOFF) ?
				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
				               regoff);
		}
	}

	if (!fast) {
		INTR_OFF(sii, intr_val);

		/* save current core index */
		origidx = si_coreidx(&sii->pub);

		/* switch core */
		r = (volatile uint32*) ((volatile uchar*) ai_setcoreidx(&sii->pub, coreidx) +
		               regoff);
	}
	ASSERT(r != NULL);

	/* mask and set */
	if (mask || val) {
		w = (R_REG(sii->osh, r) & ~mask) | val;
		W_REG(sii->osh, r, w);
	}

	/* readback */
	w = R_REG(sii->osh, r);

	if (!fast) {
		/* restore core index */
		if (origidx != coreidx)
			ai_setcoreidx(&sii->pub, origidx);

		INTR_RESTORE(sii, intr_val);
	}

	return (w);
}
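
/*
 * Example (a sketch; the watchdog offset is an assumption about the
 * chipcregs_t layout in sbchipc.h): kick the chipcommon watchdog with a
 * single read-modify-write, letting ai_corereg() handle any core switch:
 *
 *	ai_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, ticks);
 */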

/*
 * If there is no need for fiddling with interrupts or core switches (typically silicon
 * back plane registers, pci registers and chipcommon registers), this function
 * translates the register offset on this core to a mapped address. This address can
 * be used for W_REG/R_REG directly.
 *
 * For accessing registers that would need a core switch, this function will return
 * NULL.
 */
volatile uint32 *
ai_corereg_addr(si_t *sih, uint coreidx, uint regoff)
{
	volatile uint32 *r = NULL;
	bool fast = FALSE;
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

	ASSERT(GOODIDX(coreidx));
	ASSERT(regoff < SI_CORE_SIZE);

	if (coreidx >= SI_MAXCORES)
		return 0;

	if (BUSTYPE(sih->bustype) == SI_BUS) {
		/* If internal bus, we can always get at everything */
		fast = TRUE;
		/* map if does not exist */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
			                            SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
	} else if (BUSTYPE(sih->bustype) == PCI_BUS) {
		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */

		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
			/* Chipc registers are mapped at 12KB */

			fast = TRUE;
			r = (volatile uint32 *)((volatile char *)sii->curmap +
			               PCI_16KB0_CCREGS_OFFSET + regoff);
		} else if (sii->pub.buscoreidx == coreidx) {
			/* pci registers are either in the last 2KB of an 8KB window
			 * or, in pcie and pci rev 13, at 8KB
			 */
			fast = TRUE;
			if (SI_FAST(sii))
				r = (volatile uint32 *)((volatile char *)sii->curmap +
				               PCI_16KB0_PCIREGS_OFFSET + regoff);
			else
				r = (volatile uint32 *)((volatile char *)sii->curmap +
				               ((regoff >= SBCONFIGOFF) ?
				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
				               regoff);
		}
	}

	if (!fast) {
		ASSERT(sii->curidx == coreidx);
		r = (volatile uint32*) ((volatile uchar*)sii->curmap + regoff);
	}

	return (r);
}
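
/*
 * Example (sketch): for a register that never needs a core switch, fetch
 * the mapped address once, then use R_REG()/W_REG() directly in hot paths:
 *
 *	volatile uint32 *r = ai_corereg_addr(sih, coreidx, regoff);
 *	if (r != NULL)
 *		w = R_REG(sii->osh, r);
 */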

void
ai_core_disable(si_t *sih, uint32 bits)
{
	si_info_t *sii = SI_INFO(sih);
	volatile uint32 dummy;
	uint32 status;
	aidmp_t *ai;

	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	/* if core is already in reset, just return */
	if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) {
		return;
	}

	/* ensure there are no pending backplane operations */
	SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);

	/* if pending backplane ops still, try waiting longer */
	if (status != 0) {
		/* 300usecs was sufficient to allow backplane ops to clear for big hammer;
		 * during driver load we may need more time
		 */
		SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 10000);
		/* if ops are still pending, continue on and try the disable anyway;
		 * this is in the big hammer path, so don't call wl_reinit in this case
		 */
	}

	W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
	dummy = R_REG(sii->osh, &ai->resetctrl);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);

	W_REG(sii->osh, &ai->ioctrl, bits);
	dummy = R_REG(sii->osh, &ai->ioctrl);
	BCM_REFERENCE(dummy);
	OSL_DELAY(10);
}

/* reset and re-enable a core
 * inputs:
 * bits - core specific bits that are set during and after reset sequence
 * resetbits - core specific bits that are set only during reset sequence
 */
static void
_ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
{
	si_info_t *sii = SI_INFO(sih);
	aidmp_t *ai;
	volatile uint32 dummy;
	uint loop_counter = 10;

	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	/* ensure there are no pending backplane operations */
	SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);

	/* put core into reset state */
	W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
	OSL_DELAY(10);

	/* ensure there are no pending backplane operations */
	SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);

	W_REG(sii->osh, &ai->ioctrl, (bits | resetbits | SICF_FGC | SICF_CLOCK_EN));
	dummy = R_REG(sii->osh, &ai->ioctrl);
	BCM_REFERENCE(dummy);

	/* ensure there are no pending backplane operations */
	SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);

	while (R_REG(sii->osh, &ai->resetctrl) != 0 && --loop_counter != 0) {
		/* ensure there are no pending backplane operations */
		SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);

		/* take core out of reset */
		W_REG(sii->osh, &ai->resetctrl, 0);

		/* ensure there are no pending backplane operations */
		SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
	}

	W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN));
	dummy = R_REG(sii->osh, &ai->ioctrl);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);
}

void
ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
{
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
	uint idx = sii->curidx;

	if (cores_info->wrapba2[idx] != 0) {
		ai_setcoreidx_2ndwrap(sih, idx);
		_ai_core_reset(sih, bits, resetbits);
		ai_setcoreidx(sih, idx);
	}

	_ai_core_reset(sih, bits, resetbits);
}
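
/*
 * Example (sketch): a typical core bring-up resets the focused core with no
 * extra control bits and then verifies that it came up:
 *
 *	ai_core_reset(sih, 0, 0);
 *	ASSERT(ai_iscoreup(sih));
 */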

void
ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
{
	si_info_t *sii = SI_INFO(sih);
	aidmp_t *ai;
	uint32 w;

	if (BCM5357_DMP()) {
		SI_ERROR(("%s: Accessing USB20H DMP register (ioctrl) on 5357\n",
		          __FUNCTION__));
		return;
	}
	if (BCM4707_DMP()) {
		SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
			__FUNCTION__));
		return;
	}
	if (PMU_DMP()) {
		SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
			__FUNCTION__));
		return;
	}

	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	ASSERT((val & ~mask) == 0);

	if (mask || val) {
		w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
		W_REG(sii->osh, &ai->ioctrl, w);
	}
}

uint32
ai_core_cflags(si_t *sih, uint32 mask, uint32 val)
{
	si_info_t *sii = SI_INFO(sih);
	aidmp_t *ai;
	uint32 w;

	if (BCM5357_DMP()) {
		SI_ERROR(("%s: Accessing USB20H DMP register (ioctrl) on 5357\n",
		          __FUNCTION__));
		return 0;
	}
	if (BCM4707_DMP()) {
		SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
			__FUNCTION__));
		return 0;
	}

	if (PMU_DMP()) {
		SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
			__FUNCTION__));
		return 0;
	}
	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	ASSERT((val & ~mask) == 0);

	if (mask || val) {
		w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
		W_REG(sii->osh, &ai->ioctrl, w);
	}

	return R_REG(sii->osh, &ai->ioctrl);
}

uint32
ai_core_sflags(si_t *sih, uint32 mask, uint32 val)
{
	si_info_t *sii = SI_INFO(sih);
	aidmp_t *ai;
	uint32 w;

	if (BCM5357_DMP()) {
		SI_ERROR(("%s: Accessing USB20H DMP register (iostatus) on 5357\n",
		          __FUNCTION__));
		return 0;
	}
	if (BCM4707_DMP()) {
		SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
			__FUNCTION__));
		return 0;
	}
	if (PMU_DMP()) {
		SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
			__FUNCTION__));
		return 0;
	}

	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	ASSERT((val & ~mask) == 0);
	ASSERT((mask & ~SISF_CORE_BITS) == 0);

	if (mask || val) {
		w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val);
		W_REG(sii->osh, &ai->iostatus, w);
	}

	return R_REG(sii->osh, &ai->iostatus);
}

#if defined(BCMDBG_PHYDUMP)
/* print interesting aidmp registers */
void
ai_dumpregs(si_t *sih, struct bcmstrbuf *b)
{
	si_info_t *sii = SI_INFO(sih);
	osl_t *osh;
	aidmp_t *ai;
	uint i;
	uint32 prev_value = 0;
	axi_wrapper_t * axi_wrapper = sii->axi_wrapper;
	uint32 cfg_reg = 0;
	uint bar0_win_offset = 0;

	osh = sii->osh;

	/* Save and restore wrapper access window */
	if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
		if (PCIE_GEN2(sii)) {
			cfg_reg = PCIE2_BAR0_CORE2_WIN2;
			bar0_win_offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
		} else {
			cfg_reg = PCI_BAR0_WIN2;
			bar0_win_offset = PCI_BAR0_WIN2_OFFSET;
		}

		prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4);

		if (prev_value == ID32_INVALID) {
			SI_PRINT(("%s, PCI_BAR0_WIN2 - %x\n", __FUNCTION__, prev_value));
			return;
		}
	}

	bcm_bprintf(b, "ChipNum:%x, ChipRev:%x, BusType:%x, BoardType:%x, BoardVendor:%x\n\n",
		sih->chip, sih->chiprev, sih->bustype, sih->boardtype, sih->boardvendor);

	for (i = 0; i < sii->axi_num_wrappers; i++) {
		if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
			/* Set BAR0 window to bridge wrapper base address */
			OSL_PCI_WRITE_CONFIG(osh,
				cfg_reg, 4, axi_wrapper[i].wrapper_addr);

			ai = (aidmp_t *) ((volatile uint8*)sii->curmap + bar0_win_offset);
		} else {
			ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr;
		}

		bcm_bprintf(b, "core 0x%x: core_rev:%d, %s_WR ADDR:%x \n", axi_wrapper[i].cid,
			axi_wrapper[i].rev,
			axi_wrapper[i].wrapper_type == AI_SLAVE_WRAPPER ? "SLAVE" : "MASTER",
			axi_wrapper[i].wrapper_addr);

		/* BCM5357_DMP() */
		if (((CHIPID(sih->chip) == BCM5357_CHIP_ID) ||
			(CHIPID(sih->chip) == BCM4749_CHIP_ID)) &&
			(sih->chippkg == BCM5357_PKG_ID) &&
			(axi_wrapper[i].cid == USB20H_CORE_ID)) {
			bcm_bprintf(b, "Skipping usb20h in 5357\n");
			continue;
		}

		/* BCM4707_DMP() */
		if (BCM4707_CHIP(CHIPID(sih->chip)) &&
			(axi_wrapper[i].cid == NS_CCB_CORE_ID)) {
			bcm_bprintf(b, "Skipping chipcommonb in 4707\n");
			continue;
		}

		bcm_bprintf(b, "ioctrlset 0x%x ioctrlclear 0x%x ioctrl 0x%x iostatus 0x%x "
			    "ioctrlwidth 0x%x iostatuswidth 0x%x\n"
			    "resetctrl 0x%x resetstatus 0x%x resetreadid 0x%x resetwriteid 0x%x\n"
			    "errlogctrl 0x%x errlogdone 0x%x errlogstatus 0x%x "
			    "errlogaddrlo 0x%x errlogaddrhi 0x%x\n"
			    "errlogid 0x%x errloguser 0x%x errlogflags 0x%x\n"
			    "intstatus 0x%x config 0x%x itcr 0x%x\n\n",
			    R_REG(osh, &ai->ioctrlset),
			    R_REG(osh, &ai->ioctrlclear),
			    R_REG(osh, &ai->ioctrl),
			    R_REG(osh, &ai->iostatus),
			    R_REG(osh, &ai->ioctrlwidth),
			    R_REG(osh, &ai->iostatuswidth),
			    R_REG(osh, &ai->resetctrl),
			    R_REG(osh, &ai->resetstatus),
			    R_REG(osh, &ai->resetreadid),
			    R_REG(osh, &ai->resetwriteid),
			    R_REG(osh, &ai->errlogctrl),
			    R_REG(osh, &ai->errlogdone),
			    R_REG(osh, &ai->errlogstatus),
			    R_REG(osh, &ai->errlogaddrlo),
			    R_REG(osh, &ai->errlogaddrhi),
			    R_REG(osh, &ai->errlogid),
			    R_REG(osh, &ai->errloguser),
			    R_REG(osh, &ai->errlogflags),
			    R_REG(osh, &ai->intstatus),
			    R_REG(osh, &ai->config),
			    R_REG(osh, &ai->itcr));
	}

	/* Restore the initial wrapper space */
	if (prev_value && cfg_reg) {
		OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value);
	}
}
#endif

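/* Program every slave wrapper's errlogctrl so that a hung backplane access
 * raises a timeout (exponent AXI_TO_VAL) instead of stalling the bus
 * forever. Compiled out unless AXI_TIMEOUTS or BCM_BACKPLANE_TIMEOUT is
 * defined.
 */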
void
ai_enable_backplane_timeouts(si_t *sih)
{
#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
	si_info_t *sii = SI_INFO(sih);
	aidmp_t *ai;
	uint32 i;
	axi_wrapper_t * axi_wrapper = sii->axi_wrapper;

#ifdef BCM_BACKPLANE_TIMEOUT
	uint32 prev_value = 0;
	osl_t *osh = sii->osh;
	uint32 cfg_reg = 0;
	uint32 offset = 0;
#endif /* BCM_BACKPLANE_TIMEOUT */

	if ((sii->axi_num_wrappers == 0) ||
#ifdef BCM_BACKPLANE_TIMEOUT
		(!PCIE(sii)) ||
#endif /* BCM_BACKPLANE_TIMEOUT */
		FALSE) {
		SI_VMSG((" %s, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d, ID:%x\n",
			__FUNCTION__, sii->axi_num_wrappers, PCIE(sii),
			BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
		return;
	}

#ifdef BCM_BACKPLANE_TIMEOUT
	/* Save and restore the wrapper access window */
	if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
		if (PCIE_GEN1(sii)) {
			cfg_reg = PCI_BAR0_WIN2;
			offset = PCI_BAR0_WIN2_OFFSET;
		} else if (PCIE_GEN2(sii)) {
			cfg_reg = PCIE2_BAR0_CORE2_WIN2;
			offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
		} else {
			osl_panic("!PCIE_GEN1 && !PCIE_GEN2\n");
		}

		prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4);
		if (prev_value == ID32_INVALID) {
			SI_PRINT(("%s, PCI_BAR0_WIN2 - %x\n", __FUNCTION__, prev_value));
			return;
		}
	}
#endif /* BCM_BACKPLANE_TIMEOUT */

	for (i = 0; i < sii->axi_num_wrappers; ++i) {
		if (axi_wrapper[i].wrapper_type != AI_SLAVE_WRAPPER) {
			SI_VMSG(("SKIP ENABLE BPT: MFG:%x, CID:%x, ADDR:%x\n",
				axi_wrapper[i].mfg,
				axi_wrapper[i].cid,
				axi_wrapper[i].wrapper_addr));
			continue;
		}

#ifdef BCM_BACKPLANE_TIMEOUT
		if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
			/* Set BAR0_CORE2_WIN2 to the wrapper base address */
			OSL_PCI_WRITE_CONFIG(osh,
				cfg_reg, 4, axi_wrapper[i].wrapper_addr);

			/* set AI to BAR0 + offset corresponding to Gen1 or Gen2 */
			ai = (aidmp_t *) ((uint8*)sii->curmap + offset);
		}
		else
#endif /* BCM_BACKPLANE_TIMEOUT */
		{
			ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr;
		}

		W_REG(sii->osh, &ai->errlogctrl, (1 << AIELC_TO_ENAB_SHIFT) |
		      ((AXI_TO_VAL << AIELC_TO_EXP_SHIFT) & AIELC_TO_EXP_MASK));

		SI_VMSG(("ENABLED BPT: MFG:%x, CID:%x, ADDR:%x, ERR_CTRL:%x\n",
			axi_wrapper[i].mfg,
			axi_wrapper[i].cid,
			axi_wrapper[i].wrapper_addr,
			R_REG(sii->osh, &ai->errlogctrl)));
	}

#ifdef BCM_BACKPLANE_TIMEOUT
	/* Restore the initial wrapper space */
	if (prev_value) {
		OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value);
	}
#endif /* BCM_BACKPLANE_TIMEOUT */

#endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */
}

#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)

/* slave error is ignored, so account for those cases */
static uint32 si_ignore_errlog_cnt = 0;

static bool
ai_ignore_errlog(si_info_t *sii, uint32 lo_addr, uint32 hi_addr, uint32 err_axi_id, uint32 errsts)
{
	uint32 axi_id;

	/* ignore the BT slave errors if the errlog is to chipcommon addr 0x190 */
	switch (CHIPID(sii->pub.chip)) {
		case BCM4350_CHIP_ID:
			axi_id = BCM4350_BT_AXI_ID;
			break;
		case BCM4345_CHIP_ID:
			axi_id = BCM4345_BT_AXI_ID;
			break;
		default:
			return FALSE;
	}

	/* AXI ID check */
	if ((err_axi_id & AI_ERRLOGID_AXI_ID_MASK) != axi_id)
		return FALSE;

	/* slave errors */
	if ((errsts & AIELS_TIMEOUT_MASK) != AIELS_SLAVE_ERR)
		return FALSE;

	/* chipc reg 0x190 */
	if ((hi_addr != BT_CC_SPROM_BADREG_HI) || (lo_addr != BT_CC_SPROM_BADREG_LO))
		return FALSE;

	return TRUE;
}
#endif /* defined (AXI_TIMEOUTS) || defined (BCM_BACKPLANE_TIMEOUT) */

#ifdef BCM_BACKPLANE_TIMEOUT

/* Function to return the APB bridge details corresponding to the core */
bool
ai_get_apb_bridge(si_t *sih, uint32 coreidx, uint32 *apb_id, uint32 *apb_coreuinit)
{
	uint i;
	uint32 core_base, core_end;
	si_info_t *sii = SI_INFO(sih);
	static uint32 coreidx_cached = 0, apb_id_cached = 0, apb_coreunit_cached = 0;
	uint32 tmp_coreunit = 0;
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

	if (coreidx >= MIN(sii->numcores, SI_MAXCORES))
		return FALSE;

	/* Most of the time the APB bridge query will be for the d11 core.
	 * Cache the last lookup and return it on a match rather than
	 * iterating the core table every time.
	 */
	if (coreidx_cached == coreidx) {
		*apb_id = apb_id_cached;
		*apb_coreuinit = apb_coreunit_cached;
		return TRUE;
	}

	core_base = cores_info->coresba[coreidx];
	core_end = core_base + cores_info->coresba_size[coreidx];

	for (i = 0; i < sii->numcores; i++) {
		if (cores_info->coreid[i] == APB_BRIDGE_ID) {
			uint32 apb_base;
			uint32 apb_end;

			apb_base = cores_info->coresba[i];
			apb_end = apb_base + cores_info->coresba_size[i];

			if ((core_base >= apb_base) &&
				(core_end <= apb_end)) {
				/* Current core is attached to this APB bridge */
				*apb_id = apb_id_cached = APB_BRIDGE_ID;
				*apb_coreuinit = apb_coreunit_cached = tmp_coreunit;
				coreidx_cached = coreidx;
				return TRUE;
			}
			/* Increment the coreunit */
			tmp_coreunit++;
		}
	}

	return FALSE;
}

uint32
ai_clear_backplane_to_fast(si_t *sih, void *addr)
{
	si_info_t *sii = SI_INFO(sih);
	void *curmap = sii->curmap;
	bool core_reg = FALSE;

	/* Use fast path only for core register access */
	if ((addr >= curmap) && (addr < (curmap + SI_CORE_SIZE))) {
		/* address being accessed is within current core reg map */
		core_reg = TRUE;
	}

	if (core_reg) {
		uint32 apb_id, apb_coreuinit;

		if (ai_get_apb_bridge(sih, si_coreidx(&sii->pub),
			&apb_id, &apb_coreuinit) == TRUE) {
			/* Found the APB bridge corresponding to the current core;
			 * check for bus errors in the APB wrapper
			 */
			return ai_clear_backplane_to_per_core(sih,
				apb_id, apb_coreuinit, NULL);
		}
	}

	/* Default is to poll for errors on all slave wrappers */
	return si_clear_backplane_to(sih);
}
#endif /* BCM_BACKPLANE_TIMEOUT */

#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
/*
 * API to clear the back plane timeout per core.
 * The caller may pass an optional wrapper address. If present, this is used
 * as the wrapper base address. If a wrapper base address is provided, the
 * caller must provide the coreid as well.
 * If both coreid and wrapper are zero, the error status of the current
 * bridge is verified.
 */
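/*
 * Example (sketch): probe the wrapper currently in focus after a suspect
 * register access; coreid 0 with a NULL wrapper checks the current bridge:
 *
 *	uint32 sts = ai_clear_backplane_to_per_core(sih, 0, 0, NULL);
 *	if (sts == AXI_WRAP_STS_TIMEOUT)
 *		... the APB bridge was reset to unblock the bus ...
 */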
uint32
ai_clear_backplane_to_per_core(si_t *sih, uint coreid, uint coreunit, void *wrap)
{
	int ret = AXI_WRAP_STS_NONE;
	aidmp_t *ai = NULL;
	uint32 errlog_status = 0;
	si_info_t *sii = SI_INFO(sih);
	uint32 errlog_lo = 0, errlog_hi = 0, errlog_id = 0, errlog_flags = 0;
	uint32 current_coreidx = si_coreidx(sih);
	uint32 target_coreidx = si_findcoreidx(sih, coreid, coreunit);

#if defined(BCM_BACKPLANE_TIMEOUT)
	si_axi_error_t *axi_error = &sih->err_info->axi_error[sih->err_info->count];
#endif /* BCM_BACKPLANE_TIMEOUT */
	bool restore_core = FALSE;

	if ((sii->axi_num_wrappers == 0) ||
#ifdef BCM_BACKPLANE_TIMEOUT
		(!PCIE(sii)) ||
#endif /* BCM_BACKPLANE_TIMEOUT */
		FALSE) {
		SI_VMSG((" %s, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d, ID:%x\n",
			__FUNCTION__, sii->axi_num_wrappers, PCIE(sii),
			BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
		return AXI_WRAP_STS_NONE;
	}

	if (wrap != NULL) {
		ai = (aidmp_t *)wrap;
	} else if (coreid && (target_coreidx != current_coreidx)) {
		if (ai_setcoreidx(sih, target_coreidx) == NULL) {
			/* Unable to set the core */
			SI_PRINT(("Set Core Failed: coreid:%x, unit:%d, target_coreidx:%d\n",
				coreid, coreunit, target_coreidx));
			errlog_lo = target_coreidx;
			ret = AXI_WRAP_STS_SET_CORE_FAIL;
			goto end;
		}

		restore_core = TRUE;
		ai = (aidmp_t *)si_wrapperregs(sih);
	} else {
		/* Read error status of current wrapper */
		ai = (aidmp_t *)si_wrapperregs(sih);

		/* Update coreid to the current core's ID */
		coreid = si_coreid(sih);
	}

	/* read error log status */
	errlog_status = R_REG(sii->osh, &ai->errlogstatus);

	if (errlog_status == ID32_INVALID) {
		/* Do not try to peek further */
		SI_PRINT(("%s, errlogstatus:%x - Slave Wrapper:%x\n",
			__FUNCTION__, errlog_status, coreid));
		ret = AXI_WRAP_STS_WRAP_RD_ERR;
		errlog_lo = (uint32)&ai->errlogstatus;
		goto end;
	}

	if ((errlog_status & AIELS_TIMEOUT_MASK) != 0) {
		uint32 tmp;
		uint32 count = 0;
		/* set ErrDone to clear the condition */
		W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK);

		/* SPINWAIT on errlogstatus timeout status bits */
		while ((tmp = R_REG(sii->osh, &ai->errlogstatus)) & AIELS_TIMEOUT_MASK) {
			if (tmp == ID32_INVALID) {
				SI_PRINT(("%s: prev errlogstatus:%x, errlogstatus:%x\n",
					__FUNCTION__, errlog_status, tmp));
				ret = AXI_WRAP_STS_WRAP_RD_ERR;
				errlog_lo = (uint32)&ai->errlogstatus;
				goto end;
			}
			/*
			 * Clear again, to avoid getting stuck in the loop, if a new error
			 * is logged after we cleared the first timeout
			 */
			W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK);

			count++;
			OSL_DELAY(10);
			if ((10 * count) > AI_REG_READ_TIMEOUT) {
				errlog_status = tmp;
				break;
			}
		}

		errlog_lo = R_REG(sii->osh, &ai->errlogaddrlo);
		errlog_hi = R_REG(sii->osh, &ai->errlogaddrhi);
		errlog_id = R_REG(sii->osh, &ai->errlogid);
		errlog_flags = R_REG(sii->osh, &ai->errlogflags);

		/* we are already in the error path, so OK to check for the slave error */
		if (ai_ignore_errlog(sii, errlog_lo, errlog_hi, errlog_id,
			errlog_status)) {
			si_ignore_errlog_cnt++;
			goto end;
		}

		/* only reset APB Bridge on timeout (not slave error, or dec error) */
		switch (errlog_status & AIELS_TIMEOUT_MASK) {
			case AIELS_SLAVE_ERR:
				SI_PRINT(("AXI slave error"));
				ret = AXI_WRAP_STS_SLAVE_ERR;
				break;

			case AIELS_TIMEOUT:
				/* reset APB Bridge */
				OR_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
				/* sync write */
				(void)R_REG(sii->osh, &ai->resetctrl);
				/* clear Reset bit */
				AND_REG(sii->osh, &ai->resetctrl, ~(AIRC_RESET));
				/* sync write */
				(void)R_REG(sii->osh, &ai->resetctrl);
				SI_PRINT(("AXI timeout"));
				ret = AXI_WRAP_STS_TIMEOUT;
				break;

			case AIELS_DECODE:
				SI_PRINT(("AXI decode error"));
				ret = AXI_WRAP_STS_DECODE_ERR;
				break;
			default:
				ASSERT(0);	/* should be impossible */
		}

		SI_PRINT(("\tCoreID: %x\n", coreid));
		SI_PRINT(("\t errlog: lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x"
			", status 0x%08x\n",
			errlog_lo, errlog_hi, errlog_id, errlog_flags,
			errlog_status));
	}

end:
#if defined(BCM_BACKPLANE_TIMEOUT)
	if (axi_error && (ret != AXI_WRAP_STS_NONE)) {
		axi_error->error = ret;
		axi_error->coreid = coreid;
		axi_error->errlog_lo = errlog_lo;
		axi_error->errlog_hi = errlog_hi;
		axi_error->errlog_id = errlog_id;
		axi_error->errlog_flags = errlog_flags;
		axi_error->errlog_status = errlog_status;
		sih->err_info->count++;

		if (sih->err_info->count == SI_MAX_ERRLOG_SIZE) {
			sih->err_info->count = SI_MAX_ERRLOG_SIZE - 1;
			SI_PRINT(("AXI Error log overflow\n"));
		}
	}
#endif /* BCM_BACKPLANE_TIMEOUT */

	if (restore_core) {
		if (ai_setcoreidx(sih, current_coreidx) == NULL) {
			/* Unable to set the core */
			return ID32_INVALID;
		}
	}

	return ret;
}
#endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */

/*
 * This API polls all slave wrappers for errors and returns a bit map of
 * all reported errors.
 * return - bit map of
 *	AXI_WRAP_STS_NONE
 *	AXI_WRAP_STS_TIMEOUT
 *	AXI_WRAP_STS_SLAVE_ERR
 *	AXI_WRAP_STS_DECODE_ERR
 *	AXI_WRAP_STS_PCI_RD_ERR
 *	AXI_WRAP_STS_WRAP_RD_ERR
 *	AXI_WRAP_STS_SET_CORE_FAIL
 * On timeout detection, the corresponding bridge is reset to
 * unblock the bus.
 * The error reported in each wrapper can be retrieved using the API
 * si_get_axi_errlog_info()
 */
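/*
 * Example (sketch): poll all slave wrappers and react to the aggregated
 * status bits:
 *
 *	uint32 sts = ai_clear_backplane_to(sih);
 *	if (sts & (AXI_WRAP_STS_TIMEOUT | AXI_WRAP_STS_SLAVE_ERR))
 *		... log and schedule recovery ...
 */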
uint32
ai_clear_backplane_to(si_t *sih)
{
	uint32 ret = 0;
#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
	si_info_t *sii = SI_INFO(sih);
	aidmp_t *ai;
	uint32 i;
	axi_wrapper_t * axi_wrapper = sii->axi_wrapper;

#ifdef BCM_BACKPLANE_TIMEOUT
	uint32 prev_value = 0;
	osl_t *osh = sii->osh;
	uint32 cfg_reg = 0;
	uint32 offset = 0;

	if ((sii->axi_num_wrappers == 0) || (!PCIE(sii)))
#else
	if (sii->axi_num_wrappers == 0)
#endif
	{
		SI_VMSG((" %s, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d, ID:%x\n",
			__FUNCTION__, sii->axi_num_wrappers, PCIE(sii),
			BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
		return AXI_WRAP_STS_NONE;
	}

#ifdef BCM_BACKPLANE_TIMEOUT
	/* Save and restore wrapper access window */
	if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
		if (PCIE_GEN1(sii)) {
			cfg_reg = PCI_BAR0_WIN2;
			offset = PCI_BAR0_WIN2_OFFSET;
		} else if (PCIE_GEN2(sii)) {
			cfg_reg = PCIE2_BAR0_CORE2_WIN2;
			offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
		} else {
			osl_panic("!PCIE_GEN1 && !PCIE_GEN2\n");
		}

		prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4);

		if (prev_value == ID32_INVALID) {
			si_axi_error_t *axi_error =
				&sih->err_info->axi_error[sih->err_info->count];
			SI_PRINT(("%s, PCI_BAR0_WIN2 - %x\n", __FUNCTION__, prev_value));

			axi_error->error = ret = AXI_WRAP_STS_PCI_RD_ERR;
			axi_error->errlog_lo = cfg_reg;
			sih->err_info->count++;

			if (sih->err_info->count == SI_MAX_ERRLOG_SIZE) {
				sih->err_info->count = SI_MAX_ERRLOG_SIZE - 1;
				SI_PRINT(("AXI Error log overflow\n"));
			}

			return ret;
		}
	}
#endif /* BCM_BACKPLANE_TIMEOUT */

	for (i = 0; i < sii->axi_num_wrappers; ++i) {
		uint32 tmp;

		if (axi_wrapper[i].wrapper_type != AI_SLAVE_WRAPPER) {
			continue;
		}

#ifdef BCM_BACKPLANE_TIMEOUT
		if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
			/* Set BAR0_CORE2_WIN2 to the bridge wrapper base address */
			OSL_PCI_WRITE_CONFIG(osh,
				cfg_reg, 4, axi_wrapper[i].wrapper_addr);

			/* set AI to BAR0 + offset corresponding to Gen1 or Gen2 */
			ai = (aidmp_t *) ((uint8*)sii->curmap + offset);
		}
		else
#endif /* BCM_BACKPLANE_TIMEOUT */
		{
			ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr;
		}

		tmp = ai_clear_backplane_to_per_core(sih, axi_wrapper[i].cid, 0, (void*)ai);

		ret |= tmp;
	}

#ifdef BCM_BACKPLANE_TIMEOUT
	/* Restore the initial wrapper space */
	if (prev_value) {
		OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value);
	}
#endif /* BCM_BACKPLANE_TIMEOUT */

#endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */

	return ret;
}

uint
ai_num_slaveports(si_t *sih, uint coreidx)
{
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
	uint32 cib;

	cib = cores_info->cib[coreidx];
	return ((cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT);
}