/*
 * Misc utility routines for accessing chip-specific features
 * of the SiliconBackplane-based Broadcom chips.
 *
 * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
 *
 * Copyright (C) 1999-2017, Broadcom Corporation
 *
 *      Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 *      As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module.  An independent module is a module which is not
 * derived from this software.  The special exception does not apply to any
 * modifications of the software.
 *
 *      Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: sbutils.c 700323 2017-05-18 16:12:11Z $
 */

#include <bcm_cfg.h>
#include <typedefs.h>
#include <bcmdefs.h>
#include <osl.h>
#include <bcmutils.h>
#include <siutils.h>
#include <bcmdevs.h>
#include <hndsoc.h>
#include <sbchipc.h>
#include <pcicfg.h>
#include <sbpcmcia.h>

#include "siutils_priv.h"

/* local prototypes */
static uint _sb_coreidx(si_info_t *sii, uint32 sba);
static uint _sb_scan(si_info_t *sii, uint32 sba, volatile void *regs, uint bus, uint32 sbba,
                     uint ncores, uint devid);
static uint32 _sb_coresba(si_info_t *sii);
static volatile void *_sb_setcoreidx(si_info_t *sii, uint coreidx);
#define	SET_SBREG(sii, r, mask, val)	\
		W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val)))
#define	REGS2SB(va)	(sbconfig_t*) ((volatile int8*)(va) + SBCONFIGOFF)

/* sonicsrev */
#define	SONICS_2_2	(SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
#define	SONICS_2_3	(SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)

#define	R_SBREG(sii, sbr)	sb_read_sbreg((sii), (sbr))
#define	W_SBREG(sii, sbr, v)	sb_write_sbreg((sii), (sbr), (v))
#define	AND_SBREG(sii, sbr, v)	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v)))
#define	OR_SBREG(sii, sbr, v)	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v)))

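/*
 * Illustrative usage (not taken from this file): all backplane config accesses
 * go through the PCMCIA-aware accessors below, so a read-modify-write such as
 *
 *	SET_SBREG(sii, &sb->sbtmstatelow, SBTML_REJ_MASK, SBTML_REJ);
 *
 * expands to W_SBREG(sii, r, (R_SBREG(sii, r) & ~SBTML_REJ_MASK) | SBTML_REJ).
 */
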
static uint32
sb_read_sbreg(si_info_t *sii, volatile uint32 *sbr)
{
	uint8 tmp;
	uint32 val, intr_val = 0;

	/*
	 * Compact flash only has an 11-bit address, while we need a 12-bit address.
	 * MEM_SEG will be OR'd with the other 11 address bits in hardware,
	 * so we program MEM_SEG with the 12th bit when necessary (access to sb registers).
	 * For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special.
	 */
	if (PCMCIA(sii)) {
		INTR_OFF(sii, intr_val);
		tmp = 1;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
		sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
	}

	val = R_REG(sii->osh, sbr);

	if (PCMCIA(sii)) {
		tmp = 0;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
		INTR_RESTORE(sii, intr_val);
	}

	return (val);
}

static void
sb_write_sbreg(si_info_t *sii, volatile uint32 *sbr, uint32 v)
{
	uint8 tmp;
	volatile uint32 dummy;
	uint32 intr_val = 0;

	/*
	 * Compact flash only has an 11-bit address, while we need a 12-bit address.
	 * MEM_SEG will be OR'd with the other 11 address bits in hardware,
	 * so we program MEM_SEG with the 12th bit when necessary (access to sb registers).
	 * For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special.
	 */
	if (PCMCIA(sii)) {
		INTR_OFF(sii, intr_val);
		tmp = 1;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
		sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
	}

	if (BUSTYPE(sii->pub.bustype) == PCMCIA_BUS) {
		dummy = R_REG(sii->osh, sbr);
		BCM_REFERENCE(dummy);
		W_REG(sii->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
		dummy = R_REG(sii->osh, sbr);
		BCM_REFERENCE(dummy);
		W_REG(sii->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
	} else
		W_REG(sii->osh, sbr, v);

	if (PCMCIA(sii)) {
		tmp = 0;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
		INTR_RESTORE(sii, intr_val);
	}
}

uint
sb_coreid(si_t *sih)
{
	si_info_t *sii;
	sbconfig_t *sb;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT);
}

uint
sb_intflag(si_t *sih)
{
	si_info_t *sii = SI_INFO(sih);
	volatile void *corereg;
	sbconfig_t *sb;
	uint origidx, intflag, intr_val = 0;

	INTR_OFF(sii, intr_val);
	origidx = si_coreidx(sih);
	corereg = si_setcore(sih, CC_CORE_ID, 0);
	ASSERT(corereg != NULL);
	sb = REGS2SB(corereg);
	intflag = R_SBREG(sii, &sb->sbflagst);
	sb_setcoreidx(sih, origidx);
	INTR_RESTORE(sii, intr_val);

	return intflag;
}

uint
sb_flag(si_t *sih)
{
	si_info_t *sii;
	sbconfig_t *sb;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	return R_SBREG(sii, &sb->sbtpsflag) & SBTPS_NUM0_MASK;
}

void
sb_setint(si_t *sih, int siflag)
{
	si_info_t *sii;
	sbconfig_t *sb;
	uint32 vec;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	if (siflag == -1)
		vec = 0;
	else
		vec = 1 << siflag;
	W_SBREG(sii, &sb->sbintvec, vec);
}

/* return core index of the core with address 'sba' */
static uint
_sb_coreidx(si_info_t *sii, uint32 sba)
{
	uint i;
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

	for (i = 0; i < sii->numcores; i ++)
		if (sba == cores_info->coresba[i])
			return i;
	return BADIDX;
}

/* return core address of the current core */
static uint32
_sb_coresba(si_info_t *sii)
{
	uint32 sbaddr;

	switch (BUSTYPE(sii->pub.bustype)) {
	case SI_BUS: {
		sbconfig_t *sb = REGS2SB(sii->curmap);
		sbaddr = sb_base(R_SBREG(sii, &sb->sbadmatch0));
		break;
	}

	case PCI_BUS:
		sbaddr = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
		break;

	case PCMCIA_BUS: {
		uint8 tmp = 0;
		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
		sbaddr  = (uint32)tmp << 12;
		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
		sbaddr |= (uint32)tmp << 16;
		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
		sbaddr |= (uint32)tmp << 24;
		break;
	}

#ifdef BCMSDIO
	case SPI_BUS:
	case SDIO_BUS:
		sbaddr = (uint32)(uintptr)sii->curmap;
		break;
#endif // endif

	default:
		sbaddr = BADCOREADDR;
		break;
	}

	return sbaddr;
}

uint
sb_corevendor(si_t *sih)
{
	si_info_t *sii;
	sbconfig_t *sb;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT);
}

uint
sb_corerev(si_t *sih)
{
	si_info_t *sii;
	sbconfig_t *sb;
	uint sbidh;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);
	sbidh = R_SBREG(sii, &sb->sbidhigh);

	return (SBCOREREV(sbidh));
}

/* set core-specific control flags */
void
sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
{
	si_info_t *sii;
	sbconfig_t *sb;
	uint32 w;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	ASSERT((val & ~mask) == 0);

	/* mask and set */
	w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
	        (val << SBTML_SICF_SHIFT);
	W_SBREG(sii, &sb->sbtmstatelow, w);
}

/* set/clear core-specific control flags */
uint32
sb_core_cflags(si_t *sih, uint32 mask, uint32 val)
{
	si_info_t *sii;
	sbconfig_t *sb;
	uint32 w;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	ASSERT((val & ~mask) == 0);

	/* mask and set */
	if (mask || val) {
		w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
		        (val << SBTML_SICF_SHIFT);
		W_SBREG(sii, &sb->sbtmstatelow, w);
	}

	/* return the new value;
	 * for a write, the following readback ensures the completion of the write operation.
	 */
	return (R_SBREG(sii, &sb->sbtmstatelow) >> SBTML_SICF_SHIFT);
}

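/*
 * Illustrative usage (hypothetical caller, not taken from this file):
 *
 *	uint32 cflags = sb_core_cflags(sih, 0, 0);	// read-only query
 *	sb_core_cflags(sih, SICF_FGC, SICF_FGC);	// force gated clocks on
 *	sb_core_cflags(sih, SICF_FGC, 0);		// release the force
 *
 * Passing mask == 0 and val == 0 skips the write and simply returns the
 * current SICF field of sbtmstatelow.
 */
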
/* set/clear core-specific status flags */
uint32
sb_core_sflags(si_t *sih, uint32 mask, uint32 val)
{
	si_info_t *sii;
	sbconfig_t *sb;
	uint32 w;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	ASSERT((val & ~mask) == 0);
	ASSERT((mask & ~SISF_CORE_BITS) == 0);

	/* mask and set */
	if (mask || val) {
		w = (R_SBREG(sii, &sb->sbtmstatehigh) & ~(mask << SBTMH_SISF_SHIFT)) |
		        (val << SBTMH_SISF_SHIFT);
		W_SBREG(sii, &sb->sbtmstatehigh, w);
	}

	/* return the new value */
	return (R_SBREG(sii, &sb->sbtmstatehigh) >> SBTMH_SISF_SHIFT);
}

bool
sb_iscoreup(si_t *sih)
{
	si_info_t *sii;
	sbconfig_t *sb;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	return ((R_SBREG(sii, &sb->sbtmstatelow) &
	         (SBTML_RESET | SBTML_REJ_MASK | (SICF_CLOCK_EN << SBTML_SICF_SHIFT))) ==
	        (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
}

/*
 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
 * switch back to the original core, and return the new value.
 *
 * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
 *
 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
 * and (on newer pci cores) chipcommon registers.
 */
uint
sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
{
	uint origidx = 0;
	volatile uint32 *r = NULL;
	uint w;
	uint intr_val = 0;
	bool fast = FALSE;
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

	ASSERT(GOODIDX(coreidx));
	ASSERT(regoff < SI_CORE_SIZE);
	ASSERT((val & ~mask) == 0);

	if (coreidx >= SI_MAXCORES)
		return 0;

	if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
		/* If internal bus, we can always get at everything */
		fast = TRUE;
		/* map if it does not exist */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
			                            SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
	} else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
		/* If pci/pcie, we can get at pci/pcie regs and, on newer cores, chipc */

		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
			/* Chipc registers are mapped at 12KB */

			fast = TRUE;
			r = (volatile uint32 *)((volatile char *)sii->curmap +
			               PCI_16KB0_CCREGS_OFFSET + regoff);
		} else if (sii->pub.buscoreidx == coreidx) {
			/* pci registers are either in the last 2KB of an 8KB window
			 * or, in pcie and pci rev 13, at 8KB
			 */
			fast = TRUE;
			if (SI_FAST(sii))
				r = (volatile uint32 *)((volatile char *)sii->curmap +
				               PCI_16KB0_PCIREGS_OFFSET + regoff);
			else
				r = (volatile uint32 *)((volatile char *)sii->curmap +
				               ((regoff >= SBCONFIGOFF) ?
				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
				               regoff);
		}
	}

	if (!fast) {
		INTR_OFF(sii, intr_val);

		/* save current core index */
		origidx = si_coreidx(&sii->pub);

		/* switch core */
		r = (volatile uint32*) ((volatile uchar*)sb_setcoreidx(&sii->pub, coreidx) +
		               regoff);
	}
	ASSERT(r != NULL);

	/* mask and set */
	if (mask || val) {
		if (regoff >= SBCONFIGOFF) {
			w = (R_SBREG(sii, r) & ~mask) | val;
			W_SBREG(sii, r, w);
		} else {
			w = (R_REG(sii->osh, r) & ~mask) | val;
			W_REG(sii->osh, r, w);
		}
	}

	/* readback */
	if (regoff >= SBCONFIGOFF)
		w = R_SBREG(sii, r);
	else {
		w = R_REG(sii->osh, r);
	}

	if (!fast) {
		/* restore core index */
		if (origidx != coreidx)
			sb_setcoreidx(&sii->pub, origidx);

		INTR_RESTORE(sii, intr_val);
	}

	return (w);
}

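/*
 * Illustrative usage (hypothetical caller, not taken from this file): a
 * read-modify-write of a chipcommon register without explicitly switching
 * cores, assuming the chipcommon core sits at index SI_CC_IDX:
 *
 *	uint32 cap;
 *
 *	cap = sb_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, capabilities), 0, 0);
 *	sb_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, gpioout), mask, val);
 *
 * mask == 0 / val == 0 turns the call into a plain read of the register.
 */
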
/*
 * If there is no need for fiddling with interrupts or core switches (typically silicon
 * backplane registers, pci registers and chipcommon registers), this function
 * translates the register offset on this core to a mapped address. This address can
 * be used for W_REG/R_REG directly.
 *
 * For accessing registers that would need a core switch, this function will return
 * NULL.
 */
volatile uint32 *
sb_corereg_addr(si_t *sih, uint coreidx, uint regoff)
{
	volatile uint32 *r = NULL;
	bool fast = FALSE;
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

	ASSERT(GOODIDX(coreidx));
	ASSERT(regoff < SI_CORE_SIZE);

	if (coreidx >= SI_MAXCORES)
		return 0;

	if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
		/* If internal bus, we can always get at everything */
		fast = TRUE;
		/* map if it does not exist */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
			                            SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
	} else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
		/* If pci/pcie, we can get at pci/pcie regs and, on newer cores, chipc */

		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
			/* Chipc registers are mapped at 12KB */

			fast = TRUE;
			r = (volatile uint32 *)((volatile char *)sii->curmap +
			               PCI_16KB0_CCREGS_OFFSET + regoff);
		} else if (sii->pub.buscoreidx == coreidx) {
			/* pci registers are either in the last 2KB of an 8KB window
			 * or, in pcie and pci rev 13, at 8KB
			 */
			fast = TRUE;
			if (SI_FAST(sii))
				r = (volatile uint32 *)((volatile char *)sii->curmap +
				               PCI_16KB0_PCIREGS_OFFSET + regoff);
			else
				r = (volatile uint32 *)((volatile char *)sii->curmap +
				               ((regoff >= SBCONFIGOFF) ?
				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
				               regoff);
		}
	}

	if (!fast)
		return 0;

	return (r);
}

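/*
 * Illustrative usage (hypothetical caller, not taken from this file): obtain a
 * direct pointer once and access the register with R_REG/W_REG, falling back
 * to sb_corereg() when a core switch would be required:
 *
 *	volatile uint32 *addr = sb_corereg_addr(sih, SI_CC_IDX,
 *	                                        OFFSETOF(chipcregs_t, chipid));
 *	if (addr != NULL)
 *		chipid = R_REG(osh, addr);
 *	else
 *		chipid = sb_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, chipid), 0, 0);
 */
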
/* Scan the enumeration space to find all cores starting from the given
 * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba'
 * is the default core address at chip POR time and 'regs' is the virtual
 * address that the default core is mapped at. 'ncores' is the number of
 * cores expected on bus 'sbba'. It returns the total number of cores
 * starting from bus 'sbba', inclusive.
 */
#define SB_MAXBUSES	2
static uint
_sb_scan(si_info_t *sii, uint32 sba, volatile void *regs, uint bus,
	uint32 sbba, uint numcores, uint devid)
{
	uint next;
	uint ncc = 0;
	uint i;
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

	if (bus >= SB_MAXBUSES) {
		SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba, bus));
		return 0;
	}
	SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba, numcores));

	/* Scan all cores on the bus starting from core 0.
	 * Core addresses must be contiguous on each bus.
	 */
	for (i = 0, next = sii->numcores; i < numcores && next < SB_BUS_MAXCORES; i++, next++) {
		cores_info->coresba[next] = sbba + (i * SI_CORE_SIZE);

		/* keep and reuse the initial register mapping */
		if ((BUSTYPE(sii->pub.bustype) == SI_BUS) && (cores_info->coresba[next] == sba)) {
			SI_VMSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs, next));
			cores_info->regs[next] = regs;
		}

		/* change core to 'next' and read its coreid */
		sii->curmap = _sb_setcoreidx(sii, next);
		sii->curidx = next;

		cores_info->coreid[next] = sb_coreid(&sii->pub);

		/* core specific processing... */
		/* chipc provides # cores */
		if (cores_info->coreid[next] == CC_CORE_ID) {
			chipcregs_t *cc = (chipcregs_t *)sii->curmap;
			uint32 ccrev = sb_corerev(&sii->pub);

			/* determine numcores - this is the total # cores in the chip */
			if (((ccrev == 4) || (ccrev >= 6))) {
				ASSERT(cc);
				numcores = (R_REG(sii->osh, &cc->chipid) & CID_CC_MASK) >>
				        CID_CC_SHIFT;
			} else {
				/* Older chips */
				uint chip = CHIPID(sii->pub.chip);

				if (chip == BCM4704_CHIP_ID)
					numcores = 9;
				else if (chip == BCM5365_CHIP_ID)
					numcores = 7;
				else {
					SI_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n",
					          chip));
					ASSERT(0);
					numcores = 1;
				}
			}
			SI_VMSG(("_sb_scan: there are %u cores in the chip %s\n", numcores,
				sii->pub.issim ? "QT" : ""));
		}
		/* scan bridged SB(s) and add results to the end of the list */
		else if (cores_info->coreid[next] == OCP_CORE_ID) {
			sbconfig_t *sb = REGS2SB(sii->curmap);
			uint32 nsbba = R_SBREG(sii, &sb->sbadmatch1);
			uint nsbcc;

			sii->numcores = next + 1;

			if ((nsbba & 0xfff00000) != si_enum_base(devid))
				continue;
			nsbba &= 0xfffff000;
			if (_sb_coreidx(sii, nsbba) != BADIDX)
				continue;

			nsbcc = (R_SBREG(sii, &sb->sbtmstatehigh) & 0x000f0000) >> 16;
			nsbcc = _sb_scan(sii, sba, regs, bus + 1, nsbba, nsbcc, devid);
			if (sbba == si_enum_base(devid))
				numcores -= nsbcc;
			ncc += nsbcc;
		}
	}

	SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i, sbba));

	sii->numcores = i + ncc;
	return sii->numcores;
}

/* scan the sb enumerated space to identify all cores */
void
sb_scan(si_t *sih, volatile void *regs, uint devid)
{
	uint32 origsba;
	sbconfig_t *sb;
	si_info_t *sii = SI_INFO(sih);
	BCM_REFERENCE(devid);

	sb = REGS2SB(sii->curmap);

	sii->pub.socirev = (R_SBREG(sii, &sb->sbidlow) & SBIDL_RV_MASK) >> SBIDL_RV_SHIFT;

	/* Save the current core info and validate it later till we know
	 * for sure what is good and what is bad.
	 */
	origsba = _sb_coresba(sii);

	/* scan all SB(s) starting from SI_ENUM_BASE_DEFAULT */
	sii->numcores = _sb_scan(sii, origsba, regs, 0, si_enum_base(devid), 1, devid);
}

/*
 * This function changes logical "focus" to the indicated core;
 * it must be called with interrupts off.
 * Moreover, callers should keep interrupts off while switching out of and back to the d11 core.
 */
volatile void *
sb_setcoreidx(si_t *sih, uint coreidx)
{
	si_info_t *sii = SI_INFO(sih);

	if (coreidx >= sii->numcores)
		return (NULL);

	/*
	 * If the user has provided an interrupt mask enabled function,
	 * then assert interrupts are disabled before switching the core.
	 */
	ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));

	sii->curmap = _sb_setcoreidx(sii, coreidx);
	sii->curidx = coreidx;

	return (sii->curmap);
}

/* This function changes the logical "focus" to the indicated core.
 * Return the current core's virtual address.
 */
static volatile void *
_sb_setcoreidx(si_info_t *sii, uint coreidx)
{
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
	uint32 sbaddr = cores_info->coresba[coreidx];
	volatile void *regs;

	switch (BUSTYPE(sii->pub.bustype)) {
	case SI_BUS:
		/* map new one */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = REG_MAP(sbaddr, SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		regs = cores_info->regs[coreidx];
		break;

	case PCI_BUS:
		/* point bar0 window */
		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, sbaddr);
		regs = sii->curmap;
		break;

	case PCMCIA_BUS: {
		uint8 tmp = (sbaddr >> 12) & 0x0f;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
		tmp = (sbaddr >> 16) & 0xff;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
		tmp = (sbaddr >> 24) & 0xff;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
		regs = sii->curmap;
		break;
	}
#ifdef BCMSDIO
	case SPI_BUS:
	case SDIO_BUS:
		/* map new one */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = (void *)(uintptr)sbaddr;
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		regs = cores_info->regs[coreidx];
		break;
#endif	/* BCMSDIO */

	default:
		ASSERT(0);
		regs = NULL;
		break;
	}

	return regs;
}

/* Return the address of the sbadmatch0/1/2/3 register */
static volatile uint32 *
sb_admatch(si_info_t *sii, uint asidx)
{
	sbconfig_t *sb;
	volatile uint32 *addrm;

	sb = REGS2SB(sii->curmap);

	switch (asidx) {
	case 0:
		addrm = &sb->sbadmatch0;
		break;

	case 1:
		addrm = &sb->sbadmatch1;
		break;

	case 2:
		addrm = &sb->sbadmatch2;
		break;

	case 3:
		addrm = &sb->sbadmatch3;
		break;

	default:
		SI_ERROR(("%s: Address space index (%d) out of range\n", __FUNCTION__, asidx));
		return 0;
	}

	return (addrm);
}

/* Return the number of address spaces in the current core */
int
sb_numaddrspaces(si_t *sih)
{
	si_info_t *sii;
	sbconfig_t *sb;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	/* + 1 because of enumeration space */
	return ((R_SBREG(sii, &sb->sbidlow) & SBIDL_AR_MASK) >> SBIDL_AR_SHIFT) + 1;
}

/* Return the address of the nth address space in the current core */
uint32
sb_addrspace(si_t *sih, uint asidx)
{
	si_info_t *sii;

	sii = SI_INFO(sih);

	return (sb_base(R_SBREG(sii, sb_admatch(sii, asidx))));
}

/* Return the size of the nth address space in the current core */
uint32
sb_addrspacesize(si_t *sih, uint asidx)
{
	si_info_t *sii;

	sii = SI_INFO(sih);

	return (sb_size(R_SBREG(sii, sb_admatch(sii, asidx))));
}

/* do a buffered register update */
void
sb_commit(si_t *sih)
{
	si_info_t *sii = SI_INFO(sih);
	uint origidx;
	uint intr_val = 0;

	origidx = sii->curidx;
	ASSERT(GOODIDX(origidx));

	INTR_OFF(sii, intr_val);

	/* switch over to chipcommon core if there is one, else use pci */
	if (sii->pub.ccrev != NOREV) {
		chipcregs_t *ccregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
		ASSERT(ccregs != NULL);

		/* do the buffered register update */
		W_REG(sii->osh, &ccregs->broadcastaddress, SB_COMMIT);
		W_REG(sii->osh, &ccregs->broadcastdata, 0x0);
	} else
		ASSERT(0);

	/* restore core index */
	sb_setcoreidx(sih, origidx);
	INTR_RESTORE(sii, intr_val);
}

void
sb_core_disable(si_t *sih, uint32 bits)
{
	si_info_t *sii;
	volatile uint32 dummy;
	sbconfig_t *sb;

	sii = SI_INFO(sih);

	ASSERT(GOODREGS(sii->curmap));
	sb = REGS2SB(sii->curmap);

	/* if core is already in reset, just return */
	if (R_SBREG(sii, &sb->sbtmstatelow) & SBTML_RESET)
		return;

	/* if clocks are not enabled, put into reset and return */
	if ((R_SBREG(sii, &sb->sbtmstatelow) & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) == 0)
		goto disable;

	/* set target reject and spin until busy is clear (preserve core-specific bits) */
	OR_SBREG(sii, &sb->sbtmstatelow, SBTML_REJ);
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);
	SPINWAIT((R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000);
	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY)
		SI_ERROR(("%s: target state still busy\n", __FUNCTION__));

	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) {
		OR_SBREG(sii, &sb->sbimstate, SBIM_RJ);
		dummy = R_SBREG(sii, &sb->sbimstate);
		BCM_REFERENCE(dummy);
		OSL_DELAY(1);
		SPINWAIT((R_SBREG(sii, &sb->sbimstate) & SBIM_BY), 100000);
	}

	/* set reset and reject while enabling the clocks */
	W_SBREG(sii, &sb->sbtmstatelow,
	        (((bits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
	         SBTML_REJ | SBTML_RESET));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(10);

	/* don't forget to clear the initiator reject bit */
	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT)
		AND_SBREG(sii, &sb->sbimstate, ~SBIM_RJ);

disable:
	/* leave reset and reject asserted */
	W_SBREG(sii, &sb->sbtmstatelow, ((bits << SBTML_SICF_SHIFT) | SBTML_REJ | SBTML_RESET));
	OSL_DELAY(1);
}

/* reset and re-enable a core
 * inputs:
 * bits - core specific bits that are set during and after reset sequence
 * resetbits - core specific bits that are set only during reset sequence
 */
void
sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
{
	si_info_t *sii;
	sbconfig_t *sb;
	volatile uint32 dummy;

	sii = SI_INFO(sih);
	ASSERT(GOODREGS(sii->curmap));
	sb = REGS2SB(sii->curmap);

	/*
	 * Must do the disable sequence first to work for arbitrary current core state.
	 */
	sb_core_disable(sih, (bits | resetbits));

	/*
	 * Now do the initialization sequence.
	 */

	/* set reset while enabling the clock and forcing them on throughout the core */
	W_SBREG(sii, &sb->sbtmstatelow,
	        (((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
	         SBTML_RESET));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);

	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_SERR) {
		W_SBREG(sii, &sb->sbtmstatehigh, 0);
	}
	if ((dummy = R_SBREG(sii, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) {
		AND_SBREG(sii, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
	}

	/* clear reset and allow it to propagate throughout the core */
	W_SBREG(sii, &sb->sbtmstatelow,
	        ((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);

	/* leave clock enabled */
	W_SBREG(sii, &sb->sbtmstatelow, ((bits | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);
}

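/*
 * Illustrative bring-up sequence (hypothetical caller, not taken from this
 * file): select the core, reset it with no extra core-specific bits, and
 * verify that it came out of reset with its clock enabled:
 *
 *	sb_setcoreidx(sih, coreidx);
 *	sb_core_reset(sih, 0, 0);
 *	ASSERT(sb_iscoreup(sih));
 */
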
/*
 * Set the initiator timeout for the "master core".
 * The master core is defined to be the core in control
 * of the chip and so it issues accesses to non-memory
 * locations (because of dma, *any* core can access memory).
 *
 * The routine uses the bus to decide who is the master:
 *	SI_BUS => mips
 *	JTAG_BUS => chipc
 *	PCI_BUS => pci or pcie
 *	PCMCIA_BUS => pcmcia
 *	SDIO_BUS => pcmcia
 *
 * This routine exists so callers can disable initiator
 * timeouts so accesses to very slow devices like otp
 * won't cause an abort. The routine allows arbitrary
 * settings of the service and request timeouts, though.
 *
 * Returns the timeout state before changing it or -1
 * on error.
 */

#define	TO_MASK	(SBIMCL_RTO_MASK | SBIMCL_STO_MASK)

uint32
sb_set_initiator_to(si_t *sih, uint32 to, uint idx)
{
	si_info_t *sii = SI_INFO(sih);
	uint origidx;
	uint intr_val = 0;
	uint32 tmp, ret = 0xffffffff;
	sbconfig_t *sb;

	if ((to & ~TO_MASK) != 0)
		return ret;

	/* Figure out the master core */
	if (idx == BADIDX) {
		switch (BUSTYPE(sii->pub.bustype)) {
		case PCI_BUS:
			idx = sii->pub.buscoreidx;
			break;
		case JTAG_BUS:
			idx = SI_CC_IDX;
			break;
		case PCMCIA_BUS:
#ifdef BCMSDIO
		case SDIO_BUS:
#endif // endif
			idx = si_findcoreidx(sih, PCMCIA_CORE_ID, 0);
			break;
		case SI_BUS:
			idx = si_findcoreidx(sih, MIPS33_CORE_ID, 0);
			break;
		default:
			ASSERT(0);
		}
		if (idx == BADIDX)
			return ret;
	}

	INTR_OFF(sii, intr_val);
	origidx = si_coreidx(sih);

	sb = REGS2SB(sb_setcoreidx(sih, idx));

	tmp = R_SBREG(sii, &sb->sbimconfiglow);
	ret = tmp & TO_MASK;
	W_SBREG(sii, &sb->sbimconfiglow, (tmp & ~TO_MASK) | to);

	sb_commit(sih);
	sb_setcoreidx(sih, origidx);
	INTR_RESTORE(sii, intr_val);
	return ret;
}

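/*
 * Illustrative usage (hypothetical caller, not taken from this file): disable
 * initiator timeouts around an access to a slow device such as otp and then
 * restore the previous setting; BADIDX lets the routine pick the master core
 * for the current bus.
 *
 *	uint32 saved_to = sb_set_initiator_to(sih, 0, BADIDX);
 *	... access the slow device ...
 *	sb_set_initiator_to(sih, saved_to, BADIDX);
 */
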
uint32
sb_base(uint32 admatch)
{
	uint32 base;
	uint type;

	type = admatch & SBAM_TYPE_MASK;
	ASSERT(type < 3);

	base = 0;

	if (type == 0) {
		base = admatch & SBAM_BASE0_MASK;
	} else if (type == 1) {
		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
		base = admatch & SBAM_BASE1_MASK;
	} else if (type == 2) {
		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
		base = admatch & SBAM_BASE2_MASK;
	}

	return (base);
}

uint32
sb_size(uint32 admatch)
{
	uint32 size;
	uint type;

	type = admatch & SBAM_TYPE_MASK;
	ASSERT(type < 3);

	size = 0;

	if (type == 0) {
		size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1);
	} else if (type == 1) {
		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
		size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1);
	} else if (type == 2) {
		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
		size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1);
	}

	return (size);
}

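/*
 * Worked example (illustrative; assumes the usual SBAM_* field layout from
 * sbconfig.h, where a type-0 admatch keeps the size exponent in bits 3..7 and
 * the base in the upper bits): for a core whose admatch0 reads back as
 * 0x18000058, type = 0 and the ADINT0 field is (0x58 >> 3) = 11, so
 *
 *	sb_size(0x18000058) == 1 << (11 + 1) == 0x1000  (4KB, i.e. SI_CORE_SIZE)
 *	sb_base(0x18000058) == 0x18000000
 */
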
#if defined(BCMDBG_PHYDUMP)
/* print interesting sbconfig registers */
void
sb_dumpregs(si_t *sih, struct bcmstrbuf *b)
{
	sbconfig_t *sb;
	uint origidx, i, intr_val = 0;
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

	origidx = sii->curidx;

	INTR_OFF(sii, intr_val);

	for (i = 0; i < sii->numcores; i++) {
		sb = REGS2SB(sb_setcoreidx(sih, i));

		bcm_bprintf(b, "core 0x%x: \n", cores_info->coreid[i]);

		if (sii->pub.socirev > SONICS_2_2)
			bcm_bprintf(b, "sbimerrlog 0x%x sbimerrloga 0x%x\n",
			          sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOG, 0, 0),
			          sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOGA, 0, 0));

		bcm_bprintf(b, "sbtmstatelow 0x%x sbtmstatehigh 0x%x sbidhigh 0x%x "
		            "sbimstate 0x%x\n sbimconfiglow 0x%x sbimconfighigh 0x%x\n",
		            R_SBREG(sii, &sb->sbtmstatelow), R_SBREG(sii, &sb->sbtmstatehigh),
		            R_SBREG(sii, &sb->sbidhigh), R_SBREG(sii, &sb->sbimstate),
		            R_SBREG(sii, &sb->sbimconfiglow), R_SBREG(sii, &sb->sbimconfighigh));
	}

	sb_setcoreidx(sih, origidx);
	INTR_RESTORE(sii, intr_val);
}
#endif // endif