/*
 * Misc utility routines for accessing PMU corerev specific features
 * of the SiliconBackplane-based Broadcom chips.
 *
 * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
 *
 * Copyright (C) 1999-2017, Broadcom Corporation
 *
 *      Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 *      As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module.  An independent module is a module which is not
 * derived from this software.  The special exception does not apply to any
 * modifications of the software.
 *
 *      Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: hndpmu.c 700652 2017-05-20 02:44:31Z $
 */

/**
 * @file
 * Note: this file contains PLL/FLL related functions. A chip can contain multiple PLLs/FLLs.
 * However, in the context of this file, 'PLL/FLL' refers to the baseband ('BB') PLL/FLL.
 *
 * Throughout this code, the prefixes 'pmu1_' and 'pmu2_' are used.
 * They refer to different revisions of the PMU (which is at revision 18 as of Apr 25, 2012).
 * pmu1_ marks the transition from PLL to ADFLL (Digital Frequency Locked Loop); it supports
 * fractional frequency generation. pmu2_ does not support fractional frequency generation.
 */

#include <bcm_cfg.h>
#include <typedefs.h>
#include <bcmdefs.h>
#include <osl.h>
#include <bcmutils.h>
#include <siutils.h>
#include <bcmdevs.h>
#include <hndsoc.h>
#include <sbchipc.h>
#include <hndchipc.h>
#include <hndpmu.h>
#include <hndlhl.h>
#if defined(BCMULP)
#include <ulp.h>
#endif /* defined(BCMULP) */
#include <sbgci.h>
#ifdef EVENT_LOG_COMPILE
#include <event_log.h>
#endif /* EVENT_LOG_COMPILE */
#include <lpflags.h>

#define	PMU_ERROR(args)

#define	PMU_MSG(args)

/* Hook for verbose debugging messages that are not intended
 * to be enabled except on private builds.
 */
#define	PMU_NONE(args)
#define flags_shift	14

/** contains resource bit positions for a specific chip */
struct rsc_per_chip_s {
	uint8 ht_avail;
	uint8 macphy_clkavail;
	uint8 ht_start;
	uint8 otp_pu;
	uint8 macphy_aux_clkavail;
};

typedef struct rsc_per_chip_s rsc_per_chip_t;

#if defined(BCMPMU_STATS) && !defined(BCMPMU_STATS_DISABLED)
bool	_pmustatsenab = TRUE;
#else
bool	_pmustatsenab = FALSE;
#endif /* BCMPMU_STATS */

/**
 * This function balances stable SDIO operation against power consumption.
 * Note that each drive strength table is for a specific VDDIO of the SDIO pads; ideally this
 * function should read the VDDIO itself to select the correct table. For now this is handled
 * with the 'BCM_SDIO_VDDIO' preprocessor constant.
 *
 * 'drivestrength': desired pad drive strength in mA. A drive strength of 0 requests tri-state
 *		    (if the hardware supports this); without hardware support the drive strength
 *		    is not programmed.
 */
void
si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength)
{
	/*
	 * Note:
	 * This function used to set the SDIO drive strength via PMU_CHIPCTL1 for the
	 * 43143, 4330, 4334, 4336, 43362 chips.  These chips are no longer supported, so
	 * the code has been deleted.
	 * Newer chips have the SDIO drive strength setting via a GCI Chip Control register,
	 * but the bit definitions are chip-specific.  We are keeping this function available
	 * (accessed via the DHD 'sdiod_drive' IOVar) in case these newer chips need to provide
	 * access.
	 */
	UNUSED_PARAMETER(sih);
	UNUSED_PARAMETER(osh);
	UNUSED_PARAMETER(drivestrength);
}

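/**
 * Switches the BCM4364 PMU between its 1x1, 3x3 and RSDB operating modes by selecting the
 * matching resource dependency table, max resource mask and save/restore engine settings.
 * Only compiled in when DUAL_PMU_SEQUENCE is defined; otherwise this function is a no-op.
 */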
void
si_switch_pmu_dependency(si_t *sih, uint mode)
{
#ifdef DUAL_PMU_SEQUENCE
	osl_t *osh = si_osh(sih);
	uint32 current_res_state;
	uint32 min_mask, max_mask;
	const pmu_res_depend_t *pmu_res_depend_table = NULL;
	uint pmu_res_depend_table_sz = 0;
	uint origidx;
	pmuregs_t *pmu;
	chipcregs_t *cc;
	BCM_REFERENCE(cc);

	origidx = si_coreidx(sih);
	if (AOB_ENAB(sih)) {
		pmu = si_setcore(sih, PMU_CORE_ID, 0);
		cc  = si_setcore(sih, CC_CORE_ID, 0);
	} else {
		pmu = si_setcoreidx(sih, SI_CC_IDX);
		cc  = si_setcoreidx(sih, SI_CC_IDX);
	}
	ASSERT(pmu != NULL);

	current_res_state = R_REG(osh, &pmu->res_state);
	min_mask = R_REG(osh, &pmu->min_res_mask);
	max_mask = R_REG(osh, &pmu->max_res_mask);
	W_REG(osh, &pmu->min_res_mask, (min_mask | current_res_state));
	switch (mode) {
		case PMU_4364_1x1_MODE:
		{
			if (CHIPID(sih->chip) == BCM4364_CHIP_ID) {
				pmu_res_depend_table = bcm4364a0_res_depend_1x1;
				pmu_res_depend_table_sz =
					ARRAYSIZE(bcm4364a0_res_depend_1x1);
				max_mask = PMU_4364_MAX_MASK_1x1;
				W_REG(osh, &pmu->res_table_sel, RES4364_SR_SAVE_RESTORE);
				W_REG(osh, &pmu->res_updn_timer,
					PMU_4364_SAVE_RESTORE_UPDNTIME_1x1);
#if defined(SAVERESTORE)
				if (SR_ENAB()) {
					/* Disable 3x3 SR engine */
					W_REG(osh, &cc->sr1_control0,
					CC_SR0_4364_SR_ENG_CLK_EN |
					CC_SR0_4364_SR_RSRC_TRIGGER |
					CC_SR0_4364_SR_WD_MEM_MIN_DIV |
					CC_SR0_4364_SR_INVERT_CLK |
					CC_SR0_4364_SR_ENABLE_HT |
					CC_SR0_4364_SR_ALLOW_PIC |
					CC_SR0_4364_SR_PMU_MEM_DISABLE);
				}
#endif /* SAVERESTORE */
			}
			break;
		}
		case PMU_4364_3x3_MODE:
		{
			if (CHIPID(sih->chip) == BCM4364_CHIP_ID) {
				W_REG(osh, &pmu->res_table_sel, RES4364_SR_SAVE_RESTORE);
				W_REG(osh, &pmu->res_updn_timer,
					PMU_4364_SAVE_RESTORE_UPDNTIME_3x3);
				/* Change the dependency table only if required */
				if ((max_mask != PMU_4364_MAX_MASK_3x3) ||
					(max_mask != PMU_4364_MAX_MASK_RSDB)) {
					pmu_res_depend_table = bcm4364a0_res_depend_rsdb;
					pmu_res_depend_table_sz =
						ARRAYSIZE(bcm4364a0_res_depend_rsdb);
					max_mask = PMU_4364_MAX_MASK_3x3;
				}
#if defined(SAVERESTORE)
				if (SR_ENAB()) {
					/* Enable 3x3 SR engine */
					W_REG(osh, &cc->sr1_control0,
					CC_SR0_4364_SR_ENG_CLK_EN |
					CC_SR0_4364_SR_RSRC_TRIGGER |
					CC_SR0_4364_SR_WD_MEM_MIN_DIV |
					CC_SR0_4364_SR_INVERT_CLK |
					CC_SR0_4364_SR_ENABLE_HT |
					CC_SR0_4364_SR_ALLOW_PIC |
					CC_SR0_4364_SR_PMU_MEM_DISABLE |
					CC_SR0_4364_SR_ENG_EN_MASK);
				}
#endif /* SAVERESTORE */
			}
			break;
		}
		case PMU_4364_RSDB_MODE:
		default:
		{
			if (CHIPID(sih->chip) == BCM4364_CHIP_ID) {
				W_REG(osh, &pmu->res_table_sel, RES4364_SR_SAVE_RESTORE);
				W_REG(osh, &pmu->res_updn_timer,
					PMU_4364_SAVE_RESTORE_UPDNTIME_3x3);
				/* Change the dependency table only if required */
				if ((max_mask != PMU_4364_MAX_MASK_3x3) ||
					(max_mask != PMU_4364_MAX_MASK_RSDB)) {
					pmu_res_depend_table =
						bcm4364a0_res_depend_rsdb;
					pmu_res_depend_table_sz =
						ARRAYSIZE(bcm4364a0_res_depend_rsdb);
					max_mask = PMU_4364_MAX_MASK_RSDB;
				}
#if defined(SAVERESTORE)
				if (SR_ENAB()) {
					/* Enable 3x3 SR engine */
					W_REG(osh, &cc->sr1_control0,
					CC_SR0_4364_SR_ENG_CLK_EN |
					CC_SR0_4364_SR_RSRC_TRIGGER |
					CC_SR0_4364_SR_WD_MEM_MIN_DIV |
					CC_SR0_4364_SR_INVERT_CLK |
					CC_SR0_4364_SR_ENABLE_HT |
					CC_SR0_4364_SR_ALLOW_PIC |
					CC_SR0_4364_SR_PMU_MEM_DISABLE |
					CC_SR0_4364_SR_ENG_EN_MASK);
				}
#endif /* SAVERESTORE */
			}
			break;
		}
	}
	si_pmu_resdeptbl_upd(sih, osh, pmu, pmu_res_depend_table, pmu_res_depend_table_sz);
	W_REG(osh, &pmu->max_res_mask, max_mask);
	W_REG(osh, &pmu->min_res_mask, min_mask);
	si_pmu_wait_for_steady_state(sih, osh, pmu);
	/* Add some delay; allow resources to come up and settle. */
	OSL_DELAY(200);
	si_setcoreidx(sih, origidx);
#endif /* DUAL_PMU_SEQUENCE */
}

#if defined(BCMULP)

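/** Registers the PMU as a module with the ULP (ultra low power) framework */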
int
si_pmu_ulp_register(si_t *sih)
{
	return ulp_p1_module_register(ULP_MODULE_ID_PMU, &ulp_pmu_ctx, (void *)sih);
}

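/** ULP callback: returns the size of the PMU state that must be retained across DS1 */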
static uint
si_pmu_ulp_get_retention_size_cb(void *handle, ulp_ext_info_t *einfo)
{
	ULP_DBG(("%s: sz: %d\n", __FUNCTION__, sizeof(si_pmu_ulp_cr_dat_t)));
	return sizeof(si_pmu_ulp_cr_dat_t);
}

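/** ULP callback: caches ilpcycles_per_sec in the retention area before DS1 entry */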
static int
si_pmu_ulp_enter_cb(void *handle, ulp_ext_info_t *einfo, uint8 *cache_data)
{
	si_pmu_ulp_cr_dat_t crinfo = {0};
	crinfo.ilpcycles_per_sec = ilpcycles_per_sec;
	ULP_DBG(("%s: ilpcycles_per_sec: %x\n", __FUNCTION__, ilpcycles_per_sec));
	memcpy(cache_data, (void*)&crinfo, sizeof(crinfo));
	return BCME_OK;
}

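/** ULP callback: restores ilpcycles_per_sec from the retention area after DS1 exit */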
static int
si_pmu_ulp_exit_cb(void *handle, uint8 *cache_data,
	uint8 *p2_cache_data)
{
	si_pmu_ulp_cr_dat_t *crinfo = (si_pmu_ulp_cr_dat_t *)cache_data;

	ilpcycles_per_sec = crinfo->ilpcycles_per_sec;
	ULP_DBG(("%s: ilpcycles_per_sec: %x, cache_data: %p\n", __FUNCTION__,
		ilpcycles_per_sec, cache_data));
	return BCME_OK;
}

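/**
 * Applies 43012-specific ULP chip configuration: DS1 reset/clock-enable init values,
 * High Quality clock selection, the ULP minimum resource mask and power switch overrides.
 */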
void
si_pmu_ulp_chipconfig(si_t *sih, osl_t *osh)
{
	uint32 reg_val;

	BCM_REFERENCE(reg_val);

	if (CHIPID(sih->chip) == BCM43012_CHIP_ID) {
		/* DS1 reset and clk enable init value config */
		si_pmu_chipcontrol(sih, PMU_CHIPCTL14, ~0x0,
			(PMUCCTL14_43012_ARMCM3_RESET_INITVAL |
			PMUCCTL14_43012_DOT11MAC_CLKEN_INITVAL |
			PMUCCTL14_43012_SDIOD_RESET_INIVAL |
			PMUCCTL14_43012_SDIO_CLK_DMN_RESET_INITVAL |
			PMUCCTL14_43012_SOCRAM_CLKEN_INITVAL |
			PMUCCTL14_43012_M2MDMA_RESET_INITVAL |
			PMUCCTL14_43012_DOT11MAC_PHY_CLK_EN_INITVAL |
			PMUCCTL14_43012_DOT11MAC_PHY_CNTL_EN_INITVAL));

		/* Clear SFlash clock request and enable High Quality clock */
		CHIPC_REG(sih, clk_ctl_st, CCS_SFLASH_CLKREQ | CCS_HQCLKREQ, CCS_HQCLKREQ);

		reg_val = PMU_REG(sih, min_res_mask, ~0x0, ULP_MIN_RES_MASK);
		ULP_DBG(("si_pmu_ulp_chipconfig: min_res_mask: 0x%08x\n", reg_val));

		/* Force power switch off */
		si_pmu_chipcontrol(sih, PMU_CHIPCTL2,
				(PMUCCTL02_43012_SUBCORE_PWRSW_FORCE_ON |
				PMUCCTL02_43012_PHY_PWRSW_FORCE_ON), 0);

	}
}

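/** Programs the ILP period in the PMU and mirrors the setting into the LHL block */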
void
si_pmu_ulp_ilp_config(si_t *sih, osl_t *osh, uint32 ilp_period)
{
	pmuregs_t *pmu;
	pmu = si_setcoreidx(sih, si_findcoreidx(sih, PMU_CORE_ID, 0));
	W_REG(osh, &pmu->ILPPeriod, ilp_period);
	si_lhl_ilp_config(sih, osh, ilp_period);
}

/** Initialize DS1 PMU hardware resources */
void
si_pmu_ds1_res_init(si_t *sih, osl_t *osh)
{
	pmuregs_t *pmu;
	uint origidx;
	const pmu_res_updown_t *pmu_res_updown_table = NULL;
	uint pmu_res_updown_table_sz = 0;

	/* Remember original core before switch to chipc/pmu */
	origidx = si_coreidx(sih);
	if (AOB_ENAB(sih)) {
		pmu = si_setcore(sih, PMU_CORE_ID, 0);
	} else {
		pmu = si_setcoreidx(sih, SI_CC_IDX);
	}
	ASSERT(pmu != NULL);

	switch (CHIPID(sih->chip)) {
	case BCM43012_CHIP_ID:
		pmu_res_updown_table = bcm43012a0_res_updown_ds1;
		pmu_res_updown_table_sz = ARRAYSIZE(bcm43012a0_res_updown_ds1);
		break;

	default:
		break;
	}

	/* Program up/down timers */
	while (pmu_res_updown_table_sz--) {
		ASSERT(pmu_res_updown_table != NULL);
		PMU_MSG(("DS1: Changing rsrc %d res_updn_timer to 0x%x\n",
			pmu_res_updown_table[pmu_res_updown_table_sz].resnum,
			pmu_res_updown_table[pmu_res_updown_table_sz].updown));
		W_REG(osh, &pmu->res_table_sel,
			pmu_res_updown_table[pmu_res_updown_table_sz].resnum);
		W_REG(osh, &pmu->res_updn_timer,
			pmu_res_updown_table[pmu_res_updown_table_sz].updown);
	}

	/* Return to original core */
	si_setcoreidx(sih, origidx);
}

#endif /* defined(BCMULP) */

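/** Returns the chip-specific GCI wake bit mask; asserts on chips without a known mapping */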
uint32
si_pmu_wake_bit_offset(si_t *sih)
{
	uint32 wakebit;

	switch (CHIPID(sih->chip)) {
	case BCM4347_CHIP_GRPID:
		wakebit = CC2_4347_GCI2WAKE_MASK;
		break;
	default:
		wakebit = 0;
		ASSERT(0);
		break;
	}

	return wakebit;
}

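/** Programs the PMU minimum resource mask and waits briefly for it to take effect */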
void si_pmu_set_min_res_mask(si_t *sih, osl_t *osh, uint min_res_mask)
{
	pmuregs_t *pmu;
	uint origidx;

	/* Remember original core before switch to chipc/pmu */
	origidx = si_coreidx(sih);
	if (AOB_ENAB(sih)) {
		pmu = si_setcore(sih, PMU_CORE_ID, 0);
	} else {
		pmu = si_setcoreidx(sih, SI_CC_IDX);
	}
	ASSERT(pmu != NULL);

	W_REG(osh, &pmu->min_res_mask, min_res_mask);
	OSL_DELAY(100);

	/* Return to original core */
	si_setcoreidx(sih, origidx);
}

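/** Returns TRUE if the PMU reports fast LPO (muxed ILP clock) capability */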
bool
si_pmu_cap_fast_lpo(si_t *sih)
{
	return (PMU_REG(sih, core_cap_ext, 0, 0) & PCAP_EXT_USE_MUXED_ILP_CLK_MASK) ? TRUE : FALSE;
}

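/** Clears all fast LPO enables in pmucontrol_ext; returns BCME_ERROR if the capability
 * is not present.
 */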
int
si_pmu_fast_lpo_disable(si_t *sih)
{
	if (!si_pmu_cap_fast_lpo(sih)) {
		PMU_ERROR(("%s: No Fast LPO capability\n", __FUNCTION__));
		return BCME_ERROR;
	}

	PMU_REG(sih, pmucontrol_ext,
		PCTL_EXT_FASTLPO_ENAB |
		PCTL_EXT_FASTLPO_SWENAB |
		PCTL_EXT_FASTLPO_PCIE_SWENAB,
		0);
	OSL_DELAY(1000);
	return BCME_OK;
}

#ifdef BCMPMU_STATS
/*
 * Default map for the 8 PMU statistics timers.
 *
 * To measure CORE_RDY_AUX instead of CORE_RDY_MAIN, configure timers 6 and 7 as below:
 *	//core-n active duration : pmu_rsrc_state(CORE_RDY_AUX)
 *	{ SRC_CORE_RDY_AUX, FALSE, TRUE, PMU_STATS_LEVEL_HIGH},
 *	//core-n active count : pmu_rsrc_state(CORE_RDY_AUX)
 *	{ SRC_CORE_RDY_AUX, FALSE, TRUE, PMU_STATS_EDGE_RISE}
 */
static pmu_stats_timer_t pmustatstimer[] = {
	{ SRC_LINK_IN_L12, FALSE, TRUE, PMU_STATS_LEVEL_HIGH},	//link_in_l12
	{ SRC_LINK_IN_L23, FALSE, TRUE, PMU_STATS_LEVEL_HIGH},	//link_in_l23
	{ SRC_PM_ST_IN_D0, FALSE, TRUE, PMU_STATS_LEVEL_HIGH},	//pm_st_in_d0
	{ SRC_PM_ST_IN_D3, FALSE, TRUE, PMU_STATS_LEVEL_HIGH},	//pm_st_in_d3
	//deep-sleep duration : pmu_rsrc_state(XTAL_PU)
	{ SRC_XTAL_PU, FALSE, TRUE, PMU_STATS_LEVEL_LOW},
	//deep-sleep entry count : pmu_rsrc_state(XTAL_PU)
	{ SRC_XTAL_PU, FALSE, TRUE, PMU_STATS_EDGE_FALL},
	//core-n active duration : pmu_rsrc_state(CORE_RDY_MAIN)
	{ SRC_CORE_RDY_MAIN, FALSE, TRUE, PMU_STATS_LEVEL_HIGH},
	//core-n active count : pmu_rsrc_state(CORE_RDY_MAIN)
	{ SRC_CORE_RDY_MAIN, FALSE, TRUE, PMU_STATS_EDGE_RISE}
};

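/** Programs one stats timer from its cached pmustatstimer[] configuration and clears its count */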
static void
si_pmustatstimer_update(osl_t *osh, pmuregs_t *pmu, uint8 timerid)
{
	uint32 stats_timer_ctrl;

	W_REG(osh, &pmu->pmu_statstimer_addr, timerid);
	stats_timer_ctrl =
		((pmustatstimer[timerid].src_num << PMU_ST_SRC_SHIFT) &
			PMU_ST_SRC_MASK) |
		((pmustatstimer[timerid].cnt_mode << PMU_ST_CNT_MODE_SHIFT) &
			PMU_ST_CNT_MODE_MASK) |
		((pmustatstimer[timerid].enable << PMU_ST_EN_SHIFT) & PMU_ST_EN_MASK) |
		((pmustatstimer[timerid].int_enable << PMU_ST_INT_EN_SHIFT) & PMU_ST_INT_EN_MASK);
	W_REG(osh, &pmu->pmu_statstimer_ctrl, stats_timer_ctrl);
	W_REG(osh, &pmu->pmu_statstimer_N, 0);
}

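/** Enables the PMU stats timer interrupt in pmuintmask0 */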
void
si_pmustatstimer_int_enable(si_t *sih)
{
	pmuregs_t *pmu;
	uint origidx;
	osl_t *osh = si_osh(sih);

	/* Remember original core before switch to chipc/pmu */
	origidx = si_coreidx(sih);
	if (AOB_ENAB(sih)) {
		pmu = si_setcore(sih, PMU_CORE_ID, 0);
	} else {
		pmu = si_setcoreidx(sih, SI_CC_IDX);
	}
	ASSERT(pmu != NULL);

	OR_REG(osh, &pmu->pmuintmask0, PMU_INT_STAT_TIMER_INT_MASK);

	/* Return to original core */
	si_setcoreidx(sih, origidx);
}

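/** Disables the PMU stats timer interrupt in pmuintmask0 */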
void
si_pmustatstimer_int_disable(si_t *sih)
{
	pmuregs_t *pmu;
	uint origidx;
	osl_t *osh = si_osh(sih);

	/* Remember original core before switch to chipc/pmu */
	origidx = si_coreidx(sih);
	if (AOB_ENAB(sih)) {
		pmu = si_setcore(sih, PMU_CORE_ID, 0);
	} else {
		pmu = si_setcoreidx(sih, SI_CC_IDX);
	}
	ASSERT(pmu != NULL);

	AND_REG(osh, &pmu->pmuintmask0, ~PMU_INT_STAT_TIMER_INT_MASK);

	/* Return to original core */
	si_setcoreidx(sih, origidx);
}

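/** Programs every available stats timer from the default map and enables the stats timer
 * interrupt.
 */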
void
si_pmustatstimer_init(si_t *sih)
{
	pmuregs_t *pmu;
	uint origidx;
	osl_t *osh = si_osh(sih);
	uint32 core_cap_ext;
	uint8 max_stats_timer_num;
	int8 i;

	/* Remember original core before switch to chipc/pmu */
	origidx = si_coreidx(sih);
	if (AOB_ENAB(sih)) {
		pmu = si_setcore(sih, PMU_CORE_ID, 0);
	} else {
		pmu = si_setcoreidx(sih, SI_CC_IDX);
	}
	ASSERT(pmu != NULL);

	core_cap_ext = R_REG(osh, &pmu->core_cap_ext);

	max_stats_timer_num = ((core_cap_ext & PCAP_EXT_ST_NUM_MASK) >> PCAP_EXT_ST_NUM_SHIFT) + 1;

	for (i = 0; i < max_stats_timer_num; i++) {
		si_pmustatstimer_update(osh, pmu, i);
	}

	OR_REG(osh, &pmu->pmuintmask0, PMU_INT_STAT_TIMER_INT_MASK);

	/* Return to original core */
	si_setcoreidx(sih, origidx);
}

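/** Dumps PMU capability/clock registers and the control and count values of every stats timer */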
void
si_pmustatstimer_dump(si_t *sih)
{
	pmuregs_t *pmu;
	uint origidx;
	osl_t *osh = si_osh(sih);
	uint32 core_cap_ext, pmucapabilities, AlpPeriod, ILPPeriod, pmuintmask0, pmuintstatus;
	uint8 max_stats_timer_num, max_stats_timer_src_num;
	uint32 stat_timer_ctrl, stat_timer_N;
	uint8 i;
	uint32 current_time_ms = OSL_SYSUPTIME();

	/* Remember original core before switch to chipc/pmu */
	origidx = si_coreidx(sih);
	if (AOB_ENAB(sih)) {
		pmu = si_setcore(sih, PMU_CORE_ID, 0);
	} else {
		pmu = si_setcoreidx(sih, SI_CC_IDX);
	}
	ASSERT(pmu != NULL);

	pmucapabilities = R_REG(osh, &pmu->pmucapabilities);
	core_cap_ext = R_REG(osh, &pmu->core_cap_ext);
	AlpPeriod = R_REG(osh, &pmu->slowclkperiod);
	ILPPeriod = R_REG(osh, &pmu->ILPPeriod);

	max_stats_timer_num = ((core_cap_ext & PCAP_EXT_ST_NUM_MASK) >>
		PCAP_EXT_ST_NUM_SHIFT) + 1;
	max_stats_timer_src_num = ((core_cap_ext & PCAP_EXT_ST_SRC_NUM_MASK) >>
		PCAP_EXT_ST_SRC_NUM_SHIFT) + 1;

	pmuintstatus = R_REG(osh, &pmu->pmuintstatus);
	pmuintmask0 = R_REG(osh, &pmu->pmuintmask0);

	PMU_ERROR(("%s : TIME %d\n", __FUNCTION__, current_time_ms));

	PMU_ERROR(("\tMAX Timer Num %d, MAX Source Num %d\n",
		max_stats_timer_num, max_stats_timer_src_num));
	PMU_ERROR(("\tpmucapabilities 0x%8x, core_cap_ext 0x%8x, AlpPeriod 0x%8x, ILPPeriod 0x%8x, "
		"pmuintmask0 0x%8x, pmuintstatus 0x%8x, pmurev %d\n",
		pmucapabilities, core_cap_ext, AlpPeriod, ILPPeriod,
		pmuintmask0, pmuintstatus, PMUREV(sih->pmurev)));

	for (i = 0; i < max_stats_timer_num; i++) {
		W_REG(osh, &pmu->pmu_statstimer_addr, i);
		stat_timer_ctrl = R_REG(osh, &pmu->pmu_statstimer_ctrl);
		stat_timer_N = R_REG(osh, &pmu->pmu_statstimer_N);
		PMU_ERROR(("\t Timer %d : control 0x%8x, %d\n",
			i, stat_timer_ctrl, stat_timer_N));
	}

	/* Return to original core */
	si_setcoreidx(sih, origidx);
}

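/** Starts the given stats timer by setting its enable bit */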
void
si_pmustatstimer_start(si_t *sih, uint8 timerid)
{
	pmuregs_t *pmu;
	uint origidx;
	osl_t *osh = si_osh(sih);

	/* Remember original core before switch to chipc/pmu */
	origidx = si_coreidx(sih);
	if (AOB_ENAB(sih)) {
		pmu = si_setcore(sih, PMU_CORE_ID, 0);
	} else {
		pmu = si_setcoreidx(sih, SI_CC_IDX);
	}
	ASSERT(pmu != NULL);

	pmustatstimer[timerid].enable = TRUE;

	W_REG(osh, &pmu->pmu_statstimer_addr, timerid);
	OR_REG(osh, &pmu->pmu_statstimer_ctrl, PMU_ST_ENAB << PMU_ST_EN_SHIFT);

	/* Return to original core */
	si_setcoreidx(sih, origidx);
}

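/** Stops the given stats timer by clearing its enable bit */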
void
si_pmustatstimer_stop(si_t *sih, uint8 timerid)
{
	pmuregs_t *pmu;
	uint origidx;
	osl_t *osh = si_osh(sih);

	/* Remember original core before switch to chipc/pmu */
	origidx = si_coreidx(sih);
	if (AOB_ENAB(sih)) {
		pmu = si_setcore(sih, PMU_CORE_ID, 0);
	} else {
		pmu = si_setcoreidx(sih, SI_CC_IDX);
	}
	ASSERT(pmu != NULL);

	pmustatstimer[timerid].enable = FALSE;

	W_REG(osh, &pmu->pmu_statstimer_addr, timerid);
	AND_REG(osh, &pmu->pmu_statstimer_ctrl, ~(PMU_ST_ENAB << PMU_ST_EN_SHIFT));

	/* Return to original core */
	si_setcoreidx(sih, origidx);
}

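/** Resets the count of the given stats timer to zero */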
void
si_pmustatstimer_clear(si_t *sih, uint8 timerid)
{
	pmuregs_t *pmu;
	uint origidx;
	osl_t *osh = si_osh(sih);

	/* Remember original core before switch to chipc/pmu */
	origidx = si_coreidx(sih);
	if (AOB_ENAB(sih)) {
		pmu = si_setcore(sih, PMU_CORE_ID, 0);
	} else {
		pmu = si_setcoreidx(sih, SI_CC_IDX);
	}
	ASSERT(pmu != NULL);

	W_REG(osh, &pmu->pmu_statstimer_addr, timerid);
	W_REG(osh, &pmu->pmu_statstimer_N, 0);

	/* Return to original core */
	si_setcoreidx(sih, origidx);
}

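/** Clears any stats timer whose count has saturated at 0xFFFFFFFF */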
void
si_pmustatstimer_clear_overflow(si_t *sih)
{
	uint8 i;
	uint32 core_cap_ext;
	uint8 max_stats_timer_num;
	uint32 timerN;
	pmuregs_t *pmu;
	uint origidx;
	osl_t *osh = si_osh(sih);

	/* Remember original core before switch to chipc/pmu */
	origidx = si_coreidx(sih);
	if (AOB_ENAB(sih)) {
		pmu = si_setcore(sih, PMU_CORE_ID, 0);
	} else {
		pmu = si_setcoreidx(sih, SI_CC_IDX);
	}
	ASSERT(pmu != NULL);

	core_cap_ext = R_REG(osh, &pmu->core_cap_ext);
	max_stats_timer_num = ((core_cap_ext & PCAP_EXT_ST_NUM_MASK) >> PCAP_EXT_ST_NUM_SHIFT) + 1;

	for (i = 0; i < max_stats_timer_num; i++) {
		W_REG(osh, &pmu->pmu_statstimer_addr, i);
		timerN = R_REG(osh, &pmu->pmu_statstimer_N);
		if (timerN == 0xFFFFFFFF) {
			PMU_ERROR(("pmustatstimer overflow clear - timerid : %d\n", i));
			si_pmustatstimer_clear(sih, i);
		}
	}

	/* Return to original core */
	si_setcoreidx(sih, origidx);
}

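/** Returns the current count of the given stats timer */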
uint32
si_pmustatstimer_read(si_t *sih, uint8 timerid)
{
	pmuregs_t *pmu;
	uint origidx;
	osl_t *osh = si_osh(sih);
	uint32 stats_timer_N;

	/* Remember original core before switch to chipc/pmu */
	origidx = si_coreidx(sih);
	if (AOB_ENAB(sih)) {
		pmu = si_setcore(sih, PMU_CORE_ID, 0);
	} else {
		pmu = si_setcoreidx(sih, SI_CC_IDX);
	}
	ASSERT(pmu != NULL);

	W_REG(osh, &pmu->pmu_statstimer_addr, timerid);
	stats_timer_N = R_REG(osh, &pmu->pmu_statstimer_N);

	/* Return to original core */
	si_setcoreidx(sih, origidx);

	return stats_timer_N;
}

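/** Selects the measurement source of the given stats timer and reprograms it */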
void
si_pmustatstimer_cfg_src_num(si_t *sih, uint8 src_num, uint8 timerid)
{
	pmuregs_t *pmu;
	uint origidx;
	osl_t *osh = si_osh(sih);

	/* Remember original core before switch to chipc/pmu */
	origidx = si_coreidx(sih);
	if (AOB_ENAB(sih)) {
		pmu = si_setcore(sih, PMU_CORE_ID, 0);
	} else {
		pmu = si_setcoreidx(sih, SI_CC_IDX);
	}
	ASSERT(pmu != NULL);

	pmustatstimer[timerid].src_num = src_num;
	si_pmustatstimer_update(osh, pmu, timerid);

	/* Return to original core */
	si_setcoreidx(sih, origidx);
}

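/** Sets the count mode (level or edge) of the given stats timer and reprograms it */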
void
si_pmustatstimer_cfg_cnt_mode(si_t *sih, uint8 cnt_mode, uint8 timerid)
{
	pmuregs_t *pmu;
	uint origidx;
	osl_t *osh = si_osh(sih);

	/* Remember original core before switch to chipc/pmu */
	origidx = si_coreidx(sih);
	if (AOB_ENAB(sih)) {
		pmu = si_setcore(sih, PMU_CORE_ID, 0);
	} else {
		pmu = si_setcoreidx(sih, SI_CC_IDX);
	}
	ASSERT(pmu != NULL);

	pmustatstimer[timerid].cnt_mode = cnt_mode;
	si_pmustatstimer_update(osh, pmu, timerid);

	/* Return to original core */
	si_setcoreidx(sih, origidx);
}
#endif /* BCMPMU_STATS */
791