• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Misc utility routines for accessing PMU corerev specific features
4  * of the SiliconBackplane-based Broadcom chips.
5  *
6  * Copyright (C) 1999-2019, Broadcom.
7  *
8  *      Unless you and Broadcom execute a separate written software license
9  * agreement governing use of this software, this software is licensed to you
10  * under the terms of the GNU General Public License version 2 (the "GPL"),
11  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
12  * following added to such license:
13  *
14  *      As a special exception, the copyright holders of this software give you
15  * permission to link this software with independent modules, and to copy and
16  * distribute the resulting executable under terms of your choice, provided that
17  * you also meet, for each linked independent module, the terms and conditions of
18  * the license of that module.  An independent module is a module which is not
19  * derived from this software.  The special exception does not apply to any
20  * modifications of the software.
21  *
22  *      Notwithstanding the above, under no circumstances may you combine this
23  * software in any way with any other Broadcom software provided under a license
24  * other than the GPL, without Broadcom's express prior written consent.
25  *
26  *
27  * <<Broadcom-WL-IPTag/Open:>>
28  *
29  * $Id: hndpmu.c 783841 2018-10-09 06:24:16Z $
30  */
31 
32 /**
33  * @file
34  * Note: this file contains PLL/FLL related functions. A chip can contain multiple PLLs/FLLs.
35  * However, in the context of this file the baseband ('BB') PLL/FLL is referred to.
36  *
37  * Throughout this code, the prefixes 'pmu1_' and 'pmu2_' are used.
38  * They refer to different revisions of the PMU (which is at revision 18 @ Apr 25, 2012)
39  * pmu1_ marks the transition from PLL to ADFLL (Digital Frequency Locked Loop). It supports
40  * fractional frequency generation. pmu2_ does not support fractional frequency generation.
41  */
42 
43 #include <bcm_cfg.h>
44 #include <typedefs.h>
45 #include <bcmdefs.h>
46 #include <osl.h>
47 #include <bcmutils.h>
48 #include <siutils.h>
49 #include <bcmdevs.h>
50 #include <hndsoc.h>
51 #include <sbchipc.h>
52 #include <hndchipc.h>
53 #include <hndpmu.h>
54 #include <hndlhl.h>
55 #if defined(BCMULP)
56 #include <ulp.h>
57 #endif /* defined(BCMULP) */
58 #include <sbgci.h>
59 #ifdef EVENT_LOG_COMPILE
60 #include <event_log.h>
61 #endif // endif
62 #include <sbgci.h>
63 #include <lpflags.h>
64 
/* Debug/trace print hooks; all expand to nothing in this build. */
#define	PMU_ERROR(args)

#define	PMU_MSG(args)

/* To check in verbose debugging messages not intended
 * to be on except on private builds.
 */
#define	PMU_NONE(args)
/* NOTE(review): no user of flags_shift is visible in this chunk of the file;
 * presumably consumed by PLL/flag parsing elsewhere — confirm before removing.
 */
#define flags_shift	14
74 
/** contains resource bit positions for a specific chip */
struct rsc_per_chip_s {
	uint8 ht_avail;			/* HT_AVAIL resource bit position */
	uint8 macphy_clkavail;		/* MAC/PHY clock-avail resource bit position */
	uint8 ht_start;			/* HT_START resource bit position */
	uint8 otp_pu;			/* OTP power-up resource bit position */
	uint8 macphy_aux_clkavail;	/* MAC/PHY aux clock-avail resource bit position */
};

typedef struct rsc_per_chip_s rsc_per_chip_t;
85 
/* Compile-time selection of PMU statistics support: TRUE only when built
 * with BCMPMU_STATS and not explicitly disabled.
 */
#if defined(BCMPMU_STATS) && !defined(BCMPMU_STATS_DISABLED)
bool	_pmustatsenab = TRUE;
#else
bool	_pmustatsenab = FALSE;
#endif /* BCMPMU_STATS */
91 
92 /**
93  * Balance between stable SDIO operation and power consumption is achieved using this function.
94  * Note that each drive strength table is for a specific VDDIO of the SDIO pads, ideally this
95  * function should read the VDDIO itself to select the correct table. For now it has been solved
96  * with the 'BCM_SDIO_VDDIO' preprocessor constant.
97  *
98  * 'drivestrength': desired pad drive strength in mA. Drive strength of 0 requests tri-state (if
99  *		    hardware supports this), if no hw support drive strength is not programmed.
100  */
101 void
si_sdiod_drive_strength_init(si_t * sih,osl_t * osh,uint32 drivestrength)102 si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength)
103 {
104 	/*
105 	 * Note:
106 	 * This function used to set the SDIO drive strength via PMU_CHIPCTL1 for the
107 	 * 43143, 4330, 4334, 4336, 43362 chips.  These chips are now no longer supported, so
108 	 * the code has been deleted.
109 	 * Newer chips have the SDIO drive strength setting via a GCI Chip Control register,
110 	 * but the bit definitions are chip-specific.  We are keeping this function available
111 	 * (accessed via DHD 'sdiod_drive' IOVar) in case these newer chips need to provide access.
112 	 */
113 	UNUSED_PARAMETER(sih);
114 	UNUSED_PARAMETER(osh);
115 	UNUSED_PARAMETER(drivestrength);
116 }
117 
/**
 * Switches PMU resource dependencies for the BCM4364 between its operating
 * modes (1x1 / 3x3 / RSDB).  Compiled in only for DUAL_PMU_SEQUENCE builds;
 * otherwise this function is a no-op.
 */
void
si_switch_pmu_dependency(si_t *sih, uint mode)
{
#ifdef DUAL_PMU_SEQUENCE
	osl_t *osh = si_osh(sih);
	uint32 current_res_state;
	uint32 min_mask, max_mask;
	const pmu_res_depend_t *pmu_res_depend_table = NULL;
	uint pmu_res_depend_table_sz = 0;
	uint origidx;
	pmuregs_t *pmu;
	chipcregs_t *cc;
	BCM_REFERENCE(cc);	/* cc is unused unless SAVERESTORE is compiled in */

	/* Remember original core before switch to chipc/pmu */
	origidx = si_coreidx(sih);
	if (AOB_ENAB(sih)) {
		pmu = si_setcore(sih, PMU_CORE_ID, 0);
		cc  = si_setcore(sih, CC_CORE_ID, 0);
	} else {
		pmu = si_setcoreidx(sih, SI_CC_IDX);
		cc  = si_setcoreidx(sih, SI_CC_IDX);
	}
	ASSERT(pmu != NULL);

	/* Temporarily fold the currently-up resources into the minimum mask so
	 * nothing powers down while the dependency tables are rewritten; the
	 * original min_mask is restored at the end of this function.
	 */
	current_res_state = R_REG(osh, &pmu->res_state);
	min_mask = R_REG(osh, &pmu->min_res_mask);
	max_mask = R_REG(osh, &pmu->max_res_mask);
	W_REG(osh, &pmu->min_res_mask, (min_mask | current_res_state));
	switch (mode) {
		case PMU_4364_1x1_MODE:
		{
			if (CHIPID(sih->chip) == BCM4364_CHIP_ID) {
					pmu_res_depend_table = bcm4364a0_res_depend_1x1;
					pmu_res_depend_table_sz =
						ARRAYSIZE(bcm4364a0_res_depend_1x1);
			max_mask = PMU_4364_MAX_MASK_1x1;
			W_REG(osh, &pmu->res_table_sel, RES4364_SR_SAVE_RESTORE);
			W_REG(osh, &pmu->res_updn_timer, PMU_4364_SAVE_RESTORE_UPDNTIME_1x1);
#if defined(SAVERESTORE)
				if (SR_ENAB()) {
					/* Disable 3x3 SR engine */
					W_REG(osh, &cc->sr1_control0,
					CC_SR0_4364_SR_ENG_CLK_EN |
					CC_SR0_4364_SR_RSRC_TRIGGER |
					CC_SR0_4364_SR_WD_MEM_MIN_DIV |
					CC_SR0_4364_SR_INVERT_CLK |
					CC_SR0_4364_SR_ENABLE_HT |
					CC_SR0_4364_SR_ALLOW_PIC |
					CC_SR0_4364_SR_PMU_MEM_DISABLE);
				}
#endif /* SAVERESTORE */
			}
			break;
		}
		case PMU_4364_3x3_MODE:
		{
			if (CHIPID(sih->chip) == BCM4364_CHIP_ID) {
				W_REG(osh, &pmu->res_table_sel, RES4364_SR_SAVE_RESTORE);
				W_REG(osh, &pmu->res_updn_timer,
					PMU_4364_SAVE_RESTORE_UPDNTIME_3x3);
				/* Change the dependency table only if required */
				/* NOTE(review): this test is always true — max_mask cannot
				 * equal both constants at once, so '&&' may have been
				 * intended.  Left as-is to preserve behavior; confirm.
				 */
				if ((max_mask != PMU_4364_MAX_MASK_3x3) ||
					(max_mask != PMU_4364_MAX_MASK_RSDB)) {
						pmu_res_depend_table = bcm4364a0_res_depend_rsdb;
						pmu_res_depend_table_sz =
							ARRAYSIZE(bcm4364a0_res_depend_rsdb);
						max_mask = PMU_4364_MAX_MASK_3x3;
				}
#if defined(SAVERESTORE)
				if (SR_ENAB()) {
					/* Enable 3x3 SR engine */
					W_REG(osh, &cc->sr1_control0,
					CC_SR0_4364_SR_ENG_CLK_EN |
					CC_SR0_4364_SR_RSRC_TRIGGER |
					CC_SR0_4364_SR_WD_MEM_MIN_DIV |
					CC_SR0_4364_SR_INVERT_CLK |
					CC_SR0_4364_SR_ENABLE_HT |
					CC_SR0_4364_SR_ALLOW_PIC |
					CC_SR0_4364_SR_PMU_MEM_DISABLE |
					CC_SR0_4364_SR_ENG_EN_MASK);
				}
#endif /* SAVERESTORE */
			}
			break;
		}
		case PMU_4364_RSDB_MODE:
		default:
		{
			if (CHIPID(sih->chip) == BCM4364_CHIP_ID) {
				W_REG(osh, &pmu->res_table_sel, RES4364_SR_SAVE_RESTORE);
				W_REG(osh, &pmu->res_updn_timer,
					PMU_4364_SAVE_RESTORE_UPDNTIME_3x3);
				/* Change the dependency table only if required */
				/* NOTE(review): same always-true condition as in the
				 * 3x3 case above — confirm intent.
				 */
				if ((max_mask != PMU_4364_MAX_MASK_3x3) ||
					(max_mask != PMU_4364_MAX_MASK_RSDB)) {
						pmu_res_depend_table =
							bcm4364a0_res_depend_rsdb;
						pmu_res_depend_table_sz =
							ARRAYSIZE(bcm4364a0_res_depend_rsdb);
						max_mask = PMU_4364_MAX_MASK_RSDB;
				}
#if defined(SAVERESTORE)
			if (SR_ENAB()) {
					/* Enable 3x3 SR engine */
					W_REG(osh, &cc->sr1_control0,
					CC_SR0_4364_SR_ENG_CLK_EN |
					CC_SR0_4364_SR_RSRC_TRIGGER |
					CC_SR0_4364_SR_WD_MEM_MIN_DIV |
					CC_SR0_4364_SR_INVERT_CLK |
					CC_SR0_4364_SR_ENABLE_HT |
					CC_SR0_4364_SR_ALLOW_PIC |
					CC_SR0_4364_SR_PMU_MEM_DISABLE |
					CC_SR0_4364_SR_ENG_EN_MASK);
				}
#endif /* SAVERESTORE */
			}
			break;
		}
	}
	/* Apply the new dependency table, then program the final max/min masks */
	si_pmu_resdeptbl_upd(sih, osh, pmu, pmu_res_depend_table, pmu_res_depend_table_sz);
	W_REG(osh, &pmu->max_res_mask, max_mask);
	W_REG(osh, &pmu->min_res_mask, min_mask);
	si_pmu_wait_for_steady_state(sih, osh, pmu);
	/* Add some delay; allow resources to come up and settle. */
	OSL_DELAY(200);
	si_setcoreidx(sih, origidx);
#endif /* DUAL_PMU_SEQUENCE */
}
246 
247 #if defined(BCMULP)
248 
/**
 * Registers the PMU module (context 'ulp_pmu_ctx', declared elsewhere) with
 * the ULP (ultra-low power) framework, passing 'sih' as the callback handle.
 * Returns the result of ulp_p1_module_register().
 */
int
si_pmu_ulp_register(si_t *sih)
{
	return ulp_p1_module_register(ULP_MODULE_ID_PMU, &ulp_pmu_ctx, (void *)sih);
}
254 
/**
 * ULP callback: reports how many bytes of PMU state must be retained across
 * the low-power transition (the size of si_pmu_ulp_cr_dat_t).
 * 'handle' and 'einfo' are unused here but required by the callback signature.
 */
static uint
si_pmu_ulp_get_retention_size_cb(void *handle, ulp_ext_info_t *einfo)
{
	ULP_DBG(("%s: sz: %d\n", __FUNCTION__, sizeof(si_pmu_ulp_cr_dat_t)));
	return sizeof(si_pmu_ulp_cr_dat_t);
}
261 
262 static int
si_pmu_ulp_enter_cb(void * handle,ulp_ext_info_t * einfo,uint8 * cache_data)263 si_pmu_ulp_enter_cb(void *handle, ulp_ext_info_t *einfo, uint8 *cache_data)
264 {
265 	si_pmu_ulp_cr_dat_t crinfo = {0};
266 	crinfo.ilpcycles_per_sec = ilpcycles_per_sec;
267 	ULP_DBG(("%s: ilpcycles_per_sec: %x\n", __FUNCTION__, ilpcycles_per_sec));
268 	memcpy(cache_data, (void*)&crinfo, sizeof(crinfo));
269 	return BCME_OK;
270 }
271 
272 static int
si_pmu_ulp_exit_cb(void * handle,uint8 * cache_data,uint8 * p2_cache_data)273 si_pmu_ulp_exit_cb(void *handle, uint8 *cache_data,
274 	uint8 *p2_cache_data)
275 {
276 	si_pmu_ulp_cr_dat_t *crinfo = (si_pmu_ulp_cr_dat_t *)cache_data;
277 
278 	ilpcycles_per_sec = crinfo->ilpcycles_per_sec;
279 	ULP_DBG(("%s: ilpcycles_per_sec: %x, cache_data: %p\n", __FUNCTION__,
280 		ilpcycles_per_sec, cache_data));
281 	return BCME_OK;
282 }
283 
/**
 * One-time ULP (DS1) chip configuration; currently only acts on the 43012.
 * Programs DS1 reset/clock-enable initial values, switches the flash clock
 * request to the high-quality clock, sets the ULP minimum resource mask and
 * clears the force-on bits of the subcore/PHY power switches.
 */
void
si_pmu_ulp_chipconfig(si_t *sih, osl_t *osh)
{
	uint32 reg_val;

	/* reg_val is only read by ULP_DBG, which may compile to nothing */
	BCM_REFERENCE(reg_val);

	if (CHIPID(sih->chip) == BCM43012_CHIP_ID) {
		/* DS1 reset and clk enable init value config */
		si_pmu_chipcontrol(sih, PMU_CHIPCTL14, ~0x0,
			(PMUCCTL14_43012_ARMCM3_RESET_INITVAL |
			PMUCCTL14_43012_DOT11MAC_CLKEN_INITVAL |
			PMUCCTL14_43012_SDIOD_RESET_INIVAL |
			PMUCCTL14_43012_SDIO_CLK_DMN_RESET_INITVAL |
			PMUCCTL14_43012_SOCRAM_CLKEN_INITVAL |
			PMUCCTL14_43012_M2MDMA_RESET_INITVAL |
			PMUCCTL14_43012_DOT11MAC_PHY_CLK_EN_INITVAL |
			PMUCCTL14_43012_DOT11MAC_PHY_CNTL_EN_INITVAL));

		/* Clear SFlash clock request and enable High Quality clock */
		CHIPC_REG(sih, clk_ctl_st, CCS_SFLASH_CLKREQ | CCS_HQCLKREQ, CCS_HQCLKREQ);

		reg_val = PMU_REG(sih, min_res_mask, ~0x0, ULP_MIN_RES_MASK);
		ULP_DBG(("si_pmu_ulp_chipconfig: min_res_mask: 0x%08x\n", reg_val));

		/* Force power switch off */
		si_pmu_chipcontrol(sih, PMU_CHIPCTL2,
				(PMUCCTL02_43012_SUBCORE_PWRSW_FORCE_ON |
				PMUCCTL02_43012_PHY_PWRSW_FORCE_ON), 0);

	}
}
316 
/**
 * Programs the PMU ILP period register and mirrors the value into the LHL
 * block via si_lhl_ilp_config().
 * NOTE(review): unlike most functions in this file, the core index is not
 * saved/restored around the core switch — confirm callers do not depend on
 * the core selection afterwards.
 */
void
si_pmu_ulp_ilp_config(si_t *sih, osl_t *osh, uint32 ilp_period)
{
	pmuregs_t *pmu;
	pmu = si_setcoreidx(sih, si_findcoreidx(sih, PMU_CORE_ID, 0));
	W_REG(osh, &pmu->ILPPeriod, ilp_period);
	si_lhl_ilp_config(sih, osh, ilp_period);
}
325 
326 /** Initialize DS1 PMU hardware resources */
327 void
si_pmu_ds1_res_init(si_t * sih,osl_t * osh)328 si_pmu_ds1_res_init(si_t *sih, osl_t *osh)
329 {
330 	pmuregs_t *pmu;
331 	uint origidx;
332 	const pmu_res_updown_t *pmu_res_updown_table = NULL;
333 	uint pmu_res_updown_table_sz = 0;
334 
335 	/* Remember original core before switch to chipc/pmu */
336 	origidx = si_coreidx(sih);
337 	if (AOB_ENAB(sih)) {
338 		pmu = si_setcore(sih, PMU_CORE_ID, 0);
339 	} else {
340 		pmu = si_setcoreidx(sih, SI_CC_IDX);
341 	}
342 	ASSERT(pmu != NULL);
343 
344 	switch (CHIPID(sih->chip)) {
345 	case BCM43012_CHIP_ID:
346 		pmu_res_updown_table = bcm43012a0_res_updown_ds1;
347 		pmu_res_updown_table_sz = ARRAYSIZE(bcm43012a0_res_updown_ds1);
348 		break;
349 
350 	default:
351 		break;
352 	}
353 
354 	/* Program up/down timers */
355 	while (pmu_res_updown_table_sz--) {
356 		ASSERT(pmu_res_updown_table != NULL);
357 		PMU_MSG(("DS1: Changing rsrc %d res_updn_timer to 0x%x\n",
358 			pmu_res_updown_table[pmu_res_updown_table_sz].resnum,
359 			pmu_res_updown_table[pmu_res_updown_table_sz].updown));
360 		W_REG(osh, &pmu->res_table_sel,
361 			pmu_res_updown_table[pmu_res_updown_table_sz].resnum);
362 		W_REG(osh, &pmu->res_updn_timer,
363 			pmu_res_updown_table[pmu_res_updown_table_sz].updown);
364 	}
365 
366 	/* Return to original core */
367 	si_setcoreidx(sih, origidx);
368 }
369 
370 #endif /* defined(BCMULP) */
371 
372 uint32
si_pmu_wake_bit_offset(si_t * sih)373 si_pmu_wake_bit_offset(si_t *sih)
374 {
375 	uint32 wakebit;
376 
377 	switch (CHIPID(sih->chip)) {
378 	case BCM4347_CHIP_GRPID:
379 		wakebit = CC2_4347_GCI2WAKE_MASK;
380 		break;
381 	default:
382 		wakebit = 0;
383 		ASSERT(0);
384 		break;
385 	}
386 
387 	return wakebit;
388 }
389 
/**
 * Programs the PMU minimum resource mask and waits 100 us for the change to
 * take effect.  The caller's core selection is preserved.
 */
void si_pmu_set_min_res_mask(si_t *sih, osl_t *osh, uint min_res_mask)
{
	uint origidx = si_coreidx(sih);	/* remember original core */
	pmuregs_t *pmu = AOB_ENAB(sih) ?
		si_setcore(sih, PMU_CORE_ID, 0) : si_setcoreidx(sih, SI_CC_IDX);

	ASSERT(pmu != NULL);

	W_REG(osh, &pmu->min_res_mask, min_res_mask);
	OSL_DELAY(100);	/* let the resource state machine settle */

	si_setcoreidx(sih, origidx);	/* restore caller's core */
}
411 
412 bool
si_pmu_cap_fast_lpo(si_t * sih)413 si_pmu_cap_fast_lpo(si_t *sih)
414 {
415 	return (PMU_REG(sih, core_cap_ext, 0, 0) & PCAP_EXT_USE_MUXED_ILP_CLK_MASK) ? TRUE : FALSE;
416 }
417 
418 int
si_pmu_fast_lpo_disable(si_t * sih)419 si_pmu_fast_lpo_disable(si_t *sih)
420 {
421 	if (!si_pmu_cap_fast_lpo(sih)) {
422 		PMU_ERROR(("%s: No Fast LPO capability\n", __FUNCTION__));
423 		return BCME_ERROR;
424 	}
425 
426 	PMU_REG(sih, pmucontrol_ext,
427 		PCTL_EXT_FASTLPO_ENAB |
428 		PCTL_EXT_FASTLPO_SWENAB |
429 		PCTL_EXT_FASTLPO_PCIE_SWENAB,
430 		0);
431 	OSL_DELAY(1000);
432 	return BCME_OK;
433 }
434 
435 #ifdef BCMPMU_STATS
/*
 * Default map for the 8 PMU statistics timers.
 *
 * Initializers are positional; per si_pmustatstimer_update() the fields used
 * are src_num, cnt_mode, enable and int_enable — exact order follows the
 * pmu_stats_timer_t declaration (not visible in this file).
 *
 * For CORE_RDY_AUX measurement, set timers 6 and 7 as below instead of
 * CORE_RDY_MAIN:
 *	//core-n active duration : pmu_rsrc_state(CORE_RDY_AUX)
 *	{ SRC_CORE_RDY_AUX, FALSE, TRUE, LEVEL_HIGH},
 *	//core-n active duration : pmu_rsrc_state(CORE_RDY_AUX)
 *	{ SRC_CORE_RDY_AUX, FALSE, TRUE, EDGE_RISE}
 */
static pmu_stats_timer_t pmustatstimer[] = {
	{ SRC_LINK_IN_L12, FALSE, TRUE, PMU_STATS_LEVEL_HIGH},	//link_in_l12
	{ SRC_LINK_IN_L23, FALSE, TRUE, PMU_STATS_LEVEL_HIGH},	//link_in_l23
	{ SRC_PM_ST_IN_D0, FALSE, TRUE, PMU_STATS_LEVEL_HIGH},	//pm_st_in_d0
	{ SRC_PM_ST_IN_D3, FALSE, TRUE, PMU_STATS_LEVEL_HIGH},	//pm_st_in_d3
	//deep-sleep duration : pmu_rsrc_state(XTAL_PU)
	{ SRC_XTAL_PU, FALSE, TRUE, PMU_STATS_LEVEL_LOW},
	//deep-sleep entry count : pmu_rsrc_state(XTAL_PU)
	{ SRC_XTAL_PU, FALSE, TRUE, PMU_STATS_EDGE_FALL},
	//core-n active duration : pmu_rsrc_state(CORE_RDY_MAIN)
	{ SRC_CORE_RDY_MAIN, FALSE, TRUE, PMU_STATS_LEVEL_HIGH},
	//core-n active duration : pmu_rsrc_state(CORE_RDY_MAIN)
	{ SRC_CORE_RDY_MAIN, FALSE, TRUE, PMU_STATS_EDGE_RISE}
};
459 
460 static void
si_pmustatstimer_update(osl_t * osh,pmuregs_t * pmu,uint8 timerid)461 si_pmustatstimer_update(osl_t *osh, pmuregs_t *pmu, uint8 timerid)
462 {
463 	uint32 stats_timer_ctrl;
464 
465 	W_REG(osh, &pmu->pmu_statstimer_addr, timerid);
466 	stats_timer_ctrl =
467 		((pmustatstimer[timerid].src_num << PMU_ST_SRC_SHIFT) &
468 			PMU_ST_SRC_MASK) |
469 		((pmustatstimer[timerid].cnt_mode << PMU_ST_CNT_MODE_SHIFT) &
470 			PMU_ST_CNT_MODE_MASK) |
471 		((pmustatstimer[timerid].enable << PMU_ST_EN_SHIFT) & PMU_ST_EN_MASK) |
472 		((pmustatstimer[timerid].int_enable << PMU_ST_INT_EN_SHIFT) & PMU_ST_INT_EN_MASK);
473 	W_REG(osh, &pmu->pmu_statstimer_ctrl, stats_timer_ctrl);
474 	W_REG(osh, &pmu->pmu_statstimer_N, 0);
475 }
476 
477 void
si_pmustatstimer_int_enable(si_t * sih)478 si_pmustatstimer_int_enable(si_t *sih)
479 {
480 	pmuregs_t *pmu;
481 	uint origidx;
482 	osl_t *osh = si_osh(sih);
483 
484 	/* Remember original core before switch to chipc/pmu */
485 	origidx = si_coreidx(sih);
486 	if (AOB_ENAB(sih)) {
487 		pmu = si_setcore(sih, PMU_CORE_ID, 0);
488 	} else {
489 		pmu = si_setcoreidx(sih, SI_CC_IDX);
490 	}
491 	ASSERT(pmu != NULL);
492 
493 	OR_REG(osh, &pmu->pmuintmask0, PMU_INT_STAT_TIMER_INT_MASK);
494 
495 	/* Return to original core */
496 	si_setcoreidx(sih, origidx);
497 }
498 
499 void
si_pmustatstimer_int_disable(si_t * sih)500 si_pmustatstimer_int_disable(si_t *sih)
501 {
502 	pmuregs_t *pmu;
503 	uint origidx;
504 	osl_t *osh = si_osh(sih);
505 
506 	/* Remember original core before switch to chipc/pmu */
507 	origidx = si_coreidx(sih);
508 	if (AOB_ENAB(sih)) {
509 		pmu = si_setcore(sih, PMU_CORE_ID, 0);
510 	} else {
511 		pmu = si_setcoreidx(sih, SI_CC_IDX);
512 	}
513 	ASSERT(pmu != NULL);
514 
515 	AND_REG(osh, &pmu->pmuintmask0, ~PMU_INT_STAT_TIMER_INT_MASK);
516 
517 	/* Return to original core */
518 	si_setcoreidx(sih, origidx);
519 }
520 
521 void
si_pmustatstimer_init(si_t * sih)522 si_pmustatstimer_init(si_t *sih)
523 {
524 	pmuregs_t *pmu;
525 	uint origidx;
526 	osl_t *osh = si_osh(sih);
527 	uint32 core_cap_ext;
528 	uint8 max_stats_timer_num;
529 	int8 i;
530 
531 	/* Remember original core before switch to chipc/pmu */
532 	origidx = si_coreidx(sih);
533 	if (AOB_ENAB(sih)) {
534 		pmu = si_setcore(sih, PMU_CORE_ID, 0);
535 	} else {
536 		pmu = si_setcoreidx(sih, SI_CC_IDX);
537 	}
538 	ASSERT(pmu != NULL);
539 
540 	core_cap_ext = R_REG(osh, &pmu->core_cap_ext);
541 
542 	max_stats_timer_num = ((core_cap_ext & PCAP_EXT_ST_NUM_MASK) >> PCAP_EXT_ST_NUM_SHIFT) + 1;
543 
544 	for (i = 0; i < max_stats_timer_num; i++) {
545 		si_pmustatstimer_update(osh, pmu, i);
546 	}
547 
548 	OR_REG(osh, &pmu->pmuintmask0, PMU_INT_STAT_TIMER_INT_MASK);
549 
550 	/* Return to original core */
551 	si_setcoreidx(sih, origidx);
552 }
553 
/**
 * Dumps PMU capability registers, interrupt mask/status and the control word
 * and count of every stats timer via PMU_ERROR (which may compile to nothing
 * in this build).  The caller's core selection is preserved.
 */
void
si_pmustatstimer_dump(si_t *sih)
{
	pmuregs_t *pmu;
	uint origidx;
	osl_t *osh = si_osh(sih);
	uint32 core_cap_ext, pmucapabilities, AlpPeriod, ILPPeriod, pmuintmask0, pmuintstatus;
	uint8 max_stats_timer_num, max_stats_timer_src_num;
	uint32 stat_timer_ctrl, stat_timer_N;
	uint8 i;
	uint32 current_time_ms = OSL_SYSUPTIME();

	/* Remember original core before switch to chipc/pmu */
	origidx = si_coreidx(sih);
	if (AOB_ENAB(sih)) {
		pmu = si_setcore(sih, PMU_CORE_ID, 0);
	} else {
		pmu = si_setcoreidx(sih, SI_CC_IDX);
	}
	ASSERT(pmu != NULL);

	pmucapabilities = R_REG(osh, &pmu->pmucapabilities);
	core_cap_ext = R_REG(osh, &pmu->core_cap_ext);
	AlpPeriod = R_REG(osh, &pmu->slowclkperiod);
	ILPPeriod = R_REG(osh, &pmu->ILPPeriod);

	/* Timer/source counts are encoded as (capability field value + 1) */
	max_stats_timer_num = ((core_cap_ext & PCAP_EXT_ST_NUM_MASK) >>
		PCAP_EXT_ST_NUM_SHIFT) + 1;
	max_stats_timer_src_num = ((core_cap_ext & PCAP_EXT_ST_SRC_NUM_MASK) >>
		PCAP_EXT_ST_SRC_NUM_SHIFT) + 1;

	pmuintstatus = R_REG(osh, &pmu->pmuintstatus);
	pmuintmask0 = R_REG(osh, &pmu->pmuintmask0);

	PMU_ERROR(("%s : TIME %d\n", __FUNCTION__, current_time_ms));

	PMU_ERROR(("\tMAX Timer Num %d, MAX Source Num %d\n",
		max_stats_timer_num, max_stats_timer_src_num));
	PMU_ERROR(("\tpmucapabilities 0x%8x, core_cap_ext 0x%8x, AlpPeriod 0x%8x, ILPPeriod 0x%8x, "
		"pmuintmask0 0x%8x, pmuintstatus 0x%8x, pmurev %d\n",
		pmucapabilities, core_cap_ext, AlpPeriod, ILPPeriod,
		pmuintmask0, pmuintstatus, PMUREV(sih->pmurev)));

	/* Select each timer in turn and read back its control word and count */
	for (i = 0; i < max_stats_timer_num; i++) {
		W_REG(osh, &pmu->pmu_statstimer_addr, i);
		stat_timer_ctrl = R_REG(osh, &pmu->pmu_statstimer_ctrl);
		stat_timer_N = R_REG(osh, &pmu->pmu_statstimer_N);
		PMU_ERROR(("\t Timer %d : control 0x%8x, %d\n",
			i, stat_timer_ctrl, stat_timer_N));
	}

	/* Return to original core */
	si_setcoreidx(sih, origidx);
}
608 
609 void
si_pmustatstimer_start(si_t * sih,uint8 timerid)610 si_pmustatstimer_start(si_t *sih, uint8 timerid)
611 {
612 	pmuregs_t *pmu;
613 	uint origidx;
614 	osl_t *osh = si_osh(sih);
615 
616 	/* Remember original core before switch to chipc/pmu */
617 	origidx = si_coreidx(sih);
618 	if (AOB_ENAB(sih)) {
619 		pmu = si_setcore(sih, PMU_CORE_ID, 0);
620 	} else {
621 		pmu = si_setcoreidx(sih, SI_CC_IDX);
622 	}
623 	ASSERT(pmu != NULL);
624 
625 	pmustatstimer[timerid].enable = TRUE;
626 
627 	W_REG(osh, &pmu->pmu_statstimer_addr, timerid);
628 	OR_REG(osh, &pmu->pmu_statstimer_ctrl, PMU_ST_ENAB << PMU_ST_EN_SHIFT);
629 
630 	/* Return to original core */
631 	si_setcoreidx(sih, origidx);
632 }
633 
634 void
si_pmustatstimer_stop(si_t * sih,uint8 timerid)635 si_pmustatstimer_stop(si_t *sih, uint8 timerid)
636 {
637 	pmuregs_t *pmu;
638 	uint origidx;
639 	osl_t *osh = si_osh(sih);
640 
641 	/* Remember original core before switch to chipc/pmu */
642 	origidx = si_coreidx(sih);
643 	if (AOB_ENAB(sih)) {
644 		pmu = si_setcore(sih, PMU_CORE_ID, 0);
645 	} else {
646 		pmu = si_setcoreidx(sih, SI_CC_IDX);
647 	}
648 	ASSERT(pmu != NULL);
649 
650 	pmustatstimer[timerid].enable = FALSE;
651 
652 	W_REG(osh, &pmu->pmu_statstimer_addr, timerid);
653 	AND_REG(osh, &pmu->pmu_statstimer_ctrl, ~(PMU_ST_ENAB << PMU_ST_EN_SHIFT));
654 
655 	/* Return to original core */
656 	si_setcoreidx(sih, origidx);
657 }
658 
659 void
si_pmustatstimer_clear(si_t * sih,uint8 timerid)660 si_pmustatstimer_clear(si_t *sih, uint8 timerid)
661 {
662 	pmuregs_t *pmu;
663 	uint origidx;
664 	osl_t *osh = si_osh(sih);
665 
666 	/* Remember original core before switch to chipc/pmu */
667 	origidx = si_coreidx(sih);
668 	if (AOB_ENAB(sih)) {
669 		pmu = si_setcore(sih, PMU_CORE_ID, 0);
670 	} else {
671 		pmu = si_setcoreidx(sih, SI_CC_IDX);
672 	}
673 	ASSERT(pmu != NULL);
674 
675 	W_REG(osh, &pmu->pmu_statstimer_addr, timerid);
676 	W_REG(osh, &pmu->pmu_statstimer_N, 0);
677 
678 	/* Return to original core */
679 	si_setcoreidx(sih, origidx);
680 }
681 
682 void
si_pmustatstimer_clear_overflow(si_t * sih)683 si_pmustatstimer_clear_overflow(si_t *sih)
684 {
685 	uint8 i;
686 	uint32 core_cap_ext;
687 	uint8 max_stats_timer_num;
688 	uint32 timerN;
689 	pmuregs_t *pmu;
690 	uint origidx;
691 	osl_t *osh = si_osh(sih);
692 
693 	/* Remember original core before switch to chipc/pmu */
694 	origidx = si_coreidx(sih);
695 	if (AOB_ENAB(sih)) {
696 		pmu = si_setcore(sih, PMU_CORE_ID, 0);
697 	} else {
698 		pmu = si_setcoreidx(sih, SI_CC_IDX);
699 	}
700 	ASSERT(pmu != NULL);
701 
702 	core_cap_ext = R_REG(osh, &pmu->core_cap_ext);
703 	max_stats_timer_num = ((core_cap_ext & PCAP_EXT_ST_NUM_MASK) >> PCAP_EXT_ST_NUM_SHIFT) + 1;
704 
705 	for (i = 0; i < max_stats_timer_num; i++) {
706 		W_REG(osh, &pmu->pmu_statstimer_addr, i);
707 		timerN = R_REG(osh, &pmu->pmu_statstimer_N);
708 		if (timerN == 0xFFFFFFFF) {
709 			PMU_ERROR(("pmustatstimer overflow clear - timerid : %d\n", i));
710 			si_pmustatstimer_clear(sih, i);
711 		}
712 	}
713 
714 	/* Return to original core */
715 	si_setcoreidx(sih, origidx);
716 }
717 
718 uint32
si_pmustatstimer_read(si_t * sih,uint8 timerid)719 si_pmustatstimer_read(si_t *sih, uint8 timerid)
720 {
721 	pmuregs_t *pmu;
722 	uint origidx;
723 	osl_t *osh = si_osh(sih);
724 	uint32 stats_timer_N;
725 
726 	/* Remember original core before switch to chipc/pmu */
727 	origidx = si_coreidx(sih);
728 	if (AOB_ENAB(sih)) {
729 		pmu = si_setcore(sih, PMU_CORE_ID, 0);
730 	} else {
731 		pmu = si_setcoreidx(sih, SI_CC_IDX);
732 	}
733 	ASSERT(pmu != NULL);
734 
735 	W_REG(osh, &pmu->pmu_statstimer_addr, timerid);
736 	stats_timer_N = R_REG(osh, &pmu->pmu_statstimer_N);
737 
738 	/* Return to original core */
739 	si_setcoreidx(sih, origidx);
740 
741 	return stats_timer_N;
742 }
743 
744 void
si_pmustatstimer_cfg_src_num(si_t * sih,uint8 src_num,uint8 timerid)745 si_pmustatstimer_cfg_src_num(si_t *sih, uint8 src_num, uint8 timerid)
746 {
747 	pmuregs_t *pmu;
748 	uint origidx;
749 	osl_t *osh = si_osh(sih);
750 
751 	/* Remember original core before switch to chipc/pmu */
752 	origidx = si_coreidx(sih);
753 	if (AOB_ENAB(sih)) {
754 		pmu = si_setcore(sih, PMU_CORE_ID, 0);
755 	} else {
756 		pmu = si_setcoreidx(sih, SI_CC_IDX);
757 	}
758 	ASSERT(pmu != NULL);
759 
760 	pmustatstimer[timerid].src_num = src_num;
761 	si_pmustatstimer_update(osh, pmu, timerid);
762 
763 	/* Return to original core */
764 	si_setcoreidx(sih, origidx);
765 }
766 
767 void
si_pmustatstimer_cfg_cnt_mode(si_t * sih,uint8 cnt_mode,uint8 timerid)768 si_pmustatstimer_cfg_cnt_mode(si_t *sih, uint8 cnt_mode, uint8 timerid)
769 {
770 	pmuregs_t *pmu;
771 	uint origidx;
772 	osl_t *osh = si_osh(sih);
773 
774 	/* Remember original core before switch to chipc/pmu */
775 	origidx = si_coreidx(sih);
776 	if (AOB_ENAB(sih)) {
777 		pmu = si_setcore(sih, PMU_CORE_ID, 0);
778 	} else {
779 		pmu = si_setcoreidx(sih, SI_CC_IDX);
780 	}
781 	ASSERT(pmu != NULL);
782 
783 	pmustatstimer[timerid].cnt_mode = cnt_mode;
784 	si_pmustatstimer_update(osh, pmu, timerid);
785 
786 	/* Return to original core */
787 	si_setcoreidx(sih, origidx);
788 }
789 #endif /* BCMPMU_STATS */
790