/*
 * Misc utility routines for accessing PMU corerev specific features
 * of the SiliconBackplane-based Broadcom chips.
 *
 * Copyright (C) 1999-2019, Broadcom.
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions
 * of the license of that module. An independent module is a module which is
 * not derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: hndpmu.c 783841 2018-10-09 06:24:16Z $
 */

/**
 * @file
 * Note: this file contains PLL/FLL related functions. A chip can contain
 * multiple PLLs/FLLs, but in the context of this file 'the PLL/FLL' refers to
 * the baseband ('BB') PLL/FLL.
 *
 * Throughout this code, the prefixes 'pmu1_' and 'pmu2_' are used. They refer
 * to different revisions of the PMU (which is at revision 18 as of Apr 25,
 * 2012). pmu1_ marks the transition from PLL to ADFLL (Digital Frequency
 * Locked Loop), which supports fractional frequency generation; pmu2_ does
 * not support fractional frequency generation.
 */

#include <bcm_cfg.h>
#include <typedefs.h>
#include <bcmdefs.h>
#include <osl.h>
#include <bcmutils.h>
#include <siutils.h>
#include <bcmdevs.h>
#include <hndsoc.h>
#include <sbchipc.h>
#include <hndchipc.h>
#include <hndpmu.h>
#include <hndlhl.h>
#if defined(BCMULP)
#include <ulp.h>
#endif /* defined(BCMULP) */
#include <sbgci.h>
#ifdef EVENT_LOG_COMPILE
#include <event_log.h>
#endif /* EVENT_LOG_COMPILE */
#include <lpflags.h>

#define PMU_ERROR(args)

#define PMU_MSG(args)

/* Used to check in verbose debugging messages that are not intended
 * to be enabled except on private builds.
 */
#define PMU_NONE(args)
#define flags_shift 14

/** contains resource bit positions for a specific chip */
struct rsc_per_chip_s {
    uint8 ht_avail;
    uint8 macphy_clkavail;
    uint8 ht_start;
    uint8 otp_pu;
    uint8 macphy_aux_clkavail;
};

typedef struct rsc_per_chip_s rsc_per_chip_t;

#if defined(BCMPMU_STATS) && !defined(BCMPMU_STATS_DISABLED)
bool _pmustatsenab = TRUE;
#else
bool _pmustatsenab = FALSE;
#endif /* BCMPMU_STATS */

/**
 * Balance between stable SDIO operation and power consumption is achieved
 * using this function. Note that each drive strength table is for a specific
 * VDDIO of the SDIO pads; ideally this function should read the VDDIO itself
 * to select the correct table. For now this is solved with the
 * 'BCM_SDIO_VDDIO' preprocessor constant.
 *
 * 'drivestrength': desired pad drive strength in mA. A drive strength of 0
 * requests tri-state (if the hardware supports it); without hardware support,
 * the drive strength is not programmed.
 */
void si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength)
{
    /*
     * Note:
     * This function used to set the SDIO drive strength via PMU_CHIPCTL1 for
     * the 43143, 4330, 4334, 4336, 43362 chips. These chips are now no longer
     * supported, so the code has been deleted. Newer chips have the SDIO drive
     * strength setting via a GCI Chip Control register, but the bit definitions
     * are chip-specific. We are keeping this function available (accessed via
     * DHD 'sdiod_drive' IOVar) in case these newer chips need to provide
     * access.
     */
    UNUSED_PARAMETER(sih);
    UNUSED_PARAMETER(osh);
    UNUSED_PARAMETER(drivestrength);
}
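
/*
 * Example (illustrative sketch only, not part of the driver): a hypothetical
 * caller programming a 6 mA SDIO pad drive strength after attach. The
 * 'sih'/'osh' handles are assumed to come from the usual si/osl attach path;
 * with the current no-op body the call has no hardware effect.
 *
 *    si_sdiod_drive_strength_init(sih, osh, 6);  // 6 mA; 0 would request tri-state
 */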

void si_switch_pmu_dependency(si_t *sih, uint mode)
{
#ifdef DUAL_PMU_SEQUENCE
    osl_t *osh = si_osh(sih);
    uint32 current_res_state;
    uint32 min_mask, max_mask;
    const pmu_res_depend_t *pmu_res_depend_table = NULL;
    uint pmu_res_depend_table_sz = 0;
    uint origidx;
    pmuregs_t *pmu;
    chipcregs_t *cc;
    BCM_REFERENCE(cc);

    origidx = si_coreidx(sih);
    if (AOB_ENAB(sih)) {
        pmu = si_setcore(sih, PMU_CORE_ID, 0);
        cc = si_setcore(sih, CC_CORE_ID, 0);
    } else {
        pmu = si_setcoreidx(sih, SI_CC_IDX);
        cc = si_setcoreidx(sih, SI_CC_IDX);
    }
    ASSERT(pmu != NULL);

    current_res_state = R_REG(osh, &pmu->res_state);
    min_mask = R_REG(osh, &pmu->min_res_mask);
    max_mask = R_REG(osh, &pmu->max_res_mask);
    W_REG(osh, &pmu->min_res_mask, (min_mask | current_res_state));
    switch (mode) {
    case PMU_4364_1x1_MODE: {
        if (CHIPID(sih->chip) == BCM4364_CHIP_ID) {
            pmu_res_depend_table = bcm4364a0_res_depend_1x1;
            pmu_res_depend_table_sz = ARRAYSIZE(bcm4364a0_res_depend_1x1);
            max_mask = PMU_4364_MAX_MASK_1x1;
            W_REG(osh, &pmu->res_table_sel, RES4364_SR_SAVE_RESTORE);
            W_REG(osh, &pmu->res_updn_timer,
                PMU_4364_SAVE_RESTORE_UPDNTIME_1x1);
#if defined(SAVERESTORE)
            if (SR_ENAB()) {
                /* Disable 3x3 SR engine */
                W_REG(osh, &cc->sr1_control0,
                    CC_SR0_4364_SR_ENG_CLK_EN |
                    CC_SR0_4364_SR_RSRC_TRIGGER |
                    CC_SR0_4364_SR_WD_MEM_MIN_DIV |
                    CC_SR0_4364_SR_INVERT_CLK |
                    CC_SR0_4364_SR_ENABLE_HT |
                    CC_SR0_4364_SR_ALLOW_PIC |
                    CC_SR0_4364_SR_PMU_MEM_DISABLE);
            }
#endif /* SAVERESTORE */
        }
        break;
    }
    case PMU_4364_3x3_MODE: {
        if (CHIPID(sih->chip) == BCM4364_CHIP_ID) {
            W_REG(osh, &pmu->res_table_sel, RES4364_SR_SAVE_RESTORE);
            W_REG(osh, &pmu->res_updn_timer,
                PMU_4364_SAVE_RESTORE_UPDNTIME_3x3);
            /* Change the dependency table only if required */
            if ((max_mask != PMU_4364_MAX_MASK_3x3) ||
                (max_mask != PMU_4364_MAX_MASK_RSDB)) {
                pmu_res_depend_table = bcm4364a0_res_depend_rsdb;
                pmu_res_depend_table_sz =
                    ARRAYSIZE(bcm4364a0_res_depend_rsdb);
                max_mask = PMU_4364_MAX_MASK_3x3;
            }
#if defined(SAVERESTORE)
            if (SR_ENAB()) {
                /* Enable 3x3 SR engine */
                W_REG(osh, &cc->sr1_control0,
                    CC_SR0_4364_SR_ENG_CLK_EN |
                    CC_SR0_4364_SR_RSRC_TRIGGER |
                    CC_SR0_4364_SR_WD_MEM_MIN_DIV |
                    CC_SR0_4364_SR_INVERT_CLK |
                    CC_SR0_4364_SR_ENABLE_HT |
                    CC_SR0_4364_SR_ALLOW_PIC |
                    CC_SR0_4364_SR_PMU_MEM_DISABLE |
                    CC_SR0_4364_SR_ENG_EN_MASK);
            }
#endif /* SAVERESTORE */
        }
        break;
    }
    case PMU_4364_RSDB_MODE:
    default: {
        if (CHIPID(sih->chip) == BCM4364_CHIP_ID) {
            W_REG(osh, &pmu->res_table_sel, RES4364_SR_SAVE_RESTORE);
            W_REG(osh, &pmu->res_updn_timer,
                PMU_4364_SAVE_RESTORE_UPDNTIME_3x3);
            /* Change the dependency table only if required */
            if ((max_mask != PMU_4364_MAX_MASK_3x3) ||
                (max_mask != PMU_4364_MAX_MASK_RSDB)) {
                pmu_res_depend_table = bcm4364a0_res_depend_rsdb;
                pmu_res_depend_table_sz =
                    ARRAYSIZE(bcm4364a0_res_depend_rsdb);
                max_mask = PMU_4364_MAX_MASK_RSDB;
            }
#if defined(SAVERESTORE)
            if (SR_ENAB()) {
                /* Enable 3x3 SR engine */
                W_REG(osh, &cc->sr1_control0,
                    CC_SR0_4364_SR_ENG_CLK_EN |
                    CC_SR0_4364_SR_RSRC_TRIGGER |
                    CC_SR0_4364_SR_WD_MEM_MIN_DIV |
                    CC_SR0_4364_SR_INVERT_CLK |
                    CC_SR0_4364_SR_ENABLE_HT |
                    CC_SR0_4364_SR_ALLOW_PIC |
                    CC_SR0_4364_SR_PMU_MEM_DISABLE |
                    CC_SR0_4364_SR_ENG_EN_MASK);
            }
#endif /* SAVERESTORE */
        }
        break;
    }
    }
    si_pmu_resdeptbl_upd(sih, osh, pmu, pmu_res_depend_table,
        pmu_res_depend_table_sz);
    W_REG(osh, &pmu->max_res_mask, max_mask);
    W_REG(osh, &pmu->min_res_mask, min_mask);
    si_pmu_wait_for_steady_state(sih, osh, pmu);
    /* Add some delay; allow resources to come up and settle. */
    OSL_DELAY(200);
    si_setcoreidx(sih, origidx);
#endif /* DUAL_PMU_SEQUENCE */
}
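
/*
 * Example (illustrative sketch only): switching the 4364 PMU resource
 * dependencies to 3x3 mode from a hypothetical caller. On chips other than
 * the 4364, or when DUAL_PMU_SEQUENCE is not defined, the call has no effect.
 *
 *    si_switch_pmu_dependency(sih, PMU_4364_3x3_MODE);
 */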

#if defined(BCMULP)

int si_pmu_ulp_register(si_t *sih)
{
    return ulp_p1_module_register(ULP_MODULE_ID_PMU, &ulp_pmu_ctx, (void *)sih);
}

static uint si_pmu_ulp_get_retention_size_cb(void *handle,
    ulp_ext_info_t *einfo)
{
    ULP_DBG(("%s: sz: %d\n", __FUNCTION__, sizeof(si_pmu_ulp_cr_dat_t)));
    return sizeof(si_pmu_ulp_cr_dat_t);
}

static int si_pmu_ulp_enter_cb(void *handle, ulp_ext_info_t *einfo,
    uint8 *cache_data)
{
    si_pmu_ulp_cr_dat_t crinfo = {0};
    crinfo.ilpcycles_per_sec = ilpcycles_per_sec;
    ULP_DBG(("%s: ilpcycles_per_sec: %x\n", __FUNCTION__, ilpcycles_per_sec));
    memcpy(cache_data, (void *)&crinfo, sizeof(crinfo));
    return BCME_OK;
}

static int si_pmu_ulp_exit_cb(void *handle, uint8 *cache_data,
    uint8 *p2_cache_data)
{
    si_pmu_ulp_cr_dat_t *crinfo = (si_pmu_ulp_cr_dat_t *)cache_data;

    ilpcycles_per_sec = crinfo->ilpcycles_per_sec;
    ULP_DBG(("%s: ilpcycles_per_sec: %x, cache_data: %p\n", __FUNCTION__,
        ilpcycles_per_sec, cache_data));
    return BCME_OK;
}
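
/*
 * The three callbacks above implement the PMU side of the ULP retention
 * handshake and are normally invoked by the ULP framework through the
 * registration in si_pmu_ulp_register(). The sketch below is illustrative
 * only (the NULL handle/einfo arguments are assumptions, since both are
 * unused in the bodies above); it shows the data flow: enter serializes
 * ilpcycles_per_sec into the cache buffer, exit restores it.
 *
 *    uint8 buf[sizeof(si_pmu_ulp_cr_dat_t)];
 *
 *    (void)si_pmu_ulp_enter_cb(NULL, NULL, buf);  // save before ULP entry
 *    (void)si_pmu_ulp_exit_cb(NULL, buf, NULL);   // restore after ULP exit
 */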

void si_pmu_ulp_chipconfig(si_t *sih, osl_t *osh)
{
    uint32 reg_val;

    BCM_REFERENCE(reg_val);

    if (CHIPID(sih->chip) == BCM43012_CHIP_ID) {
        /* DS1 reset and clk enable init value config */
        si_pmu_chipcontrol(sih, PMU_CHIPCTL14, ~0x0,
            (PMUCCTL14_43012_ARMCM3_RESET_INITVAL |
            PMUCCTL14_43012_DOT11MAC_CLKEN_INITVAL |
            PMUCCTL14_43012_SDIOD_RESET_INIVAL |
            PMUCCTL14_43012_SDIO_CLK_DMN_RESET_INITVAL |
            PMUCCTL14_43012_SOCRAM_CLKEN_INITVAL |
            PMUCCTL14_43012_M2MDMA_RESET_INITVAL |
            PMUCCTL14_43012_DOT11MAC_PHY_CLK_EN_INITVAL |
            PMUCCTL14_43012_DOT11MAC_PHY_CNTL_EN_INITVAL));

        /* Clear SFlash clock request and enable High Quality clock */
        CHIPC_REG(sih, clk_ctl_st, CCS_SFLASH_CLKREQ | CCS_HQCLKREQ,
            CCS_HQCLKREQ);

        reg_val = PMU_REG(sih, min_res_mask, ~0x0, ULP_MIN_RES_MASK);
        ULP_DBG(("si_pmu_ulp_chipconfig: min_res_mask: 0x%08x\n", reg_val));

        /* Force power switch off */
        si_pmu_chipcontrol(sih, PMU_CHIPCTL2,
            (PMUCCTL02_43012_SUBCORE_PWRSW_FORCE_ON |
            PMUCCTL02_43012_PHY_PWRSW_FORCE_ON),
            0);
    }
}

void si_pmu_ulp_ilp_config(si_t *sih, osl_t *osh, uint32 ilp_period)
{
    pmuregs_t *pmu;
    pmu = si_setcoreidx(sih, si_findcoreidx(sih, PMU_CORE_ID, 0));
    W_REG(osh, &pmu->ILPPeriod, ilp_period);
    si_lhl_ilp_config(sih, osh, ilp_period);
}

/** Initialize DS1 PMU hardware resources */
void si_pmu_ds1_res_init(si_t *sih, osl_t *osh)
{
    pmuregs_t *pmu;
    uint origidx;
    const pmu_res_updown_t *pmu_res_updown_table = NULL;
    uint pmu_res_updown_table_sz = 0;

    /* Remember original core before switch to chipc/pmu */
    origidx = si_coreidx(sih);
    if (AOB_ENAB(sih)) {
        pmu = si_setcore(sih, PMU_CORE_ID, 0);
    } else {
        pmu = si_setcoreidx(sih, SI_CC_IDX);
    }
    ASSERT(pmu != NULL);

    switch (CHIPID(sih->chip)) {
    case BCM43012_CHIP_ID:
        pmu_res_updown_table = bcm43012a0_res_updown_ds1;
        pmu_res_updown_table_sz = ARRAYSIZE(bcm43012a0_res_updown_ds1);
        break;

    default:
        break;
    }

    /* Program up/down timers */
    while (pmu_res_updown_table_sz--) {
        ASSERT(pmu_res_updown_table != NULL);
        PMU_MSG(("DS1: Changing rsrc %d res_updn_timer to 0x%x\n",
            pmu_res_updown_table[pmu_res_updown_table_sz].resnum,
            pmu_res_updown_table[pmu_res_updown_table_sz].updown));
        W_REG(osh, &pmu->res_table_sel,
            pmu_res_updown_table[pmu_res_updown_table_sz].resnum);
        W_REG(osh, &pmu->res_updn_timer,
            pmu_res_updown_table[pmu_res_updown_table_sz].updown);
    }

    /* Return to original core */
    si_setcoreidx(sih, origidx);
}

#endif /* defined(BCMULP) */

uint32 si_pmu_wake_bit_offset(si_t *sih)
{
    uint32 wakebit;

    switch (CHIPID(sih->chip)) {
    case BCM4347_CHIP_GRPID:
        wakebit = CC2_4347_GCI2WAKE_MASK;
        break;
    default:
        wakebit = 0;
        ASSERT(0);
        break;
    }

    return wakebit;
}

void si_pmu_set_min_res_mask(si_t *sih, osl_t *osh, uint min_res_mask)
{
    pmuregs_t *pmu;
    uint origidx;

    /* Remember original core before switch to chipc/pmu */
    origidx = si_coreidx(sih);
    if (AOB_ENAB(sih)) {
        pmu = si_setcore(sih, PMU_CORE_ID, 0);
    } else {
        pmu = si_setcoreidx(sih, SI_CC_IDX);
    }
    ASSERT(pmu != NULL);

    W_REG(osh, &pmu->min_res_mask, min_res_mask);
    OSL_DELAY(100);

    /* Return to original core */
    si_setcoreidx(sih, origidx);
}

bool si_pmu_cap_fast_lpo(si_t *sih)
{
    return (PMU_REG(sih, core_cap_ext, 0, 0) & PCAP_EXT_USE_MUXED_ILP_CLK_MASK) ?
        TRUE : FALSE;
}

int si_pmu_fast_lpo_disable(si_t *sih)
{
    if (!si_pmu_cap_fast_lpo(sih)) {
        PMU_ERROR(("%s: No Fast LPO capability\n", __FUNCTION__));
        return BCME_ERROR;
    }

    PMU_REG(sih, pmucontrol_ext,
        PCTL_EXT_FASTLPO_ENAB | PCTL_EXT_FASTLPO_SWENAB |
        PCTL_EXT_FASTLPO_PCIE_SWENAB,
        0);
    OSL_DELAY(1000);
    return BCME_OK;
}

#ifdef BCMPMU_STATS
/*
 * 8 pmu statistics timer default map
 *
 * for CORE_RDY_AUX measure, set as below for timer 6 and 7 instead of
 * CORE_RDY_MAIN.
 *    //core-n active duration : pmu_rsrc_state(CORE_RDY_AUX)
 *    { SRC_CORE_RDY_AUX, FALSE, TRUE, LEVEL_HIGH},
 *    //core-n active duration : pmu_rsrc_state(CORE_RDY_AUX)
 *    { SRC_CORE_RDY_AUX, FALSE, TRUE, EDGE_RISE}
 */
static pmu_stats_timer_t pmustatstimer[] = {
    {SRC_LINK_IN_L12, FALSE, TRUE, PMU_STATS_LEVEL_HIGH},  // link_in_l12
    {SRC_LINK_IN_L23, FALSE, TRUE, PMU_STATS_LEVEL_HIGH},  // link_in_l23
    {SRC_PM_ST_IN_D0, FALSE, TRUE, PMU_STATS_LEVEL_HIGH},  // pm_st_in_d0
    {SRC_PM_ST_IN_D3, FALSE, TRUE, PMU_STATS_LEVEL_HIGH},  // pm_st_in_d3
    // deep-sleep duration : pmu_rsrc_state(XTAL_PU)
    {SRC_XTAL_PU, FALSE, TRUE, PMU_STATS_LEVEL_LOW},
    // deep-sleep entry count : pmu_rsrc_state(XTAL_PU)
    {SRC_XTAL_PU, FALSE, TRUE, PMU_STATS_EDGE_FALL},
    // core-n active duration : pmu_rsrc_state(CORE_RDY_MAIN)
    {SRC_CORE_RDY_MAIN, FALSE, TRUE, PMU_STATS_LEVEL_HIGH},
    // core-n active duration : pmu_rsrc_state(CORE_RDY_MAIN)
    {SRC_CORE_RDY_MAIN, FALSE, TRUE, PMU_STATS_EDGE_RISE}
};
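
/*
 * Runtime alternative to editing the table above (illustrative sketch only):
 * timers 6 and 7 can be retargeted to CORE_RDY_AUX via
 * si_pmustatstimer_cfg_src_num(), defined later in this file, which updates
 * pmustatstimer[] and reprograms the hardware register.
 *
 *    si_pmustatstimer_cfg_src_num(sih, SRC_CORE_RDY_AUX, 6);
 *    si_pmustatstimer_cfg_src_num(sih, SRC_CORE_RDY_AUX, 7);
 */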

static void si_pmustatstimer_update(osl_t *osh, pmuregs_t *pmu, uint8 timerid)
{
    uint32 stats_timer_ctrl;

    W_REG(osh, &pmu->pmu_statstimer_addr, timerid);
    stats_timer_ctrl =
        ((pmustatstimer[timerid].src_num << PMU_ST_SRC_SHIFT) &
        PMU_ST_SRC_MASK) |
        ((pmustatstimer[timerid].cnt_mode << PMU_ST_CNT_MODE_SHIFT) &
        PMU_ST_CNT_MODE_MASK) |
        ((pmustatstimer[timerid].enable << PMU_ST_EN_SHIFT) & PMU_ST_EN_MASK) |
        ((pmustatstimer[timerid].int_enable << PMU_ST_INT_EN_SHIFT) &
        PMU_ST_INT_EN_MASK);
    W_REG(osh, &pmu->pmu_statstimer_ctrl, stats_timer_ctrl);
    W_REG(osh, &pmu->pmu_statstimer_N, 0);
}

void si_pmustatstimer_int_enable(si_t *sih)
{
    pmuregs_t *pmu;
    uint origidx;
    osl_t *osh = si_osh(sih);

    /* Remember original core before switch to chipc/pmu */
    origidx = si_coreidx(sih);
    if (AOB_ENAB(sih)) {
        pmu = si_setcore(sih, PMU_CORE_ID, 0);
    } else {
        pmu = si_setcoreidx(sih, SI_CC_IDX);
    }
    ASSERT(pmu != NULL);

    OR_REG(osh, &pmu->pmuintmask0, PMU_INT_STAT_TIMER_INT_MASK);

    /* Return to original core */
    si_setcoreidx(sih, origidx);
}

void si_pmustatstimer_int_disable(si_t *sih)
{
    pmuregs_t *pmu;
    uint origidx;
    osl_t *osh = si_osh(sih);

    /* Remember original core before switch to chipc/pmu */
    origidx = si_coreidx(sih);
    if (AOB_ENAB(sih)) {
        pmu = si_setcore(sih, PMU_CORE_ID, 0);
    } else {
        pmu = si_setcoreidx(sih, SI_CC_IDX);
    }
    ASSERT(pmu != NULL);

    AND_REG(osh, &pmu->pmuintmask0, ~PMU_INT_STAT_TIMER_INT_MASK);

    /* Return to original core */
    si_setcoreidx(sih, origidx);
}

void si_pmustatstimer_init(si_t *sih)
{
    pmuregs_t *pmu;
    uint origidx;
    osl_t *osh = si_osh(sih);
    uint32 core_cap_ext;
    uint8 max_stats_timer_num;
    int8 i;

    /* Remember original core before switch to chipc/pmu */
    origidx = si_coreidx(sih);
    if (AOB_ENAB(sih)) {
        pmu = si_setcore(sih, PMU_CORE_ID, 0);
    } else {
        pmu = si_setcoreidx(sih, SI_CC_IDX);
    }
    ASSERT(pmu != NULL);

    core_cap_ext = R_REG(osh, &pmu->core_cap_ext);

    max_stats_timer_num =
        ((core_cap_ext & PCAP_EXT_ST_NUM_MASK) >> PCAP_EXT_ST_NUM_SHIFT) + 1;

    for (i = 0; i < max_stats_timer_num; i++) {
        si_pmustatstimer_update(osh, pmu, i);
    }

    OR_REG(osh, &pmu->pmuintmask0, PMU_INT_STAT_TIMER_INT_MASK);

    /* Return to original core */
    si_setcoreidx(sih, origidx);
}

void si_pmustatstimer_dump(si_t *sih)
{
    pmuregs_t *pmu;
    uint origidx;
    osl_t *osh = si_osh(sih);
    uint32 core_cap_ext, pmucapabilities, AlpPeriod, ILPPeriod, pmuintmask0,
        pmuintstatus;
    uint8 max_stats_timer_num, max_stats_timer_src_num;
    uint32 stat_timer_ctrl, stat_timer_N;
    uint8 i;
    uint32 current_time_ms = OSL_SYSUPTIME();

    /* Remember original core before switch to chipc/pmu */
    origidx = si_coreidx(sih);
    if (AOB_ENAB(sih)) {
        pmu = si_setcore(sih, PMU_CORE_ID, 0);
    } else {
        pmu = si_setcoreidx(sih, SI_CC_IDX);
    }
    ASSERT(pmu != NULL);

    pmucapabilities = R_REG(osh, &pmu->pmucapabilities);
    core_cap_ext = R_REG(osh, &pmu->core_cap_ext);
    AlpPeriod = R_REG(osh, &pmu->slowclkperiod);
    ILPPeriod = R_REG(osh, &pmu->ILPPeriod);

    max_stats_timer_num =
        ((core_cap_ext & PCAP_EXT_ST_NUM_MASK) >> PCAP_EXT_ST_NUM_SHIFT) + 1;
    max_stats_timer_src_num =
        ((core_cap_ext & PCAP_EXT_ST_SRC_NUM_MASK) >> PCAP_EXT_ST_SRC_NUM_SHIFT) + 1;

    pmuintstatus = R_REG(osh, &pmu->pmuintstatus);
    pmuintmask0 = R_REG(osh, &pmu->pmuintmask0);

    PMU_ERROR(("%s : TIME %d\n", __FUNCTION__, current_time_ms));

    PMU_ERROR(("\tMAX Timer Num %d, MAX Source Num %d\n", max_stats_timer_num,
        max_stats_timer_src_num));
    PMU_ERROR(("\tpmucapabilities 0x%8x, core_cap_ext 0x%8x, AlpPeriod 0x%8x, "
        "ILPPeriod 0x%8x, pmuintmask0 0x%8x, pmuintstatus 0x%8x, pmurev %d\n",
        pmucapabilities, core_cap_ext, AlpPeriod, ILPPeriod, pmuintmask0,
        pmuintstatus, PMUREV(sih->pmurev)));

    for (i = 0; i < max_stats_timer_num; i++) {
        W_REG(osh, &pmu->pmu_statstimer_addr, i);
        stat_timer_ctrl = R_REG(osh, &pmu->pmu_statstimer_ctrl);
        stat_timer_N = R_REG(osh, &pmu->pmu_statstimer_N);
        PMU_ERROR(("\t Timer %d : control 0x%8x, %d\n", i, stat_timer_ctrl,
            stat_timer_N));
    }

    /* Return to original core */
    si_setcoreidx(sih, origidx);
}

void si_pmustatstimer_start(si_t *sih, uint8 timerid)
{
    pmuregs_t *pmu;
    uint origidx;
    osl_t *osh = si_osh(sih);

    /* Remember original core before switch to chipc/pmu */
    origidx = si_coreidx(sih);
    if (AOB_ENAB(sih)) {
        pmu = si_setcore(sih, PMU_CORE_ID, 0);
    } else {
        pmu = si_setcoreidx(sih, SI_CC_IDX);
    }
    ASSERT(pmu != NULL);

    pmustatstimer[timerid].enable = TRUE;

    W_REG(osh, &pmu->pmu_statstimer_addr, timerid);
    OR_REG(osh, &pmu->pmu_statstimer_ctrl, PMU_ST_ENAB << PMU_ST_EN_SHIFT);

    /* Return to original core */
    si_setcoreidx(sih, origidx);
}

void si_pmustatstimer_stop(si_t *sih, uint8 timerid)
{
    pmuregs_t *pmu;
    uint origidx;
    osl_t *osh = si_osh(sih);

    /* Remember original core before switch to chipc/pmu */
    origidx = si_coreidx(sih);
    if (AOB_ENAB(sih)) {
        pmu = si_setcore(sih, PMU_CORE_ID, 0);
    } else {
        pmu = si_setcoreidx(sih, SI_CC_IDX);
    }
    ASSERT(pmu != NULL);

    pmustatstimer[timerid].enable = FALSE;

    W_REG(osh, &pmu->pmu_statstimer_addr, timerid);
    AND_REG(osh, &pmu->pmu_statstimer_ctrl, ~(PMU_ST_ENAB << PMU_ST_EN_SHIFT));

    /* Return to original core */
    si_setcoreidx(sih, origidx);
}

void si_pmustatstimer_clear(si_t *sih, uint8 timerid)
{
    pmuregs_t *pmu;
    uint origidx;
    osl_t *osh = si_osh(sih);

    /* Remember original core before switch to chipc/pmu */
    origidx = si_coreidx(sih);
    if (AOB_ENAB(sih)) {
        pmu = si_setcore(sih, PMU_CORE_ID, 0);
    } else {
        pmu = si_setcoreidx(sih, SI_CC_IDX);
    }
    ASSERT(pmu != NULL);

    W_REG(osh, &pmu->pmu_statstimer_addr, timerid);
    W_REG(osh, &pmu->pmu_statstimer_N, 0);

    /* Return to original core */
    si_setcoreidx(sih, origidx);
}

void si_pmustatstimer_clear_overflow(si_t *sih)
{
    uint8 i;
    uint32 core_cap_ext;
    uint8 max_stats_timer_num;
    uint32 timerN;
    pmuregs_t *pmu;
    uint origidx;
    osl_t *osh = si_osh(sih);

    /* Remember original core before switch to chipc/pmu */
    origidx = si_coreidx(sih);
    if (AOB_ENAB(sih)) {
        pmu = si_setcore(sih, PMU_CORE_ID, 0);
    } else {
        pmu = si_setcoreidx(sih, SI_CC_IDX);
    }
    ASSERT(pmu != NULL);

    core_cap_ext = R_REG(osh, &pmu->core_cap_ext);
    max_stats_timer_num =
        ((core_cap_ext & PCAP_EXT_ST_NUM_MASK) >> PCAP_EXT_ST_NUM_SHIFT) + 1;

    for (i = 0; i < max_stats_timer_num; i++) {
        W_REG(osh, &pmu->pmu_statstimer_addr, i);
        timerN = R_REG(osh, &pmu->pmu_statstimer_N);
        if (timerN == 0xFFFFFFFF) {
            PMU_ERROR(("pmustatstimer overflow clear - timerid : %d\n", i));
            si_pmustatstimer_clear(sih, i);
        }
    }

    /* Return to original core */
    si_setcoreidx(sih, origidx);
}

uint32 si_pmustatstimer_read(si_t *sih, uint8 timerid)
{
    pmuregs_t *pmu;
    uint origidx;
    osl_t *osh = si_osh(sih);
    uint32 stats_timer_N;

    /* Remember original core before switch to chipc/pmu */
    origidx = si_coreidx(sih);
    if (AOB_ENAB(sih)) {
        pmu = si_setcore(sih, PMU_CORE_ID, 0);
    } else {
        pmu = si_setcoreidx(sih, SI_CC_IDX);
    }
    ASSERT(pmu != NULL);

    W_REG(osh, &pmu->pmu_statstimer_addr, timerid);
    stats_timer_N = R_REG(osh, &pmu->pmu_statstimer_N);

    /* Return to original core */
    si_setcoreidx(sih, origidx);

    return stats_timer_N;
}
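
/*
 * Example (illustrative sketch only): sampling the deep-sleep duration
 * counter. With the default pmustatstimer[] map above, timer 4 counts while
 * XTAL_PU is low (PMU_STATS_LEVEL_LOW). The timer id and the measurement flow
 * are assumptions for illustration, not an established driver sequence.
 *
 *    uint32 deepsleep_cnt;
 *
 *    si_pmustatstimer_clear(sih, 4);   // reset the counter
 *    si_pmustatstimer_start(sih, 4);   // begin counting
 *    // ... workload under test ...
 *    si_pmustatstimer_stop(sih, 4);
 *    deepsleep_cnt = si_pmustatstimer_read(sih, 4);  // raw counter value
 */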

void si_pmustatstimer_cfg_src_num(si_t *sih, uint8 src_num, uint8 timerid)
{
    pmuregs_t *pmu;
    uint origidx;
    osl_t *osh = si_osh(sih);

    /* Remember original core before switch to chipc/pmu */
    origidx = si_coreidx(sih);
    if (AOB_ENAB(sih)) {
        pmu = si_setcore(sih, PMU_CORE_ID, 0);
    } else {
        pmu = si_setcoreidx(sih, SI_CC_IDX);
    }
    ASSERT(pmu != NULL);

    pmustatstimer[timerid].src_num = src_num;
    si_pmustatstimer_update(osh, pmu, timerid);

    /* Return to original core */
    si_setcoreidx(sih, origidx);
}

void si_pmustatstimer_cfg_cnt_mode(si_t *sih, uint8 cnt_mode, uint8 timerid)
{
    pmuregs_t *pmu;
    uint origidx;
    osl_t *osh = si_osh(sih);

    /* Remember original core before switch to chipc/pmu */
    origidx = si_coreidx(sih);
    if (AOB_ENAB(sih)) {
        pmu = si_setcore(sih, PMU_CORE_ID, 0);
    } else {
        pmu = si_setcoreidx(sih, SI_CC_IDX);
    }
    ASSERT(pmu != NULL);

    pmustatstimer[timerid].cnt_mode = cnt_mode;
    si_pmustatstimer_update(osh, pmu, timerid);

    /* Return to original core */
    si_setcoreidx(sih, origidx);
}
#endif /* BCMPMU_STATS */