/*
 * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <context.h>
#include <context_mgmt.h>
#include <debug.h>
#include <denver.h>
#include <mce.h>
#include <mce_private.h>
#include <mmio.h>
#include <string.h>
#include <sys/errno.h>
#include <t18x_ari.h>
#include <tegra_def.h>
#include <tegra_platform.h>

/* NVG function handlers (used by the Denver cores) */
static arch_mce_ops_t nvg_mce_ops = {
        .enter_cstate = nvg_enter_cstate,
        .update_cstate_info = nvg_update_cstate_info,
        .update_crossover_time = nvg_update_crossover_time,
        .read_cstate_stats = nvg_read_cstate_stats,
        .write_cstate_stats = nvg_write_cstate_stats,
        .call_enum_misc = ari_enumeration_misc,
        .is_ccx_allowed = nvg_is_ccx_allowed,
        .is_sc7_allowed = nvg_is_sc7_allowed,
        .online_core = nvg_online_core,
        .cc3_ctrl = nvg_cc3_ctrl,
        .update_reset_vector = ari_reset_vector_update,
        .roc_flush_cache = ari_roc_flush_cache,
        .roc_flush_cache_trbits = ari_roc_flush_cache_trbits,
        .roc_clean_cache = ari_roc_clean_cache,
        .read_write_mca = ari_read_write_mca,
        .update_ccplex_gsc = ari_update_ccplex_gsc,
        .enter_ccplex_state = ari_enter_ccplex_state,
        .read_write_uncore_perfmon = ari_read_write_uncore_perfmon,
        .misc_ccplex = ari_misc_ccplex
};

/* ARI function handlers (used by the Cortex-A57 cores) */
static arch_mce_ops_t ari_mce_ops = {
        .enter_cstate = ari_enter_cstate,
        .update_cstate_info = ari_update_cstate_info,
        .update_crossover_time = ari_update_crossover_time,
        .read_cstate_stats = ari_read_cstate_stats,
        .write_cstate_stats = ari_write_cstate_stats,
        .call_enum_misc = ari_enumeration_misc,
        .is_ccx_allowed = ari_is_ccx_allowed,
        .is_sc7_allowed = ari_is_sc7_allowed,
        .online_core = ari_online_core,
        .cc3_ctrl = ari_cc3_ctrl,
        .update_reset_vector = ari_reset_vector_update,
        .roc_flush_cache = ari_roc_flush_cache,
        .roc_flush_cache_trbits = ari_roc_flush_cache_trbits,
        .roc_clean_cache = ari_roc_clean_cache,
        .read_write_mca = ari_read_write_mca,
        .update_ccplex_gsc = ari_update_ccplex_gsc,
        .enter_ccplex_state = ari_enter_ccplex_state,
        .read_write_uncore_perfmon = ari_read_write_uncore_perfmon,
        .misc_ccplex = ari_misc_ccplex
};

typedef struct {
        uint32_t ari_base;
        arch_mce_ops_t *ops;
} mce_config_t;

/* Table to hold the per-CPU ARI base address and function handlers */
static mce_config_t mce_cfg_table[MCE_ARI_APERTURES_MAX] = {
        {
                /* A57 Core 0 */
                .ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_0_OFFSET,
                .ops = &ari_mce_ops,
        },
        {
                /* A57 Core 1 */
                .ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_1_OFFSET,
                .ops = &ari_mce_ops,
        },
        {
                /* A57 Core 2 */
                .ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_2_OFFSET,
                .ops = &ari_mce_ops,
        },
        {
                /* A57 Core 3 */
                .ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_3_OFFSET,
                .ops = &ari_mce_ops,
        },
        {
                /* D15 Core 0 */
                .ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_4_OFFSET,
                .ops = &nvg_mce_ops,
        },
        {
                /* D15 Core 1 */
                .ari_base = TEGRA_MMCRAB_BASE + MCE_ARI_APERTURE_5_OFFSET,
                .ops = &nvg_mce_ops,
        }
};

static uint32_t mce_get_curr_cpu_ari_base(void)
{
        uint64_t mpidr = read_mpidr();
        uint64_t cpuid = mpidr & (uint64_t)MPIDR_CPU_MASK;
        uint64_t impl = (read_midr() >> (uint64_t)MIDR_IMPL_SHIFT) &
                        (uint64_t)MIDR_IMPL_MASK;

        /*
         * T186 has 2 CPU clusters, one with Denver CPUs and the other with
         * ARM Cortex-A57 CPUs. Each cluster consists of 4 CPUs and the CPU
         * numbers start from 0. In order to pick the proper entry from the
         * mce_cfg_table array, we have to convert the Denver CPU ids to the
         * corresponding indices in that array.
         */
        if (impl == DENVER_IMPL) {
                cpuid |= 0x4U;
        }

        return mce_cfg_table[cpuid].ari_base;
}

static arch_mce_ops_t *mce_get_curr_cpu_ops(void)
{
        uint64_t mpidr = read_mpidr();
        uint64_t cpuid = mpidr & (uint64_t)MPIDR_CPU_MASK;
        uint64_t impl = (read_midr() >> (uint64_t)MIDR_IMPL_SHIFT) &
                        (uint64_t)MIDR_IMPL_MASK;

        /*
         * T186 has 2 CPU clusters, one with Denver CPUs and the other with
         * ARM Cortex-A57 CPUs. Each cluster consists of 4 CPUs and the CPU
         * numbers start from 0. In order to pick the proper entry from the
         * mce_cfg_table array, we have to convert the Denver CPU ids to the
         * corresponding indices in that array.
         */
        if (impl == DENVER_IMPL) {
                cpuid |= 0x4U;
        }

        return mce_cfg_table[cpuid].ops;
}
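
/*
 * The resulting CPU id to mce_cfg_table index mapping used by the two
 * helpers above is, for example:
 *
 *     A57 core 0..3    -> index 0..3 (ari_mce_ops)
 *     Denver core 0..1 -> index 4..5 (nvg_mce_ops)
 *
 * i.e. Denver CPU numbers simply get 0x4 OR'd in when MIDR reports the
 * Denver implementer.
 */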

/*******************************************************************************
 * Common handler for all MCE commands
 ******************************************************************************/
int32_t mce_command_handler(uint64_t cmd, uint64_t arg0, uint64_t arg1,
                uint64_t arg2)
{
        const arch_mce_ops_t *ops;
        gp_regs_t *gp_regs = get_gpregs_ctx(cm_get_context(NON_SECURE));
        uint32_t cpu_ari_base;
        uint64_t ret64 = 0, arg3, arg4, arg5;
        int32_t ret = 0;

        assert(gp_regs != NULL);

        /* get a pointer to the CPU's arch_mce_ops_t struct */
        ops = mce_get_curr_cpu_ops();

        /* get the CPU's ARI base address */
        cpu_ari_base = mce_get_curr_cpu_ari_base();
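
        /*
         * Note on the calling convention used below (presumably mirroring the
         * SiP SMC that lands here): cmd and arg0..arg2 arrive as direct
         * arguments, any extra parameters are read from the saved non-secure
         * GP register context (X4..X6), and values to be returned to the
         * caller are written back into that context (X1..X3).
         */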

        switch (cmd) {
        case MCE_CMD_ENTER_CSTATE:
                ret = ops->enter_cstate(cpu_ari_base, arg0, arg1);
                if (ret < 0) {
                        ERROR("%s: enter_cstate failed(%d)\n", __func__, ret);
                }

                break;

        case MCE_CMD_UPDATE_CSTATE_INFO:
                /*
                 * get the parameters required for the update cstate info
                 * command
                 */
                arg3 = read_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X4));
                arg4 = read_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X5));
                arg5 = read_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X6));

                ret = ops->update_cstate_info(cpu_ari_base, (uint32_t)arg0,
                                (uint32_t)arg1, (uint32_t)arg2, (uint8_t)arg3,
                                (uint32_t)arg4, (uint8_t)arg5);
                if (ret < 0) {
                        ERROR("%s: update_cstate_info failed(%d)\n",
                                __func__, ret);
                }

                write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X4), (0));
                write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X5), (0));
                write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X6), (0));

                break;

        case MCE_CMD_UPDATE_CROSSOVER_TIME:
                ret = ops->update_crossover_time(cpu_ari_base, arg0, arg1);
                if (ret < 0) {
                        ERROR("%s: update_crossover_time failed(%d)\n",
                                __func__, ret);
                }

                break;

        case MCE_CMD_READ_CSTATE_STATS:
                ret64 = ops->read_cstate_stats(cpu_ari_base, arg0);

                /* update context to return cstate stats value */
                write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (ret64));
                write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X2), (ret64));

                break;

        case MCE_CMD_WRITE_CSTATE_STATS:
                ret = ops->write_cstate_stats(cpu_ari_base, arg0, arg1);
                if (ret < 0) {
                        ERROR("%s: write_cstate_stats failed(%d)\n",
                                __func__, ret);
                }

                break;

        case MCE_CMD_IS_CCX_ALLOWED:
                ret = ops->is_ccx_allowed(cpu_ari_base, arg0, arg1);
                if (ret < 0) {
                        ERROR("%s: is_ccx_allowed failed(%d)\n", __func__, ret);
                        break;
                }

                /* update context to return CCx status value */
                write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1),
                                (uint64_t)(ret));

                break;

        case MCE_CMD_IS_SC7_ALLOWED:
                ret = ops->is_sc7_allowed(cpu_ari_base, arg0, arg1);
                if (ret < 0) {
                        ERROR("%s: is_sc7_allowed failed(%d)\n", __func__, ret);
                        break;
                }

                /* update context to return SC7 status value */
                write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1),
                                (uint64_t)(ret));
                write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X3),
                                (uint64_t)(ret));

                break;

        case MCE_CMD_ONLINE_CORE:
                ret = ops->online_core(cpu_ari_base, arg0);
                if (ret < 0) {
                        ERROR("%s: online_core failed(%d)\n", __func__, ret);
                }

                break;

        case MCE_CMD_CC3_CTRL:
                ret = ops->cc3_ctrl(cpu_ari_base, arg0, arg1, arg2);
                if (ret < 0) {
                        ERROR("%s: cc3_ctrl failed(%d)\n", __func__, ret);
                }

                break;

        case MCE_CMD_ECHO_DATA:
                ret64 = ops->call_enum_misc(cpu_ari_base, TEGRA_ARI_MISC_ECHO,
                                arg0);

                /* update context to return if echo'd data matched source */
                write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1),
                                ((ret64 == arg0) ? 1ULL : 0ULL));
                write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X2),
                                ((ret64 == arg0) ? 1ULL : 0ULL));

                break;

        case MCE_CMD_READ_VERSIONS:
                ret64 = ops->call_enum_misc(cpu_ari_base, TEGRA_ARI_MISC_VERSION,
                                arg0);

                /*
                 * version = minor(63:32) | major(31:0). Update context
                 * to return major and minor version number.
                 */
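                /*
                 * Worked example (illustrative): a response of
                 * 0x0000000500000003 means major version 3 and minor
                 * version 5, so X1 carries the raw value (major in its
                 * low 32 bits) and X2 carries the minor version, 5.
                 */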
                write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1),
                                (ret64));
                write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X2),
                                (ret64 >> 32ULL));

                break;

        case MCE_CMD_ENUM_FEATURES:
                ret64 = ops->call_enum_misc(cpu_ari_base,
                                TEGRA_ARI_MISC_FEATURE_LEAF_0, arg0);

                /* update context to return features value */
                write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (ret64));

                break;

        case MCE_CMD_ROC_FLUSH_CACHE_TRBITS:
                ret = ops->roc_flush_cache_trbits(cpu_ari_base);
                if (ret < 0) {
                        ERROR("%s: flush cache_trbits failed(%d)\n", __func__,
                                ret);
                }

                break;

        case MCE_CMD_ROC_FLUSH_CACHE:
                ret = ops->roc_flush_cache(cpu_ari_base);
                if (ret < 0) {
                        ERROR("%s: flush cache failed(%d)\n", __func__, ret);
                }

                break;

        case MCE_CMD_ROC_CLEAN_CACHE:
                ret = ops->roc_clean_cache(cpu_ari_base);
                if (ret < 0) {
                        ERROR("%s: clean cache failed(%d)\n", __func__, ret);
                }

                break;

        case MCE_CMD_ENUM_READ_MCA:
                ret64 = ops->read_write_mca(cpu_ari_base, arg0, &arg1);

                /* update context to return MCA data/error */
                write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (ret64));
                write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X2), (arg1));
                write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X3), (ret64));

                break;

        case MCE_CMD_ENUM_WRITE_MCA:
                ret64 = ops->read_write_mca(cpu_ari_base, arg0, &arg1);

                /* update context to return MCA error */
                write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (ret64));
                write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X3), (ret64));

                break;

#if ENABLE_CHIP_VERIFICATION_HARNESS
        case MCE_CMD_ENABLE_LATIC:
                /*
                 * This call is not for production use. The constant value,
                 * 0xFFFF0000, is specific to allowing for enabling LATIC on
                 * pre-production parts for the chip verification harness.
                 *
                 * Enabling LATIC allows S/W to read the MINI ISMs in the
                 * CCPLEX. The ISMs are used for various measurements relevant
                 * to particular locations in the silicon. They are small
                 * counters which can be polled to determine how fast a
                 * particular location in the silicon is.
                 */
                ops->enter_ccplex_state(mce_get_curr_cpu_ari_base(),
                                0xFFFF0000);

                break;
#endif

        case MCE_CMD_UNCORE_PERFMON_REQ:
                ret = ops->read_write_uncore_perfmon(cpu_ari_base, arg0, &arg1);

                /* update context to return data */
                write_ctx_reg((gp_regs), (uint32_t)(CTX_GPREG_X1), (arg1));
                break;

        case MCE_CMD_MISC_CCPLEX:
                ops->misc_ccplex(cpu_ari_base, arg0, arg1);

                break;

        default:
                ERROR("unknown MCE command (%lu)\n", cmd);
                ret = EINVAL;
                break;
        }

        return ret;
}

/*******************************************************************************
 * Handler to update the reset vector for CPUs
 ******************************************************************************/
int32_t mce_update_reset_vector(void)
{
        const arch_mce_ops_t *ops = mce_get_curr_cpu_ops();

        ops->update_reset_vector(mce_get_curr_cpu_ari_base());

        return 0;
}

static int32_t mce_update_ccplex_gsc(tegra_ari_gsc_index_t gsc_idx)
{
        const arch_mce_ops_t *ops = mce_get_curr_cpu_ops();

        ops->update_ccplex_gsc(mce_get_curr_cpu_ari_base(), gsc_idx);

        return 0;
}

/*******************************************************************************
 * Handler to update carveout values for Video Memory Carveout region
 ******************************************************************************/
int32_t mce_update_gsc_videomem(void)
{
        return mce_update_ccplex_gsc(TEGRA_ARI_GSC_VPR_IDX);
}

/*******************************************************************************
 * Handler to update carveout values for TZDRAM aperture
 ******************************************************************************/
int32_t mce_update_gsc_tzdram(void)
{
        return mce_update_ccplex_gsc(TEGRA_ARI_GSC_TZ_DRAM_IDX);
}

/*******************************************************************************
 * Handler to update carveout values for TZ SysRAM aperture
 ******************************************************************************/
int32_t mce_update_gsc_tzram(void)
{
        return mce_update_ccplex_gsc(TEGRA_ARI_GSC_TZRAM);
}

/*******************************************************************************
 * Handler to shutdown/reset the entire system
 ******************************************************************************/
__dead2 void mce_enter_ccplex_state(uint32_t state_idx)
{
        const arch_mce_ops_t *ops = mce_get_curr_cpu_ops();

        /* sanity check state value */
        if ((state_idx != TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF) &&
            (state_idx != TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT)) {
                panic();
        }

        ops->enter_ccplex_state(mce_get_curr_cpu_ari_base(), state_idx);

        /* wait till the CCPLEX powers down */
        for (;;) {
                ;
        }
}

/*******************************************************************************
 * Handler to issue the UPDATE_CSTATE_INFO request
 ******************************************************************************/
void mce_update_cstate_info(const mce_cstate_info_t *cstate)
{
        const arch_mce_ops_t *ops = mce_get_curr_cpu_ops();

        /* issue the UPDATE_CSTATE_INFO request */
        ops->update_cstate_info(mce_get_curr_cpu_ari_base(), cstate->cluster,
                        cstate->ccplex, cstate->system, cstate->system_state_force,
                        cstate->wake_mask, cstate->update_wake_mask);
}
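
/*
 * Usage sketch (illustrative only; the exact cstate values are platform
 * policy, and the cluster-state constant below is assumed to come from
 * t18x_ari.h):
 *
 *      mce_cstate_info_t cstate_info = { 0 };
 *
 *      cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC6;
 *      cstate_info.update_wake_mask = 1;
 *      mce_update_cstate_info(&cstate_info);
 */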

/*******************************************************************************
 * Handler to read the MCE firmware version and check if it is compatible
 * with the interface header that BL3-1 was compiled against
 ******************************************************************************/
void mce_verify_firmware_version(void)
{
        const arch_mce_ops_t *ops;
        uint32_t cpu_ari_base;
        uint64_t version;
        uint32_t major, minor;

        /*
         * MCE firmware is not supported on emulation/simulation platforms.
         */
        if (tegra_platform_is_emulation()) {

                INFO("MCE firmware is not supported\n");

        } else {
                /* get a pointer to the CPU's arch_mce_ops_t struct */
                ops = mce_get_curr_cpu_ops();

                /* get the CPU's ARI base address */
                cpu_ari_base = mce_get_curr_cpu_ari_base();

                /*
                 * Read the MCE firmware version and extract the major and
                 * minor version fields
                 */
                version = ops->call_enum_misc(cpu_ari_base,
                                TEGRA_ARI_MISC_VERSION, 0);
                major = (uint32_t)version;
                minor = (uint32_t)(version >> 32);

                INFO("MCE Version - HW=%d:%d, SW=%d:%d\n", major, minor,
                        TEGRA_ARI_VERSION_MAJOR, TEGRA_ARI_VERSION_MINOR);

                /*
                 * Verify that the MCE firmware version and the interface
                 * header match: the major versions must be identical, and the
                 * firmware's minor version must not be older than the
                 * header's.
                 */
                if (major != TEGRA_ARI_VERSION_MAJOR) {
                        ERROR("ARI major version mismatch\n");
                        panic();
                }

                if (minor < TEGRA_ARI_VERSION_MINOR) {
                        ERROR("ARI minor version mismatch\n");
                        panic();
                }
        }
}