1 /*
2 * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7 #include <stdbool.h>
8 #include <stdint.h>
9
10 #include <common/debug.h>
11 #include <lib/bakery_lock.h>
12 #include <lib/cassert.h>
13 #include <lib/extensions/ras.h>
14 #include <lib/utils_def.h>
15 #include <services/sdei.h>
16
17 #include <plat/common/platform.h>
18 #include <platform_def.h>
19 #include <tegra194_ras_private.h>
20 #include <tegra_def.h>
21 #include <tegra_platform.h>
22 #include <tegra_private.h>
23
24 /*
25 * ERR<n>FR bits[63:32], it indicates supported RAS errors which can be enabled
26 * by setting corresponding bits in ERR<n>CTLR
27 */
28 #define ERR_FR_EN_BITS_MASK 0xFFFFFFFF00000000ULL
29
30 /*
31 * Number of RAS errors will be cleared per 'tegra194_ras_corrected_err_clear'
32 * function call.
33 */
34 #define RAS_ERRORS_PER_CALL 8
35
36 /*
37 * the max possible RAS node index value.
38 */
39 #define RAS_NODE_INDEX_MAX 0x1FFFFFFFU
40
41 /* bakery lock for platform RAS handler. */
42 static DEFINE_BAKERY_LOCK(ras_handler_lock);
43 #define ras_lock() bakery_lock_get(&ras_handler_lock)
44 #define ras_unlock() bakery_lock_release(&ras_handler_lock)
45
46 /*
47 * Function to handle an External Abort received at EL3.
48 * This function is invoked by RAS framework.
49 */
tegra194_ea_handler(unsigned int ea_reason,uint64_t syndrome,void * cookie,void * handle,uint64_t flags)50 static void tegra194_ea_handler(unsigned int ea_reason, uint64_t syndrome,
51 void *cookie, void *handle, uint64_t flags)
52 {
53 int32_t ret;
54
55 ras_lock();
56
57 ERROR("MPIDR 0x%lx: exception reason=%u syndrome=0x%llx\n",
58 read_mpidr(), ea_reason, syndrome);
59
60 /* Call RAS EA handler */
61 ret = ras_ea_handler(ea_reason, syndrome, cookie, handle, flags);
62 if (ret != 0) {
63 ERROR("RAS error handled!\n");
64 ret = sdei_dispatch_event(TEGRA_SDEI_EP_EVENT_0 +
65 plat_my_core_pos());
66 if (ret != 0)
67 ERROR("sdei_dispatch_event returned %d\n", ret);
68 } else {
69 ERROR("Not a RAS error!\n");
70 }
71
72 ras_unlock();
73 }
74
75 /*
76 * Function to enable all supported RAS error report.
77 *
78 * Uncorrected errors are set to report as External abort (SError)
79 * Corrected errors are set to report as interrupt.
80 */
void tegra194_ras_enable(void)
{
	VERBOSE("%s\n", __func__);

	/* skip RAS enablement if not a silicon platform. */
	if (!tegra_platform_is_silicon()) {
		return;
	}

	/*
	 * Iterate for each group(num_idx ERRSELRs starting from idx_start)
	 * use normal for loop instead of for_each_err_record_info to get rid
	 * of MISRA noise..
	 */
	for (uint32_t i = 0U; i < err_record_mappings.num_err_records; i++) {

		const struct err_record_info *info = &err_record_mappings.err_records[i];

		uint32_t idx_start = info->sysreg.idx_start;
		uint32_t num_idx = info->sysreg.num_idx;
		/* per-node auxiliary data: err_ctrl() callback per record. */
		const struct ras_aux_data *aux_data = (const struct ras_aux_data *)info->aux_data;

		assert(aux_data != NULL);

		for (uint32_t j = 0; j < num_idx; j++) {

			/* ERR<n>CTLR register value. */
			uint64_t err_ctrl = 0ULL;
			/* all supported errors for this node. */
			uint64_t err_fr;
			/* uncorrectable errors */
			uint64_t uncorr_errs;
			/* correctable errors */
			uint64_t corr_errs;

			/*
			 * Catch error if something wrong with the RAS aux data
			 * record table.
			 */
			assert(aux_data[j].err_ctrl != NULL);

			/*
			 * Write to ERRSELR_EL1 to select the RAS error node.
			 * Always program this at first to select corresponding
			 * RAS node before any other RAS register r/w.
			 */
			ser_sys_select_record(idx_start + j);

			/* ERR<n>FR[63:32] advertises the enableable errors. */
			err_fr = read_erxfr_el1() & ERR_FR_EN_BITS_MASK;
			/* platform callback supplies the uncorrectable set. */
			uncorr_errs = aux_data[j].err_ctrl();
			/* whatever is supported but not uncorrectable is corrected. */
			corr_errs = ~uncorr_errs & err_fr;

			/* enable error reporting */
			ERR_CTLR_ENABLE_FIELD(err_ctrl, ED);

			/* enable SError reporting for uncorrectable errors */
			if ((uncorr_errs & err_fr) != 0ULL) {
				ERR_CTLR_ENABLE_FIELD(err_ctrl, UE);
			}

			/* generate interrupt for corrected errors. */
			if (corr_errs != 0ULL) {
				ERR_CTLR_ENABLE_FIELD(err_ctrl, CFI);
			}

			/* enable the supported errors */
			err_ctrl |= err_fr;

			VERBOSE("errselr_el1:0x%x, erxfr:0x%llx, err_ctrl:0x%llx\n",
				idx_start + j, err_fr, err_ctrl);

			/* enable specified errors, or set to 0 if no supported error */
			write_erxctlr_el1(err_ctrl);

			/*
			 * Check if all the bit settings have been enabled to detect
			 * uncorrected/corrected errors, if not assert.
			 */
			assert(read_erxctlr_el1() == err_ctrl);
		}
	}
}
163
164 /*
165 * Function to clear RAS ERR<n>STATUS for corrected RAS error.
166 *
167 * This function clears number of 'RAS_ERRORS_PER_CALL' RAS errors at most.
168 * 'cookie' - in/out cookie parameter to specify/store last visited RAS
169 * error record index. it is set to '0' to indicate no more RAS
170 * error record to clear.
171 */
void tegra194_ras_corrected_err_clear(uint64_t *cookie)
{
	/*
	 * 'last_node' and 'last_idx' represent the last visited RAS node
	 * index from the previous function call. They are set to 0 when the
	 * first smc call is made, or when all RAS errors have been visited
	 * by the subsequent multiple smc calls.
	 * The union packs both indices into the single 64-bit cookie value.
	 */
	union prev_record {
		struct record {
			uint32_t last_node;
			uint32_t last_idx;
		} rec;
		uint64_t value;
	} prev;

	/* write-1-to-clear mask for the corrected-error status fields. */
	uint64_t clear_ce_status = 0ULL;
	/* budget of error records to visit in this invocation. */
	int32_t nerrs_per_call = RAS_ERRORS_PER_CALL;
	uint32_t i;

	if (cookie == NULL) {
		return;
	}

	prev.value = *cookie;

	/* reject an out-of-range resume point from the caller. */
	if ((prev.rec.last_node >= RAS_NODE_INDEX_MAX) ||
		(prev.rec.last_idx >= RAS_NODE_INDEX_MAX)) {
		return;
	}

	/* build the W1C value that clears AV/V/OF/MV and the CE counter. */
	ERR_STATUS_SET_FIELD(clear_ce_status, AV, 0x1UL);
	ERR_STATUS_SET_FIELD(clear_ce_status, V, 0x1UL);
	ERR_STATUS_SET_FIELD(clear_ce_status, OF, 0x1UL);
	ERR_STATUS_SET_FIELD(clear_ce_status, MV, 0x1UL);
	ERR_STATUS_SET_FIELD(clear_ce_status, CE, 0x3UL);


	for (i = prev.rec.last_node; i < err_record_mappings.num_err_records; i++) {

		const struct err_record_info *info = &err_record_mappings.err_records[i];
		uint32_t idx_start = info->sysreg.idx_start;
		uint32_t num_idx = info->sysreg.num_idx;

		uint32_t j;

		/*
		 * Resume one past the last cleared index when re-entering the
		 * node recorded in the cookie; start from 0 otherwise.
		 */
		j = (i == prev.rec.last_node && prev.value != 0UL) ?
			(prev.rec.last_idx + 1U) : 0U;

		for (; j < num_idx; j++) {

			uint64_t status;
			uint32_t err_idx = idx_start + j;

			if (err_idx >= RAS_NODE_INDEX_MAX) {
				return;
			}

			/* select the node before reading its ERX* registers. */
			write_errselr_el1(err_idx);
			status = read_erxstatus_el1();

			/* clear only records reporting a corrected error. */
			if (ERR_STATUS_GET_FIELD(status, CE) != 0U) {
				write_erxstatus_el1(clear_ce_status);
			}

			--nerrs_per_call;

			/* only clear 'nerrs_per_call' errors each time. */
			if (nerrs_per_call <= 0) {
				prev.rec.last_idx = j;
				prev.rec.last_node = i;
				/* save last visited error record index
				 * into cookie.
				 */
				*cookie = prev.value;

				return;
			}
		}
	}

	/*
	 * finish if all ras error records are checked or provided index is out
	 * of range.
	 */
	*cookie = 0ULL;
	return;
}
259
260 /* Function to probe an error from error record group. */
tegra194_ras_record_probe(const struct err_record_info * info,int * probe_data)261 static int32_t tegra194_ras_record_probe(const struct err_record_info *info,
262 int *probe_data)
263 {
264 /* Skip probing if not a silicon platform */
265 if (!tegra_platform_is_silicon()) {
266 return 0;
267 }
268
269 return ser_probe_sysreg(info->sysreg.idx_start, info->sysreg.num_idx, probe_data);
270 }
271
272 /* Function to handle error from one given node */
static int32_t tegra194_ras_node_handler(uint32_t errselr, const char *name,
		const struct ras_error *errors, uint64_t status)
{
	bool found = false;
	/* implementation-defined error code from ERR<n>STATUS. */
	uint32_t ierr = (uint32_t)ERR_STATUS_GET_FIELD(status, IERR);
	/* architecturally-defined error code from ERR<n>STATUS. */
	uint32_t serr = (uint32_t)ERR_STATUS_GET_FIELD(status, SERR);
	/* write-1-to-clear value accumulated while decoding 'status'. */
	uint64_t val = 0;

	/* not a valid error. */
	if (ERR_STATUS_GET_FIELD(status, V) == 0U) {
		return 0;
	}

	ERR_STATUS_SET_FIELD(val, V, 1);

	/* keep the log print same as linux arm64_ras driver. */
	ERROR("**************************************\n");
	ERROR("RAS Error in %s, ERRSELR_EL1=0x%x:\n", name, errselr);
	ERROR("\tStatus = 0x%llx\n", status);

	/* Print uncorrectable error information. */
	if (ERR_STATUS_GET_FIELD(status, UE) != 0U) {

		ERR_STATUS_SET_FIELD(val, UE, 1);
		ERR_STATUS_SET_FIELD(val, UET, 1);

		/* IERR to error message */
		for (uint32_t i = 0; errors[i].error_msg != NULL; i++) {
			if (ierr == errors[i].error_code) {
				ERROR("\tIERR = %s: 0x%x\n",
					errors[i].error_msg, ierr);

				found = true;
				break;
			}
		}

		if (!found) {
			ERROR("\tUnknown IERR: 0x%x\n", ierr);
		}

		ERROR("SERR = %s: 0x%x\n", ras_serr_to_str(serr), serr);

		/* Overflow, multiple errors have been detected. */
		if (ERR_STATUS_GET_FIELD(status, OF) != 0U) {
			ERROR("\tOverflow (there may be more errors) - "
				"Uncorrectable\n");
			ERR_STATUS_SET_FIELD(val, OF, 1);
		}

		ERROR("\tUncorrectable (this is fatal)\n");

		/* Miscellaneous Register Valid. */
		if (ERR_STATUS_GET_FIELD(status, MV) != 0U) {
			ERROR("\tMISC0 = 0x%lx\n", read_erxmisc0_el1());
			ERROR("\tMISC1 = 0x%lx\n", read_erxmisc1_el1());
			ERR_STATUS_SET_FIELD(val, MV, 1);
		}

		/* Address Valid. */
		if (ERR_STATUS_GET_FIELD(status, AV) != 0U) {
			ERROR("\tADDR = 0x%lx\n", read_erxaddr_el1());
			ERR_STATUS_SET_FIELD(val, AV, 1);
		}

		/* Deferred error */
		if (ERR_STATUS_GET_FIELD(status, DE) != 0U) {
			ERROR("\tDeferred error\n");
			ERR_STATUS_SET_FIELD(val, DE, 1);
		}

	} else {
		/* For corrected error, simply clear it. */
		VERBOSE("corrected RAS error is cleared: ERRSELR_EL1:0x%x, "
			"IERR:0x%x, SERR:0x%x\n", errselr, ierr, serr);
		ERR_STATUS_SET_FIELD(val, CE, 1);
	}

	ERROR("**************************************\n");

	/* Write to clear reported errors. */
	write_erxstatus_el1(val);

	/* error handled */
	return 0;
}
359
360 /* Function to handle one error node from an error record group. */
static int32_t tegra194_ras_record_handler(const struct err_record_info *info,
		int probe_data, const struct err_handler_data *const data __unused)
{
	const struct ras_aux_data *aux_data = info->aux_data;
	uint32_t idx_start = info->sysreg.idx_start;
	uint32_t node_off;
	uint64_t status;

	VERBOSE("%s\n", __func__);

	/* 'probe_data' is the offset of the faulting node within this group. */
	assert(probe_data >= 0);
	assert((uint32_t)probe_data < info->sysreg.num_idx);

	node_off = (uint32_t)probe_data;

	assert(aux_data[node_off].error_records != NULL);

	/* Select the error record before touching any ERX* register. */
	ser_sys_select_record(idx_start + node_off);

	/* Fetch the status register of the selected record. */
	status = read_erxstatus_el1();

	return tegra194_ras_node_handler(idx_start + node_off,
			aux_data[node_off].name,
			aux_data[node_off].error_records, status);
}
393
394
/* Instantiate RAS nodes (per-node error tables and err_ctrl callbacks). */
PER_CORE_RAS_NODE_LIST(DEFINE_ONE_RAS_NODE)
PER_CLUSTER_RAS_NODE_LIST(DEFINE_ONE_RAS_NODE)
SCF_L3_BANK_RAS_NODE_LIST(DEFINE_ONE_RAS_NODE)
CCPLEX_RAS_NODE_LIST(DEFINE_ONE_RAS_NODE)

/*
 * Instantiate RAS node groups. Each group's size is compile-time checked
 * against RAS_NODE_INDEX_MAX so a node index can never exceed the range
 * accepted by tegra194_ras_corrected_err_clear().
 */
static struct ras_aux_data per_core_ras_group[] = {
	PER_CORE_RAS_GROUP_NODES
};
CASSERT(ARRAY_SIZE(per_core_ras_group) < RAS_NODE_INDEX_MAX,
	assert_max_per_core_ras_group_size);

static struct ras_aux_data per_cluster_ras_group[] = {
	PER_CLUSTER_RAS_GROUP_NODES
};
CASSERT(ARRAY_SIZE(per_cluster_ras_group) < RAS_NODE_INDEX_MAX,
	assert_max_per_cluster_ras_group_size);

static struct ras_aux_data scf_l3_ras_group[] = {
	SCF_L3_BANK_RAS_GROUP_NODES
};
CASSERT(ARRAY_SIZE(scf_l3_ras_group) < RAS_NODE_INDEX_MAX,
	assert_max_scf_l3_ras_group_size);

static struct ras_aux_data ccplex_ras_group[] = {
	CCPLEX_RAS_GROUP_NODES
};
CASSERT(ARRAY_SIZE(ccplex_ras_group) < RAS_NODE_INDEX_MAX,
	assert_max_ccplex_ras_group_size);
425
426 /*
427 * We have same probe and handler for each error record group, use a macro to
428 * simply the record definition.
429 */
430 #define ADD_ONE_ERR_GROUP(errselr_start, group) \
431 ERR_RECORD_SYSREG_V1((errselr_start), (uint32_t)ARRAY_SIZE((group)), \
432 &tegra194_ras_record_probe, \
433 &tegra194_ras_record_handler, (group))
434
435 /* RAS error record group information */
436 static struct err_record_info carmel_ras_records[] = {
437 /*
438 * Per core ras error records
439 * ERRSELR starts from 0*256 + Logical_CPU_ID*16 + 0 to
440 * 0*256 + Logical_CPU_ID*16 + 5 for each group.
441 * 8 cores/groups, 6 * 8 nodes in total.
442 */
443 ADD_ONE_ERR_GROUP(0x000, per_core_ras_group),
444 ADD_ONE_ERR_GROUP(0x010, per_core_ras_group),
445 ADD_ONE_ERR_GROUP(0x020, per_core_ras_group),
446 ADD_ONE_ERR_GROUP(0x030, per_core_ras_group),
447 ADD_ONE_ERR_GROUP(0x040, per_core_ras_group),
448 ADD_ONE_ERR_GROUP(0x050, per_core_ras_group),
449 ADD_ONE_ERR_GROUP(0x060, per_core_ras_group),
450 ADD_ONE_ERR_GROUP(0x070, per_core_ras_group),
451
452 /*
453 * Per cluster ras error records
454 * ERRSELR starts from 2*256 + Logical_Cluster_ID*16 + 0 to
455 * 2*256 + Logical_Cluster_ID*16 + 3.
456 * 4 clusters/groups, 3 * 4 nodes in total.
457 */
458 ADD_ONE_ERR_GROUP(0x200, per_cluster_ras_group),
459 ADD_ONE_ERR_GROUP(0x210, per_cluster_ras_group),
460 ADD_ONE_ERR_GROUP(0x220, per_cluster_ras_group),
461 ADD_ONE_ERR_GROUP(0x230, per_cluster_ras_group),
462
463 /*
464 * SCF L3_Bank ras error records
465 * ERRSELR: 3*256 + L3_Bank_ID, L3_Bank_ID: 0-3
466 * 1 groups, 4 nodes in total.
467 */
468 ADD_ONE_ERR_GROUP(0x300, scf_l3_ras_group),
469
470 /*
471 * CCPLEX ras error records
472 * ERRSELR: 4*256 + Unit_ID, Unit_ID: 0 - 4
473 * 1 groups, 5 nodes in total.
474 */
475 ADD_ONE_ERR_GROUP(0x400, ccplex_ras_group),
476 };
477
478 CASSERT(ARRAY_SIZE(carmel_ras_records) < RAS_NODE_INDEX_MAX,
479 assert_max_carmel_ras_records_size);
480
481 REGISTER_ERR_RECORD_INFO(carmel_ras_records);
482
483 /* dummy RAS interrupt */
484 static struct ras_interrupt carmel_ras_interrupts[] = {};
485 REGISTER_RAS_INTERRUPTS(carmel_ras_interrupts);
486
487 /*******************************************************************************
488 * RAS handler for the platform
489 ******************************************************************************/
void plat_ea_handler(unsigned int ea_reason, uint64_t syndrome, void *cookie,
		void *handle, uint64_t flags)
{
#if RAS_EXTENSION
	/* delegate to the platform RAS handler. */
	tegra194_ea_handler(ea_reason, syndrome, cookie, handle, flags);
#else
	ERROR("Unhandled External Abort received on 0x%llx at EL3!\n",
		read_mpidr_el1());
	/* 'syndrome' is uint64_t; print with %llx as done elsewhere in this file. */
	ERROR(" exception reason=%u syndrome=0x%llx\n", ea_reason, syndrome);
	panic();
#endif
}
502