1 /*
2 * Copyright (c) 2020 HiSilicon (Shanghai) Technologies CO., LIMITED.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 * Description: LOG OML EXCEPTION MODULE
15 */
16
17 #ifdef SUPPORT_DFX_EXCEPTION
18 #include "exception.h"
19 #endif
20 #include "debug_print.h"
21 #include "securec.h"
22 #include "log_def.h"
23 #include "log_common.h"
24 #include "log_reg_dump.h"
25 #include "tcxo.h"
26 #include "non_os.h"
27 #include "log_oml_exception.h"
28 #ifdef USE_CMSIS_OS
29 #ifdef __LITEOS__
30 #include "los_task_pri.h"
31 #endif
32 #endif
33 #include "watchdog.h"
34 #include "watchdog_porting.h"
35 #if SLAVE_BY_WS53_ONLY
36 #include "preserve.h"
37 #endif
38 #if CORE != CORE_LOGGING
39 #include "log_buffer.h"
40 #endif
41 #if defined(CHIP_CAT1) && (CHIP_CAT1 == 1) && (CORE != CORE_LOGGING)
42 #else
43 #include "uart.h"
44 #endif
45 #ifdef SDT_LOG_BY_UART
46 #include "sdt_by_uart_external.h"
47 #endif
48 #if MCU_ONLY
49 #include "non_os_reboot.h"
50 #include "preserve.h"
51 #endif
52
53 #if CORE == CORE_LOGGING
54 #ifdef HSO_SUPPORT
55 #include "last_dump.h"
56 #include "last_dump_adapt.h"
57 #endif
58 #endif
59
60 #if ((ARCH == RISCV31) || (ARCH == RISCV32) || (ARCH == RISCV70))
61 #include "dfx_feature_config.h"
62 #endif
63 #define STACK_SIZE 3
64 #define ONLY_ACORE_NUM 1
65
66 #if ((ARCH == RISCV31) || (ARCH == RISCV32) || (ARCH == RISCV70))
67 #define GENERAL_REG_NUM 32
68 #elif((ARCH == CM3) || (ARCH == CM7))
69 #define GENERAL_REG_NUM 13
70 #endif
71
72 #define DUMP_DELAY 6000ULL
73 #define LOG_DELAY 2000ULL
74 #if defined(__GNUC__)
75 extern uint32_t g_stack_system;
76 #endif
77
78 #if ((ARCH == RISCV31) || (ARCH == RISCV32) || (ARCH == RISCV70))
/*
 * Flatten the saved RISC-V exception context into a register array indexed
 * by architectural register number (x0..x31), for inclusion in the
 * last-run-info frame. The caller must supply at least GENERAL_REG_NUM (32)
 * entries in general_reg.
 */
static void log_oml_get_reg_value(uint32_t *general_reg, const exc_context_t *exc_buf_addr)
{
    /* x0 is hard-wired to zero in RISC-V, so there is nothing to read back. */
    general_reg[REG_NUM_0] = 0x0;
    general_reg[REG_NUM_1] = exc_buf_addr->task_context.ra;
    general_reg[REG_NUM_2] = exc_buf_addr->task_context.sp;
    /* gp is kept outside task_context in this exc_context_t layout. */
    general_reg[REG_NUM_3] = exc_buf_addr->gp;
    general_reg[REG_NUM_4] = exc_buf_addr->task_context.tp;
    /* Temporaries t0-t2 (x5-x7). */
    general_reg[REG_NUM_5] = exc_buf_addr->task_context.t0;
    general_reg[REG_NUM_6] = exc_buf_addr->task_context.t1;
    general_reg[REG_NUM_7] = exc_buf_addr->task_context.t2;
    /* Saved registers s0-s1 (x8-x9). */
    general_reg[REG_NUM_8] = exc_buf_addr->task_context.s0;
    general_reg[REG_NUM_9] = exc_buf_addr->task_context.s1;
    /* Argument registers a0-a7 (x10-x17). */
    general_reg[REG_NUM_10] = exc_buf_addr->task_context.a0;
    general_reg[REG_NUM_11] = exc_buf_addr->task_context.a1;
    general_reg[REG_NUM_12] = exc_buf_addr->task_context.a2;
    general_reg[REG_NUM_13] = exc_buf_addr->task_context.a3;
    general_reg[REG_NUM_14] = exc_buf_addr->task_context.a4;
    general_reg[REG_NUM_15] = exc_buf_addr->task_context.a5;
    general_reg[REG_NUM_16] = exc_buf_addr->task_context.a6;
    general_reg[REG_NUM_17] = exc_buf_addr->task_context.a7;
    /* Saved registers s2-s11 (x18-x27). */
    general_reg[REG_NUM_18] = exc_buf_addr->task_context.s2;
    general_reg[REG_NUM_19] = exc_buf_addr->task_context.s3;
    general_reg[REG_NUM_20] = exc_buf_addr->task_context.s4;
    general_reg[REG_NUM_21] = exc_buf_addr->task_context.s5;
    general_reg[REG_NUM_22] = exc_buf_addr->task_context.s6;
    general_reg[REG_NUM_23] = exc_buf_addr->task_context.s7;
    general_reg[REG_NUM_24] = exc_buf_addr->task_context.s8;
    general_reg[REG_NUM_25] = exc_buf_addr->task_context.s9;
    general_reg[REG_NUM_26] = exc_buf_addr->task_context.s10;
    general_reg[REG_NUM_27] = exc_buf_addr->task_context.s11;
    /* Temporaries t3-t6 (x28-x31). */
    general_reg[REG_NUM_28] = exc_buf_addr->task_context.t3;
    general_reg[REG_NUM_29] = exc_buf_addr->task_context.t4;
    general_reg[REG_NUM_30] = exc_buf_addr->task_context.t5;
    general_reg[REG_NUM_31] = exc_buf_addr->task_context.t6;
}
114 #endif
115
log_oml_hard_fault(uint32_t reason)116 static void log_oml_hard_fault(uint32_t reason)
117 {
118 #if (ARCH == CM3) || (ARCH == CM7)
119 if (reason & SCB_HFSR_DEBUGEVT_Msk) {
120 PRINT("[Hard Fault]: Caused by Debug Event" NEWLINE);
121 }
122 if (reason & SCB_HFSR_FORCED_Msk) {
123 PRINT("[Hard Fault]: Caused by Other Faults" NEWLINE);
124 }
125 if (reason & SCB_HFSR_VECTTBL_Msk) {
126 PRINT("[Hard Fault]: Caused by Fetching vector" NEWLINE);
127 }
128 #else
129 UNUSED(reason);
130 #endif
131 }
132
log_oml_mem_fault(uint32_t reason)133 static void log_oml_mem_fault(uint32_t reason)
134 {
135 #if (ARCH == CM3) || (ARCH == CM7)
136 if (reason & SCB_CFSR_MSTKERR_Msk) {
137 PRINT("[Mem Fault] Enter Stack Fault" NEWLINE);
138 } else if (reason & SCB_CFSR_MUNSTKERR_Msk) {
139 PRINT("[Mem Fault] Quit Stack Fault" NEWLINE);
140 } else if (reason & SCB_CFSR_DACCVIOL_Msk) {
141 PRINT("[Mem Fault] Data Access Fault" NEWLINE);
142 } else if (reason & SCB_CFSR_IACCVIOL_Msk) {
143 PRINT("[Mem Fault] Instruction Access Fault" NEWLINE);
144 }
145 #else
146 UNUSED(reason);
147 #endif
148 }
149
log_oml_bus_fault(uint32_t reason)150 static void log_oml_bus_fault(uint32_t reason)
151 {
152 #if (ARCH == CM3) || (ARCH == CM7)
153 if (reason & SCB_CFSR_STKERR_Msk) {
154 PRINT("[Bus Fault] Enter Stack Fault" NEWLINE);
155 }
156 if (reason & SCB_CFSR_UNSTKERR_Msk) {
157 PRINT("[Bus Fault] Quit Stack Fault" NEWLINE);
158 }
159 if (reason & SCB_CFSR_IMPRECISERR_Msk) {
160 PRINT("[Bus Fault] Non Exact Data Access Fault" NEWLINE);
161 }
162 if (reason & SCB_CFSR_PRECISERR_Msk) {
163 PRINT("[Bus Fault] Data Access Fault" NEWLINE);
164 }
165 if (reason & SCB_CFSR_IBUSERR_Msk) {
166 PRINT("[Bus Fault] Instruction Access Fault" NEWLINE);
167 }
168 #else
169 UNUSED(reason);
170 #endif
171 }
172
log_oml_usage_fault(uint32_t reason)173 static void log_oml_usage_fault(uint32_t reason)
174 {
175 #if (ARCH == CM3) || (ARCH == CM7)
176 if (reason & SCB_CFSR_DIVBYZERO_Msk) {
177 PRINT("[Usage Fault] DIV Zero Fault" NEWLINE);
178 }
179 if (reason & SCB_CFSR_UNALIGNED_Msk) {
180 PRINT("[Usage Fault] unaligned access Fault" NEWLINE);
181 }
182 if (reason & SCB_CFSR_NOCP_Msk) {
183 PRINT("[Usage Fault] Try to execute co-processor instr Fault" NEWLINE);
184 }
185 if (reason & SCB_CFSR_INVPC_Msk) {
186 PRINT("[Usage Fault] Invalid EXC_RETURN to PC Fault" NEWLINE);
187 }
188 if (reason & SCB_CFSR_INVSTATE_Msk) {
189 PRINT("[Usage Fault] Try to Enter ARM State Fault" NEWLINE);
190 }
191 if (reason & SCB_CFSR_UNDEFINSTR_Msk) {
192 PRINT("[Usage Fault] Undefined instruction Fault" NEWLINE);
193 }
194 #else
195 UNUSED(reason);
196 #endif
197 }
198
199 #if SLAVE_BY_WS53_ONLY
log_exception_print_para(const exc_context_t * e_contex)200 static void log_exception_print_para(const exc_context_t *e_contex)
201 {
202 task_context_t contex = e_contex->task_context;
203 PRINT("mcause:0x%x\n""ccause:0x%x\n""mstatus:0x%x\n""ra:0x%x\n""sp:0x%x\n""gp:0x%x\n",
204 e_contex->mcause, e_contex->ccause, contex.mstatus, contex.ra, contex.sp, e_contex->gp);
205
206 PRINT("a0:0x%x\n""a1:0x%x\n""a2:0x%x\n""a3:0x%x\n""a4:0x%x\n""a5:0x%x\n""a6:0x%x\n""a7:0x%x\n"
207 "tp:0x%x\n""t0:0x%x\n""t1:0x%x\n""t2:0x%x\n""t3:0x%x\n""t4:0x%x\n""t5:0x%x\n""t6:0x%x\n",
208 contex.a0, contex.a1, contex.a2, contex.a3, contex.a4, contex.a5, contex.a6, contex.a7,
209 contex.tp, contex.t0, contex.t1, contex.t2, contex.t3, contex.t4, contex.t5, contex.t6);
210
211 PRINT("s0/fp:0x%x\n""s1:0x%x\n""s2:0x%x\n""s3:0x%x\n""s4:0x%x\n""s5:0x%x\n"
212 "s6:0x%x\n""s7:0x%x\n""s8:0x%x\n""s9:0x%x\n""s10:0x%x\n""s11:0x%x\n",
213 contex.s0, contex.s1, contex.s2, contex.s3, contex.s4, contex.s5,
214 contex.s6, contex.s7, contex.s8, contex.s9, contex.s10, contex.s11);
215 }
216 #endif
217
218 #if ((ARCH == RISCV31) || (ARCH == RISCV32) || (ARCH == RISCV70))
219 #if defined(CONFIG_DFX_SDT_DUMP_EXCEPTION) && (CONFIG_DFX_SDT_DUMP_EXCEPTION == DFX_YES)
/*
 * Map a RISC-V exception/interrupt id to the OM (operation & maintenance)
 * fault-type code reported to the host tool.
 * Returns 0 for ids with no known mapping (after logging the unknown id).
 */
static uint32_t log_get_fault_type(uint32_t irq_id)
{
    switch (irq_id) {
        case IRQ_ID_INSTRUCTION_ADDRESS_MISALIGNED:
            return OM_INSTRUCTION_ADDRESS_MISALIGNED;
        case IRQ_ID_INSTRUCTION_ACCESS_FAULT:
            return OM_INSTRUCTION_ACCESS_FAULT;
        case IRQ_ID_ILLEGAL_INSTRUCTION:
            return OM_ILLEGAL_INSTRUCTION;
        case IRQ_ID_BREAKPOINT:
            return OM_BREAKPOINT;
        case IRQ_ID_LOAD_ADDERSS_MISALIGNED:
            return OM_LOAD_ADDERSS_MISALIGNED;
        case IRQ_ID_LOAD_ACCESS_FAULT:
            return OM_LOAD_ACCESS_FAULT;
        case IRQ_ID_STORE_OR_AMO_ADDRESS_MISALIGNED:
            return OM_STORE_OR_AMO_ADDRESS_MISALIGNED;
        case IRQ_ID_STORE_OR_AMO_ACCESS_FALUT:
            return OM_STORE_OR_AMO_ACCESS_FALUT;
        case IRQ_ID_ENVIRONMENT_CALL_FROM_UMODE:
            return OM_ENVIRONMENT_CALL_FROM_UMODE;
        case IRQ_ID_ENVIRONMENT_CALL_FROM_SMODE:
            return OM_ENVIRONMENT_CALL_FROM_SMODE;
        case IRQ_ID_ENVIRONMENT_CALL_FROM_MMODE:
            return OM_ENVIRONMENT_CALL_FROM_MMODE;
        case IRQ_ID_INSTRUCTION_PAGE_FAULT:
            return OM_INSTRUCTION_PAGE_FAULT;
        case IRQ_ID_LOAD_PAGE_FAULT:
            return OM_LOAD_PAGE_FAULT;
        case IRQ_ID_STORE_OR_AMO_PAGE_FAULT:
            return OM_STORE_OR_AMO_PAGE_FAULT;
        case IRQ_ID_NMI_INTERRUPT:
            return OM_NMI_INTERRUPT;
        /* RISCV31/32 cores expose vendor-specific hard-fault/lock-up ids;
         * RISCV70 exposes an asynchronous-exception id instead. */
#if (ARCH == RISCV31) || (ARCH == RISCV32)
        case IRQ_ID_HARD_FAULT:
            return OM_RISCV_HARD_FAULT;
        case IRQ_ID_LOCK_UP:
            return OM_LOCK_UP;
#else
        case IRQ_ID_ASYNCHRONOUS_EXCEPTION:
            return OM_ASYNCHRONOUS_EXCEPTION;
#endif
        default:
            PRINT("Unknown Fault[%x]" NEWLINE, irq_id);
            return 0;
    }
}
267 #endif
268
/*
 * RISC-V exception dump entry point, registered as the DFX exception-dump
 * callback. Derives an OM fault type from the irq id (and, for MCU builds,
 * overrides it from the recorded reboot cause), sends the last-run-info
 * frame, then dumps RAM over the log channel.
 *
 * The leading UNUSED() calls keep the Cortex-M-only fault decoders and
 * unused parameters from triggering warnings in configurations where the
 * SDT dump path is compiled out.
 */
void log_exception_dump(uint32_t irq_id, exc_context_t *exc_buf_addr)
{
    UNUSED(log_oml_usage_fault);
    UNUSED(log_oml_bus_fault);
    UNUSED(log_oml_mem_fault);
    UNUSED(log_oml_hard_fault);
    UNUSED(irq_id);
    UNUSED(exc_buf_addr);
#if defined(CONFIG_DFX_SDT_DUMP_EXCEPTION) && (CONFIG_DFX_SDT_DUMP_EXCEPTION == DFX_YES)

    uint32_t fault_type = 0;
#if defined(HSO_SUPPORT)
    fault_type = log_get_fault_type(irq_id);
#if MCU_ONLY
    /* If the chip rebooted due to a watchdog/XIP/DMA event, that recorded
     * cause is more specific than the current irq id, so it wins. */
    reboot_cause_t reset_cause = get_cpu_utils_reset_cause();
    switch (reset_cause) {
        case REBOOT_CAUSE_BT_WATCHDOG:
        case REBOOT_CAUSE_APPLICATION_CHIP_WDT:
            fault_type = OM_WDT_TIMEOUT_INTERRUPT;
            break;
        case REBOOT_CAUSE_APPLICATION_XIP_CTRL:
            fault_type = OM_APPLICATION_XIP_CTRL_INTERRUPT;
            break;
        case REBOOT_CAUSE_APPLICATION_XIP_CACHE:
            fault_type = OM_APPLICATION_XIP_CACHE_INTERRUPT;
            break;
        case REBOOT_CAUSE_APPLICATION_MDMA:
            fault_type = OM_APPLICATION_MDMA_INTERRUPT;
            break;
        case REBOOT_CAUSE_APPLICATION_SMDMA:
            fault_type = OM_APPLICATION_SMDMA_INTERRUPT;
            break;
        default:
            break;
    }
#endif
#else
    /* Without HSO the host tool cannot interpret per-irq codes; report a
     * generic watchdog timeout. */
    UNUSED(irq_id);
    UNUSED(log_get_fault_type);
    fault_type = OM_WDT_TIMEOUT;
#endif
#if SLAVE_BY_WS53_ONLY
    /* WS53 slave builds also print the full register context locally. */
    log_exception_print_para(exc_buf_addr);
    fault_type = log_get_fault_type(irq_id);
#endif
    log_oml_exception_info_send(fault_type, exc_buf_addr);
#endif
    /* Send ram */
    log_oml_memory_dump();
}
319 #else
/*
 * Cortex-M exception dump entry point. Decodes the fault cause registers
 * for the given interrupt id, prints a human-readable cause, sends the
 * last-run-info frame, then dumps RAM over the log channel.
 *
 * int_id   - fault interrupt number (hard/mem/bus/usage/watchdog fault).
 * reason   - fault status register value (HFSR or CFSR, per int_id).
 * addr     - fault address to report.
 * exc_info - saved exception context; forwarded to the info sender.
 */
void log_exception_dump(uint32_t int_id, uint32_t reason, uint32_t addr, exc_info_t *exc_info)
{
    uint32_t fault_type = 0;
    switch (int_id) {
        case INT_ID_HARD_FAULT:
            fault_type = OM_HARD_FAULT;
            log_oml_hard_fault(reason);
            break;
        case INT_ID_MEM_FAULT:
            fault_type = OM_MEM_FAULT;
            log_oml_mem_fault(reason);
            break;
        case INT_ID_BUS_FAULT:
            fault_type = OM_BUS_FAULT;
            log_oml_bus_fault(reason);
            break;
        case INT_ID_USAGE_FAULT:
            fault_type = OM_USAGE_FAULT;
            log_oml_usage_fault(reason);
            break;
#if CORE == MASTER_BY_ALL
        case INT_ID_CHIP_WATCHDOG_FAULT:
#endif
        case INT_ID_WATCHDOG_FAULT:
            fault_type = OM_WDT_TIMEOUT;
            break;
        default:
            PRINT("Unknown Fault[%d]" NEWLINE, int_id);
            break;
    }
#if MCU_ONLY
    /* Query the reboot cause once (was fetched twice); a recorded watchdog
     * reset overrides whatever fault the current context suggests. */
    reboot_cause_t reset_cause = get_cpu_utils_reset_cause();
    if ((reset_cause == REBOOT_CAUSE_BT_WATCHDOG) ||
        (reset_cause == REBOOT_CAUSE_APPLICATION_CHIP_WDT)) {
        fault_type = OM_WDT_TIMEOUT;
    }
#endif
    log_oml_exception_info_send(addr, fault_type, reason, exc_info);

    /* Send ram */
    log_oml_memory_dump();
}
361 #endif
362
log_exception_send_data(const uint8_t * data,uint16_t length)363 void log_exception_send_data(const uint8_t *data, uint16_t length)
364 {
365 #if CORE == CORE_LOGGING
366 uapi_uart_write(LOG_UART_BUS, (const void *)data, length, 0);
367 #elif defined(SDT_LOG_BY_UART)
368 oml_write_uart_fifo((uint8*)data, length, LOGUART_BASE);
369 #else
370 /* APP core need to wait enough share memory to write next block of data */
371 uint32_t available = 0;
372 while (available <= length) {
373 (void)log_buffer_get_available_for_next_message(&available);
374 }
375
376 log_event(data, length);
377 #endif
378 }
379
/*
 * Stream a memory region [addr, addr+length) over the log channel as a
 * sequence of OM dump frames. Each frame is: message header, dump header
 * (running frame count; end_flag set on the final frame), up to
 * DUMP_MAX_LENGTH_PER_TRANS payload bytes, and a tail delimiter byte.
 * The whole transfer runs inside a critical section so frames are not
 * interleaved with other log traffic. No-op when compressed logging
 * replaces SDT logging.
 */
static void log_exception_dump_memory(uint32_t addr, uint32_t length)
{
#if (USE_COMPRESS_LOG_INSTEAD_OF_SDT_LOG == NO)
    uint32_t ram_addr = addr;
    uint32_t ram_size = length;
    uint32_t length_tmp = length;
    uint8_t msg_tail = OM_FRAME_DELIMITER;
    om_msg_header_stru_t msg_header = { 0 };
    om_msg_dump_header_stru_t dump_header = { 0 };

    msg_header.frame_start = OM_FRAME_DELIMITER;
    msg_header.func_type = OM_MSG_TYPE_LAST;
    msg_header.prime_id = OM_LOG_SAVE_STACK;

    dump_header.end_flag = 0;
    dump_header.count = 0;

    non_os_enter_critical();
    while (ram_size > 0) {
        /* Clamp each frame's payload to the per-transfer maximum. */
        length_tmp = MIN(ram_size, DUMP_MAX_LENGTH_PER_TRANS);
        if (length_tmp == ram_size) {
            /* Last frame: tell the host the dump is complete. */
            dump_header.end_flag = OM_FRAME_DUMP_DELIMITER;
        }
        msg_header.frame_len = (uint16_t)(sizeof(om_msg_header_stru_t) + length_tmp + sizeof(msg_tail) +
            sizeof(om_msg_dump_header_stru_t));
        /* Send exception stack */
        log_exception_send_data((uint8_t *)(&msg_header), sizeof(om_msg_header_stru_t));
        log_exception_send_data((uint8_t *)(&dump_header), sizeof(om_msg_dump_header_stru_t));
        log_exception_send_data((uint8_t *)(uintptr_t)ram_addr, (uint16_t)length_tmp);
        log_exception_send_data((uint8_t *)(&msg_tail), sizeof(msg_tail));
        dump_header.count++;
        ram_addr += length_tmp;
        ram_size -= length_tmp;
    }
    non_os_exit_critical();
#else
    UNUSED(addr);
    UNUSED(length);
#endif
}
420
/*
 * Dump the stack of the currently running LiteOS task (Cortex-M + CMSIS OS
 * builds only). Walks the task control block array for the task whose stack
 * range contains the current PSP, logs its bounds, and streams that stack
 * region over the log channel. No-op in all other configurations.
 */
void log_oml_dump_stack(void)
{
#if (ARCH == CM3) || (ARCH == CM7)
#ifdef USE_CMSIS_OS
#ifdef __LITEOS__
    LosTaskCB *los_task_cb = NULL;
    uint32_t loops;
    uint32_t stack_addr = 0;
    uint32_t stack_size = 0;
    uint32_t sp = __get_PSP();

    /* Scan every task slot for the one whose stack contains 'sp'. */
    for (loops = 0; loops < g_taskMaxNum; loops++) {
        los_task_cb = (((LosTaskCB *)g_osTaskCBArray) + loops);

        if (los_task_cb->taskStatus & OS_TASK_STATUS_UNUSED) {
            continue;
        }

        if (sp > los_task_cb->topOfStack && sp < los_task_cb->topOfStack + los_task_cb->stackSize) {
            stack_addr = los_task_cb->topOfStack;
            stack_size = los_task_cb->stackSize;
            PRINT("current task stack_point: 0x%x\r\n", sp);
            PRINT("stack addr:0x%x,stack size:0x%x\r\n", stack_addr, stack_size);
            /* STACK_SIZE (3) is the trailing argument count for this
             * fifo-log variant — presumably; confirm against its prototype. */
            pf_write_fifo_log_alter(LOG_PFMODULE, LOG_NUM_LIB_LOG, LOG_LEVEL_ERROR,
                "[PF][DUMP INFO] stack point: 0x%x, stack address: 0x%x, stack size: 0x%x",
                STACK_SIZE, sp, stack_addr, stack_size);
            break;
        }
    }
    /* If no task matched, addr/size are 0 and this dump sends nothing. */
    log_exception_dump_memory(stack_addr, stack_size);
#endif
#endif
#else
    UNUSED(log_exception_dump_memory);
#endif
}
457
/*
 * Disable the watchdog so the (slow) crash dump is not interrupted by a
 * watchdog reset. The watchdog register clock must be enabled to access
 * the disable register, and is turned back off afterwards.
 */
static void log_watch_dog_disable(void)
{
    watchdog_turnon_clk();
    uapi_watchdog_disable();
    watchdog_turnoff_clk();
}
464
/*
 * Dump this core's RAM regions over the log channel after a fatal
 * exception. Disables the watchdog first, then sends the memory regions
 * appropriate for the compiled core (BT / APPS / WIFI / control core),
 * and finally delays to let the UART/log buffer drain. No-op when
 * compressed logging replaces SDT logging.
 */
void log_oml_memory_dump(void)
{
#if (USE_COMPRESS_LOG_INSTEAD_OF_SDT_LOG == NO)
    log_watch_dog_disable();
#if CORE == BT
    /* BT core has just one block of ram */
    uapi_tcxo_delay_ms(DUMP_DELAY);
#elif CORE == APPS
#ifdef HSO_SUPPORT
    /* HSO builds delegate the whole dump to the last-dump module. */
    dfx_last_dump();
#else
    log_exception_dump_memory(APP_ITCM_ORIGIN, APP_ITCM_LENGTH);
    log_exception_dump_memory(APP_DTCM_ORIGIN, APP_DTCM_LENGTH);
    log_exception_dump_memory(SHARED_MEM_START, SHARED_MEM_LENGTH);
    log_exception_dump_memory(MCPU_TRACE_MEM_REGION_START, CPU_TRACE_MEM_REGION_LENGTH);
#if CORE_NUMS != ONLY_ACORE_NUM
    /* Multi-core chips: also dump the BT core's RAM via its APP mapping. */
    log_exception_dump_memory(BT_RAM_ORIGIN_APP_MAPPING, BT_RAM_ORIGIN_APP_MAPPING_LENGTH);
    log_exception_dump_memory(BCPU_TRACE_MEM_REGION_START, CPU_TRACE_MEM_REGION_LENGTH);
#endif
    log_exception_dump_reg();
#endif
    /* mem dump save in flash */
#if (FLASH_DUMP_START != 0)
    dfx_last_dump2flash(FLASH_DUMP_START, FLASH_DUMP_SIZE);
#endif
#ifdef NO_TCXO_SUPPORT
    /* No TCXO timer available: crude busy-wait to let the log drain. */
    volatile int time_cnt = 0xFFFF;
    while ((time_cnt--) > 0) {
        __asm__ __volatile__("nop");
        __asm__ __volatile__("nop");
    }
#else
    uapi_tcxo_delay_ms(LOG_DELAY);
#endif
#elif CORE == WIFI
    log_exception_dump_memory(WL_BOOT_ROM_START, WL_BOOT_ROM_LEN);
    log_exception_dump_memory(WL_ITCM_IROM_START, WL_ITCM_IROM_LEN);
    log_exception_dump_memory(WL_TCM_RAM_START, WL_TCM_RAM_LEN);
    log_exception_dump_memory(WL_DTCM_PKTMEM_START, WL_DTCM_PKTMEM_LEN);
    log_exception_dump_memory(WCPU_TRACE_MEM_REGION_START, CPU_TRACE_MEM_REGION_LENGTH);
    tcxo_delay_ms(DUMP_DELAY);
    return;
#elif CORE == CONTROL_CORE
    log_exception_dump_memory(CT_SRAM_ORIGIN, CT_SRAM_LENGTH);
    log_exception_dump_memory(CT_ROM_ORIGIN, CT_ROM_LENGTH);
    log_exception_dump_memory(CCORE_FLASH_PROGRAM_ORIGIN, CCORE_FLASH_PROGRAM_LENGTH);
    uapi_tcxo_delay_ms(DUMP_DELAY);
    return;
#endif
#else /* USE_COMPRESS_LOG_INSTEAD_OF_SDT_LOG == YES */
#endif /* USE_COMPRESS_LOG_INSTEAD_OF_SDT_LOG */
}
517
518 #if defined(__ICCARM__)
519 #pragma segment = "CSTACK"
520 #endif
521
522 #if ((ARCH == RISCV31) || (ARCH == RISCV32) || (ARCH == RISCV70))
/*
 * Fill the OM last-run-info frame from a saved RISC-V exception context.
 * The frame layout is shared with the Cortex-M path, so several ARM-named
 * fields carry RISC-V CSR values here (e.g. pc_value <- mepc,
 * primask_value <- mstatus). fault_reason/address are zeroed; the RISC-V
 * path reports the cause only via fault_type (irq_id).
 */
static void log_oml_build_last_run_info(om_exception_info_stru_t *last_run_info,
    uint32_t irq_id, const exc_context_t *exc_buf_addr)
{
    uint32_t stack_limit;
#if defined(__GNUC__)
    /* Linker-provided symbol: its address marks the system stack bound. */
    stack_limit = (uint32_t)((uintptr_t)&(g_stack_system));
#else
    stack_limit = 0;
#endif

    last_run_info->msg_header.frame_start = OM_FRAME_DELIMITER;
    last_run_info->msg_header.func_type = OM_MSG_TYPE_LAST;
    last_run_info->msg_header.prime_id = OM_LOG_RPT_IND;
    last_run_info->msg_header.sn = get_log_sn_number();
    last_run_info->stack_limit = stack_limit;
    last_run_info->fault_type = irq_id;
    last_run_info->fault_reason = 0;
    last_run_info->address = 0;

    last_run_info->psp_value = exc_buf_addr->task_context.sp;
    last_run_info->lr_value = exc_buf_addr->task_context.ra;
    last_run_info->pc_value = exc_buf_addr->task_context.mepc;
    last_run_info->psps_value = exc_buf_addr->gp;
    last_run_info->primask_value = exc_buf_addr->task_context.mstatus;
    /* NOTE(review): mtval is stored in both fault_mask_value and
     * control_value — presumably deliberate padding of the ARM-shaped
     * frame; confirm against the host tool's decoder. */
    last_run_info->fault_mask_value = exc_buf_addr->mtval;
    last_run_info->bserpri_value = exc_buf_addr->ccause;
    last_run_info->control_value = exc_buf_addr->mtval;
    last_run_info->msg_header.frame_len = (uint16_t)sizeof(om_exception_info_stru_t);
    last_run_info->msg_tail = OM_FRAME_DELIMITER;
}
/*
 * Build and transmit the RISC-V last-run-info frame (header, CSRs, and all
 * 32 general-purpose registers), then wait long enough for the frame to
 * leave the device before the caller proceeds to reset/dump.
 *
 * irq_id       - fault type code placed in the frame.
 * exc_buf_addr - saved exception context to serialize.
 */
void log_oml_exception_info_send(uint32_t irq_id, const exc_context_t *exc_buf_addr)
{
    om_exception_info_stru_t last_run_info = { 0 };

    log_oml_build_last_run_info(&last_run_info, irq_id, exc_buf_addr);

    log_oml_get_reg_value(last_run_info.reg_value, exc_buf_addr);

    /*
     * Waiting for the entire log to be sent
     * Because it is possible that the uart transmission is not completed,
     * the structure assignment is placed in front to ensure that there is information in the stack.
     */
#if CORE == CORE_LOGGING
#ifdef HSO_SUPPORT
    /* HSO transport adds its own framing, so strip this frame's header
     * and tail delimiter before handing over the payload. */
    dfx_last_word_send((uint8_t *)&last_run_info + sizeof(om_msg_header_stru_t),
        sizeof(last_run_info) - sizeof(om_msg_header_stru_t) - OM_FRAME_DELIMITER_LEN);
#else
    uapi_uart_write(LOG_UART_BUS, (const void *)&last_run_info, sizeof(last_run_info), 0);
#endif
#elif defined(SDT_LOG_BY_UART)
    oml_write_uart_fifo((uint8_t *)&last_run_info, sizeof(last_run_info), LOGUART_BASE);
#else
    log_event((uint8_t *)&last_run_info, sizeof(last_run_info));
    log_trigger();
#endif

#ifdef NO_TCXO_SUPPORT
    /* No TCXO timer: busy-wait so the transport can drain. */
    volatile int time_cnt = 0xFFFF;
    while ((time_cnt--) > 0) {
        __asm__ __volatile__("nop");
        __asm__ __volatile__("nop");
    }
#else
    uapi_tcxo_delay_ms(LOG_DELAY);
#endif
}
590 #else
log_oml_exception_info_send(uint32_t address,uint32_t fault_type,uint32_t fault_reason,const exc_info_t * exc_info)591 void log_oml_exception_info_send(uint32_t address, uint32_t fault_type, uint32_t fault_reason,
592 const exc_info_t *exc_info)
593 {
594 if (exc_info == NULL) {
595 return;
596 }
597
598 om_exception_info_stru_t last_run_info = { 0 };
599 uint8_t loop;
600 uint32_t stack_limit;
601
602 #if defined(__GNUC__)
603 stack_limit = (uint32_t)((uintptr_t)&(g_stack_system));
604 #elif defined(__ICCARM__)
605 stack_limit = (uint32_t)__sfe("CSTACK") - 4; // 4 byte
606 #else
607 stack_limit = 0;
608 #endif
609
610 last_run_info.msg_header.frame_start = OM_FRAME_DELIMITER;
611 last_run_info.msg_header.func_type = OM_MSG_TYPE_LAST;
612 last_run_info.msg_header.prime_id = OM_LOG_RPT_IND;
613 last_run_info.msg_header.sn = get_log_sn_number();
614 last_run_info.stack_limit = stack_limit;
615 last_run_info.fault_type = fault_type;
616 last_run_info.fault_reason = fault_reason;
617 last_run_info.address = address;
618
619 memcpy_s((void *)&last_run_info.reg_value[0], AULREG_VALUE_INDEX, &(exc_info->context->r4), AULREG_VALUE_INDEX);
620
621 #if (ARCH == CM3) || (ARCH == CM7)
622 last_run_info.psp_value = __get_PSP();
623 last_run_info.lr_value = exc_info->context->lr;
624 last_run_info.pc_value = exc_info->context->pc;
625 last_run_info.psps_value = exc_info->context->xpsr;
626 last_run_info.primask_value = __get_PRIMASK();
627 last_run_info.fault_mask_value = (uint32_t)__get_FAULTMASK();
628 last_run_info.bserpri_value = (uint32_t)__get_BASEPRI();
629 last_run_info.control_value = __get_CONTROL();
630 last_run_info.msg_header.frame_len = sizeof(om_exception_info_stru_t);
631 last_run_info.msg_tail = OM_FRAME_DELIMITER;
632 #endif
633 /*
634 * Waiting for the entire log to be sent
635 * Because it is possible that the uart transmission is not completed,
636 * the structure assignment is placed in front to ensure that there is information in the stack.
637 */
638 #if CORE == CORE_LOGGING
639 #ifdef HSO_SUPPORT
640 dfx_last_word_send((uint8_t *)&last_run_info + sizeof(om_msg_header_stru_t),
641 sizeof(last_run_info) - sizeof(om_msg_header_stru_t) - OM_FRAME_DELIMITER_LEN);
642 #else
643 uapi_uart_write(LOG_UART_BUS, (const void *)&last_run_info, sizeof(last_run_info), 0);
644 #endif
645 #else
646 log_event((uint8_t *)&last_run_info, sizeof(last_run_info));
647 log_trigger();
648 #endif
649 uapi_tcxo_delay_ms(LOG_DELAY);
650 }
651 #endif
652
/*
 * Register this module's log_exception_dump as the HAL exception-dump
 * callback. No-op when the DFX exception framework is compiled out.
 */
void default_register_hal_exception_dump_callback(void)
{
#ifdef SUPPORT_DFX_EXCEPTION
    hal_register_exception_dump_callback(log_exception_dump);
#endif
}
659