/*
 * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <arch.h>
#include <asm_macros.S>
#include <bl31/ea_handle.h>
#include <bl31/interrupt_mgmt.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <el3_common_macros.S>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/smccc.h>

	.globl	runtime_exceptions

	.globl	sync_exception_sp_el0
	.globl	irq_sp_el0
	.globl	fiq_sp_el0
	.globl	serror_sp_el0

	.globl	sync_exception_sp_elx
	.globl	irq_sp_elx
	.globl	fiq_sp_elx
	.globl	serror_sp_elx

	.globl	sync_exception_aarch64
	.globl	irq_aarch64
	.globl	fiq_aarch64
	.globl	serror_aarch64

	.globl	sync_exception_aarch32
	.globl	irq_aarch32
	.globl	fiq_aarch32
	.globl	serror_aarch32

	/*
	 * Macro that prepares entry to EL3 upon taking an exception.
	 *
	 * With RAS_EXTENSION, this macro synchronizes pending errors with an ESB
	 * instruction. When an error is thus synchronized, the handling is
	 * delegated to the platform EA handler.
	 *
	 * Without RAS_EXTENSION, this macro synchronizes pending errors using
	 * a DSB, unmasks Asynchronous External Aborts and saves X30 before
	 * setting the flag CTX_IS_IN_EL3.
	 */
	.macro check_and_unmask_ea
#if RAS_EXTENSION
	/* Synchronize pending External Aborts */
	esb

	/* Unmask the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/*
	 * Explicitly save x30 so as to free up a register and to enable
	 * branching
	 */
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

	/* Check for SErrors synchronized by the ESB instruction */
	mrs	x30, DISR_EL1
	tbz	x30, #DISR_A_BIT, 1f

	/*
	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
	 * If Secure Cycle Counter is not disabled in MDCR_EL3 when
	 * ARMv8.5-PMU is implemented, save PMCR_EL0 and disable Cycle Counter.
	 */
	bl	save_gp_pmcr_pauth_regs

	bl	handle_lower_el_ea_esb

	/* Restore general purpose, PMCR_EL0 and ARMv8.3-PAuth registers */
	bl	restore_gp_pmcr_pauth_regs
1:
#else
	/*
	 * For SoCs which do not implement RAS, use DSB as a barrier to
	 * synchronize pending external aborts.
	 */
	dsb	sy

	/* Unmask the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/* Use ISB for the above unmask operation to take effect immediately */
	isb

	/*
	 * Refer to Note 1. There is no need to restore X30, as both the
	 * handle_sync_exception and handle_interrupt_exception macros that
	 * follow this macro modify X30 anyway.
	 */
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	mov	x30, #1
	str	x30, [sp, #CTX_EL3STATE_OFFSET + CTX_IS_IN_EL3]
	dmb	sy
#endif
	.endm

#if !RAS_EXTENSION
	/*
	 * Note 1: The explicit DSB at the entry of various exception vectors
	 * for handling exceptions from lower ELs can inadvertently trigger an
	 * SError exception in EL3 due to pending asynchronous aborts in lower
	 * ELs. This will end up being handled by serror_sp_elx which will
	 * ultimately panic and die.
	 * The way to work around this is to update a flag to indicate if the
	 * exception truly came from EL3. This flag is allocated in the cpu_context
	 * structure and located at offset "CTX_EL3STATE_OFFSET + CTX_IS_IN_EL3".
	 * This is not a bulletproof solution to the problem at hand because
	 * we assume the instructions following "isb" that help to update the
	 * flag execute without causing further exceptions.
	 */

	/* ---------------------------------------------------------------------
	 * This macro handles Asynchronous External Aborts.
	 * ---------------------------------------------------------------------
	 */
	.macro handle_async_ea
	/*
	 * Use a barrier to synchronize pending external aborts.
	 */
	dsb	sy

	/* Unmask the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/* Use ISB for the above unmask operation to take effect immediately */
	isb

	/* Refer to Note 1 */
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	mov	x30, #1
	str	x30, [sp, #CTX_EL3STATE_OFFSET + CTX_IS_IN_EL3]
	dmb	sy

	b	handle_lower_el_async_ea
	.endm

	/*
	 * This macro checks if the exception was taken due to an SError in EL3
	 * or because of pending asynchronous external aborts from a lower EL
	 * that got triggered due to explicit synchronization in EL3. Refer to
	 * Note 1.
	 */
	.macro check_if_serror_from_EL3
	/* Assumes SP_EL3 on entry */
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	ldr	x30, [sp, #CTX_EL3STATE_OFFSET + CTX_IS_IN_EL3]
	cbnz	x30, exp_from_EL3

	/* Handle asynchronous external abort from lower EL */
	b	handle_lower_el_async_ea

exp_from_EL3:
	/* Jump to plat_handle_el3_ea which does not return */
	.endm
#endif

	/* ---------------------------------------------------------------------
	 * This macro handles Synchronous exceptions.
	 * Only SMC exceptions are supported.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_sync_exception
#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * Read the timestamp value and store it in per-cpu data. The value
	 * will be extracted from per-cpu data by the C level SMC handler and
	 * saved to the PMF timestamp region.
	 */
	mrs	x30, cntpct_el0
	str	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	mrs	x29, tpidr_el3
	str	x30, [x29, #CPU_DATA_PMF_TS0_OFFSET]
	ldr	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif

	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Handle SMC exceptions separately from other synchronous exceptions */
	cmp	x30, #EC_AARCH32_SMC
	b.eq	smc_handler32

	cmp	x30, #EC_AARCH64_SMC
	b.eq	smc_handler64

	/* Synchronous exceptions other than the above are assumed to be EA */
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	b	enter_lower_el_sync_ea
	.endm


	/* ---------------------------------------------------------------------
	 * This macro handles FIQ or IRQ interrupts i.e. EL3, S-EL1 and NS
	 * interrupts.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_interrupt_exception label

	/*
	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
	 * If Secure Cycle Counter is not disabled in MDCR_EL3 when
	 * ARMv8.5-PMU is implemented, save PMCR_EL0 and disable Cycle Counter.
	 */
	bl	save_gp_pmcr_pauth_regs

#if ENABLE_PAUTH
	/* Load and program APIAKey firmware key */
	bl	pauth_load_bl31_apiakey
#endif

	/* Save the EL3 system registers needed to return from this exception */
	mrs	x0, spsr_el3
	mrs	x1, elr_el3
	stp	x0, x1, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/* Switch to the runtime stack i.e. SP_EL0 */
	ldr	x2, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
	mov	x20, sp
	msr	spsel, #MODE_SP_EL0
	mov	sp, x2

	/*
	 * Find out whether this is a valid interrupt type.
	 * If the interrupt controller reports a spurious interrupt then return
	 * to where we came from.
	 */
	bl	plat_ic_get_pending_interrupt_type
	cmp	x0, #INTR_TYPE_INVAL
	b.eq	interrupt_exit_\label

	/*
	 * Get the registered handler for this interrupt type.
	 * A NULL return value could be because of the following conditions:
	 *
	 * a. An interrupt of a type was routed correctly but a handler for its
	 *    type was not registered.
	 *
	 * b. An interrupt of a type was not routed correctly so a handler for
	 *    its type was not registered.
	 *
	 * c. An interrupt of a type was routed correctly to EL3, but was
	 *    deasserted before its pending state could be read. Another
	 *    interrupt of a different type pended at the same time and its
	 *    type was reported as pending instead. However, a handler for this
	 *    type was not registered.
	 *
	 * a. and b. can only happen due to a programming error. The
	 * occurrence of c. could be beyond the control of Trusted Firmware.
	 * It makes sense to return from this exception instead of reporting an
	 * error.
	 */
	bl	get_interrupt_type_handler
	cbz	x0, interrupt_exit_\label
	mov	x21, x0

	mov	x0, #INTR_ID_UNAVAILABLE

	/* Set the current security state in the 'flags' parameter */
	mrs	x2, scr_el3
	ubfx	x1, x2, #0, #1

	/* Restore the reference to the 'handle' i.e. SP_EL3 */
	mov	x2, x20

	/* x3 will point to a cookie (not used now) */
	mov	x3, xzr

	/* Call the interrupt type handler */
	blr	x21

interrupt_exit_\label:
	/* Return from exception, possibly in a different security state */
	b	el3_exit

	.endm


vector_base runtime_exceptions

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_el0
#ifdef MONITOR_TRAPS
	stp x29, x30, [sp, #-16]!

	mrs x30, esr_el3
	ubfx x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Check for BRK */
	cmp x30, #EC_BRK
	b.eq brk_handler

	ldp x29, x30, [sp], #16
#endif /* MONITOR_TRAPS */

	/* We don't expect any synchronous exceptions from EL3 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_el0

vector_entry irq_sp_el0
	/*
	 * EL3 code is non-reentrant. Any asynchronous exception is a serious
	 * error. Loop infinitely.
	 */
	b	report_unhandled_interrupt
end_vector_entry irq_sp_el0


vector_entry fiq_sp_el0
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_el0


vector_entry serror_sp_el0
	no_ret	plat_handle_el3_ea
end_vector_entry serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_elx
	/*
	 * This exception will trigger if anything went wrong during a previous
	 * exception entry or exit, or while handling an earlier unexpected
	 * synchronous exception. There is a high probability that SP_EL3 is
	 * corrupted.
	 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_elx

vector_entry irq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry irq_sp_elx

vector_entry fiq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_elx

vector_entry serror_sp_elx
#if !RAS_EXTENSION
	check_if_serror_from_EL3
#endif
	no_ret	plat_handle_el3_ea
end_vector_entry serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch64
	/*
	 * This exception vector will most commonly be the entry point for SMCs
	 * and traps that are unhandled at lower ELs. SP_EL3 should point to a
	 * valid cpu context where the general purpose and system register
	 * state can be saved.
	 */
	apply_at_speculative_wa
	check_and_unmask_ea
	handle_sync_exception
end_vector_entry sync_exception_aarch64

vector_entry irq_aarch64
	apply_at_speculative_wa
	check_and_unmask_ea
	handle_interrupt_exception irq_aarch64
end_vector_entry irq_aarch64

vector_entry fiq_aarch64
	apply_at_speculative_wa
	check_and_unmask_ea
	handle_interrupt_exception fiq_aarch64
end_vector_entry fiq_aarch64

vector_entry serror_aarch64
	apply_at_speculative_wa
#if RAS_EXTENSION
	msr	daifclr, #DAIF_ABT_BIT
	b	enter_lower_el_async_ea
#else
	handle_async_ea
#endif
end_vector_entry serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch32
	/*
	 * This exception vector will most commonly be the entry point for SMCs
	 * and traps that are unhandled at lower ELs. SP_EL3 should point to a
	 * valid cpu context where the general purpose and system register
	 * state can be saved.
	 */
	apply_at_speculative_wa
	check_and_unmask_ea
	handle_sync_exception
end_vector_entry sync_exception_aarch32

vector_entry irq_aarch32
	apply_at_speculative_wa
	check_and_unmask_ea
	handle_interrupt_exception irq_aarch32
end_vector_entry irq_aarch32

vector_entry fiq_aarch32
	apply_at_speculative_wa
	check_and_unmask_ea
	handle_interrupt_exception fiq_aarch32
end_vector_entry fiq_aarch32

vector_entry serror_aarch32
	apply_at_speculative_wa
#if RAS_EXTENSION
	msr	daifclr, #DAIF_ABT_BIT
	b	enter_lower_el_async_ea
#else
	handle_async_ea
#endif
end_vector_entry serror_aarch32

#ifdef MONITOR_TRAPS
	.section .rodata.brk_string, "aS"
brk_location:
	.asciz "Error at instruction 0x"
brk_message:
	.asciz "Unexpected BRK instruction with value 0x"
#endif /* MONITOR_TRAPS */

	/* ---------------------------------------------------------------------
	 * The following code handles secure monitor calls.
	 * Depending upon the execution state from which the SMC has been
	 * invoked, it frees some general purpose registers to perform the
	 * remaining tasks. These involve finding the runtime service handler
	 * that is the target of the SMC and switching to the runtime stack
	 * (SP_EL0) before calling the handler.
	 *
	 * Note that x30 has been explicitly saved and can be used here.
	 * ---------------------------------------------------------------------
	 */
func smc_handler
smc_handler32:
	/* Check whether AArch32 issued an SMC64 */
	tbnz	x0, #FUNCID_CC_SHIFT, smc_prohibited

smc_handler64:
	/* NOTE: The code below must preserve x0-x4 */

	/*
	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
	 * If Secure Cycle Counter is not disabled in MDCR_EL3 when
	 * ARMv8.5-PMU is implemented, save PMCR_EL0 and disable Cycle Counter.
	 */
	bl	save_gp_pmcr_pauth_regs

#if ENABLE_PAUTH
	/* Load and program APIAKey firmware key */
	bl	pauth_load_bl31_apiakey
#endif

	/*
	 * Populate the parameters for the SMC handler.
	 * We already have x0-x4 in place. x5 will point to a cookie (not used
	 * now). x6 will point to the context structure (SP_EL3) and x7 will
	 * contain flags we need to pass to the handler.
	 */
	mov	x5, xzr
	mov	x6, sp

	/*
	 * Restore the saved C runtime stack value which will become the new
	 * SP_EL0, i.e. the EL3 runtime stack. It was saved in the 'cpu_context'
	 * structure prior to the last ERET from EL3.
	 */
	ldr	x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/* Switch to SP_EL0 */
	msr	spsel, #MODE_SP_EL0

	/*
	 * Save SPSR_EL3, ELR_EL3 and SCR_EL3 in case there is a world
	 * switch during SMC handling.
	 * TODO: Revisit if all system registers can be saved later.
	 */
	mrs	x16, spsr_el3
	mrs	x17, elr_el3
	mrs	x18, scr_el3
	stp	x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	str	x18, [x6, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]

	/* Copy SCR_EL3.NS bit to the flag to indicate caller's security */
	bfi	x7, x18, #0, #1

	mov	sp, x12

	/* Get the unique owning entity number */
	ubfx	x16, x0, #FUNCID_OEN_SHIFT, #FUNCID_OEN_WIDTH
	ubfx	x15, x0, #FUNCID_TYPE_SHIFT, #FUNCID_TYPE_WIDTH
	orr	x16, x16, x15, lsl #FUNCID_OEN_WIDTH

	/* Load descriptor index from array of indices */
	adrp	x14, rt_svc_descs_indices
	add	x14, x14, :lo12:rt_svc_descs_indices
	ldrb	w15, [x14, x16]

	/* Any index greater than 127 is invalid. Check bit 7. */
	tbnz	w15, 7, smc_unknown

	/*
	 * Get the descriptor using the index
	 * x11 = (base + off), w15 = index
	 *
	 * handler = (base + off) + (index << log2(size))
	 */
	adr	x11, (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE)
	lsl	w10, w15, #RT_SVC_SIZE_LOG2
	ldr	x15, [x11, w10, uxtw]

	/*
	 * Call the Secure Monitor Call handler and then drop directly into
	 * el3_exit() which will program any remaining architectural state
	 * prior to issuing the ERET to the desired lower EL.
	 */
#if DEBUG
	cbz	x15, rt_svc_fw_critical_error
#endif
	blr	x15

	b	el3_exit

smc_unknown:
	/*
	 * Unknown SMC call. Populate return value with SMC_UNK and call
	 * el3_exit() which will restore the remaining architectural state,
	 * i.e. SYS, GP and PAuth registers (if any), prior to issuing the ERET
	 * to the desired lower EL.
	 */
	mov	x0, #SMC_UNK
	str	x0, [x6, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	b	el3_exit

smc_prohibited:
	restore_ptw_el1_sys_regs
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	mov	x0, #SMC_UNK
	exception_return

#if DEBUG
rt_svc_fw_critical_error:
	/* Switch to SP_ELx */
	msr	spsel, #MODE_SP_ELX
	no_ret	report_unhandled_exception
#endif
endfunc smc_handler

	/* ---------------------------------------------------------------------
	 * The following code handles exceptions caused by BRK instructions.
	 * Following a BRK instruction, the only real valid course of action is
	 * to print some information and panic, as the code that caused it is
	 * likely in an inconsistent internal state.
	 *
	 * This is initially intended to be used in conjunction with
	 * __builtin_trap.
	 * ---------------------------------------------------------------------
	 */
#ifdef MONITOR_TRAPS
func brk_handler
	/* Extract the ISS */
	mrs	x10, esr_el3
	ubfx	x10, x10, #ESR_ISS_SHIFT, #ESR_ISS_LENGTH

	/* Ensure the console is initialized */
	bl	plat_crash_console_init

	adr	x4, brk_location
	bl	asm_print_str
	mrs	x4, elr_el3
	bl	asm_print_hex
	bl	asm_print_newline

	adr	x4, brk_message
	bl	asm_print_str
	mov	x4, x10
	mov	x5, #28
	bl	asm_print_hex_bits
	bl	asm_print_newline

	no_ret	plat_panic_handler
endfunc brk_handler
#endif /* MONITOR_TRAPS */