//
// Copyright (c) 2011 - 2014 ARM LTD. All rights reserved.<BR>
// Portion of Copyright (c) 2014 NVIDIA Corporation. All rights reserved.<BR>
// Copyright (c) 2016 HP Development Company, L.P.
//
// This program and the accompanying materials
// are licensed and made available under the terms and conditions of the BSD License
// which accompanies this distribution. The full text of the license may be found at
// http://opensource.org/licenses/bsd-license.php
//
// THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
// WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
//
//------------------------------------------------------------------------------

#include <Chipset/AArch64.h>
#include <Library/PcdLib.h>
#include <AsmMacroIoLibV8.h>
#include <Protocol/DebugSupport.h> // for exception type definitions

/*
  This is the stack constructed by the exception handler (low address to high address).
  X0 to FAR makes up the EFI_SYSTEM_CONTEXT for AArch64.

  UINT64  X0;     0x000
  UINT64  X1;     0x008
  UINT64  X2;     0x010
  UINT64  X3;     0x018
  UINT64  X4;     0x020
  UINT64  X5;     0x028
  UINT64  X6;     0x030
  UINT64  X7;     0x038
  UINT64  X8;     0x040
  UINT64  X9;     0x048
  UINT64  X10;    0x050
  UINT64  X11;    0x058
  UINT64  X12;    0x060
  UINT64  X13;    0x068
  UINT64  X14;    0x070
  UINT64  X15;    0x078
  UINT64  X16;    0x080
  UINT64  X17;    0x088
  UINT64  X18;    0x090
  UINT64  X19;    0x098
  UINT64  X20;    0x0a0
  UINT64  X21;    0x0a8
  UINT64  X22;    0x0b0
  UINT64  X23;    0x0b8
  UINT64  X24;    0x0c0
  UINT64  X25;    0x0c8
  UINT64  X26;    0x0d0
  UINT64  X27;    0x0d8
  UINT64  X28;    0x0e0
  UINT64  FP;     0x0e8   // x29 - Frame Pointer
  UINT64  LR;     0x0f0   // x30 - Link Register
  UINT64  SP;     0x0f8   // x31 - Stack Pointer

  // FP/SIMD Registers. 128bit if used as Q-regs.
  UINT64  V0[2];  0x100
  UINT64  V1[2];  0x110
  UINT64  V2[2];  0x120
  UINT64  V3[2];  0x130
  UINT64  V4[2];  0x140
  UINT64  V5[2];  0x150
  UINT64  V6[2];  0x160
  UINT64  V7[2];  0x170
  UINT64  V8[2];  0x180
  UINT64  V9[2];  0x190
  UINT64  V10[2]; 0x1a0
  UINT64  V11[2]; 0x1b0
  UINT64  V12[2]; 0x1c0
  UINT64  V13[2]; 0x1d0
  UINT64  V14[2]; 0x1e0
  UINT64  V15[2]; 0x1f0
  UINT64  V16[2]; 0x200
  UINT64  V17[2]; 0x210
  UINT64  V18[2]; 0x220
  UINT64  V19[2]; 0x230
  UINT64  V20[2]; 0x240
  UINT64  V21[2]; 0x250
  UINT64  V22[2]; 0x260
  UINT64  V23[2]; 0x270
  UINT64  V24[2]; 0x280
  UINT64  V25[2]; 0x290
  UINT64  V26[2]; 0x2a0
  UINT64  V27[2]; 0x2b0
  UINT64  V28[2]; 0x2c0
  UINT64  V29[2]; 0x2d0
  UINT64  V30[2]; 0x2e0
  UINT64  V31[2]; 0x2f0

  // System Context
  UINT64  ELR;    0x300   // Exception Link Register
  UINT64  SPSR;   0x308   // Saved Processor Status Register
  UINT64  FPSR;   0x310   // Floating Point Status Register
  UINT64  ESR;    0x318   // Exception Syndrome Register
  UINT64  FAR;    0x320   // Fault Address Register
  UINT64  Padding;0x328   // Required for stack alignment
*/
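
//
// For reference only: the frame laid out above is what CommonCExceptionHandler
// receives as the EFI_SYSTEM_CONTEXT union from Protocol/DebugSupport.h. A
// rough sketch of how a handler with that signature might inspect it
// (illustrative only; ExampleHandler is a made-up name and DebugLib is assumed
// to be available):
//
//   VOID
//   EFIAPI
//   ExampleHandler (
//     IN     EFI_EXCEPTION_TYPE  ExceptionType,
//     IN OUT EFI_SYSTEM_CONTEXT  SystemContext
//     )
//   {
//     EFI_SYSTEM_CONTEXT_AARCH64  *Ctx;
//
//     Ctx = SystemContext.SystemContextAArch64;
//     DEBUG ((DEBUG_ERROR, "Exception 0x%lx at ELR 0x%lx (ESR 0x%lx, FAR 0x%lx)\n",
//       (UINT64)ExceptionType, Ctx->ELR, Ctx->ESR, Ctx->FAR));
//   }
//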

GCC_ASM_EXPORT(ExceptionHandlersEnd)
GCC_ASM_EXPORT(CommonCExceptionHandler)

.text

#define GP_CONTEXT_SIZE   (32 *  8)
#define FP_CONTEXT_SIZE   (32 * 16)
#define SYS_CONTEXT_SIZE  ( 6 *  8) // 5 SYS regs + Alignment requirement (ie: the stack must be aligned on 0x10)

//
// There are two methods for installing AArch64 exception vectors:
//  1. Install a copy of the vectors to a location specified by a PCD
//  2. Write VBAR directly, requiring that vectors have proper alignment (2K)
// The conditional below adjusts the alignment requirement based on which
// exception vector initialization method is used.
//

#if defined(ARM_RELOCATE_VECTORS)
GCC_ASM_EXPORT(ExceptionHandlersStart)
ASM_PFX(ExceptionHandlersStart):
#else
VECTOR_BASE(ExceptionHandlersStart)
#endif
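
//
// For reference only: ExceptionHandlersStart and ExceptionHandlersEnd delimit
// the vector table that platform code installs. A rough sketch of the two
// installation methods described above (illustrative; it assumes ArmLib's
// ArmWriteVBar() and an ArmPkg vector-base PCD such as PcdCpuVectorBaseAddress;
// the consuming library defines the exact flow):
//
//   // Method 1 (ARM_RELOCATE_VECTORS build): copy the vectors to a suitably
//   // aligned address supplied by a platform PCD, then point VBAR at the copy.
//   UINTN  VectorBase;
//
//   VectorBase = (UINTN)PcdGet64 (PcdCpuVectorBaseAddress);  // assumed PCD name/width
//   CopyMem ((VOID *)VectorBase, (VOID *)(UINTN)ExceptionHandlersStart,
//            (UINTN)ExceptionHandlersEnd - (UINTN)ExceptionHandlersStart);
//   ArmWriteVBar (VectorBase);
//
//   // Method 2: point VBAR directly at the 2 KB aligned table in this file.
//   ArmWriteVBar ((UINTN)ExceptionHandlersStart);
//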

  .macro  ExceptionEntry, val
  // Move the stack pointer so we can reach our structure with the stp/str instructions.
  sub  sp, sp, #(FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE)

  // Push some GP registers so we can record the exception context
  stp  x0, x1, [sp, #-GP_CONTEXT_SIZE]!
  stp  x2, x3, [sp, #0x10]
  stp  x4, x5, [sp, #0x20]
  stp  x6, x7, [sp, #0x30]

  EL1_OR_EL2_OR_EL3(x1)
1:mrs  x2, elr_el1   // Exception Link Register
  mrs  x3, spsr_el1  // Saved Processor Status Register 32bit
  mrs  x5, esr_el1   // EL1 Exception syndrome register 32bit
  mrs  x6, far_el1   // EL1 Fault Address Register
  b    4f

2:mrs  x2, elr_el2   // Exception Link Register
  mrs  x3, spsr_el2  // Saved Processor Status Register 32bit
  mrs  x5, esr_el2   // EL2 Exception syndrome register 32bit
  mrs  x6, far_el2   // EL2 Fault Address Register
  b    4f

3:mrs  x2, elr_el3   // Exception Link Register
  mrs  x3, spsr_el3  // Saved Processor Status Register 32bit
  mrs  x5, esr_el3   // EL3 Exception syndrome register 32bit
  mrs  x6, far_el3   // EL3 Fault Address Register

4:mrs  x4, fpsr      // Floating point Status Register 32bit

  // Record the type of exception that occurred.
  mov  x0, #\val

  // Jump to our general handler to deal with all the common parts and process the exception.
#if defined(ARM_RELOCATE_VECTORS)
  ldr  x1, =ASM_PFX(CommonExceptionEntry)
  br   x1
  .ltorg
#else
  b    ASM_PFX(CommonExceptionEntry)
#endif
  .endm

//
// Current EL with SP0 : 0x0 - 0x180
//
VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SP0_SYNC)
ASM_PFX(SynchronousExceptionSP0):
  ExceptionEntry  EXCEPT_AARCH64_SYNCHRONOUS_EXCEPTIONS

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SP0_IRQ)
ASM_PFX(IrqSP0):
  ExceptionEntry  EXCEPT_AARCH64_IRQ

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SP0_FIQ)
ASM_PFX(FiqSP0):
  ExceptionEntry  EXCEPT_AARCH64_FIQ

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SP0_SERR)
ASM_PFX(SErrorSP0):
  ExceptionEntry  EXCEPT_AARCH64_SERROR

//
// Current EL with SPx : 0x200 - 0x380
//
VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SPx_SYNC)
ASM_PFX(SynchronousExceptionSPx):
  ExceptionEntry  EXCEPT_AARCH64_SYNCHRONOUS_EXCEPTIONS

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SPx_IRQ)
ASM_PFX(IrqSPx):
  ExceptionEntry  EXCEPT_AARCH64_IRQ

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SPx_FIQ)
ASM_PFX(FiqSPx):
  ExceptionEntry  EXCEPT_AARCH64_FIQ

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SPx_SERR)
ASM_PFX(SErrorSPx):
  ExceptionEntry  EXCEPT_AARCH64_SERROR

//
// Lower EL using AArch64 : 0x400 - 0x580
//
VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A64_SYNC)
ASM_PFX(SynchronousExceptionA64):
  ExceptionEntry  EXCEPT_AARCH64_SYNCHRONOUS_EXCEPTIONS

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A64_IRQ)
ASM_PFX(IrqA64):
  ExceptionEntry  EXCEPT_AARCH64_IRQ

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A64_FIQ)
ASM_PFX(FiqA64):
  ExceptionEntry  EXCEPT_AARCH64_FIQ

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A64_SERR)
ASM_PFX(SErrorA64):
  ExceptionEntry  EXCEPT_AARCH64_SERROR

//
// Lower EL using AArch32 : 0x600 - 0x780
//
VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A32_SYNC)
ASM_PFX(SynchronousExceptionA32):
  ExceptionEntry  EXCEPT_AARCH64_SYNCHRONOUS_EXCEPTIONS

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A32_IRQ)
ASM_PFX(IrqA32):
  ExceptionEntry  EXCEPT_AARCH64_IRQ

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A32_FIQ)
ASM_PFX(FiqA32):
  ExceptionEntry  EXCEPT_AARCH64_FIQ

VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A32_SERR)
ASM_PFX(SErrorA32):
  ExceptionEntry  EXCEPT_AARCH64_SERROR

VECTOR_END(ExceptionHandlersStart)

ASM_PFX(ExceptionHandlersEnd):


ASM_PFX(CommonExceptionEntry):

  // Stack the remaining GP registers
  stp  x8, x9,   [sp, #0x40]
  stp  x10, x11, [sp, #0x50]
  stp  x12, x13, [sp, #0x60]
  stp  x14, x15, [sp, #0x70]
  stp  x16, x17, [sp, #0x80]
  stp  x18, x19, [sp, #0x90]
  stp  x20, x21, [sp, #0xa0]
  stp  x22, x23, [sp, #0xb0]
  stp  x24, x25, [sp, #0xc0]
  stp  x26, x27, [sp, #0xd0]
  stp  x28, x29, [sp, #0xe0]
  add  x28, sp, #GP_CONTEXT_SIZE + FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE
  stp  x30, x28, [sp, #0xf0]

  // Save the SYS regs
  stp  x2, x3, [x28, #-SYS_CONTEXT_SIZE]!
  stp  x4, x5, [x28, #0x10]
  str  x6,     [x28, #0x20]

  // Push FP regs to Stack.
  stp  q0, q1,   [x28, #-FP_CONTEXT_SIZE]!
  stp  q2, q3,   [x28, #0x20]
  stp  q4, q5,   [x28, #0x40]
  stp  q6, q7,   [x28, #0x60]
  stp  q8, q9,   [x28, #0x80]
  stp  q10, q11, [x28, #0xa0]
  stp  q12, q13, [x28, #0xc0]
  stp  q14, q15, [x28, #0xe0]
  stp  q16, q17, [x28, #0x100]
  stp  q18, q19, [x28, #0x120]
  stp  q20, q21, [x28, #0x140]
  stp  q22, q23, [x28, #0x160]
  stp  q24, q25, [x28, #0x180]
  stp  q26, q27, [x28, #0x1a0]
  stp  q28, q29, [x28, #0x1c0]
  stp  q30, q31, [x28, #0x1e0]

  // x0 still holds the exception type.
  // Set x1 to point to the top of our struct on the Stack
  mov  x1, sp

// CommonCExceptionHandler (
//   IN     EFI_EXCEPTION_TYPE  ExceptionType,   X0
//   IN OUT EFI_SYSTEM_CONTEXT  SystemContext    X1
//   )

  // Call the handler as defined above

  // For now we spin in the handler if we received an abort of some kind.
  // We do not try to recover.
  bl   ASM_PFX(CommonCExceptionHandler) // Call exception handler
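
  //
  // Everything below undoes the context save performed on entry: most of the
  // GP registers are popped first, then the FP/SIMD registers, then FPSR is
  // restored, IRQ and FIQ are masked, the per-EL ELR/SPSR are rewritten, and
  // finally x28-x30 and the stack pointer are restored before returning with
  // eret.
  //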

  // Pop as many GP regs as we can before entering the critical section below
  ldp  x2, x3,   [sp, #0x10]
  ldp  x4, x5,   [sp, #0x20]
  ldp  x6, x7,   [sp, #0x30]
  ldp  x8, x9,   [sp, #0x40]
  ldp  x10, x11, [sp, #0x50]
  ldp  x12, x13, [sp, #0x60]
  ldp  x14, x15, [sp, #0x70]
  ldp  x16, x17, [sp, #0x80]
  ldp  x18, x19, [sp, #0x90]
  ldp  x20, x21, [sp, #0xa0]
  ldp  x22, x23, [sp, #0xb0]
  ldp  x24, x25, [sp, #0xc0]
  ldp  x26, x27, [sp, #0xd0]
  ldp  x0, x1,   [sp], #0xe0

  // Pop FP regs from Stack.
  ldp  q2, q3,   [x28, #0x20]
  ldp  q4, q5,   [x28, #0x40]
  ldp  q6, q7,   [x28, #0x60]
  ldp  q8, q9,   [x28, #0x80]
  ldp  q10, q11, [x28, #0xa0]
  ldp  q12, q13, [x28, #0xc0]
  ldp  q14, q15, [x28, #0xe0]
  ldp  q16, q17, [x28, #0x100]
  ldp  q18, q19, [x28, #0x120]
  ldp  q20, q21, [x28, #0x140]
  ldp  q22, q23, [x28, #0x160]
  ldp  q24, q25, [x28, #0x180]
  ldp  q26, q27, [x28, #0x1a0]
  ldp  q28, q29, [x28, #0x1c0]
  ldp  q30, q31, [x28, #0x1e0]
  ldp  q0, q1,   [x28], #FP_CONTEXT_SIZE

  // Pop the SYS regs we need
  ldp  x29, x30, [x28]
  ldr  x28, [x28, #0x10]
  msr  fpsr, x28

  //
  // Disable interrupts (IRQ and FIQ) before restoring context,
  // or else the context will be corrupted by interrupt reentrance.
  // The interrupt mask will be restored from SPSR by hardware when we call eret.
  //
  msr  daifset, #3
  isb

  EL1_OR_EL2_OR_EL3(x28)
1:msr  elr_el1, x29   // Exception Link Register
  msr  spsr_el1, x30  // Saved Processor Status Register 32bit
  b    4f
2:msr  elr_el2, x29   // Exception Link Register
  msr  spsr_el2, x30  // Saved Processor Status Register 32bit
  b    4f
3:msr  elr_el3, x29   // Exception Link Register
  msr  spsr_el3, x30  // Saved Processor Status Register 32bit
4:

  // Pop the remaining GP regs and return from the exception.
  ldr  x30, [sp, #0xf0 - 0xe0]
  ldp  x28, x29, [sp], #GP_CONTEXT_SIZE - 0xe0

  // Adjust SP to be where we started from when we came into the handler.
  // The handler cannot change the SP.
  add  sp, sp, #FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE

  eret
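
//------------------------------------------------------------------------------
// For reference only: UEFI drivers normally do not hook this code directly.
// They typically register per-exception callbacks through the CPU
// Architectural Protocol, and the platform's exception handler library
// dispatches CommonCExceptionHandler to those callbacks. A rough sketch
// (using EFI_CPU_ARCH_PROTOCOL from MdePkg's Protocol/Cpu.h; ExampleHandler
// is the made-up handler sketched at the top of this file, and error handling
// is omitted):
//
//   EFI_CPU_ARCH_PROTOCOL  *Cpu;
//
//   gBS->LocateProtocol (&gEfiCpuArchProtocolGuid, NULL, (VOID **)&Cpu);
//   Cpu->RegisterInterruptHandler (Cpu, EXCEPT_AARCH64_SYNCHRONOUS_EXCEPTIONS,
//                                  ExampleHandler);
//------------------------------------------------------------------------------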