/*
 * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
 * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this list of
 *    conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice, this list
 *    of conditions and the following disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors may be used
 *    to endorse or promote products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "asm.h"
#include "arch_config.h"

    .extern OsSaveSignalContext
    .extern OsSchedToUserReleaseLock
    .global OsTaskSchedule
    .global OsTaskContextLoad
    .global OsIrqHandler

    .fpu vfpv4

/* macros to align and unalign the stack on an 8-byte boundary for ABI compliance */
.macro STACK_ALIGN, reg
    MOV     \reg, sp
    TST     SP, #4
    SUBEQ   SP, #4
    PUSH    { \reg }
.endm

.macro STACK_RESTORE, reg
    POP     { \reg }
    MOV     sp, \reg
.endm

/* macros to save and restore fpu regs */
.macro PUSH_FPU_REGS reg1
#if !defined(LOSCFG_ARCH_FPU_DISABLE)
    VMRS    \reg1, FPEXC
    PUSH    {\reg1}
    VMRS    \reg1, FPSCR
    PUSH    {\reg1}
#if defined(LOSCFG_ARCH_FPU_VFP_D32)
    VPUSH   {D16-D31}
#endif
    VPUSH   {D0-D15}
#endif
.endm

.macro POP_FPU_REGS reg1
#if !defined(LOSCFG_ARCH_FPU_DISABLE)
    VPOP    {D0-D15}
#if defined(LOSCFG_ARCH_FPU_VFP_D32)
    VPOP    {D16-D31}
#endif
    POP     {\reg1}
    VMSR    FPSCR, \reg1
    POP     {\reg1}
    VMSR    FPEXC, \reg1
#endif
.endm

/*
 * R0: new task
 * R1: run task
 */
OsTaskSchedule:
    MRS     R2, CPSR
    STMFD   SP!, {R2}
    STMFD   SP!, {LR}
    STMFD   SP!, {LR}
    STMFD   SP!, {R12}

    /* skip R0-R3, USP, ULR and the reserved slots */
    SUB     SP, SP, #(8 * 4)

    /* push R4-R11 */
    STMFD   SP!, {R4-R11}

    /* save fpu registers */
    PUSH_FPU_REGS   R2

    /* store sp of the running task */
    STR     SP, [R1]

OsTaskContextLoad:
    /* clear the exclusive monitor set by ldrex */
    CLREX

    /* switch to the new task's sp */
    LDR     SP, [R0]

    /* restore fpu registers */
    POP_FPU_REGS    R2

    LDMFD   SP!, {R4-R11}
    LDR     R3, [SP, #(11 * 4)]
    AND     R0, R3, #CPSR_MASK_MODE
    CMP     R0, #CPSR_USER_MODE
    BNE     OsKernelTaskLoad

    MVN     R2, #CPSR_INT_DISABLE
    AND     R3, R3, R2
    STR     R3, [SP, #(11 * 4)]

#ifdef LOSCFG_KERNEL_SMP
    BL      OsSchedToUserReleaseLock
#endif

    /* skip the sp and reserved slots */
    ADD     SP, SP, #(2 * 4)
    LDMFD   SP, {R13, R14}^
    ADD     SP, SP, #(2 * 4)
    LDMFD   SP!, {R0-R3, R12, LR}
    RFEIA   SP!

OsKernelTaskLoad:
    ADD     SP, SP, #(4 * 4)
    LDMFD   SP!, {R0-R3, R12, LR}
    RFEIA   SP!

OsIrqHandler:
    SUB     LR, LR, #4

    /* Save pc and cpsr to the svc stack (SRS is supported on ARMv6 and later) */
    SRSFD   #0x13!
    /* disable irqs and switch to svc mode */
    CPSID   i, #0x13

#ifdef LOSCFG_KERNEL_PERF
    PUSH    {R0-R3, R12, LR}
    MOV     R0, LR
    MOV     R1, FP
    BL      OsPerfSetIrqRegs
    POP     {R0-R3, R12, LR}
#endif

    STMFD   SP!, {R0-R3, R12, LR}
    STMFD   SP, {R13, R14}^
    SUB     SP, SP, #(4 * 4)
    STR     R4, [SP, #0]

    /*
     * save fpu regs in case they are
     * altered by interrupt handlers.
     */
    PUSH_FPU_REGS   R0

    MOV     R4, SP
    EXC_SP_SET __svc_stack_top, OS_EXC_SVC_STACK_SIZE, R1, R2

    BLX     HalIrqHandler

    MOV     SP, R4

    /* process pending signals */
    BLX     OsTaskProcSignal
    BLX     OsSchedIrqEndCheckNeedSched

    /* restore fpu regs */
    POP_FPU_REGS    R0
    LDR     R4, [SP, #0]

#ifdef LOSCFG_KERNEL_VM
    /* Read the saved CPSR to determine which mode the CPU was in when the interrupt was taken */
    LDR     R3, [SP, #(11 * 4)]
    AND     R1, R3, #CPSR_MASK_MODE
    CMP     R1, #CPSR_USER_MODE
    BNE     1f

    MOV     R0, SP
    STR     R7, [SP, #0]
    /* sp - sizeof(IrqContext) */
    SUB     SP, SP, #(12 * 4)
    MOV     R1, SP
    BLX     OsSaveSignalContext
    MOV     SP, R0
1:
#endif
    ADD     SP, SP, #(2 * 4)
    /* load user sp and lr; the saved pc and cpsr are restored by RFEIA */
    LDMFD   SP, {R13, R14}^
    ADD     SP, SP, #(2 * 4)
    LDMFD   SP!, {R0-R3, R12, LR}
    RFEIA   SP!

FUNCTION(ArchSpinLock)
    mov     r1, #1
1:
    ldrex   r2, [r0]
    cmp     r2, #0
    wfene
    strexeq r2, r1, [r0]
    cmpeq   r2, #0
    bne     1b
    dmb
    bx      lr

FUNCTION(ArchSpinTrylock)
    mov     r1, #1
    mov     r2, r0
    ldrex   r0, [r2]
    cmp     r0, #0
    strexeq r0, r1, [r2]
    dmb
    bx      lr

FUNCTION(ArchSpinUnlock)
    mov     r1, #0
    dmb
    str     r1, [r0]
    dsb
    sev
    bx      lr
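
/*
 * Illustrative sketch only -- not part of the original file. It shows how a
 * caller is expected to drive the spinlock primitives above: the address of
 * the lock word is passed in R0, ArchSpinLock spins (sleeping in WFE while
 * contended) until it swaps the word from 0 to 1, ArchSpinTrylock returns 0
 * in R0 only when the lock was acquired, and ArchSpinUnlock writes 0 and
 * issues SEV to wake waiters. The function name and the "counter stored one
 * word after the lock" layout are hypothetical, chosen only for this example;
 * real callers live on the C side of the kernel.
 */
FUNCTION(ExampleSpinLockedIncrement)
    push    {r4, lr}
    mov     r4, r0              /* keep &lockWord in a callee-saved reg across the calls */
    bl      ArchSpinLock        /* r0 still holds &lockWord at this point */
    ldr     r1, [r4, #4]        /* hypothetical counter placed right after the lock word */
    add     r1, r1, #1
    str     r1, [r4, #4]
    mov     r0, r4
    bl      ArchSpinUnlock      /* dmb, store 0, dsb, sev */
    pop     {r4, pc}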