/*
 * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#ifndef ASM_MACROS_S
#define ASM_MACROS_S

#include <arch.h>
#include <common/asm_macros_common.S>
#include <lib/spinlock.h>

/*
 * TLBI instruction with type specifier that implements the workaround for
 * erratum 813419 of Cortex-A57.
 */
#if ERRATA_A57_813419
#define TLB_INVALIDATE(_reg, _coproc) \
	stcopr	_reg, _coproc;	\
	dsb	ish;		\
	stcopr	_reg, _coproc
#else
#define TLB_INVALIDATE(_reg, _coproc) \
	stcopr	_reg, _coproc
#endif

#define WORD_SIZE	4

	/*
	 * Coprocessor register accessors
	 */
	.macro ldcopr reg, coproc, opc1, CRn, CRm, opc2
	mrc	\coproc, \opc1, \reg, \CRn, \CRm, \opc2
	.endm

	.macro ldcopr16 reg1, reg2, coproc, opc1, CRm
	mrrc	\coproc, \opc1, \reg1, \reg2, \CRm
	.endm

	.macro stcopr reg, coproc, opc1, CRn, CRm, opc2
	mcr	\coproc, \opc1, \reg, \CRn, \CRm, \opc2
	.endm

	.macro stcopr16 reg1, reg2, coproc, opc1, CRm
	mcrr	\coproc, \opc1, \reg1, \reg2, \CRm
	.endm

	/* Cache line size helpers */
	.macro dcache_line_size reg, tmp
	ldcopr	\tmp, CTR
	ubfx	\tmp, \tmp, #CTR_DMINLINE_SHIFT, #CTR_DMINLINE_WIDTH
	mov	\reg, #WORD_SIZE
	lsl	\reg, \reg, \tmp
	.endm

	.macro icache_line_size reg, tmp
	ldcopr	\tmp, CTR
	and	\tmp, \tmp, #CTR_IMINLINE_MASK
	mov	\reg, #WORD_SIZE
	lsl	\reg, \reg, \tmp
	.endm

	/*
	 * Declare the exception vector table, enforcing it is aligned on a
	 * 32 byte boundary.
	 */
	.macro vector_base label
	.section .vectors, "ax"
	.align 5
	\label:
	.endm

	/*
	 * This macro calculates the base address of the current CPU's
	 * multiprocessor (MP) stack using the plat_my_core_pos() index, the
	 * name of the stack storage and the size of each stack.
	 * Out: r0 = physical address of stack base
	 * Clobber: r14, r1, r2
	 */
	.macro get_my_mp_stack _name, _size
	bl	plat_my_core_pos
	ldr	r2, =(\_name + \_size)
	mov	r1, #\_size
	mla	r0, r0, r1, r2
	.endm

	/*
	 * This macro calculates the base address of a uniprocessor (UP) stack
	 * using the name of the stack storage and the size of the stack.
	 * Out: r0 = physical address of stack base
	 */
	.macro get_up_stack _name, _size
	ldr	r0, =(\_name + \_size)
	.endm

#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_VIRTUALIZATION)
	/*
	 * ARMv7 cores without the Virtualization extension do not support the
	 * eret instruction.
	 */
	.macro eret
	movs	pc, lr
	.endm
#endif

#if (ARM_ARCH_MAJOR == 7)
	/* ARMv7 does not support the stl instruction */
	.macro stl _reg, _write_lock
	dmb
	str	\_reg, \_write_lock
	dsb
	.endm
#endif

	/*
	 * Helper macro to generate the best mov/movw/movt combinations
	 * according to the value to be moved.
	 */
	.macro mov_imm _reg, _val
		.if ((\_val) & 0xffff0000) == 0
			mov	\_reg, #(\_val)
		.else
			movw	\_reg, #((\_val) & 0xffff)
			movt	\_reg, #((\_val) >> 16)
		.endif
	.endm
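	/*
	 * Illustrative expansions of mov_imm (a sketch, assuming GNU as
	 * evaluates the immediate at assembly time): a value whose top
	 * 16 bits are zero produces a single mov, while a wider value is
	 * split across a movw/movt pair.
	 *
	 *	mov_imm	r0, 0x1000	->	mov	r0, #0x1000
	 *	mov_imm	r0, 0x12345678	->	movw	r0, #0x5678
	 *					movt	r0, #0x1234
	 */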
	/*
	 * Macro to mark instances where we're jumping to a function and don't
	 * expect a return. To provide the function being jumped to with
	 * additional information, we use the 'bl' instruction to jump rather
	 * than 'b'.
	 *
	 * Debuggers infer the location of a call from where LR points to,
	 * which is usually the instruction after 'bl'. If this macro expansion
	 * happens to be the last location in a function, that'll cause the LR
	 * to point to a location beyond the function, thereby misleading the
	 * debugger's back trace. We therefore insert a 'nop' after the function
	 * call for debug builds, unless the 'skip_nop' parameter is non-zero.
	 */
	.macro no_ret _func:req, skip_nop=0
	bl	\_func
#if DEBUG
	.ifeq \skip_nop
	nop
	.endif
#endif
	.endm

	/*
	 * Reserve space for a spin lock in an assembly file.
	 */
	.macro define_asm_spinlock _name:req
	.align	SPINLOCK_ASM_ALIGN
	\_name:
	.space	SPINLOCK_ASM_SIZE
	.endm

	/*
	 * Helper macro to OR the bottom 32 bits of `_val` into `_reg_l`
	 * and the top 32 bits of `_val` into `_reg_h`. If either the bottom
	 * or top word of `_val` is zero, the corresponding OR operation
	 * is skipped.
	 */
	.macro orr64_imm _reg_l, _reg_h, _val
		.if (\_val >> 32)
			orr	\_reg_h, \_reg_h, #(\_val >> 32)
		.endif
		.if (\_val & 0xffffffff)
			orr	\_reg_l, \_reg_l, #(\_val & 0xffffffff)
		.endif
	.endm

	/*
	 * Helper macro to bitwise-clear bits in `_reg_l` and
	 * `_reg_h` given a 64 bit immediate `_val`. The set bits
	 * in the bottom word of `_val` dictate which bits from
	 * `_reg_l` should be cleared. Similarly, the set bits in
	 * the top word of `_val` dictate which bits from `_reg_h`
	 * should be cleared. If either the bottom or top word of
	 * `_val` is zero, the corresponding BIC operation is skipped.
	 */
	.macro bic64_imm _reg_l, _reg_h, _val
		.if (\_val >> 32)
			bic	\_reg_h, \_reg_h, #(\_val >> 32)
		.endif
		.if (\_val & 0xffffffff)
			bic	\_reg_l, \_reg_l, #(\_val & 0xffffffff)
		.endif
	.endm

	/*
	 * Helper macro for carrying out division in software when hardware
	 * division is not supported. \top holds the dividend on entry to the
	 * macro and the remainder once it completes. \bot holds the divisor.
	 * \div receives the quotient and \temp is a temporary register used
	 * in the calculation.
	 * The division algorithm has been obtained from:
	 * http://www.keil.com/support/man/docs/armasm/armasm_dom1359731155623.htm
	 */
	.macro softudiv div:req, top:req, bot:req, temp:req

	mov	\temp, \bot
	cmp	\temp, \top, lsr #1
div1:
	movls	\temp, \temp, lsl #1
	cmp	\temp, \top, lsr #1
	bls	div1
	mov	\div, #0

div2:
	cmp	\top, \temp
	subcs	\top, \top, \temp
	adc	\div, \div, \div
	mov	\temp, \temp, lsr #1
	cmp	\temp, \bot
	bhs	div2
	.endm
#endif /* ASM_MACROS_S */
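/*
 * Usage sketch for softudiv (illustrative register choices, not mandated
 * by the macro): divide r0 by r1, placing the quotient in r2 and using
 * r3 as scratch. With r0 = 100 and r1 = 7 on entry, the macro exits with
 * r2 = 14 and r0 = 2, since 100 = 14 * 7 + 2.
 *
 *	mov	r0, #100
 *	mov	r1, #7
 *	softudiv r2, r0, r1, r3
 */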