/*
 * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H

#include <asm/asm-offsets.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>

/*
 * Stack pushing/popping (register pairs only). Equivalent to store decrement
 * before, load increment after.
 */
	.macro	push, xreg1, xreg2
	stp	\xreg1, \xreg2, [sp, #-16]!
	.endm

	.macro	pop, xreg1, xreg2
	ldp	\xreg1, \xreg2, [sp], #16
	.endm

/*
 * Enable and disable interrupts.
 */
	.macro	disable_irq
	msr	daifset, #2
	.endm

	.macro	enable_irq
	msr	daifclr, #2
	.endm

	.macro	save_and_disable_irq, flags
	mrs	\flags, daif
	msr	daifset, #2
	.endm

	.macro	restore_irq, flags
	msr	daif, \flags
	.endm

/*
 * Save/disable and restore interrupts.
 */
	.macro	save_and_disable_irqs, olddaif
	mrs	\olddaif, daif
	disable_irq
	.endm

	.macro	restore_irqs, olddaif
	msr	daif, \olddaif
	.endm

/*
 * Enable and disable debug exceptions.
 */
	.macro	disable_dbg
	msr	daifset, #8
	.endm

	.macro	enable_dbg
	msr	daifclr, #8
	.endm

	.macro	disable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	bic	\tmp, \tmp, #1
	msr	mdscr_el1, \tmp
	isb				// Synchronise with enable_dbg
9990:
	.endm

	.macro	enable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	disable_dbg
	mrs	\tmp, mdscr_el1
	orr	\tmp, \tmp, #1
	msr	mdscr_el1, \tmp
9990:
	.endm

/*
 * Enable both debug exceptions and interrupts. This is likely to be
 * faster than two daifclr operations, since writes to this register
 * are self-synchronising.
 */
	.macro	enable_dbg_and_irq
	msr	daifclr, #(8 | 2)
	.endm

/*
 * SMP data memory barrier
 */
	.macro	smp_dmb, opt
	dmb	\opt
	.endm

/*
 * NOP sequence
 */
	.macro	nops, num
	.rept	\num
	nop
	.endr
	.endm

#define USER(l, x...)				\
9999:	x;					\
	.section __ex_table,"a";		\
	.align	3;				\
	.quad	9999b,l;			\
	.previous

/*
 * Register aliases.
 */
lr	.req	x30		// link register

/*
 * Vector entry
 */
	.macro	ventry	label
	.align	7
	b	\label
	.endm

/*
 * Select code when configured for BE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_BE(code...) code
#else
#define CPU_BE(code...)
#endif

/*
 * Select code when configured for LE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_LE(code...)
#else
#define CPU_LE(code...) code
#endif
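/*
 * Editorial usage sketch, not part of the original header: CPU_BE()/CPU_LE()
 * wrap a fragment that should only be assembled for one endianness, e.g.
 * byte-reversing a little-endian datum on a big-endian kernel. The snippet
 * below is purely illustrative; the register choices are arbitrary.
 *
 *		ldr	x1, [x0]		// 64-bit little-endian load
 *	CPU_BE(	rev	x1, x1	)		// swap to host order on BE only
 */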
/*
 * Define a macro that constructs a 64-bit value by concatenating two
 * 32-bit registers. Note that on big endian systems the order of the
 * registers is swapped.
 */
#ifndef CONFIG_CPU_BIG_ENDIAN
	.macro	regs_to_64, rd, lbits, hbits
#else
	.macro	regs_to_64, rd, hbits, lbits
#endif
	orr	\rd, \lbits, \hbits, lsl #32
	.endm

/*
 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
 * <symbol> is within the range +/- 4 GB of the PC.
 */
/*
 * @dst: destination register (64 bit wide)
 * @sym: name of the symbol
 * @tmp: optional scratch register to be used if <dst> == sp, which
 *       is not allowed in an adrp instruction
 */
	.macro	adr_l, dst, sym, tmp=
	.ifb	\tmp
	adrp	\dst, \sym
	add	\dst, \dst, :lo12:\sym
	.else
	adrp	\tmp, \sym
	add	\dst, \tmp, :lo12:\sym
	.endif
	.endm

/*
 * @dst: destination register (32 or 64 bit wide)
 * @sym: name of the symbol
 * @tmp: optional 64-bit scratch register to be used if <dst> is a
 *       32-bit wide register, in which case it cannot be used to hold
 *       the address
 */
	.macro	ldr_l, dst, sym, tmp=
	.ifb	\tmp
	adrp	\dst, \sym
	ldr	\dst, [\dst, :lo12:\sym]
	.else
	adrp	\tmp, \sym
	ldr	\dst, [\tmp, :lo12:\sym]
	.endif
	.endm

/*
 * @src: source register (32 or 64 bit wide)
 * @sym: name of the symbol
 * @tmp: mandatory 64-bit scratch register to calculate the address
 *       while <src> needs to be preserved.
 */
	.macro	str_l, src, sym, tmp
	adrp	\tmp, \sym
	str	\src, [\tmp, :lo12:\sym]
	.endm

/*
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
 */
	.macro	vma_vm_mm, rd, rn
	ldr	\rd, [\rn, #VMA_VM_MM]
	.endm

/*
 * mmid - get context id from mm pointer (mm->context.id)
 */
	.macro	mmid, rd, rn
	ldr	\rd, [\rn, #MM_CONTEXT_ID]
	.endm

/*
 * dcache_line_size - get the minimum D-cache line size from the CTR register.
 */
	.macro	dcache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm
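/*
 * Editorial worked example, not part of the original header: CTR_EL0 encodes
 * DminLine (bits [19:16]) as log2 of the D-cache line length in words, so a
 * field value of 4 yields 4 bytes per word << 4 = 64, i.e. a 64-byte minimum
 * line. A caller would typically write:
 *
 *	dcache_line_size x2, x3		// x2 := minimum D-cache line, in bytes
 *
 * icache_line_size below decodes IminLine (bits [3:0]) in the same way.
 */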
/*
 * icache_line_size - get the minimum I-cache line size from the CTR register.
 */
	.macro	icache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	and	\tmp, \tmp, #0xf		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
 */
	.macro	tcr_set_idmap_t0sz, valreg, tmpreg
#ifndef CONFIG_ARM64_VA_BITS_48
	ldr_l	\tmpreg, idmap_t0sz
	bfi	\valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
#endif
	.endm

/*
 * Macro to perform a data cache maintenance operation for the interval
 * [kaddr, kaddr + size)
 *
 *	op:		operation passed to dc instruction
 *	domain:		domain used in dsb instruction
 *	kaddr:		starting virtual address of the region
 *	size:		size of the region
 *	Corrupts:	kaddr, size, tmp1, tmp2
 */
	.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
	dcache_line_size \tmp1, \tmp2
	add	\size, \kaddr, \size
	sub	\tmp2, \tmp1, #1
	bic	\kaddr, \kaddr, \tmp2
9998:	dc	\op, \kaddr
	add	\kaddr, \kaddr, \tmp1
	cmp	\kaddr, \size
	b.lo	9998b
	dsb	\domain
	.endm

/*
 * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
 */
	.macro	reset_pmuserenr_el0, tmpreg
	mrs	\tmpreg, id_aa64dfr0_el1	// Check ID_AA64DFR0_EL1 PMUVer
	sbfx	\tmpreg, \tmpreg, #8, #4
	cmp	\tmpreg, #1			// Skip if no PMU present
	b.lt	9000f
	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
9000:
	.endm

/*
 * Annotate a function as position independent, i.e., safe to be called before
 * the kernel virtual mapping is activated.
 */
#define ENDPIPROC(x)			\
	.globl	__pi_##x;		\
	.type	__pi_##x, %function;	\
	.set	__pi_##x, x;		\
	.size	__pi_##x, . - x;	\
	ENDPROC(x)

/*
 * Return the current thread_info.
 */
	.macro	get_thread_info, rd
	mrs	\rd, sp_el0
	.endm

/*
 * mov_q - move an immediate constant into a 64-bit register using
 *         between 2 and 4 movz/movk instructions (depending on the
 *         magnitude and sign of the operand)
 */
	.macro	mov_q, reg, val
	.if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
	movz	\reg, :abs_g1_s:\val
	.else
	.if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
	movz	\reg, :abs_g2_s:\val
	.else
	movz	\reg, :abs_g3:\val
	movk	\reg, :abs_g2_nc:\val
	.endif
	movk	\reg, :abs_g1_nc:\val
	.endif
	movk	\reg, :abs_g0_nc:\val
	.endm

#endif	/* __ASM_ASSEMBLER_H */
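/*
 * Editorial usage sketch, not part of the original header: mov_q emits the
 * shortest movz/movk sequence the constant permits, for example
 *
 *	mov_q	x0, 0x0000000012345678	// upper 33 bits all zero: 2 instructions
 *	mov_q	x1, 0xffff0000ffff0000	// neither the upper 33 nor the upper 17
 *					// bits are uniform: 4 instructions
 */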