/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#ifndef __ASM_MACROS_S__
#define __ASM_MACROS_S__

#include <arch.h>
#include <asm_macros_common.S>
#include <spinlock.h>

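	/*
	 * The func_prologue/func_epilogue pair below saves and later restores
	 * the frame pointer (x29) and link register (x30) on the stack around
	 * a function body, following the AAPCS64 frame record convention.
	 */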
	.macro	func_prologue
	stp	x29, x30, [sp, #-0x10]!
	mov	x29, sp
	.endm

	.macro	func_epilogue
	ldp	x29, x30, [sp], #0x10
	.endm

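	/*
	 * The macro below returns, in \reg, the line length in bytes of the
	 * smallest data or unified cache: it reads CTR_EL0, extracts the
	 * DminLine field (bits [19:16], the log2 of the line length in words)
	 * and computes 4 << DminLine, using \tmp as scratch. For example,
	 * DminLine = 4 corresponds to 64-byte cache lines.
	 */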
	.macro	dcache_line_size  reg, tmp
	mrs	\tmp, ctr_el0
	ubfx	\tmp, \tmp, #16, #4
	mov	\reg, #4
	lsl	\reg, \reg, \tmp
	.endm

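	/*
	 * As above, but for the smallest instruction cache line, taken from
	 * the IminLine field (bits [3:0]) of CTR_EL0.
	 */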
	.macro	icache_line_size  reg, tmp
	mrs	\tmp, ctr_el0
	and	\tmp, \tmp, #0xf
	mov	\reg, #4
	lsl	\reg, \reg, \tmp
	.endm

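	/*
	 * The macro below branches to \label unless the exception recorded
	 * in ESR_EL3 was generated by an AArch64 SMC instruction.
	 * Clobbers x0.
	 */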
	.macro	smc_check  label
	mrs	x0, esr_el3
	ubfx	x0, x0, #ESR_EC_SHIFT, #ESR_EC_LENGTH
	cmp	x0, #EC_AARCH64_SMC
	b.ne	\label
	.endm

	/*
	 * Declare the exception vector table, enforcing it is aligned on a
	 * 2KB boundary, as required by the ARMv8 architecture.
	 * Use zero bytes as the fill value for the padding so that the
	 * padding decodes as illegal AArch64 instructions. This increases
	 * security and robustness, and potentially facilitates debugging.
	 */
	.macro vector_base  label
	.section .vectors, "ax"
	.align 11, 0
	\label:
	.endm

	/*
	 * Create an entry in the exception vector table, enforcing it is
	 * aligned on a 128-byte boundary, as required by the ARMv8
	 * architecture. Use zero bytes as the fill value for the padding so
	 * that the padding decodes as illegal AArch64 instructions. This
	 * increases security and robustness, and potentially facilitates
	 * debugging.
	 */
	.macro vector_entry  label
	.cfi_sections .debug_frame
	.section .vectors, "ax"
	.align 7, 0
	.type \label, %function
	.func \label
	.cfi_startproc
	\label:
	.endm

	/*
	 * This macro verifies that the given vector doesn't exceed the
	 * architectural limit of 32 instructions. It is meant to be placed
	 * immediately after the last instruction in the vector and takes the
	 * vector entry label as its parameter.
	 */
	.macro check_vector_size since
	  .endfunc
	  .cfi_endproc
	  .if (. - \since) > (32 * 4)
	    .error "Vector exceeds 32 instructions"
	  .endif
	.endm
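	/*
	 * Illustrative pairing of vector_entry and check_vector_size (the
	 * entry and handler names are examples only):
	 *
	 *   vector_entry sync_exception_sp_el0
	 *	b	handle_sync_exception
	 *   check_vector_size sync_exception_sp_el0
	 */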

#if ENABLE_PLAT_COMPAT
	/*
	 * This macro calculates the base address of an MP stack using the
	 * platform_get_core_pos() index, the name of the stack storage and
	 * the size of each stack
	 * In: X0 = MPIDR of CPU whose stack is wanted
	 * Out: X0 = physical address of stack base
	 * Clobber: X30, X1, X2
	 */
	.macro get_mp_stack _name, _size
	bl  platform_get_core_pos
	ldr x2, =(\_name + \_size)
	mov x1, #\_size
	madd x0, x0, x1, x2
	.endm
#endif

	/*
	 * This macro calculates the base address of the current CPU's MP stack
	 * using the plat_my_core_pos() index, the name of the stack storage
	 * and the size of each stack
	 * Out: X0 = physical address of stack base
	 * Clobber: X30, X1, X2
	 */
	.macro get_my_mp_stack _name, _size
	bl  plat_my_core_pos
	ldr x2, =(\_name + \_size)
	mov x1, #\_size
	madd x0, x0, x1, x2
	.endm

	/*
	 * This macro calculates the base address of a UP stack using the
	 * name of the stack storage and the size of the stack
	 * Out: X0 = physical address of stack base
	 */
	.macro get_up_stack _name, _size
	ldr x0, =(\_name + \_size)
	.endm
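	/*
	 * Illustrative use of the stack macros above, e.g. from early boot
	 * code (the stack symbol and size macro are example names only):
	 *
	 *   get_up_stack plat_normal_stacks, PLAT_STACK_SIZE
	 *   mov	sp, x0
	 */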

	/*
	 * Helper macro to generate the best mov/movk combinations according
	 * to the value to be moved. The 16 bits starting at bit '_shift' are
	 * tested and, if not zero, moved into '_reg' without affecting the
	 * other bits.
	 */
	.macro _mov_imm16 _reg, _val, _shift
		.if (\_val >> \_shift) & 0xffff
			.if (\_val & (1 << \_shift - 1))
				movk	\_reg, (\_val >> \_shift) & 0xffff, LSL \_shift
			.else
				mov	\_reg, \_val & (0xffff << \_shift)
			.endif
		.endif
	.endm
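	/*
	 * Note: in GNU assembler expressions the shift operators bind more
	 * tightly than subtraction, so (1 << \_shift - 1) above evaluates as
	 * ((1 << \_shift) - 1), i.e. the inner .if tests whether any bits
	 * below '_shift' are already set and a movk is needed to preserve
	 * them.
	 */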

	/*
	 * Helper macro to load arbitrary values into 32 or 64-bit registers,
	 * generating the best mov/movk combinations. Many base addresses are
	 * 64KB aligned, in which case the macro skips the redundant update of
	 * bits [15:0].
	 */
	.macro mov_imm _reg, _val
		.if (\_val) == 0
			mov	\_reg, #0
		.else
			_mov_imm16	\_reg, (\_val), 0
			_mov_imm16	\_reg, (\_val), 16
			_mov_imm16	\_reg, (\_val), 32
			_mov_imm16	\_reg, (\_val), 48
		.endif
	.endm
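	/*
	 * Illustrative expansion (the exact instruction count depends on the
	 * value being loaded):
	 *
	 *   mov_imm	x1, 0x123456789abc
	 *
	 * roughly expands to:
	 *
	 *   mov	x1, #0x9abc
	 *   movk	x1, #0x5678, LSL #16
	 *   movk	x1, #0x1234, LSL #32
	 */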
163
164	/*
165	 * Macro to mark instances where we're jumping to a function and don't
166	 * expect a return. To provide the function being jumped to with
167	 * additional information, we use 'bl' instruction to jump rather than
168	 * 'b'.
169         *
170	 * Debuggers infer the location of a call from where LR points to, which
171	 * is usually the instruction after 'bl'. If this macro expansion
172	 * happens to be the last location in a function, that'll cause the LR
173	 * to point a location beyond the function, thereby misleading debugger
174	 * back trace. We therefore insert a 'nop' after the function call for
175	 * debug builds, unless 'skip_nop' parameter is non-zero.
176	 */
177	.macro no_ret _func:req, skip_nop=0
178	bl	\_func
179#if DEBUG
180	.ifeq \skip_nop
181	nop
182	.endif
183#endif
184	.endm
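	/*
	 * Illustrative use of no_ret (the target function is an example name
	 * only):
	 *
	 *   no_ret	plat_report_fatal_error
	 */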

	/*
	 * Reserve space for a spin lock in an assembly file.
	 */
	.macro define_asm_spinlock _name:req
	.align	SPINLOCK_ASM_ALIGN
	\_name:
	.space	SPINLOCK_ASM_SIZE
	.endm
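	/*
	 * Illustrative use (the lock name is an example only):
	 *
	 *   define_asm_spinlock example_lock
	 *
	 * This reserves SPINLOCK_ASM_SIZE bytes for the symbol example_lock,
	 * aligned to a 2^SPINLOCK_ASM_ALIGN byte boundary.
	 */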

#endif /* __ASM_MACROS_S__ */