/*
 * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#ifdef IMAGE_BL31
#include <cpu_data.h>
#endif
#include <cpu_macros.S>
#include <debug.h>
#include <errata_report.h>

 /* The reset handler is needed in any BL image that runs from the reset vector */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31)
	/*
	 * The reset handler common to all platforms.  After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked.
	 * Clobbers: x0 - x19, x30
	 */
	.globl	reset_handler
func reset_handler
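	/* Preserve the return address; the bl calls below clobber x30 */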
	mov	x19, x30

	/* The plat_reset_handler can clobber x0 - x18, x30 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
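	/*
	 * Restore the return address before the tail call below, so the
	 * cpu_ops reset handler returns directly to reset_handler's caller.
	 */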
	mov	x30, x19
	cbz	x2, 1f

	/* The cpu_ops reset handler can clobber x0 - x19, x30 */
	br	x2
1:
	ret
endfunc reset_handler

#endif /* IMAGE_BL1 || IMAGE_BL31 */

#ifdef IMAGE_BL31 /* The core and cluster power down functionality is needed only in BL31 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare the CPU power down function for all platforms. The function
	 * takes the power domain level to be powered down as its parameter.
	 * After the cpu_ops pointer is retrieved from cpu_data, the handler for
	 * the requested power level is called.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the
	 * power down handler for the last power level
	 */
	mov_imm	x2, (CPU_MAX_PWR_DWN_OPS - 1)
	cmp	x0, x2
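	/* Clamp the level: x2 = min(x0, CPU_MAX_PWR_DWN_OPS - 1) */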
	csel	x2, x2, x0, hi

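	/* In BL31, tpidr_el3 holds this CPU's cpu_data pointer */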
	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
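	/*
	 * The power down handlers form an array of 64-bit function pointers
	 * starting at offset CPU_PWR_DWN_OPS in cpu_ops, hence the index is
	 * scaled by 8 (lsl #3).
	 */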
	mov	x1, #CPU_PWR_DWN_OPS
	add	x1, x1, x2, lsl #3
	ldr	x1, [x0, x1]
	br	x1
endfunc prepare_cpu_pwr_dwn


	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This can be called without a runtime stack, but may
	 * only be called after the MMU is enabled.
	 * Clobbers: x0 - x6, x10
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	mrs	x6, tpidr_el3
	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
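	/* Skip the lookup if the pointer is already cached in cpu_data */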
	cbnz	x0, 1f
	mov	x10, x30
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	mov	x30, x10
1:
	ret
endfunc init_cpu_ops
#endif /* IMAGE_BL31 */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	/*
	 * The cpu specific registers which need to be reported in a crash
	 * are reported via the cpu_ops cpu_reg_dump function. After a matching
	 * cpu_ops structure entry is found, the corresponding cpu_reg_dump
	 * in the cpu_ops is invoked.
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	mov	x16, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
#endif

	/*
	 * The below function returns the cpu_ops structure matching the
	 * MIDR of the core. It reads MIDR_EL1 and finds the matching
	 * entry in the cpu_ops entries. Only the implementer and part number
	 * are used to match the entries.
	 * Return :
	 *     x0 - The matching cpu_ops pointer on success
	 *     x0 - 0 on failure
	 * Clobbers : x0 - x5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Get the cpu_ops start and end locations */
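	/*
	 * The cpu_ops entries are emitted into a dedicated linker section
	 * bounded by __CPU_OPS_START__ and __CPU_OPS_END__; adding CPU_MIDR
	 * up front makes x4 point straight at the midr field of each entry.
	 */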
	adr	x4, (__CPU_OPS_START__ + CPU_MIDR)
	adr	x5, (__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	x0, #0

	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementer and part number using the mask */
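	/*
	 * In MIDR_EL1, the Implementer field is bits [31:24] and the PartNum
	 * field is bits [15:4]; variant and revision are ignored here.
	 */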
	and	w2, w2, w3
1:
	/* Check if we have reached the end of the list */
	cmp	x4, x5
	b.eq	error_exit

	/* Load the midr from the cpu_ops entry */
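	/* The post-index addressing advances x4 to the next entry's midr */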
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if the midr matches the midr of this core */
	cmp	w1, w2
	b.ne	1b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
error_exit:
	ret
endfunc get_cpu_ops_ptr

/*
 * Extract the CPU revision and variant, and combine them into a single numeric
 * value for easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	mrs	x1, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
	 * as variant[7:4] and revision[3:0] of x0.
	 *
	 * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
	 * extract x1[3:0] into x0[3:0] retaining other bits.
	 */
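	/* For example, an r2p1 CPU (variant 2, revision 1) yields x0 = 0x21 */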
	ubfx	x0, x1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	x0, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	ret
endfunc cpu_get_rev_var

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is less than or equal to the
 * given value, indicate that the errata applies; otherwise not.
 */
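/* For example, an erratum affecting revisions up to r1p0 would pass 0x10 in x1. */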
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, ls
	ret
endfunc cpu_rev_var_ls

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is greater than or equal to
 * the given value, indicate that the errata applies; otherwise not.
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, hs
	ret
endfunc cpu_rev_var_hs

#if REPORT_ERRATA
/*
 * void print_errata_status(void);
 *
 * Function to print errata status for CPUs of its class. Must be called only:
 *
 *   - with the MMU and data caches enabled;
 *   - after cpu_ops have been initialized in per-CPU data.
 */
	.globl print_errata_status
func print_errata_status
#ifdef IMAGE_BL1
	/*
	 * BL1 doesn't have per-CPU data. So retrieve the CPU operations
	 * directly.
	 */
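	/* xzr is stored alongside x30 only to keep sp 16-byte aligned */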
	stp	xzr, x30, [sp, #-16]!
	bl	get_cpu_ops_ptr
	ldp	xzr, x30, [sp], #16
	ldr	x1, [x0, #CPU_ERRATA_FUNC]
	cbnz	x1, .Lprint
#else
	/*
	 * Retrieve pointer to cpu_ops from per-CPU data, and further, the
	 * errata printing function. If it's non-NULL, jump to the function in
	 * turn.
	 */
	mrs	x0, tpidr_el3
	ldr	x1, [x0, #CPU_DATA_CPU_OPS_PTR]
	ldr	x0, [x1, #CPU_ERRATA_FUNC]
	cbz	x0, .Lnoprint

	/*
	 * Printing errata status requires atomically testing the printed flag.
	 */
	stp	x19, x30, [sp, #-16]!
	mov	x19, x0

	/*
	 * Load pointers to errata lock and printed flag. Call
	 * errata_needs_reporting to check whether this CPU needs to report
	 * errata status pertaining to its class.
	 */
	ldr	x0, [x1, #CPU_ERRATA_LOCK]
	ldr	x1, [x1, #CPU_ERRATA_PRINTED]
	bl	errata_needs_reporting
	mov	x1, x19
	ldp	x19, x30, [sp], #16
	cbnz	x0, .Lprint
#endif
.Lnoprint:
	ret
.Lprint:
	/* Jump to errata reporting function for this CPU */
	br	x1
endfunc print_errata_status
#endif