/*
 * Copyright (c) 2014-2020, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <common/bl_common.h>
#include <common/debug.h>
#include <cpu_macros.S>
#include <lib/cpus/errata_report.h>
#include <lib/el3_runtime/cpu_data.h>

	/* Reset fn is needed in BL at reset vector */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || (defined(IMAGE_BL2) && BL2_AT_EL3)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked.
	 * Clobbers: x0 - x19, x30
	 */
	.globl	reset_handler
func reset_handler
	mov	x19, x30

	/* The plat_reset_handler can clobber x0 - x18, x30 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
	mov	x30, x19
	cbz	x2, 1f

	/* The cpu_ops reset handler can clobber x0 - x19, x30 */
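	/*
	 * Tail-call the handler: x30 was restored above, so the
	 * CPU-specific reset handler returns directly to reset_handler's
	 * caller.
	 */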
	br	x2
1:
	ret
endfunc reset_handler

#endif

#ifdef IMAGE_BL31 /* The core and cluster power down functions are needed only in BL31 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare CPU power down function for all platforms. The function takes
	 * a domain level to be powered down as its parameter. After the cpu_ops
	 * pointer is retrieved from cpu_data, the handler for the requested power
	 * level is called.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds the highest supported level
	 * (CPU_MAX_PWR_DWN_OPS - 1), call the power down handler for the
	 * last supported level instead.
	 */
	mov_imm	x2, (CPU_MAX_PWR_DWN_OPS - 1)
	cmp	x0, x2
	csel	x2, x2, x0, hi

	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

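	/*
	 * The per-level handlers are stored in cpu_ops as an array of
	 * 64-bit function pointers starting at offset CPU_PWR_DWN_OPS, so
	 * the handler for level x2 is at x0 + CPU_PWR_DWN_OPS + (x2 << 3).
	 */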
	/* Get the appropriate power down handler */
	mov	x1, #CPU_PWR_DWN_OPS
	add	x1, x1, x2, lsl #3
	ldr	x1, [x0, x1]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	br	x1
endfunc prepare_cpu_pwr_dwn


	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This can be called without a runtime stack, but may
	 * only be called after the MMU is enabled.
	 * clobbers: x0 - x6, x10
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	mrs	x6, tpidr_el3
	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	cbnz	x0, 1f
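	/*
	 * No stack is available here, so preserve the return address in
	 * x10 across the bl to get_cpu_ops_ptr, which overwrites x30.
	 */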
	mov	x10, x30
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	mov	x30, x10
1:
	ret
endfunc init_cpu_ops
#endif /* IMAGE_BL31 */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	/*
	 * The cpu specific registers which need to be reported in a crash
	 * are reported via the cpu_ops cpu_reg_dump function. After a matching
	 * cpu_ops structure entry is found, the corresponding cpu_reg_dump
	 * in the cpu_ops is invoked.
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	mov	x16, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
#endif

	/*
	 * The below function returns the cpu_ops structure matching the
	 * midr of the core. It reads MIDR_EL1 and finds the matching
	 * entry in the list of cpu_ops entries. Only the implementation
	 * and part number are used to match the entries.
	 *
	 * If cpu_ops for the MIDR_EL1 cannot be found and
	 * SUPPORT_UNKNOWN_MPID is enabled, it will try to look for a
	 * default cpu_ops with an MIDR value of 0.
	 * (Implementation number 0x0 should be reserved for software use
	 * and therefore no clashes should happen with that default value).
	 *
	 * Return:
	 *     x0 - The matching cpu_ops pointer on success
	 *     x0 - 0 on failure.
	 * Clobbers: x0 - x5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementation and part number using mask */
	and	w2, w2, w3

	/* Get the cpu_ops end location */
	adr	x5, (__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	x0, #0
1:
	/* Get the cpu_ops start location */
	adr	x4, (__CPU_OPS_START__ + CPU_MIDR)

2:
	/* Check if we have reached end of list */
	cmp	x4, x5
	b.eq	search_def_ptr

	/* load the midr from the cpu_ops */
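	/*
	 * x4 points at the CPU_MIDR field of the current entry; the
	 * post-indexed load below fetches it and advances x4 by
	 * CPU_OPS_SIZE to the same field of the next entry.
	 */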
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if midr matches to midr of this core */
	cmp	w1, w2
	b.ne	2b

	/* Subtract the increment and offset to get the cpu-ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
#ifdef SUPPORT_UNKNOWN_MPID
	cbnz	x2, exit_mpid_found
	/* Mark the unsupported MPID flag */
	adrp	x1, unsupported_mpid_flag
	add	x1, x1, :lo12:unsupported_mpid_flag
	str	w2, [x1]
exit_mpid_found:
#endif
	ret

	/*
	 * Search again for a default pointer (MIDR = 0x0)
	 * or return error if already searched.
	 */
search_def_ptr:
#ifdef SUPPORT_UNKNOWN_MPID
	cbz	x2, error_exit
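	/* Retry the scan, this time matching against the default MIDR of 0 */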
	mov	x2, #0
	b	1b
error_exit:
#endif
	ret
endfunc get_cpu_ops_ptr

/*
 * Extract CPU revision and variant, and combine them into a single numeric for
 * easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	mrs	x1, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
	 * as variant[7:4] and revision[3:0] of x0.
	 *
	 * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
	 * extract x1[3:0] into x0[3:0] retaining other bits.
	 */
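	/*
	 * For example, a core reporting variant 2 and revision 1 in
	 * MIDR_EL1 (r2p1) yields 0x21 in x0.
	 */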
	ubfx	x0, x1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	x0, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	ret
endfunc cpu_get_rev_var

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is less than or equal to the
 * given value, this indicates that the errata applies; otherwise it does not.
 *
 * Shall clobber: x0-x3
 */
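/*
 * For example, with x0 = 0x21 (r2p1) and x1 = 0x30 (r3p0) this returns
 * ERRATA_APPLIES; with x1 = 0x10 (r1p0) it returns ERRATA_NOT_APPLIES.
 */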
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, ls
	ret
endfunc cpu_rev_var_ls

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is higher than or equal to the
 * given value, this indicates that the errata applies; otherwise it does not.
 *
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, hs
	ret
endfunc cpu_rev_var_hs

/*
 * Compare the CPU's revision-variant (x0) with a given range (x1 - x2), for
 * errata application purposes. If the revision-variant falls within the given
 * range (inclusive of both bounds), this indicates that the errata applies;
 * otherwise it does not.
 *
 * Shall clobber: x0-x4
 */
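/*
 * For example, with x0 = 0x21 (r2p1), x1 = 0x10 (r1p0) and x2 = 0x30 (r3p0)
 * this returns ERRATA_APPLIES, since r1p0 <= r2p1 <= r3p0.
 */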
	.globl	cpu_rev_var_range
func cpu_rev_var_range
	mov	x3, #ERRATA_APPLIES
	mov	x4, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x1, x3, x4, hs
	cbz	x1, 1f
	cmp	x0, x2
	csel	x1, x3, x4, ls
1:
	mov	x0, x1
	ret
endfunc cpu_rev_var_range

#if REPORT_ERRATA
/*
 * void print_errata_status(void);
 *
 * Function to print errata status for CPUs of its class. Must be called only:
 *
 *   - with the MMU and data caches enabled;
 *   - after cpu_ops have been initialized in per-CPU data.
 */
	.globl print_errata_status
func print_errata_status
#ifdef IMAGE_BL1
	/*
	 * BL1 doesn't have per-CPU data. So retrieve the CPU operations
	 * directly.
	 */
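	/* Push x30 together with xzr so the stack stays 16-byte aligned */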
	stp	xzr, x30, [sp, #-16]!
	bl	get_cpu_ops_ptr
	ldp	xzr, x30, [sp], #16
	ldr	x1, [x0, #CPU_ERRATA_FUNC]
	cbnz	x1, .Lprint
#else
	/*
	 * Retrieve pointer to cpu_ops from per-CPU data, and further, the
	 * errata printing function. If it's non-NULL, jump to the function in
	 * turn.
	 */
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x1, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x1, #CPU_ERRATA_FUNC]
	cbz	x0, .Lnoprint

	/*
	 * Printing errata status requires atomically testing the printed flag.
	 */
	stp	x19, x30, [sp, #-16]!
	mov	x19, x0

	/*
	 * Load pointers to errata lock and printed flag. Call
	 * errata_needs_reporting to check whether this CPU needs to report
	 * errata status pertaining to its class.
	 */
	ldr	x0, [x1, #CPU_ERRATA_LOCK]
	ldr	x1, [x1, #CPU_ERRATA_PRINTED]
	bl	errata_needs_reporting
	mov	x1, x19
	ldp	x19, x30, [sp], #16
	cbnz	x0, .Lprint
#endif
.Lnoprint:
	ret
.Lprint:
	/* Jump to errata reporting function for this CPU */
	br	x1
endfunc print_errata_status
#endif

/*
 * int check_wa_cve_2017_5715(void);
 *
 * This function returns:
 *  - ERRATA_APPLIES when firmware mitigation is required.
 *  - ERRATA_NOT_APPLIES when firmware mitigation is _not_ required.
 *  - ERRATA_MISSING when firmware mitigation would be required but
 *    is not compiled in.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_wa_cve_2017_5715
func check_wa_cve_2017_5715
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA1_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2017-5715 so bail out.
	 */
	cmp	x0, #0
	beq	1f
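	/*
	 * Tail-call the CPU-specific check; it returns one of the
	 * ERRATA_* values in x0 directly to our caller.
	 */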
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_wa_cve_2017_5715

/*
 * void *wa_cve_2018_3639_get_disable_ptr(void);
 *
 * Returns a function pointer which is used to disable mitigation
 * for CVE-2018-3639.
 * The function pointer is only returned on cores that employ
 * dynamic mitigation. If the core uses static mitigation or is
 * unaffected by CVE-2018-3639 this function returns NULL.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	wa_cve_2018_3639_get_disable_ptr
func wa_cve_2018_3639_get_disable_ptr
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
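	/*
	 * The per-CPU disable routine, if any, is kept in the
	 * CPU_EXTRA2_FUNC slot of cpu_ops; a NULL value means the core
	 * does not use dynamic mitigation.
	 */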
	ldr	x0, [x0, #CPU_EXTRA2_FUNC]
	ret
endfunc wa_cve_2018_3639_get_disable_ptr