/*
 * NOTE(review): source-browser navigation links ("Home", "Line#", "Scopes#",
 * "Navigate", "Raw", "Download") removed from the top of this scraped file.
 */
1/*
2 * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7#include <arch.h>
8#include <asm_macros.S>
9#include <assert_macros.S>
10#include <context.h>
11#include <denver.h>
12#include <cpu_macros.S>
13#include <plat_macros.S>
14
	/* -------------------------------------------------
	 * CVE-2017-5715 mitigation
	 *
	 * Flush the indirect branch predictor and RSB on
	 * entry to EL3 by issuing a newly added instruction
	 * for Denver CPUs.
	 *
	 * To achieve this without performing any branch
	 * instruction, a per-cpu vbar is installed which
	 * executes the workaround and then branches off to
	 * the corresponding vector entry in the main vector
	 * table.
	 * -------------------------------------------------
	 */
	.globl	workaround_bpflush_runtime_exceptions

vector_base workaround_bpflush_runtime_exceptions

	/*
	 * Perform the branch-predictor/RSB flush before the stub
	 * branches to the real vector entry.  Only x0/x1 are used, and
	 * they are staged in the GP-register context save area that the
	 * EL3 handlers address via CTX_GPREGS_OFFSET on the current SP.
	 */
	.macro	apply_workaround
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]

	/* -------------------------------------------------
	 * A new write-only system register where a write of
	 * 1 to bit 0 will cause the indirect branch predictor
	 * and RSB to be flushed.
	 *
	 * A write of 0 to bit 0 will be ignored. A write of
	 * 1 to any other bit will cause an MCA.
	 * -------------------------------------------------
	 */
	mov	x0, #1
	msr	s3_0_c15_c0_6, x0	/* implementation-defined flush op */
	isb				/* ensure the flush completes */

	/* Restore the scratch registers for the real handler. */
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	.endm
51
	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 *
	 * Exceptions taken from EL3 itself branch straight to the main
	 * vector table; the flush is only applied on entry from a lower
	 * EL (see the AArch64/AArch32 entries below).
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpflush_sync_exception_sp_el0
	b	sync_exception_sp_el0
end_vector_entry workaround_bpflush_sync_exception_sp_el0

vector_entry workaround_bpflush_irq_sp_el0
	b	irq_sp_el0
end_vector_entry workaround_bpflush_irq_sp_el0

vector_entry workaround_bpflush_fiq_sp_el0
	b	fiq_sp_el0
end_vector_entry workaround_bpflush_fiq_sp_el0

vector_entry workaround_bpflush_serror_sp_el0
	b	serror_sp_el0
end_vector_entry workaround_bpflush_serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpflush_sync_exception_sp_elx
	b	sync_exception_sp_elx
end_vector_entry workaround_bpflush_sync_exception_sp_elx

vector_entry workaround_bpflush_irq_sp_elx
	b	irq_sp_elx
end_vector_entry workaround_bpflush_irq_sp_elx

vector_entry workaround_bpflush_fiq_sp_elx
	b	fiq_sp_elx
end_vector_entry workaround_bpflush_fiq_sp_elx

vector_entry workaround_bpflush_serror_sp_elx
	b	serror_sp_elx
end_vector_entry workaround_bpflush_serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 *
	 * Entry from a lower EL: flush the branch predictor/RSB first,
	 * then dispatch to the corresponding main vector entry.
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpflush_sync_exception_aarch64
	apply_workaround
	b	sync_exception_aarch64
end_vector_entry workaround_bpflush_sync_exception_aarch64

vector_entry workaround_bpflush_irq_aarch64
	apply_workaround
	b	irq_aarch64
end_vector_entry workaround_bpflush_irq_aarch64

vector_entry workaround_bpflush_fiq_aarch64
	apply_workaround
	b	fiq_aarch64
end_vector_entry workaround_bpflush_fiq_aarch64

vector_entry workaround_bpflush_serror_aarch64
	apply_workaround
	b	serror_aarch64
end_vector_entry workaround_bpflush_serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 *
	 * Same treatment as the AArch64 lower-EL entries above.
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpflush_sync_exception_aarch32
	apply_workaround
	b	sync_exception_aarch32
end_vector_entry workaround_bpflush_sync_exception_aarch32

vector_entry workaround_bpflush_irq_aarch32
	apply_workaround
	b	irq_aarch32
end_vector_entry workaround_bpflush_irq_aarch32

vector_entry workaround_bpflush_fiq_aarch32
	apply_workaround
	b	fiq_aarch32
end_vector_entry workaround_bpflush_fiq_aarch32

vector_entry workaround_bpflush_serror_aarch32
	apply_workaround
	b	serror_aarch32
end_vector_entry workaround_bpflush_serror_aarch32
139
	.global	denver_disable_dco

	/* ---------------------------------------------
	 * Disable debug interfaces
	 *
	 * Writes 1 to OSDLR_EL1 (sets the OS Double Lock
	 * bit) so the external debug interface is
	 * quiescent before the core is powered down.
	 * Clobbers: x0.
	 * ---------------------------------------------
	 */
func denver_disable_ext_debug
	mov	x0, #1
	msr	osdlr_el1, x0
	isb				/* make the lock visible to this PE */
	dsb	sy			/* ...and system-wide before power down */
	ret
endfunc denver_disable_ext_debug
153
154	/* ----------------------------------------------------
155	 * Enable dynamic code optimizer (DCO)
156	 * ----------------------------------------------------
157	 */
158func denver_enable_dco
159	mov	x3, x30
160	bl	plat_my_core_pos
161	mov	x1, #1
162	lsl	x1, x1, x0
163	msr	s3_0_c15_c0_2, x1
164	mov	x30, x3
165	ret
166endfunc denver_enable_dco
167
168	/* ----------------------------------------------------
169	 * Disable dynamic code optimizer (DCO)
170	 * ----------------------------------------------------
171	 */
172func denver_disable_dco
173
174	mov	x3, x30
175
176	/* turn off background work */
177	bl	plat_my_core_pos
178	mov	x1, #1
179	lsl	x1, x1, x0
180	lsl	x2, x1, #16
181	msr	s3_0_c15_c0_2, x2
182	isb
183
184	/* wait till the background work turns off */
1851:	mrs	x2, s3_0_c15_c0_2
186	lsr	x2, x2, #32
187	and	w2, w2, 0xFFFF
188	and	x2, x2, x1
189	cbnz	x2, 1b
190
191	mov	x30, x3
192	ret
193endfunc denver_disable_dco
194
	/* ----------------------------------------------------
	 * Errata check for CVE-2017-5715.
	 * Returns ERRATA_APPLIES in x0 when the CPU provides
	 * the branch-predictor flush operation, otherwise
	 * ERRATA_MISSING.  Clobbers: x1, x2.
	 * ----------------------------------------------------
	 */
func check_errata_cve_2017_5715
	mov	x0, #ERRATA_MISSING
#if WORKAROUND_CVE_2017_5715
	/*
	 * Check if the CPU supports the special instruction
	 * required to flush the indirect branch predictor and
	 * RSB. Support for this operation can be determined by
	 * comparing bits 19:16 of ID_AFR0_EL1 with 0b0001.
	 */
	mrs	x1, id_afr0_el1
	mov	x2, #0x10000
	and	x1, x1, x2		/* in practice only bit 16 is tested */
	cbz	x1, 1f
	mov	x0, #ERRATA_APPLIES
1:
#endif
	ret
endfunc check_errata_cve_2017_5715
213
	/* ----------------------------------------------------
	 * Errata check for CVE-2018-3639.  The mitigation is
	 * applied unconditionally at reset when compiled in,
	 * so report purely from the build flag.
	 * ----------------------------------------------------
	 */
func check_errata_cve_2018_3639
#if WORKAROUND_CVE_2018_3639
	mov	x0, #ERRATA_APPLIES
#else
	mov	x0, #ERRATA_MISSING
#endif
	ret
endfunc check_errata_cve_2018_3639
222
	/* -------------------------------------------------
	 * The CPU Ops reset function for Denver.
	 *
	 * Installs the CVE-2017-5715 workaround vector table
	 * when the CPU supports the flush operation, applies
	 * the CVE-2018-3639 ACTLR_EL3 mitigation, resets the
	 * power state to C1 and re-enables the DCO.
	 * Clobbers: x0-x3, x19 (plus denver_enable_dco's
	 * scratch registers).
	 * -------------------------------------------------
	 */
func denver_reset_func

	mov	x19, x30		/* preserve LR across bl below */

#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
	/*
	 * Check if the CPU supports the special instruction
	 * required to flush the indirect branch predictor and
	 * RSB. Support for this operation can be determined by
	 * comparing bits 19:16 of ID_AFR0_EL1 with 0b0001.
	 */
	mrs	x0, id_afr0_el1
	mov	x1, #0x10000
	and	x0, x0, x1
	cmp	x0, #0
	adr	x1, workaround_bpflush_runtime_exceptions
	mrs	x2, vbar_el3
	/* keep the current vbar when the flush op is absent */
	csel	x0, x1, x2, ne
	msr	vbar_el3, x0
#endif

#if WORKAROUND_CVE_2018_3639
	/*
	 * Denver CPUs with DENVER_MIDR_PN3 or earlier, use different
	 * bits in the ACTLR_EL3 register to disable speculative
	 * store buffer and memory disambiguation.
	 */
	mrs	x0, midr_el1
	mov_imm	x1, DENVER_MIDR_PN4
	cmp	x0, x1
	mrs	x0, actlr_el3
	mov	x1, #(DENVER_CPU_DIS_MD_EL3 | DENVER_CPU_DIS_SSB_EL3)
	mov	x2, #(DENVER_PN4_CPU_DIS_MD_EL3 | DENVER_PN4_CPU_DIS_SSB_EL3)
	/* x3 = pre-PN4 bits when MIDR != PN4, else the PN4 bits */
	csel	x3, x1, x2, ne
	orr	x0, x0, x3
	msr	actlr_el3, x0
	isb
	dsb	sy
#endif

	/* ----------------------------------------------------
	 * Reset ACTLR.PMSTATE to C1 state
	 * ----------------------------------------------------
	 */
	mrs	x0, actlr_el1
	bic	x0, x0, #DENVER_CPU_PMSTATE_MASK
	orr	x0, x0, #DENVER_CPU_PMSTATE_C1
	msr	actlr_el1, x0

	/* ----------------------------------------------------
	 * Enable dynamic code optimizer (DCO)
	 * ----------------------------------------------------
	 */
	bl	denver_enable_dco

	ret	x19
endfunc denver_reset_func
284
	/* ----------------------------------------------------
	 * The CPU Ops core power down function for Denver.
	 *
	 * Quiesces the external debug interface before the
	 * core is powered off.  Clobbers: x0, x19.
	 * ----------------------------------------------------
	 */
func denver_core_pwr_dwn

	mov	x19, x30		/* preserve LR across bl below */

	/* ---------------------------------------------
	 * Force the debug interfaces to be quiescent
	 * ---------------------------------------------
	 */
	bl	denver_disable_ext_debug

	ret	x19
endfunc denver_core_pwr_dwn
301
	/* -------------------------------------------------------
	 * The CPU Ops cluster power down function for Denver.
	 * No cluster-level action is required: deliberate no-op.
	 * -------------------------------------------------------
	 */
func denver_cluster_pwr_dwn
	ret
endfunc denver_cluster_pwr_dwn
309
#if REPORT_ERRATA
	/*
	 * Errata printing function for Denver. Must follow AAPCS.
	 * Callee-saved x8 carries the revision-variant value across
	 * each report_errata expansion; x8 and the LR are stacked so
	 * the function remains AAPCS-clean.
	 */
func denver_errata_report
	stp	x8, x30, [sp, #-16]!

	bl	cpu_get_rev_var
	mov	x8, x0			/* x8 = revision-variant */

	/*
	 * Report all errata. The revision-variant information is passed to
	 * checking functions of each errata.
	 */
	report_errata WORKAROUND_CVE_2017_5715, denver, cve_2017_5715
	report_errata WORKAROUND_CVE_2018_3639, denver, cve_2018_3639

	ldp	x8, x30, [sp], #16
	ret
endfunc denver_errata_report
#endif
331
	/* ---------------------------------------------
	 * This function provides Denver specific
	 * register information for crash reporting.
	 * It needs to return with x6 pointing to
	 * a list of register names in ascii and
	 * x8 - x15 having values of registers to be
	 * reported.
	 * ---------------------------------------------
	 */
.section .rodata.denver_regs, "aS"	/* allocatable, mergeable strings */
denver_regs:  /* The ascii list of register names to be reported */
	.asciz	"actlr_el1", ""		/* empty string terminates the list */

func denver_cpu_reg_dump
	adr	x6, denver_regs		/* x6 = name list */
	mrs	x8, ACTLR_EL1		/* x8 = first (and only) value */
	ret
endfunc denver_cpu_reg_dump
350
	/* ----------------------------------------------------
	 * Register the Denver CPU ops, including the CVE
	 * workaround check hooks, for every supported MIDR
	 * revision (PN0 through PN4).
	 * ----------------------------------------------------
	 */
declare_cpu_ops_wa denver, DENVER_MIDR_PN0, \
	denver_reset_func, \
	check_errata_cve_2017_5715, \
	CPU_NO_EXTRA2_FUNC, \
	denver_core_pwr_dwn, \
	denver_cluster_pwr_dwn

declare_cpu_ops_wa denver, DENVER_MIDR_PN1, \
	denver_reset_func, \
	check_errata_cve_2017_5715, \
	CPU_NO_EXTRA2_FUNC, \
	denver_core_pwr_dwn, \
	denver_cluster_pwr_dwn

declare_cpu_ops_wa denver, DENVER_MIDR_PN2, \
	denver_reset_func, \
	check_errata_cve_2017_5715, \
	CPU_NO_EXTRA2_FUNC, \
	denver_core_pwr_dwn, \
	denver_cluster_pwr_dwn

declare_cpu_ops_wa denver, DENVER_MIDR_PN3, \
	denver_reset_func, \
	check_errata_cve_2017_5715, \
	CPU_NO_EXTRA2_FUNC, \
	denver_core_pwr_dwn, \
	denver_cluster_pwr_dwn

declare_cpu_ops_wa denver, DENVER_MIDR_PN4, \
	denver_reset_func, \
	check_errata_cve_2017_5715, \
	CPU_NO_EXTRA2_FUNC, \
	denver_core_pwr_dwn, \
	denver_cluster_pwr_dwn
385