/*
 * Copyright (c) 2014-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#ifndef CPU_MACROS_S
#define CPU_MACROS_S

#include <assert_macros.S>
#include <lib/cpus/cpu_ops.h>
#include <lib/cpus/errata.h>

	/*
	 * Write given expressions as quad words
	 *
	 * _count:
	 *	Write at least _count quad words. If the given number of
	 *	expressions is less than _count, repeat the last expression to
	 *	fill _count quad words in total
	 * _rest:
	 *	Optional list of expressions. _this is for parameter extraction
	 *	only, and has no significance to the caller
	 *
	 * Invoked as:
	 *	fill_constants 2, foo, bar, blah, ...
	 */
	.macro fill_constants _count:req, _this, _rest:vararg
	  .ifgt \_count
	    /* Write the current expression */
	    .ifb \_this
	      /* Invoked with _count > 0 but no expression left to emit */
	      .error "Nothing to fill"
	    .endif
	    .quad \_this

	    /* Invoke recursively for remaining expressions */
	    .ifnb \_rest
	      fill_constants \_count-1, \_rest
	    .else
	      /* List exhausted: repeat the last expression until _count is met */
	      fill_constants \_count-1, \_this
	    .endif
	  .endif
	.endm

	/*
	 * Declare CPU operations
	 *
	 * _name:
	 *	Name of the CPU for which operations are being specified
	 * _midr:
	 *	Numeric value expected to read from CPU's MIDR
	 * _resetfunc:
	 *	Reset function for the CPU. If there's no CPU reset function,
	 *	specify CPU_NO_RESET_FUNC
	 * _extra1:
	 *	This is a placeholder for future per CPU operations.  Currently,
	 *	some CPUs use this entry to set a test function to determine if
	 *	the workaround for CVE-2017-5715 needs to be applied or not.
	 * _extra2:
	 *	This is a placeholder for future per CPU operations. Currently
	 *	some CPUs use this entry to set a function to disable the
	 *	workaround for CVE-2018-3639.
	 * _extra3:
	 *	This is a placeholder for future per CPU operations. Currently,
	 *	some CPUs use this entry to set a test function to determine if
	 *	the workaround for CVE-2022-23960 needs to be applied or not.
	 * _e_handler:
	 *	This is a placeholder for future per CPU exception handlers.
	 * _power_down_ops:
	 *	Comma-separated list of functions to perform power-down
	 *	operations on the CPU. At least one, and up to
	 *	CPU_MAX_PWR_DWN_OPS number of functions may be specified.
	 *	Starting at power level 0, these functions shall handle power
	 *	down at subsequent power levels. If there aren't exactly
	 *	CPU_MAX_PWR_DWN_OPS functions, the last specified one will be
	 *	used to handle power down at subsequent levels
	 *
	 * NOTE(review): the .quad/.word sequence below defines the binary
	 * layout of a cpu_ops entry; it presumably must stay in sync with the
	 * offsets in lib/cpus/cpu_ops.h (included above) -- confirm before
	 * adding or reordering fields.
	 */
	.macro declare_cpu_ops_base _name:req, _midr:req, _resetfunc:req, \
		_extra1:req, _extra2:req, _extra3:req, _e_handler:req, _power_down_ops:vararg
	.section .cpu_ops, "a"
	.align 3
	.type cpu_ops_\_name, %object
	.quad \_midr
#if defined(IMAGE_AT_EL3)
	/* Reset function slot only exists in images that run at EL3 */
	.quad \_resetfunc
#endif
	.quad \_extra1
	.quad \_extra2
	.quad \_extra3
	.quad \_e_handler
#ifdef IMAGE_BL31
	/* Insert list of functions */
	fill_constants CPU_MAX_PWR_DWN_OPS, \_power_down_ops
#endif
	/*
	 * It is possible (although unlikely) that a cpu may have no errata in
	 * code. In that case the start label will not be defined. The list is
	 * intended to be used in a loop, so define it as zero-length for
	 * predictable behaviour. Since this macro is always called at the end
	 * of the cpu file (after all errata have been parsed) we can be sure
	 * that we are at the end of the list. Some cpus call declare_cpu_ops
	 * twice, so only do this once.
	 */
	.pushsection .rodata.errata_entries
	.ifndef \_name\()_errata_list_start
		\_name\()_errata_list_start:
	.endif
	.ifndef \_name\()_errata_list_end
		\_name\()_errata_list_end:
	.endif
	.popsection

	/* and now put them in cpu_ops */
	.quad \_name\()_errata_list_start
	.quad \_name\()_errata_list_end

#if REPORT_ERRATA
	/*
	 * The _cpu_str label doubles as the "already emitted" guard for cpus
	 * that invoke declare_cpu_ops twice.
	 */
	.ifndef \_name\()_cpu_str
	  /*
	   * Place errata reported flag, and the spinlock to arbitrate access to
	   * it in the data section.
	   */
	  .pushsection .data
	  define_asm_spinlock \_name\()_errata_lock
	  \_name\()_errata_reported:
	  .word	0
	  .popsection

	  /* Place CPU string in rodata */
	  .pushsection .rodata
	  \_name\()_cpu_str:
	  .asciz "\_name"
	  .popsection
	.endif

	.quad \_name\()_cpu_str

#ifdef IMAGE_BL31
	/* Pointers to errata lock and reported flag */
	.quad \_name\()_errata_lock
	.quad \_name\()_errata_reported
#endif /* IMAGE_BL31 */
#endif /* REPORT_ERRATA */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	/* Register-dump hook used by BL31 crash reporting */
	.quad \_name\()_cpu_reg_dump
#endif
	.endm

	/* Convenience wrapper: no CVE workaround hooks and no exception
	 * handler (extra1/2/3 and e_handler all passed as 0) */
	.macro declare_cpu_ops _name:req, _midr:req, _resetfunc:req, \
		_power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, 0, 0, 0, 0, \
			\_power_down_ops
	.endm

	/* Wrapper that additionally installs a per-CPU exception handler;
	 * the CVE workaround hooks (extra1/2/3) are left at 0 */
	.macro declare_cpu_ops_eh _name:req, _midr:req, _resetfunc:req, \
		_e_handler:req, _power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
			0, 0, 0, \_e_handler, \_power_down_ops
	.endm

	/* Wrapper for CPUs that supply CVE workaround hooks (extra1/2/3);
	 * no exception handler is installed (passed as 0) */
	.macro declare_cpu_ops_wa _name:req, _midr:req, \
		_resetfunc:req, _extra1:req, _extra2:req, \
		_extra3:req, _power_down_ops:vararg
		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
			\_extra1, \_extra2, \_extra3, 0, \_power_down_ops
	.endm

	/*
	 * This macro is used on some CPUs to detect if they are vulnerable
	 * to CVE-2017-5715.
	 *
	 * _reg:
	 *	Scratch register; clobbered (holds the CSV2 field on exit)
	 * _label:
	 *	Branched to when CSV2 != 0, i.e. when no mitigation is needed
	 */
	.macro	cpu_check_csv2 _reg _label
	mrs	\_reg, id_aa64pfr0_el1
	ubfx	\_reg, \_reg, #ID_AA64PFR0_CSV2_SHIFT, #ID_AA64PFR0_CSV2_LENGTH
	/*
	 * If the field equals 1, branch targets trained in one context cannot
	 * affect speculative execution in a different context.
	 *
	 * If the field equals 2, it means that the system is also aware of
	 * SCXTNUM_ELx register contexts. We aren't using them in the TF, so we
	 * expect users of the registers to do the right thing.
	 *
	 * Only apply mitigations if the value of this field is 0.
	 */
#if ENABLE_ASSERTIONS
	cmp	\_reg, #3 /* Only values 0 to 2 are expected */
	ASM_ASSERT(lo)
#endif

	cmp	\_reg, #0
	bne	\_label
	.endm

194	/*
195	 * Helper macro that reads the part number of the current
196	 * CPU and jumps to the given label if it matches the CPU
197	 * MIDR provided.
198	 *
199	 * Clobbers x0.
200	 */
201	.macro  jump_if_cpu_midr _cpu_midr, _label
202	mrs	x0, midr_el1
203	ubfx	x0, x0, MIDR_PN_SHIFT, #12
204	cmp	w0, #((\_cpu_midr >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
205	b.eq	\_label
206	.endm
207
208
/*
 * Workaround wrappers for errata that apply at reset or runtime. Reset errata
 * will be applied automatically
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * _apply_at_reset:
 *	Whether the erratum should be automatically applied at reset
 */
.macro add_erratum_entry _cpu:req, _cve:req, _id:req, _chosen:req, _apply_at_reset:req
	.pushsection .rodata.errata_entries
		.align	3
		/* Open the per-cpu errata list on first use */
		.ifndef \_cpu\()_errata_list_start
		\_cpu\()_errata_list_start:
		.endif

		/* check if unused and compile out if no references */
		.if \_apply_at_reset && \_chosen
			.quad	erratum_\_cpu\()_\_id\()_wa
		.else
			/* Zero marks a runtime-only (or unchosen) erratum; the
			 * reset loop skips entries whose function slot is 0 */
			.quad	0
		.endif
		/* TODO(errata ABI): this prevents all checker functions from
		 * being optimised away. Can be done away with unless the ABI
		 * needs them */
		.quad	check_erratum_\_cpu\()_\_id
		/* Will fit CVEs with up to 10 characters in the ID field */
		.word	\_id
		.hword	\_cve
		.byte	\_chosen
		/* TODO(errata ABI): mitigated field for known but unmitigated
		 * errata */
		.byte	0x1
	.popsection
.endm

/*
 * Internal helper: emit the errata entry and open the workaround function.
 * Falls through into the workaround body with x8 = saved return address and
 * x7 = cpu_rev_var; the body is skipped when the checker returns 0.
 */
.macro _workaround_start _cpu:req, _cve:req, _id:req, _chosen:req, _apply_at_reset:req
	add_erratum_entry \_cpu, \_cve, \_id, \_chosen, \_apply_at_reset

	func erratum_\_cpu\()_\_id\()_wa
		mov	x8, x30

		/* save rev_var for workarounds that might need it but don't
		 * restore to x0 because few will care */
		mov	x7, x0
		bl	check_erratum_\_cpu\()_\_id
		/* checker result is in x0; zero means "does not apply" */
		cbz	x0, erratum_\_cpu\()_\_id\()_skip
.endm

/*
 * Internal helper: close a workaround function opened by _workaround_start.
 * Defines the skip label and returns via x8 (link register saved there).
 */
.macro _workaround_end _cpu:req, _id:req
	erratum_\_cpu\()_\_id\()_skip:
		ret	x8
	endfunc erratum_\_cpu\()_\_id\()_wa
.endm

/*******************************************************************************
 * Errata workaround wrappers
 ******************************************************************************/
/*
 * Workaround wrappers for errata that apply at reset or runtime. Reset errata
 * will be applied automatically
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * in body:
 *	clobber x0 to x7 (please only use those)
 *	argument x7 - cpu_rev_var
 *
 * _wa clobbers: x0-x8 (PCS compliant)
 */
.macro workaround_reset_start _cpu:req, _cve:req, _id:req, _chosen:req
	/* _apply_at_reset=1: entry is picked up by the reset-time loop */
	_workaround_start \_cpu, \_cve, \_id, \_chosen, 1
.endm

/*
 * See `workaround_reset_start` for usage info. Additional arguments:
 *
 * _midr:
 *	Check if CPU's MIDR matches the CPU it's meant for. Must be specified
 *	for errata applied in generic code
 */
.macro workaround_runtime_start _cpu:req, _cve:req, _id:req, _chosen:req, _midr
	/*
	 * Let errata specify if they need MIDR checking. Sadly, storing the
	 * MIDR in an .equ to retrieve automatically blows up as it stores some
	 * brackets in the symbol
	 */
	.ifnb \_midr
		/* On a MIDR mismatch, branch straight to the skip label */
		jump_if_cpu_midr \_midr, 1f
		b	erratum_\_cpu\()_\_id\()_skip

		1:
	.endif
	/* _apply_at_reset=0: the entry's reset function slot stays zero */
	_workaround_start \_cpu, \_cve, \_id, \_chosen, 0
.endm

/*
 * Usage and arguments identical to `workaround_reset_start`. The _cve argument
 * is kept here so the same #define can be used as that macro
 */
.macro workaround_reset_end _cpu:req, _cve:req, _id:req
	_workaround_end \_cpu, \_id
.endm

/*
 * See `workaround_reset_start` for usage info. The _cve argument is kept here
 * so the same #define can be used as that macro. Additional arguments:
 *
 * _no_isb:
 *	Optionally do not include the trailing isb. Please disable with the
 *	NO_ISB macro
 */
.macro workaround_runtime_end _cpu:req, _cve:req, _id:req, _no_isb
	/*
	 * Runtime errata do not have a reset function to call the isb for them
	 * and missing the isb could be very problematic. It is also likely as
	 * they tend to be scattered in generic code.
	 */
	.ifb \_no_isb
		/* emitted by default; suppressed when _no_isb is non-blank */
		isb
	.endif
	_workaround_end \_cpu, \_id
.endm

/*******************************************************************************
 * Errata workaround helpers
 ******************************************************************************/
/*
 * Set a bit in a system register. Can set multiple bits but is limited by the
 *  way the ORR instruction encodes them.
 *
 * _reg:
 *	Register to write to
 *
 * _bit:
 *	Bit to set. Please use a descriptive #define
 *
 * _assert:
 *	Optionally whether to read back and assert that the bit has been
 *	written. Please disable with NO_ASSERT macro
 *
 * clobbers: x1
 *
 * NOTE(review): the _assert parameter is accepted but not referenced below --
 * no read-back check is emitted regardless of its value. Confirm whether the
 * assertion path is implemented elsewhere or still pending.
 */
.macro sysreg_bit_set _reg:req, _bit:req, _assert=1
	mrs	x1, \_reg
	orr	x1, x1, #\_bit
	msr	\_reg, x1
.endm

/*
 * Clear a bit in a system register. Can clear multiple bits but is limited by
 *  the way the BIC instruction encodes them.
 *
 * see sysreg_bit_set for usage
 *
 * clobbers: x1
 */
.macro sysreg_bit_clear _reg:req, _bit:req
	mrs	x1, \_reg
	bic	x1, x1, #\_bit
	msr	\_reg, x1
.endm

/*
 * Install a new EL3 vector table (writes VBAR_EL3).
 *
 * _table:
 *	Label of the vector table. adr is PC-relative with roughly +/-1MB
 *	reach, so the table must be close to the call site -- presumably
 *	guaranteed by the image layout; confirm if relocating code.
 *
 * clobbers: x1
 */
.macro override_vector_table _table:req
	adr	x1, \_table
	msr	vbar_el3, x1
.endm

/*
 * BFI : Inserts bitfield into a system register.
 *
 * BFI Rd, Rn, #lsb, #width
 *
 * _src is an immediate constant; see sysreg_bitfield_insert_from_gpr for the
 * register-source variant.
 *
 * clobbers: x0, x1
 */
.macro sysreg_bitfield_insert _reg:req, _src:req, _lsb:req, _width:req
	/* Source value for BFI */
	mov	x1, #\_src
	mrs	x0, \_reg
	bfi	x0, x1, #\_lsb, #\_width
	msr	\_reg, x0
.endm

/*
 * Variant of sysreg_bitfield_insert that takes the field value from general
 * purpose register _gpr instead of an immediate.
 *
 * clobbers: x0, x1
 */
.macro sysreg_bitfield_insert_from_gpr _reg:req, _gpr:req, _lsb:req, _width:req
	/* Source value in register for BFI */
	mov	x1, \_gpr
	mrs	x0, \_reg
	bfi	x0, x1, #\_lsb, #\_width
	msr	\_reg, x0
.endm

/*
 * Apply erratum
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _chosen:
 *	Compile time flag on whether the erratum is included
 *
 * _get_rev:
 *	Optional parameter that determines whether to insert a call to the CPU revision fetching
 *	procedure. Stores the result of this in the temporary register x10 to allow for chaining
 *
 * clobbers: x0-x10 (PCS compliant)
 */
.macro apply_erratum _cpu:req, _cve:req, _id:req, _chosen:req, _get_rev=GET_CPU_REV
	.if (\_chosen & \_get_rev)
		/* fetch the revision and cache it in x10 for chained calls */
		mov	x9, x30
		bl	cpu_get_rev_var
		mov	x10, x0
	.elseif (\_chosen)
		/* reuse the revision cached in x10 by an earlier invocation */
		mov	x9, x30
		mov	x0, x10
	.endif

	.if \_chosen
		bl	erratum_\_cpu\()_\_id\()_wa
		/* restore the link register saved in x9 above */
		mov	x30, x9
	.endif
.endm

/*
 * Helpers to select which revisions errata apply to. Don't leave a link
 * register as the cpu_rev_var_*** will call the ret and we can save on one.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * _cve:
 *	Whether erratum is a CVE. CVE year if yes, 0 otherwise
 *
 * _id:
 *	Erratum or CVE number. Please combine with previous field with ERRATUM
 *	or CVE macros
 *
 * _rev_num:
 *	Revision to apply to
 *
 * in body:
 *	clobber: x0 to x4
 *	argument: x0 - cpu_rev_var
 */
.macro check_erratum_ls _cpu:req, _cve:req, _id:req, _rev_num:req
	func check_erratum_\_cpu\()_\_id
		mov	x1, #\_rev_num
		/* tail call: cpu_rev_var_ls returns directly to our caller */
		b	cpu_rev_var_ls
	endfunc check_erratum_\_cpu\()_\_id
.endm

/* As check_erratum_ls, but delegates the comparison to cpu_rev_var_hs */
.macro check_erratum_hs _cpu:req, _cve:req, _id:req, _rev_num:req
	func check_erratum_\_cpu\()_\_id
		mov	x1, #\_rev_num
		/* tail call: cpu_rev_var_hs returns directly to our caller */
		b	cpu_rev_var_hs
	endfunc check_erratum_\_cpu\()_\_id
.endm

/* As check_erratum_ls, but passes a lo/hi revision pair to cpu_rev_var_range */
.macro check_erratum_range _cpu:req, _cve:req, _id:req, _rev_num_lo:req, _rev_num_hi:req
	func check_erratum_\_cpu\()_\_id
		mov	x1, #\_rev_num_lo
		mov	x2, #\_rev_num_hi
		/* tail call: cpu_rev_var_range returns directly to our caller */
		b	cpu_rev_var_range
	endfunc check_erratum_\_cpu\()_\_id
.endm

/*
 * Checker for errata whose applicability is decided purely at build time:
 * returns ERRATA_APPLIES iff the erratum was compiled in, regardless of the
 * CPU revision.
 */
.macro check_erratum_chosen _cpu:req, _cve:req, _id:req, _chosen:req
	func check_erratum_\_cpu\()_\_id
		.if \_chosen
			mov	x0, #ERRATA_APPLIES
		.else
			mov	x0, #ERRATA_MISSING
		.endif
		ret
	endfunc check_erratum_\_cpu\()_\_id
.endm

/* provide a shorthand for the name format for annoying errata */
.macro check_erratum_custom_start _cpu:req, _cve:req, _id:req
	/* checker body is written at the invocation site */
	func check_erratum_\_cpu\()_\_id
.endm

/* closes a checker opened with check_erratum_custom_start */
.macro check_erratum_custom_end _cpu:req, _cve:req, _id:req
	endfunc check_erratum_\_cpu\()_\_id
.endm


/*******************************************************************************
 * CPU reset function wrapper
 ******************************************************************************/

/*
 * Wrapper to automatically apply all reset-time errata. Will end with an isb.
 *
 * _cpu:
 *	Name of cpu as given to declare_cpu_ops
 *
 * in body:
 *	clobber x8 to x14
 *	argument x14 - cpu_rev_var
 */
.macro cpu_reset_func_start _cpu:req
	func \_cpu\()_reset_func
		/* x15 holds the return address until cpu_reset_func_end */
		mov	x15, x30
		bl	cpu_get_rev_var
		mov	x14, x0

		/* short circuit the location to avoid searching the list */
		adrp	x12, \_cpu\()_errata_list_start
		add	x12, x12, :lo12:\_cpu\()_errata_list_start
		adrp	x13, \_cpu\()_errata_list_end
		add	x13, x13, :lo12:\_cpu\()_errata_list_end

	errata_begin:
		/* if head catches up with end of list, exit */
		cmp	x12, x13
		b.eq	errata_end

		/* entry layout is the one emitted by add_erratum_entry */
		ldr	x10, [x12, #ERRATUM_WA_FUNC]
		/* TODO(errata ABI): check mitigated and checker function fields
		 * for 0 */
		ldrb	w11, [x12, #ERRATUM_CHOSEN]

		/* skip if not chosen */
		cbz	x11, 1f
		/* skip if runtime erratum */
		cbz	x10, 1f

		/* put cpu revision in x0 and call workaround */
		mov	x0, x14
		blr	x10
	1:
		add	x12, x12, #ERRATUM_ENTRY_SIZE
		b	errata_begin
	errata_end:
.endm

/*
 * Closes a reset function opened with cpu_reset_func_start: synchronise the
 * applied workarounds with an isb and return via x15 (saved link register).
 */
.macro cpu_reset_func_end _cpu:req
		isb
		ret	x15
	endfunc \_cpu\()_reset_func
.endm

#endif /* CPU_MACROS_S */