/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H

#include <asm-generic/export.h>

#include <asm/asm-offsets.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>

	/*
	 * Provide a wxN alias for each wN register so that we can paste an xN
	 * reference after a 'w' to obtain the 32-bit version.
	 */
	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
	wx\n	.req	w\n
	.endr

	.macro save_and_disable_daif, flags
	mrs	\flags, daif
	msr	daifset, #0xf
	.endm

	.macro disable_daif
	msr	daifset, #0xf
	.endm

	.macro enable_daif
	msr	daifclr, #0xf
	.endm

	.macro	restore_daif, flags:req
	msr	daif, \flags
	.endm

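	/*
	 * Illustrative usage (not part of the original file): mask all DAIF
	 * exceptions around a critical section and restore the previous
	 * state afterwards, using x2 to hold the saved flags:
	 *
	 *	save_and_disable_daif x2
	 *	...				// D, A, I and F masked here
	 *	restore_daif x2
	 */
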
	/* IRQ is the lowest priority flag; unconditionally unmask the rest. */
	.macro enable_da_f
	msr	daifclr, #(8 | 4 | 1)
	.endm

/*
 * Save/restore interrupts.
 */
	.macro	save_and_disable_irq, flags
	mrs	\flags, daif
	msr	daifset, #2
	.endm

	.macro	restore_irq, flags
	msr	daif, \flags
	.endm

	.macro	enable_dbg
	msr	daifclr, #8
	.endm

	.macro	disable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	bic	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
	isb	// Synchronise with enable_dbg
9990:
	.endm

	/* call with daif masked */
	.macro	enable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	orr	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
9990:
	.endm

/*
 * RAS Error Synchronization barrier
 */
	.macro  esb
#ifdef CONFIG_ARM64_RAS_EXTN
	hint    #16
#else
	nop
#endif
	.endm

/*
 * Value prediction barrier
 */
	.macro	csdb
	hint	#20
	.endm

/*
 * Clear Branch History instruction
 */
	.macro clearbhb
	hint	#22
	.endm

/*
 * Speculation barrier
 */
	.macro	sb
alternative_if_not ARM64_HAS_SB
	dsb	nsh
	isb
alternative_else
	SB_BARRIER_INSN
	nop
alternative_endif
	.endm

/*
 * NOP sequence
 */
	.macro	nops, num
	.rept	\num
	nop
	.endr
	.endm

/*
 * Emit an entry into the exception table
 */
	.macro		_asm_extable, from, to
	.pushsection	__ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
	.endm

#define USER(l, x...)				\
9999:	x;					\
	_asm_extable	9999b, l

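/*
 * Illustrative usage (not part of the original file): perform an
 * unprivileged store to a user address in x0 and branch to a local
 * fixup label if it faults, via the exception table entry that
 * USER() emits:
 *
 *	USER(9f, sttr	w1, [x0])	// may fault
 *	...
 * 9:	// fault handling path
 */
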
/*
 * Register aliases.
 */
lr	.req	x30		// link register

/*
 * Vector entry
 */
	 .macro	ventry	label
	.align	7
	b	\label
	.endm

/*
 * Select code when configured for BE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_BE(code...) code
#else
#define CPU_BE(code...)
#endif

/*
 * Select code when configured for LE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_LE(code...)
#else
#define CPU_LE(code...) code
#endif

/*
 * Define a macro that constructs a 64-bit value by concatenating two
 * 32-bit registers. Note that on big endian systems the order of the
 * registers is swapped.
 */
#ifndef CONFIG_CPU_BIG_ENDIAN
	.macro	regs_to_64, rd, lbits, hbits
#else
	.macro	regs_to_64, rd, hbits, lbits
#endif
	orr	\rd, \lbits, \hbits, lsl #32
	.endm

/*
 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
 * <symbol> is within the range +/- 4 GB of the PC.
 */
	/*
	 * @dst: destination register (64 bit wide)
	 * @sym: name of the symbol
	 */
	.macro	adr_l, dst, sym
	adrp	\dst, \sym
	add	\dst, \dst, :lo12:\sym
	.endm

	/*
	 * @dst: destination register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: optional 64-bit scratch register to be used if <dst> is a
	 *       32-bit wide register, in which case it cannot be used to hold
	 *       the address
	 */
	.macro	ldr_l, dst, sym, tmp=
	.ifb	\tmp
	adrp	\dst, \sym
	ldr	\dst, [\dst, :lo12:\sym]
	.else
	adrp	\tmp, \sym
	ldr	\dst, [\tmp, :lo12:\sym]
	.endif
	.endm

	/*
	 * @src: source register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: mandatory 64-bit scratch register to calculate the address
	 *       while <src> needs to be preserved.
	 */
	.macro	str_l, src, sym, tmp
	adrp	\tmp, \sym
	str	\src, [\tmp, :lo12:\sym]
	.endm

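	/*
	 * Illustrative usage (not part of the original file), assuming a
	 * symbol 'some_symbol' within +/- 4 GB of the PC:
	 *
	 *	adr_l	x0, some_symbol		// x0 = &some_symbol
	 *	ldr_l	w1, some_symbol, x2	// w1 = some_symbol, x2 scratch
	 *	str_l	w1, some_symbol, x2	// some_symbol = w1, x2 scratch
	 */
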
	/*
	 * @dst: destination register
	 */
#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
	.macro	this_cpu_offset, dst
	mrs	\dst, tpidr_el2
	.endm
#else
	.macro	this_cpu_offset, dst
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\dst, tpidr_el1
alternative_else
	mrs	\dst, tpidr_el2
alternative_endif
	.endm
#endif

	/*
	 * @dst: Result of per_cpu(sym, smp_processor_id()) (can be SP)
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro adr_this_cpu, dst, sym, tmp
	adrp	\tmp, \sym
	add	\dst, \tmp, #:lo12:\sym
	this_cpu_offset \tmp
	add	\dst, \dst, \tmp
	.endm

	/*
	 * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro ldr_this_cpu dst, sym, tmp
	adr_l	\dst, \sym
	this_cpu_offset \tmp
	ldr	\dst, [\dst, \tmp]
	.endm

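	/*
	 * Illustrative usage (not part of the original file), assuming a
	 * hypothetical per-cpu variable 'my_percpu_var':
	 *
	 *	adr_this_cpu	x0, my_percpu_var, x1	// x0 = this_cpu_ptr(&my_percpu_var)
	 *	ldr_this_cpu	x0, my_percpu_var, x1	// x0 = this CPU's copy of the value
	 */
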
/*
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
 */
	.macro	vma_vm_mm, rd, rn
	ldr	\rd, [\rn, #VMA_VM_MM]
	.endm

/*
 * read_ctr - read CTR_EL0. If the system has mismatched register fields,
 * provide the system wide safe value from arm64_ftr_reg_ctrel0.sys_val
 */
	.macro	read_ctr, reg
alternative_if_not ARM64_MISMATCHED_CACHE_TYPE
	mrs	\reg, ctr_el0			// read CTR
	nop
alternative_else
	ldr_l	\reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
alternative_endif
	.endm


/*
 * raw_dcache_line_size - get the minimum D-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_dcache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * dcache_line_size - get the safe D-cache line size across all CPUs
 */
	.macro	dcache_line_size, reg, tmp
	read_ctr	\tmp
	ubfm		\tmp, \tmp, #16, #19	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm

/*
 * raw_icache_line_size - get the minimum I-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_icache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	and	\tmp, \tmp, #0xf		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * icache_line_size - get the safe I-cache line size across all CPUs
 */
	.macro	icache_line_size, reg, tmp
	read_ctr	\tmp
	and		\tmp, \tmp, #0xf	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm

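	/*
	 * Illustrative usage (not part of the original file): load the
	 * system-wide safe D-cache line size in bytes into x1, using x2
	 * as a scratch register:
	 *
	 *	dcache_line_size x1, x2
	 */
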
/*
 * tcr_set_t0sz - update TCR.T0SZ so that we can load the ID map
 */
	.macro	tcr_set_t0sz, valreg, t0sz
	bfi	\valreg, \t0sz, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
	.endm

/*
 * tcr_set_t1sz - update TCR.T1SZ
 */
	.macro	tcr_set_t1sz, valreg, t1sz
	bfi	\valreg, \t1sz, #TCR_T1SZ_OFFSET, #TCR_TxSZ_WIDTH
	.endm

/*
 * tcr_compute_pa_size - set TCR.(I)PS to the highest supported
 * ID_AA64MMFR0_EL1.PARange value
 *
 *	tcr:		register with the TCR_ELx value to be updated
 *	pos:		IPS or PS bitfield position
 *	tmp{0,1}:	temporary registers
 */
	.macro	tcr_compute_pa_size, tcr, pos, tmp0, tmp1
	mrs	\tmp0, ID_AA64MMFR0_EL1
	// Narrow PARange to fit the PS field in TCR_ELx
	ubfx	\tmp0, \tmp0, #ID_AA64MMFR0_PARANGE_SHIFT, #3
	mov	\tmp1, #ID_AA64MMFR0_PARANGE_MAX
	cmp	\tmp0, \tmp1
	csel	\tmp0, \tmp1, \tmp0, hi
	bfi	\tcr, \tmp0, \pos, #3
	.endm

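	/*
	 * Illustrative usage (not part of the original file): clamp the
	 * IPS field of the TCR_EL1 value held in x10, using x5 and x6 as
	 * scratch registers (TCR_IPS_SHIFT assumed to be defined):
	 *
	 *	tcr_compute_pa_size x10, #TCR_IPS_SHIFT, x5, x6
	 */
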
/*
 * Macro to perform data cache maintenance for the interval
 * [kaddr, kaddr + size)
 *
 * 	op:		operation passed to dc instruction
 * 	domain:		domain used in dsb instruction
 * 	kaddr:		starting virtual address of the region
 * 	size:		size of the region
 * 	Corrupts:	kaddr, size, tmp1, tmp2
 */
	.macro __dcache_op_workaround_clean_cache, op, kaddr
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
	dc	\op, \kaddr
alternative_else
	dc	civac, \kaddr
alternative_endif
	.endm

	.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
	dcache_line_size \tmp1, \tmp2
	add	\size, \kaddr, \size
	sub	\tmp2, \tmp1, #1
	bic	\kaddr, \kaddr, \tmp2
9998:
	.ifc	\op, cvau
	__dcache_op_workaround_clean_cache \op, \kaddr
	.else
	.ifc	\op, cvac
	__dcache_op_workaround_clean_cache \op, \kaddr
	.else
	.ifc	\op, cvap
	sys	3, c7, c12, 1, \kaddr	// dc cvap
	.else
	.ifc	\op, cvadp
	sys	3, c7, c13, 1, \kaddr	// dc cvadp
	.else
	dc	\op, \kaddr
	.endif
	.endif
	.endif
	.endif
	add	\kaddr, \kaddr, \tmp1
	cmp	\kaddr, \size
	b.lo	9998b
	dsb	\domain
	.endm

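	/*
	 * Illustrative usage (not part of the original file): clean and
	 * invalidate the region starting at x0 of length x1 to the point
	 * of coherency (x0, x1, x2 and x3 are corrupted):
	 *
	 *	dcache_by_line_op civac, sy, x0, x1, x2, x3
	 */
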
/*
 * Macro to perform instruction cache maintenance for the interval
 * [start, end)
 *
 * 	start, end:	virtual addresses describing the region
 *	label:		A label to branch to on user fault.
 * 	Corrupts:	tmp1, tmp2
 */
	.macro invalidate_icache_by_line start, end, tmp1, tmp2, label
	icache_line_size \tmp1, \tmp2
	sub	\tmp2, \tmp1, #1
	bic	\tmp2, \start, \tmp2
9997:
USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
	add	\tmp2, \tmp2, \tmp1
	cmp	\tmp2, \end
	b.lo	9997b
	dsb	ish
	isb
	.endm

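	/*
	 * Illustrative usage (not part of the original file): invalidate
	 * the I-cache for [x0, x1), branching to a local label on a
	 * user-space fault (x2 and x3 are corrupted):
	 *
	 *	invalidate_icache_by_line x0, x1, x2, x3, 9f
	 *	...
	 * 9:	// fault handling path
	 */
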
/*
 * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
 */
	.macro	reset_pmuserenr_el0, tmpreg
	mrs	\tmpreg, id_aa64dfr0_el1
	sbfx	\tmpreg, \tmpreg, #ID_AA64DFR0_PMUVER_SHIFT, #4
	cmp	\tmpreg, #1			// Skip if no PMU present
	b.lt	9000f
	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
9000:
	.endm

/*
 * reset_amuserenr_el0 - reset AMUSERENR_EL0 if AMUv1 present
 */
	.macro	reset_amuserenr_el0, tmpreg
	mrs	\tmpreg, id_aa64pfr0_el1	// Check ID_AA64PFR0_EL1
	ubfx	\tmpreg, \tmpreg, #ID_AA64PFR0_AMU_SHIFT, #4
	cbz	\tmpreg, .Lskip_\@		// Skip if no AMU present
	msr_s	SYS_AMUSERENR_EL0, xzr		// Disable AMU access from EL0
.Lskip_\@:
	.endm
/*
 * copy_page - copy src to dest using temp registers t1-t8
 */
	.macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
9998:	ldp	\t1, \t2, [\src]
	ldp	\t3, \t4, [\src, #16]
	ldp	\t5, \t6, [\src, #32]
	ldp	\t7, \t8, [\src, #48]
	add	\src, \src, #64
	stnp	\t1, \t2, [\dest]
	stnp	\t3, \t4, [\dest, #16]
	stnp	\t5, \t6, [\dest, #32]
	stnp	\t7, \t8, [\dest, #48]
	add	\dest, \dest, #64
	tst	\src, #(PAGE_SIZE - 1)
	b.ne	9998b
	.endm

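	/*
	 * Illustrative usage (not part of the original file): copy one
	 * page from the page-aligned address in x1 to the one in x0;
	 * x0 and x1 are advanced past the page and x2-x9 are corrupted:
	 *
	 *	copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
	 */
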
/*
 * Annotate a function as being unsuitable for kprobes.
 */
#ifdef CONFIG_KPROBES
#define NOKPROBE(x)				\
	.pushsection "_kprobe_blacklist", "aw";	\
	.quad	x;				\
	.popsection;
#else
#define NOKPROBE(x)
#endif

#ifdef CONFIG_KASAN
#define EXPORT_SYMBOL_NOKASAN(name)
#else
#define EXPORT_SYMBOL_NOKASAN(name)	EXPORT_SYMBOL(name)
#endif

	/*
	 * Emit a 64-bit absolute little endian symbol reference in a way that
	 * ensures that it will be resolved at build time, even when building a
	 * PIE binary. This requires cooperation from the linker script, which
	 * must emit the lo32/hi32 halves individually.
	 */
	.macro	le64sym, sym
	.long	\sym\()_lo32
	.long	\sym\()_hi32
	.endm

	/*
	 * mov_q - move an immediate constant into a 64-bit register using
	 *         between 2 and 4 movz/movk instructions (depending on the
	 *         magnitude and sign of the operand)
	 */
	.macro	mov_q, reg, val
	.if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
	movz	\reg, :abs_g1_s:\val
	.else
	.if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
	movz	\reg, :abs_g2_s:\val
	.else
	movz	\reg, :abs_g3:\val
	movk	\reg, :abs_g2_nc:\val
	.endif
	movk	\reg, :abs_g1_nc:\val
	.endif
	movk	\reg, :abs_g0_nc:\val
	.endm

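	/*
	 * Illustrative usage (not part of the original file): load a
	 * 64-bit constant into x0 without a literal pool access:
	 *
	 *	mov_q	x0, 0x0123456789abcdef
	 */
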
/*
 * Return the current task_struct.
 */
	.macro	get_current_task, rd
	mrs	\rd, sp_el0
	.endm

/*
 * Offset ttbr1 to allow for 48-bit kernel VAs set with 52-bit PTRS_PER_PGD.
 * orr is used as it can cover the immediate value (and is idempotent).
 * In future this may be nop'ed out when dealing with 52-bit kernel VAs.
 * 	ttbr: Value of ttbr to set, modified.
 */
	.macro	offset_ttbr1, ttbr, tmp
#ifdef CONFIG_ARM64_VA_BITS_52
	mrs_s	\tmp, SYS_ID_AA64MMFR2_EL1
	and	\tmp, \tmp, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
	cbnz	\tmp, .Lskipoffs_\@
	orr	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
.Lskipoffs_\@ :
#endif
	.endm

/*
 * Perform the reverse of offset_ttbr1.
 * bic is used as it can cover the immediate value and, in future, won't need
 * to be nop'ed out when dealing with 52-bit kernel VAs.
 */
	.macro	restore_ttbr1, ttbr
#ifdef CONFIG_ARM64_VA_BITS_52
	bic	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
#endif
	.endm

/*
 * Arrange a physical address in a TTBR register, taking care of 52-bit
 * addresses.
 *
 * 	phys:	physical address, preserved
 * 	ttbr:	returns the TTBR value
 */
	.macro	phys_to_ttbr, ttbr, phys
#ifdef CONFIG_ARM64_PA_BITS_52
	orr	\ttbr, \phys, \phys, lsr #46
	and	\ttbr, \ttbr, #TTBR_BADDR_MASK_52
#else
	mov	\ttbr, \phys
#endif
	.endm

	.macro	phys_to_pte, pte, phys
#ifdef CONFIG_ARM64_PA_BITS_52
	/*
	 * We assume \phys is 64K aligned and this is guaranteed by only
	 * supporting this configuration with 64K pages.
	 */
	orr	\pte, \phys, \phys, lsr #36
	and	\pte, \pte, #PTE_ADDR_MASK
#else
	mov	\pte, \phys
#endif
	.endm

	.macro	pte_to_phys, phys, pte
#ifdef CONFIG_ARM64_PA_BITS_52
	ubfiz	\phys, \pte, #(48 - 16 - 12), #16
	bfxil	\phys, \pte, #16, #32
	lsl	\phys, \phys, #16
#else
	and	\phys, \pte, #PTE_ADDR_MASK
#endif
	.endm

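	/*
	 * Illustrative usage (not part of the original file): extract the
	 * output (physical) address of the pte in x1 into x0, and convert
	 * a physical address in x2 into a TTBR baddr in x3:
	 *
	 *	pte_to_phys	x0, x1
	 *	phys_to_ttbr	x3, x2
	 */
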
/*
 * tcr_clear_errata_bits - Clear TCR bits that trigger an erratum on this CPU.
 */
	.macro	tcr_clear_errata_bits, tcr, tmp1, tmp2
#ifdef CONFIG_FUJITSU_ERRATUM_010001
	mrs	\tmp1, midr_el1

	mov_q	\tmp2, MIDR_FUJITSU_ERRATUM_010001_MASK
	and	\tmp1, \tmp1, \tmp2
	mov_q	\tmp2, MIDR_FUJITSU_ERRATUM_010001
	cmp	\tmp1, \tmp2
	b.ne	10f

	mov_q	\tmp2, TCR_CLEAR_FUJITSU_ERRATUM_010001
	bic	\tcr, \tcr, \tmp2
10:
#endif /* CONFIG_FUJITSU_ERRATUM_010001 */
	.endm

/**
 * Errata workaround prior to disabling the MMU. Insert an ISB immediately
 * prior to executing the MSR that will change SCTLR_ELn[M] from a value of
 * 1 to 0.
 */
	.macro pre_disable_mmu_workaround
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
	isb
#endif
	.endm

	/*
	 * frame_push - Push @regcount callee saved registers to the stack,
	 *              starting at x19, as well as x29/x30, and set x29 to
	 *              the new value of sp. Add @extra bytes of stack space
	 *              for locals.
	 */
	.macro		frame_push, regcount:req, extra
	__frame		st, \regcount, \extra
	.endm

	/*
	 * frame_pop  - Pop the callee saved registers from the stack that were
	 *              pushed in the most recent call to frame_push, as well
	 *              as x29/x30 and any extra stack space that may have been
	 *              allocated.
	 */
	.macro		frame_pop
	__frame		ld
	.endm

	.macro		__frame_regs, reg1, reg2, op, num
	.if		.Lframe_regcount == \num
	\op\()r		\reg1, [sp, #(\num + 1) * 8]
	.elseif		.Lframe_regcount > \num
	\op\()p		\reg1, \reg2, [sp, #(\num + 1) * 8]
	.endif
	.endm

	.macro		__frame, op, regcount, extra=0
	.ifc		\op, st
	.if		(\regcount) < 0 || (\regcount) > 10
	.error		"regcount should be in the range [0 ... 10]"
	.endif
	.if		((\extra) % 16) != 0
	.error		"extra should be a multiple of 16 bytes"
	.endif
	.ifdef		.Lframe_regcount
	.if		.Lframe_regcount != -1
	.error		"frame_push/frame_pop may not be nested"
	.endif
	.endif
	.set		.Lframe_regcount, \regcount
	.set		.Lframe_extra, \extra
	.set		.Lframe_local_offset, ((\regcount + 3) / 2) * 16
	stp		x29, x30, [sp, #-.Lframe_local_offset - .Lframe_extra]!
	mov		x29, sp
	.endif

	__frame_regs	x19, x20, \op, 1
	__frame_regs	x21, x22, \op, 3
	__frame_regs	x23, x24, \op, 5
	__frame_regs	x25, x26, \op, 7
	__frame_regs	x27, x28, \op, 9

	.ifc		\op, ld
	.if		.Lframe_regcount == -1
	.error		"frame_push/frame_pop may not be nested"
	.endif
	ldp		x29, x30, [sp], #.Lframe_local_offset + .Lframe_extra
	.set		.Lframe_regcount, -1
	.endif
	.endm

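	/*
	 * Illustrative usage (not part of the original file): preserve
	 * x19-x21 plus x29/x30 and reserve 16 bytes for locals, then
	 * unwind before returning:
	 *
	 *	frame_push	3, 16
	 *	...
	 *	frame_pop
	 *	ret
	 */
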
	/*
	 * Check whether preempt/bh-disabled asm code should yield as soon as
	 * it is able. This is the case if we are currently running in task
	 * context, and either a softirq is pending, or the TIF_NEED_RESCHED
	 * flag is set and re-enabling preemption a single time would result in
	 * a preempt count of zero. (Note that the TIF_NEED_RESCHED flag is
	 * stored negated in the top word of the thread_info::preempt_count
	 * field)
	 */
	.macro		cond_yield, lbl:req, tmp:req, tmp2:req
	get_current_task \tmp
	ldr		\tmp, [\tmp, #TSK_TI_PREEMPT]
	/*
	 * If we are serving a softirq, there is no point in yielding: the
	 * softirq will not be preempted no matter what we do, so we should
	 * run to completion as quickly as we can.
	 */
	tbnz		\tmp, #SOFTIRQ_SHIFT, .Lnoyield_\@
#ifdef CONFIG_PREEMPTION
	sub		\tmp, \tmp, #PREEMPT_DISABLE_OFFSET
	cbz		\tmp, \lbl
#endif
	adr_l		\tmp, irq_stat + IRQ_CPUSTAT_SOFTIRQ_PENDING
	this_cpu_offset	\tmp2
	ldr		w\tmp, [\tmp, \tmp2]
	cbnz		w\tmp, \lbl	// yield on pending softirq in task context
.Lnoyield_\@:
	.endm

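	/*
	 * Illustrative usage (not part of the original file): in a long
	 * running loop, branch to a label that saves state and yields
	 * when a reschedule is due, using x8 and x9 as scratch:
	 *
	 *	cond_yield	3f, x8, x9
	 *	...
	 * 3:	// save state, yield, then resume
	 */
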
/*
 * This macro emits a program property note section identifying
 * architecture features which require special handling, mainly for
 * use in assembly files included in the VDSO.
 */

#define NT_GNU_PROPERTY_TYPE_0  5
#define GNU_PROPERTY_AARCH64_FEATURE_1_AND      0xc0000000

#define GNU_PROPERTY_AARCH64_FEATURE_1_BTI      (1U << 0)
#define GNU_PROPERTY_AARCH64_FEATURE_1_PAC      (1U << 1)

#ifdef CONFIG_ARM64_BTI_KERNEL
#define GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT		\
		((GNU_PROPERTY_AARCH64_FEATURE_1_BTI |	\
		  GNU_PROPERTY_AARCH64_FEATURE_1_PAC))
#endif

#ifdef GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT
.macro emit_aarch64_feature_1_and, feat=GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT
	.pushsection .note.gnu.property, "a"
	.align  3
	.long   2f - 1f
	.long   6f - 3f
	.long   NT_GNU_PROPERTY_TYPE_0
1:      .string "GNU"
2:
	.align  3
3:      .long   GNU_PROPERTY_AARCH64_FEATURE_1_AND
	.long   5f - 4f
4:
	/*
	 * This is described with an array of char in the Linux API
	 * spec, but the text and all other usage (including binutils,
	 * clang and GCC) treat this as a 32-bit value, so no swizzling
	 * is required for big endian.
	 */
	.long   \feat
5:
	.align  3
6:
	.popsection
.endm

#else
.macro emit_aarch64_feature_1_and, feat=0
.endm

#endif /* GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT */

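/*
 * Illustrative usage (not part of the original file): at the end of an
 * assembly file that is linked into the vDSO, emit the default feature
 * set (BTI/PAC when CONFIG_ARM64_BTI_KERNEL is enabled):
 *
 *	emit_aarch64_feature_1_and
 */
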
	.macro __mitigate_spectre_bhb_loop      tmp
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
alternative_cb  spectre_bhb_patch_loop_iter
	mov	\tmp, #32		// Patched to correct the immediate
alternative_cb_end
.Lspectre_bhb_loop\@:
	b	. + 4
	subs	\tmp, \tmp, #1
	b.ne	.Lspectre_bhb_loop\@
	sb
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	.endm

	/* Save/restore x0-x3 to/from the stack */
	.macro __mitigate_spectre_bhb_fw
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
	stp	x0, x1, [sp, #-16]!
	stp	x2, x3, [sp, #-16]!
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_3
alternative_cb	smccc_patch_fw_mitigation_conduit
	nop					// Patched to SMC/HVC #0
alternative_cb_end
	ldp	x2, x3, [sp], #16
	ldp	x0, x1, [sp], #16
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	.endm
#endif	/* __ASM_ASSEMBLER_H */