/*
 * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H

#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>

/*
 * Enable and disable interrupts.
 */
	.macro	disable_irq
	msr	daifset, #2
	.endm

	.macro	enable_irq
	msr	daifclr, #2
	.endm

	.macro	save_and_disable_irq, flags
	mrs	\flags, daif
	msr	daifset, #2
	.endm

	.macro	restore_irq, flags
	msr	daif, \flags
	.endm
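
/*
 * Usage sketch (illustrative only; the flags register x3 is an arbitrary
 * choice by the caller, not a requirement of the macros):
 *
 *	save_and_disable_irq x3		// stash DAIF, then mask IRQs
 *	...				// IRQ-free critical section
 *	restore_irq x3			// put DAIF back as it was
 */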

/*
 * Enable and disable debug exceptions.
 */
	.macro	disable_dbg
	msr	daifset, #8
	.endm

	.macro	enable_dbg
	msr	daifclr, #8
	.endm

	.macro	disable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	bic	\tmp, \tmp, #1
	msr	mdscr_el1, \tmp
	isb	// Synchronise with enable_dbg
9990:
	.endm

	.macro	enable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	disable_dbg
	mrs	\tmp, mdscr_el1
	orr	\tmp, \tmp, #1
	msr	mdscr_el1, \tmp
9990:
	.endm

/*
 * Enable both debug exceptions and interrupts. This is likely to be
 * faster than two daifclr operations, since writes to this register
 * are self-synchronising.
 */
	.macro	enable_dbg_and_irq
	msr	daifclr, #(8 | 2)
	.endm

/*
 * SMP data memory barrier
 */
	.macro	smp_dmb, opt
	dmb	\opt
	.endm

/*
 * Value prediction barrier
 */
	.macro	csdb
	hint	#20
	.endm

/*
 * Sanitise a 64-bit bounded index wrt speculation, returning zero if out
 * of bounds.
 */
	.macro	mask_nospec64, idx, limit, tmp
	sub	\tmp, \idx, \limit
	bic	\tmp, \tmp, \idx
	and	\idx, \idx, \tmp, asr #63
	csdb
	.endm
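
/*
 * Usage sketch (illustrative only; register choices are hypothetical):
 * clamp an untrusted index in x0 that must be below the limit in x1,
 * using x2 as scratch:
 *
 *	mask_nospec64 x0, x1, x2	// x0 is zeroed if x0 >= x1
 */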

/*
 * NOP sequence
 */
	.macro	nops, num
	.rept	\num
	nop
	.endr
	.endm

/*
 * Emit an entry into the exception table
 */
	.macro		_asm_extable, from, to
	.pushsection	__ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
	.endm

#define USER(l, x...)				\
9999:	x;					\
	_asm_extable	9999b, l
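
/*
 * Usage sketch (illustrative only; the label and registers are
 * hypothetical): a store to a user pointer in x1 whose fault is routed
 * to the local fixup label 9f via the exception table:
 *
 *	USER(9f, str wzr, [x1])
 * 9:	...				// fault fixup path
 */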

/*
 * Register aliases.
 */
lr	.req	x30		// link register

/*
 * Vector entry
 */
	.macro	ventry	label
	.align	7
	b	\label
	.endm

/*
 * Select code when configured for BE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_BE(code...) code
#else
#define CPU_BE(code...)
#endif

/*
 * Select code when configured for LE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_LE(code...)
#else
#define CPU_LE(code...) code
#endif

/*
 * Define a macro that constructs a 64-bit value by concatenating two
 * 32-bit registers. Note that on big endian systems the order of the
 * registers is swapped.
 */
#ifndef CONFIG_CPU_BIG_ENDIAN
	.macro	regs_to_64, rd, lbits, hbits
#else
	.macro	regs_to_64, rd, hbits, lbits
#endif
	orr	\rd, \lbits, \hbits, lsl #32
	.endm
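
/*
 * Usage sketch (illustrative only; registers are hypothetical): build a
 * 64-bit value in x0 from a 32-bit register pair in x2/x3, whose low/high
 * ordering follows the endianness-dependent calling convention:
 *
 *	regs_to_64 x0, x2, x3
 */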

/*
 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
 * <symbol> is within the range +/- 4 GB of the PC when running
 * in core kernel context. In module context, a movz/movk sequence
 * is used, since modules may be loaded far away from the kernel
 * when KASLR is in effect.
 */
	/*
	 * @dst: destination register (64 bit wide)
	 * @sym: name of the symbol
	 */
	.macro	adr_l, dst, sym
#ifndef MODULE
	adrp	\dst, \sym
	add	\dst, \dst, :lo12:\sym
#else
	movz	\dst, #:abs_g3:\sym
	movk	\dst, #:abs_g2_nc:\sym
	movk	\dst, #:abs_g1_nc:\sym
	movk	\dst, #:abs_g0_nc:\sym
#endif
	.endm
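
	/*
	 * Usage sketch (illustrative only; 'vectors' stands in for any
	 * kernel symbol):
	 *
	 *	adr_l	x0, vectors	// x0 = runtime address of vectors
	 */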

	/*
	 * @dst: destination register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: optional 64-bit scratch register to be used if <dst> is a
	 *       32-bit wide register, in which case it cannot be used to hold
	 *       the address
	 */
	.macro	ldr_l, dst, sym, tmp=
#ifndef MODULE
	.ifb	\tmp
	adrp	\dst, \sym
	ldr	\dst, [\dst, :lo12:\sym]
	.else
	adrp	\tmp, \sym
	ldr	\dst, [\tmp, :lo12:\sym]
	.endif
#else
	.ifb	\tmp
	adr_l	\dst, \sym
	ldr	\dst, [\dst]
	.else
	adr_l	\tmp, \sym
	ldr	\dst, [\tmp]
	.endif
#endif
	.endm
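
	/*
	 * Usage sketch (illustrative only; the symbol and registers are
	 * hypothetical): load a 32-bit variable into w0, using x1 as the
	 * scratch register required for a 32-bit destination:
	 *
	 *	ldr_l	w0, some_u32_var, x1
	 */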

	/*
	 * @src: source register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: mandatory 64-bit scratch register to calculate the address
	 *       while <src> needs to be preserved.
	 */
	.macro	str_l, src, sym, tmp
#ifndef MODULE
	adrp	\tmp, \sym
	str	\src, [\tmp, :lo12:\sym]
#else
	adr_l	\tmp, \sym
	str	\src, [\tmp]
#endif
	.endm
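
	/*
	 * Usage sketch (illustrative only; the symbol and registers are
	 * hypothetical): store x0 to a 64-bit variable, using x1 to form
	 * the address so that x0 is preserved:
	 *
	 *	str_l	x0, some_u64_var, x1
	 */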

	/*
	 * @dst: Result of per_cpu(sym, smp_processor_id()), can be SP for
	 *       non-module code
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro adr_this_cpu, dst, sym, tmp
#ifndef MODULE
	adrp	\tmp, \sym
	add	\dst, \tmp, #:lo12:\sym
#else
	adr_l	\dst, \sym
#endif
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\tmp, tpidr_el1
alternative_else
	mrs	\tmp, tpidr_el2
alternative_endif
	add	\dst, \dst, \tmp
	.endm

	/*
	 * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro ldr_this_cpu dst, sym, tmp
	adr_l	\dst, \sym
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\tmp, tpidr_el1
alternative_else
	mrs	\tmp, tpidr_el2
alternative_endif
	ldr	\dst, [\dst, \tmp]
	.endm
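
	/*
	 * Usage sketch (illustrative only; the per-cpu symbol and registers
	 * are hypothetical):
	 *
	 *	adr_this_cpu x0, my_percpu_var, x1	// x0 = address of this CPU's copy
	 *	ldr_this_cpu x0, my_percpu_var, x1	// x0 = value of this CPU's copy
	 */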

/*
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
 */
	.macro	vma_vm_mm, rd, rn
	ldr	\rd, [\rn, #VMA_VM_MM]
	.endm

/*
 * mmid - get context id from mm pointer (mm->context.id)
 */
	.macro	mmid, rd, rn
	ldr	\rd, [\rn, #MM_CONTEXT_ID]
	.endm
/*
 * read_ctr - read CTR_EL0. If the system has mismatched
 * cache line sizes, provide the system wide safe value
 * from arm64_ftr_reg_ctrel0.sys_val
 */
	.macro	read_ctr, reg
alternative_if_not ARM64_MISMATCHED_CACHE_LINE_SIZE
	mrs	\reg, ctr_el0			// read CTR
	nop
alternative_else
	ldr_l	\reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
alternative_endif
	.endm


/*
 * raw_dcache_line_size - get the minimum D-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_dcache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * dcache_line_size - get the safe D-cache line size across all CPUs
 */
	.macro	dcache_line_size, reg, tmp
	read_ctr	\tmp
	ubfm		\tmp, \tmp, #16, #19	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm
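
/*
 * Worked example (values for illustration): CTR_EL0.DminLine holds
 * log2(words per line), so DminLine == 4 decodes to 4 << 4 = 64 bytes,
 * the common D-cache line size on current cores.
 */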

/*
 * raw_icache_line_size - get the minimum I-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_icache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	and	\tmp, \tmp, #0xf		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * icache_line_size - get the safe I-cache line size across all CPUs
 */
	.macro	icache_line_size, reg, tmp
	read_ctr	\tmp
	and		\tmp, \tmp, #0xf	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm

/*
 * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
 */
	.macro	tcr_set_idmap_t0sz, valreg, tmpreg
#ifndef CONFIG_ARM64_VA_BITS_48
	ldr_l	\tmpreg, idmap_t0sz
	bfi	\valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
#endif
	.endm

/*
 * Macro to perform data cache maintenance for the interval
 * [kaddr, kaddr + size)
 *
 * 	op:		operation passed to dc instruction
 * 	domain:		domain used in dsb instruction
 * 	kaddr:		starting virtual address of the region
 * 	size:		size of the region
 * 	Corrupts:	kaddr, size, tmp1, tmp2
 */
	.macro __dcache_op_workaround_clean_cache, op, kaddr
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
	dc	\op, \kaddr
alternative_else
	dc	civac, \kaddr
alternative_endif
	.endm

	.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
	dcache_line_size \tmp1, \tmp2
	add	\size, \kaddr, \size
	sub	\tmp2, \tmp1, #1
	bic	\kaddr, \kaddr, \tmp2
9998:
	.ifc	\op, cvau
	__dcache_op_workaround_clean_cache \op, \kaddr
	.else
	.ifc	\op, cvac
	__dcache_op_workaround_clean_cache \op, \kaddr
	.else
	.ifc	\op, cvap
	sys	3, c7, c12, 1, \kaddr	// dc cvap
	.else
	dc	\op, \kaddr
	.endif
	.endif
	.endif
	add	\kaddr, \kaddr, \tmp1
	cmp	\kaddr, \size
	b.lo	9998b
	dsb	\domain
	.endm
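
/*
 * Usage sketch (illustrative only; register choices are hypothetical):
 * clean and invalidate the buffer [x0, x0 + x1) to the point of coherency:
 *
 *	dcache_by_line_op civac, sy, x0, x1, x2, x3
 */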

/*
 * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
 */
	.macro	reset_pmuserenr_el0, tmpreg
	mrs	\tmpreg, id_aa64dfr0_el1	// Check ID_AA64DFR0_EL1 PMUVer
	sbfx	\tmpreg, \tmpreg, #8, #4
	cmp	\tmpreg, #1			// Skip if no PMU present
	b.lt	9000f
	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
9000:
	.endm

/*
 * copy_page - copy src to dest using temp registers t1-t8
 */
	.macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
9998:	ldp	\t1, \t2, [\src]
	ldp	\t3, \t4, [\src, #16]
	ldp	\t5, \t6, [\src, #32]
	ldp	\t7, \t8, [\src, #48]
	add	\src, \src, #64
	stnp	\t1, \t2, [\dest]
	stnp	\t3, \t4, [\dest, #16]
	stnp	\t5, \t6, [\dest, #32]
	stnp	\t7, \t8, [\dest, #48]
	add	\dest, \dest, #64
	tst	\src, #(PAGE_SIZE - 1)
	b.ne	9998b
	.endm
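
/*
 * Usage sketch (illustrative only; register choices are hypothetical):
 * copy one page from [x1] to [x0], clobbering x2-x9 and advancing both
 * pointers by PAGE_SIZE:
 *
 *	copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
 */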

/*
 * Annotate a function as position independent, i.e., safe to be called before
 * the kernel virtual mapping is activated.
 */
#define ENDPIPROC(x)			\
	.globl	__pi_##x;		\
	.type	__pi_##x, %function;	\
	.set	__pi_##x, x;		\
	.size	__pi_##x, . - x;	\
	ENDPROC(x)

/*
 * Annotate a function as being unsuitable for kprobes.
 */
#ifdef CONFIG_KPROBES
#define NOKPROBE(x)				\
	.pushsection "_kprobe_blacklist", "aw";	\
	.quad	x;				\
	.popsection;
#else
#define NOKPROBE(x)
#endif
	/*
	 * Emit a 64-bit absolute little endian symbol reference in a way that
	 * ensures that it will be resolved at build time, even when building a
	 * PIE binary. This requires cooperation from the linker script, which
	 * must emit the lo32/hi32 halves individually.
	 */
	.macro	le64sym, sym
	.long	\sym\()_lo32
	.long	\sym\()_hi32
	.endm

	/*
	 * mov_q - move an immediate constant into a 64-bit register using
	 *         between 2 and 4 movz/movk instructions (depending on the
	 *         magnitude and sign of the operand)
	 */
	.macro	mov_q, reg, val
	.if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
	movz	\reg, :abs_g1_s:\val
	.else
	.if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
	movz	\reg, :abs_g2_s:\val
	.else
	movz	\reg, :abs_g3:\val
	movk	\reg, :abs_g2_nc:\val
	.endif
	movk	\reg, :abs_g1_nc:\val
	.endif
	movk	\reg, :abs_g0_nc:\val
	.endm
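
	/*
	 * Usage sketch (illustrative only; the constant is arbitrary):
	 *
	 *	mov_q	x0, 0xffff000000000000	// expands to one movz plus three movk here
	 */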

/*
 * Return the current thread_info.
 */
	.macro	get_thread_info, rd
	mrs	\rd, sp_el0
	.endm

/**
 * Errata workaround prior to disabling the MMU. Insert an ISB immediately
 * before executing the MSR that will change SCTLR_ELn[M] from 1 to 0.
 */
	.macro pre_disable_mmu_workaround
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
	isb
#endif
	.endm

	.macro	pte_to_phys, phys, pte
	and	\phys, \pte, #(((1 << (48 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
	.endm

/*
 * Check the MIDR_EL1 of the current CPU for a given model and a range of
 * variant/revision. See asm/cputype.h for the macros used below.
 *
 *	model:		MIDR_CPU_MODEL of CPU
 *	rv_min:		Minimum of MIDR_CPU_VAR_REV()
 *	rv_max:		Maximum of MIDR_CPU_VAR_REV()
 *	res:		Result register.
 *	tmp1, tmp2, tmp3: Temporary registers
 *
 * Corrupts: res, tmp1, tmp2, tmp3
 * Returns:  0, if the CPU id doesn't match. Non-zero otherwise
 */
	.macro	cpu_midr_match model, rv_min, rv_max, res, tmp1, tmp2, tmp3
	mrs		\res, midr_el1
	mov_q		\tmp1, (MIDR_REVISION_MASK | MIDR_VARIANT_MASK)
	mov_q		\tmp2, MIDR_CPU_MODEL_MASK
	and		\tmp3, \res, \tmp2	// Extract model
	and		\tmp1, \res, \tmp1	// rev & variant
	mov_q		\tmp2, \model
	cmp		\tmp3, \tmp2
	cset		\res, eq
	cbz		\res, .Ldone\@		// Model matches ?

	.if (\rv_min != 0)			// Skip min check if rv_min == 0
	mov_q		\tmp3, \rv_min
	cmp		\tmp1, \tmp3
	cset		\res, ge
	.endif					// \rv_min != 0
	/* Skip rv_max check if rv_min == rv_max && rv_min != 0 */
	.if ((\rv_min != \rv_max) || \rv_min == 0)
	mov_q		\tmp2, \rv_max
	cmp		\tmp1, \tmp2
	cset		\tmp2, le
	and		\res, \res, \tmp2
	.endif
.Ldone\@:
	.endm
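
/*
 * Usage sketch (illustrative only; the model/revision values and the branch
 * target are hypothetical, while the MIDR_* helpers come from asm/cputype.h):
 *
 *	cpu_midr_match MIDR_CORTEX_A57, MIDR_CPU_VAR_REV(0, 0), MIDR_CPU_VAR_REV(1, 2), x0, x1, x2, x3
 *	cbnz	x0, 1f			// matched: take the erratum path
 */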

#endif	/* __ASM_ASSEMBLER_H */