/*
 * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H

#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>

	.macro save_and_disable_daif, flags
	mrs	\flags, daif
	msr	daifset, #0xf
	.endm

	.macro disable_daif
	msr	daifset, #0xf
	.endm

	.macro enable_daif
	msr	daifclr, #0xf
	.endm

	.macro	restore_daif, flags:req
	msr	daif, \flags
	.endm

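/*
 * Illustrative usage (not taken from an actual caller; register choice is
 * arbitrary): mask all DAIF exceptions around a critical section and restore
 * the previous state afterwards:
 *
 *	save_and_disable_daif x2
 *	// ... code that must run with D, A, I and F masked ...
 *	restore_daif x2
 */
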
	/* Only valid for an AArch64 PSTATE value; PSR_D_BIT differs on AArch32 */
	.macro	inherit_daif, pstate:req, tmp:req
	and	\tmp, \pstate, #(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
	msr	daif, \tmp
	.endm

	/* IRQ is the lowest-priority flag; unconditionally unmask the rest. */
	.macro enable_da_f
	msr	daifclr, #(8 | 4 | 1)
	.endm

/*
 * Enable and disable interrupts.
 */
	.macro	disable_irq
	msr	daifset, #2
	.endm

	.macro	enable_irq
	msr	daifclr, #2
	.endm

	.macro	save_and_disable_irq, flags
	mrs	\flags, daif
	msr	daifset, #2
	.endm

	.macro	restore_irq, flags
	msr	daif, \flags
	.endm

	.macro	enable_dbg
	msr	daifclr, #8
	.endm

	.macro	disable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	bic	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
	isb	// Synchronise with enable_dbg
9990:
	.endm

	/* call with daif masked */
	.macro	enable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	orr	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
9990:
	.endm

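/*
 * Illustrative usage (hypothetical; assumes the thread flags have already
 * been loaded into a scratch register, e.g. from the thread_info flags):
 *
 *	disable_step_tsk x1, x2		// x1 = thread flags, x2 = scratch
 *	// ... run without MDSCR_EL1.SS single-step ...
 *	enable_step_tsk x1, x2		// call with DAIF masked
 */
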
/*
 * SMP data memory barrier
 */
	.macro	smp_dmb, opt
	dmb	\opt
	.endm

/*
 * RAS Error Synchronization barrier
 */
	.macro  esb
#ifdef CONFIG_ARM64_RAS_EXTN
	hint    #16
#else
	nop
#endif
	.endm

/*
 * Value prediction barrier
 */
	.macro	csdb
	hint	#20
	.endm

/*
 * Sanitise a 64-bit bounded index wrt speculation, returning zero if out
 * of bounds.
 */
	.macro	mask_nospec64, idx, limit, tmp
	sub	\tmp, \idx, \limit
	bic	\tmp, \tmp, \idx
	and	\idx, \idx, \tmp, asr #63
	csdb
	.endm

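/*
 * How the above works (informal note): \tmp = \idx - \limit is negative only
 * when \idx < \limit, and the "bic" clears the sign bit whenever \idx itself
 * has bit 63 set. "asr #63" therefore yields all-ones for an in-bounds index
 * (leaving \idx unchanged) and zero otherwise (forcing \idx to 0), e.g.:
 *
 *	mask_nospec64 x0, x1, x2	// x0 = index, x1 = array size, x2 = scratch
 */
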
/*
 * NOP sequence
 */
	.macro	nops, num
	.rept	\num
	nop
	.endr
	.endm

/*
 * Emit an entry into the exception table
 */
	.macro		_asm_extable, from, to
	.pushsection	__ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
	.endm

#define USER(l, x...)				\
9999:	x;					\
	_asm_extable	9999b, l

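/*
 * Illustrative usage (hypothetical label/registers): wrap an instruction
 * that may fault on a user address so that the fault branches to a local
 * fixup label via the exception table:
 *
 * USER(9f, ldr	w0, [x1])		// x1 = user address
 *	// ... success path ...
 * 9:	// fixup path on fault
 */
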
/*
 * Register aliases.
 */
lr	.req	x30		// link register

/*
 * Vector entry
 */
	 .macro	ventry	label
	.align	7
	b	\label
	.endm

/*
 * Select code when configured for BE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_BE(code...) code
#else
#define CPU_BE(code...)
#endif

/*
 * Select code when configured for LE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_LE(code...)
#else
#define CPU_LE(code...) code
#endif

/*
 * Define a macro that constructs a 64-bit value by concatenating two
 * 32-bit registers. Note that on big endian systems the order of the
 * registers is swapped.
 */
#ifndef CONFIG_CPU_BIG_ENDIAN
	.macro	regs_to_64, rd, lbits, hbits
#else
	.macro	regs_to_64, rd, hbits, lbits
#endif
	orr	\rd, \lbits, \hbits, lsl #32
	.endm

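/*
 * Illustrative usage (hypothetical): rebuild a 64-bit value that arrives
 * split across a 32-bit register pair, e.g. from an AArch32 caller; the
 * macro accounts for the endian-dependent register order:
 *
 *	regs_to_64 x2, x0, x1
 */
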
/*
 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
 * <symbol> is within the range +/- 4 GB of the PC.
 */
	/*
	 * @dst: destination register (64 bit wide)
	 * @sym: name of the symbol
	 */
	.macro	adr_l, dst, sym
	adrp	\dst, \sym
	add	\dst, \dst, :lo12:\sym
	.endm

	/*
	 * @dst: destination register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: optional 64-bit scratch register to be used if <dst> is a
	 *       32-bit wide register, in which case it cannot be used to hold
	 *       the address
	 */
	.macro	ldr_l, dst, sym, tmp=
	.ifb	\tmp
	adrp	\dst, \sym
	ldr	\dst, [\dst, :lo12:\sym]
	.else
	adrp	\tmp, \sym
	ldr	\dst, [\tmp, :lo12:\sym]
	.endif
	.endm

	/*
	 * @src: source register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: mandatory 64-bit scratch register to calculate the address
	 *       while <src> needs to be preserved.
	 */
	.macro	str_l, src, sym, tmp
	adrp	\tmp, \sym
	str	\src, [\tmp, :lo12:\sym]
	.endm

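/*
 * Illustrative usage ("some_symbol" is a placeholder for any symbol within
 * +/- 4 GB of the PC):
 *
 *	adr_l	x0, some_symbol		// x0 = address of some_symbol
 *	ldr_l	w1, some_symbol, x2	// w1 = value of some_symbol, x2 = scratch
 *	str_l	w1, some_symbol, x2	// store w1 back to some_symbol
 */
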
	/*
	 * @dst: Result of per_cpu(sym, smp_processor_id()) (can be SP)
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro adr_this_cpu, dst, sym, tmp
	adrp	\tmp, \sym
	add	\dst, \tmp, #:lo12:\sym
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\tmp, tpidr_el1
alternative_else
	mrs	\tmp, tpidr_el2
alternative_endif
	add	\dst, \dst, \tmp
	.endm

	/*
	 * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro ldr_this_cpu dst, sym, tmp
	adr_l	\dst, \sym
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\tmp, tpidr_el1
alternative_else
	mrs	\tmp, tpidr_el2
alternative_endif
	ldr	\dst, [\dst, \tmp]
	.endm

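/*
 * Illustrative usage ("some_percpu_var" is a placeholder per-cpu variable):
 *
 *	adr_this_cpu x0, some_percpu_var, x1	// x0 = address of this CPU's copy
 *	ldr_this_cpu x0, some_percpu_var, x1	// x0 = value of this CPU's copy
 */
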
/*
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
 */
	.macro	vma_vm_mm, rd, rn
	ldr	\rd, [\rn, #VMA_VM_MM]
	.endm

/*
 * mmid - get context id from mm pointer (mm->context.id)
 */
	.macro	mmid, rd, rn
	ldr	\rd, [\rn, #MM_CONTEXT_ID]
	.endm
/*
 * read_ctr - read CTR_EL0. If the system has mismatched
 * cache line sizes, provide the system wide safe value
 * from arm64_ftr_reg_ctrel0.sys_val
 */
	.macro	read_ctr, reg
alternative_if_not ARM64_MISMATCHED_CACHE_LINE_SIZE
	mrs	\reg, ctr_el0			// read CTR
	nop
alternative_else
	ldr_l	\reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
alternative_endif
	.endm


/*
 * raw_dcache_line_size - get the minimum D-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_dcache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * dcache_line_size - get the safe D-cache line size across all CPUs
 */
	.macro	dcache_line_size, reg, tmp
	read_ctr	\tmp
	ubfm		\tmp, \tmp, #16, #19	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm

/*
 * raw_icache_line_size - get the minimum I-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_icache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	and	\tmp, \tmp, #0xf		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * icache_line_size - get the safe I-cache line size across all CPUs
 */
	.macro	icache_line_size, reg, tmp
	read_ctr	\tmp
	and		\tmp, \tmp, #0xf	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm

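/*
 * Informal note on the encoding used above: CTR_EL0.DminLine (bits [19:16])
 * and CTR_EL0.IminLine (bits [3:0]) hold log2 of the line size in words, so
 * the line size in bytes is 4 << encoding. Typical usage:
 *
 *	dcache_line_size x2, x3		// x2 = safe D-cache line size in bytes
 *	icache_line_size x4, x5		// x4 = safe I-cache line size in bytes
 */
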
/*
 * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
 */
	.macro	tcr_set_idmap_t0sz, valreg, tmpreg
	ldr_l	\tmpreg, idmap_t0sz
	bfi	\valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
	.endm

/*
 * tcr_compute_pa_size - set TCR.(I)PS to the highest supported
 * ID_AA64MMFR0_EL1.PARange value
 *
 *	tcr:		register with the TCR_ELx value to be updated
 *	pos:		IPS or PS bitfield position
 *	tmp{0,1}:	temporary registers
 */
	.macro	tcr_compute_pa_size, tcr, pos, tmp0, tmp1
	mrs	\tmp0, ID_AA64MMFR0_EL1
	// Narrow PARange to fit the PS field in TCR_ELx
	ubfx	\tmp0, \tmp0, #ID_AA64MMFR0_PARANGE_SHIFT, #3
	mov	\tmp1, #ID_AA64MMFR0_PARANGE_MAX
	cmp	\tmp0, \tmp1
	csel	\tmp0, \tmp1, \tmp0, hi
	bfi	\tcr, \tmp0, \pos, #3
	.endm

/*
 * Macro to perform data cache maintenance for the interval
 * [kaddr, kaddr + size)
 *
 * 	op:		operation passed to dc instruction
 * 	domain:		domain used in dsb instruction
 * 	kaddr:		starting virtual address of the region
 * 	size:		size of the region
 * 	Corrupts:	kaddr, size, tmp1, tmp2
 */
	.macro __dcache_op_workaround_clean_cache, op, kaddr
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
	dc	\op, \kaddr
alternative_else
	dc	civac, \kaddr
alternative_endif
	.endm

	.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
	dcache_line_size \tmp1, \tmp2
	add	\size, \kaddr, \size
	sub	\tmp2, \tmp1, #1
	bic	\kaddr, \kaddr, \tmp2
9998:
	.ifc	\op, cvau
	__dcache_op_workaround_clean_cache \op, \kaddr
	.else
	.ifc	\op, cvac
	__dcache_op_workaround_clean_cache \op, \kaddr
	.else
	.ifc	\op, cvap
	sys	3, c7, c12, 1, \kaddr	// dc cvap
	.else
	dc	\op, \kaddr
	.endif
	.endif
	.endif
	add	\kaddr, \kaddr, \tmp1
	cmp	\kaddr, \size
	b.lo	9998b
	dsb	\domain
	.endm

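/*
 * Illustrative usage (register choice arbitrary): clean and invalidate the
 * buffer at [x0, x0 + x1); note that x0 and x1 are corrupted:
 *
 *	dcache_by_line_op civac, sy, x0, x1, x2, x3
 */
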
/*
 * Macro to perform instruction cache maintenance for the interval
 * [start, end)
 *
 * 	start, end:	virtual addresses describing the region
 *	label:		A label to branch to on user fault.
 * 	Corrupts:	tmp1, tmp2
 */
	.macro invalidate_icache_by_line start, end, tmp1, tmp2, label
	icache_line_size \tmp1, \tmp2
	sub	\tmp2, \tmp1, #1
	bic	\tmp2, \start, \tmp2
9997:
USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
	add	\tmp2, \tmp2, \tmp1
	cmp	\tmp2, \end
	b.lo	9997b
	dsb	ish
	isb
	.endm

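/*
 * Illustrative usage (hypothetical fixup label): invalidate the I-cache for
 * [x0, x1), branching to a local label if a user-space access faults:
 *
 *	invalidate_icache_by_line x0, x1, x2, x3, 9f
 *	// ... success path ...
 * 9:	// fixup path on fault
 */
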
/*
 * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
 */
	.macro	reset_pmuserenr_el0, tmpreg
	mrs	\tmpreg, id_aa64dfr0_el1	// Check ID_AA64DFR0_EL1 PMUVer
	sbfx	\tmpreg, \tmpreg, #8, #4
	cmp	\tmpreg, #1			// Skip if no PMU present
	b.lt	9000f
	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
9000:
	.endm

/*
 * copy_page - copy src to dest using temp registers t1-t8
 */
	.macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
9998:	ldp	\t1, \t2, [\src]
	ldp	\t3, \t4, [\src, #16]
	ldp	\t5, \t6, [\src, #32]
	ldp	\t7, \t8, [\src, #48]
	add	\src, \src, #64
	stnp	\t1, \t2, [\dest]
	stnp	\t3, \t4, [\dest, #16]
	stnp	\t5, \t6, [\dest, #32]
	stnp	\t7, \t8, [\dest, #48]
	add	\dest, \dest, #64
	tst	\src, #(PAGE_SIZE - 1)
	b.ne	9998b
	.endm

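/*
 * Illustrative usage (register choice arbitrary): the loop above stops when
 * \src reaches the next page boundary, so \src is expected to be page
 * aligned; all ten registers are clobbered:
 *
 *	copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9	// x0 = dest, x1 = src
 */
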
/*
 * Annotate a function as position independent, i.e., safe to be called before
 * the kernel virtual mapping is activated.
 */
#define ENDPIPROC(x)			\
	.globl	__pi_##x;		\
	.type	__pi_##x, %function;	\
	.set	__pi_##x, x;		\
	.size	__pi_##x, . - x;	\
	ENDPROC(x)

/*
 * Annotate a function as being unsuitable for kprobes.
 */
#ifdef CONFIG_KPROBES
#define NOKPROBE(x)				\
	.pushsection "_kprobe_blacklist", "aw";	\
	.quad	x;				\
	.popsection;
#else
#define NOKPROBE(x)
#endif
	/*
	 * Emit a 64-bit absolute little endian symbol reference in a way that
	 * ensures that it will be resolved at build time, even when building a
	 * PIE binary. This requires cooperation from the linker script, which
	 * must emit the lo32/hi32 halves individually.
	 */
	.macro	le64sym, sym
	.long	\sym\()_lo32
	.long	\sym\()_hi32
	.endm

	/*
	 * mov_q - move an immediate constant into a 64-bit register using
	 *         between 2 and 4 movz/movk instructions (depending on the
	 *         magnitude and sign of the operand)
	 */
	.macro	mov_q, reg, val
	.if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
	movz	\reg, :abs_g1_s:\val
	.else
	.if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
	movz	\reg, :abs_g2_s:\val
	.else
	movz	\reg, :abs_g3:\val
	movk	\reg, :abs_g2_nc:\val
	.endif
	movk	\reg, :abs_g1_nc:\val
	.endif
	movk	\reg, :abs_g0_nc:\val
	.endm

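/*
 * Illustrative usage: load a constant known at assembly/link time without a
 * literal pool; the .if chain above picks 2-4 movz/movk instructions:
 *
 *	mov_q	x0, 0xffff0000deadbeef	// arbitrary example value (4 instructions)
 */
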
/*
 * Return the current thread_info.
 */
	.macro	get_thread_info, rd
	mrs	\rd, sp_el0
	.endm

/*
 * Arrange a physical address in a TTBR register, taking care of 52-bit
 * addresses.
 *
 * 	phys:	physical address, preserved
 * 	ttbr:	returns the TTBR value
 */
	.macro	phys_to_ttbr, ttbr, phys
#ifdef CONFIG_ARM64_PA_BITS_52
	orr	\ttbr, \phys, \phys, lsr #46
	and	\ttbr, \ttbr, #TTBR_BADDR_MASK_52
#else
	mov	\ttbr, \phys
#endif
	.endm

	.macro	phys_to_pte, pte, phys
#ifdef CONFIG_ARM64_PA_BITS_52
	/*
	 * We assume \phys is 64K aligned and this is guaranteed by only
	 * supporting this configuration with 64K pages.
	 */
	orr	\pte, \phys, \phys, lsr #36
	and	\pte, \pte, #PTE_ADDR_MASK
#else
	mov	\pte, \phys
#endif
	.endm

	.macro	pte_to_phys, phys, pte
#ifdef CONFIG_ARM64_PA_BITS_52
	ubfiz	\phys, \pte, #(48 - 16 - 12), #16
	bfxil	\phys, \pte, #16, #32
	lsl	\phys, \phys, #16
#else
	and	\phys, \pte, #PTE_ADDR_MASK
#endif
	.endm

/*
 * Errata workaround prior to disabling the MMU. Insert an ISB immediately
 * prior to executing the MSR that will change SCTLR_ELn[M] from 1 to 0.
 */
	.macro pre_disable_mmu_workaround
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
	isb
#endif
	.endm

	/*
	 * frame_push - Push @regcount callee saved registers to the stack,
	 *              starting at x19, as well as x29/x30, and set x29 to
	 *              the new value of sp. Add @extra bytes of stack space
	 *              for locals.
	 */
	.macro		frame_push, regcount:req, extra
	__frame		st, \regcount, \extra
	.endm

	/*
	 * frame_pop  - Pop the callee saved registers from the stack that were
	 *              pushed in the most recent call to frame_push, as well
	 *              as x29/x30 and any extra stack space that may have been
	 *              allocated.
	 */
	.macro		frame_pop
	__frame		ld
	.endm

	.macro		__frame_regs, reg1, reg2, op, num
	.if		.Lframe_regcount == \num
	\op\()r		\reg1, [sp, #(\num + 1) * 8]
	.elseif		.Lframe_regcount > \num
	\op\()p		\reg1, \reg2, [sp, #(\num + 1) * 8]
	.endif
	.endm

	.macro		__frame, op, regcount, extra=0
	.ifc		\op, st
	.if		(\regcount) < 0 || (\regcount) > 10
	.error		"regcount should be in the range [0 ... 10]"
	.endif
	.if		((\extra) % 16) != 0
	.error		"extra should be a multiple of 16 bytes"
	.endif
	.ifdef		.Lframe_regcount
	.if		.Lframe_regcount != -1
	.error		"frame_push/frame_pop may not be nested"
	.endif
	.endif
	.set		.Lframe_regcount, \regcount
	.set		.Lframe_extra, \extra
	.set		.Lframe_local_offset, ((\regcount + 3) / 2) * 16
	stp		x29, x30, [sp, #-.Lframe_local_offset - .Lframe_extra]!
	mov		x29, sp
	.endif

	__frame_regs	x19, x20, \op, 1
	__frame_regs	x21, x22, \op, 3
	__frame_regs	x23, x24, \op, 5
	__frame_regs	x25, x26, \op, 7
	__frame_regs	x27, x28, \op, 9

	.ifc		\op, ld
	.if		.Lframe_regcount == -1
	.error		"frame_push/frame_pop may not be nested"
	.endif
	ldp		x29, x30, [sp], #.Lframe_local_offset + .Lframe_extra
	.set		.Lframe_regcount, -1
	.endif
	.endm

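/*
 * Illustrative usage (hypothetical function): a non-leaf assembly routine
 * that needs x19-x22 plus 16 bytes of local storage could be bracketed as:
 *
 *	ENTRY(example_func)
 *	frame_push	4, 16		// saves x29/x30 + x19-x22, reserves 16 bytes
 *	// ... locals live at [sp, #.Lframe_local_offset] ...
 *	frame_pop
 *	ret
 *	ENDPROC(example_func)
 */
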
/*
 * Check whether to yield to another runnable task from kernel mode NEON code
 * (which runs with preemption disabled).
 *
 * if_will_cond_yield_neon
 *        // pre-yield patchup code
 * do_cond_yield_neon
 *        // post-yield patchup code
 * endif_yield_neon    <label>
 *
 * where <label> is optional, and marks the point where execution will resume
 * after a yield has been performed. If omitted, execution resumes right after
 * the endif_yield_neon invocation. Note that the entire sequence, including
 * the provided patchup code, will be omitted from the image if CONFIG_PREEMPT
 * is not defined.
 *
 * As a convenience, in the case where no patchup code is required, the above
 * sequence may be abbreviated to
 *
 * cond_yield_neon <label>
 *
 * Note that the patchup code does not support assembler directives that change
 * the output section; any use of such directives is undefined.
 *
 * The yield itself consists of the following:
 * - Check whether the preempt count is exactly 1, in which case re-enabling
 *   preemption once will make the task preemptible. If this is not the case,
 *   yielding is pointless.
 * - Check whether TIF_NEED_RESCHED is set, and if so, disable and re-enable
 *   kernel mode NEON (which will trigger a reschedule), and branch to the
 *   yield fixup code.
 *
 * This macro sequence may clobber all CPU state that is not guaranteed by the
 * AAPCS to be preserved across an ordinary function call.
 */

	.macro		cond_yield_neon, lbl
	if_will_cond_yield_neon
	do_cond_yield_neon
	endif_yield_neon	\lbl
	.endm

	.macro		if_will_cond_yield_neon
#ifdef CONFIG_PREEMPT
	get_thread_info	x0
	ldr		w1, [x0, #TSK_TI_PREEMPT]
	ldr		x0, [x0, #TSK_TI_FLAGS]
	cmp		w1, #PREEMPT_DISABLE_OFFSET
	csel		x0, x0, xzr, eq
	tbnz		x0, #TIF_NEED_RESCHED, .Lyield_\@	// needs rescheduling?
	/* fall through to endif_yield_neon */
	.subsection	1
.Lyield_\@ :
#else
	.section	".discard.cond_yield_neon", "ax"
#endif
	.endm

	.macro		do_cond_yield_neon
	bl		kernel_neon_end
	bl		kernel_neon_begin
	.endm

	.macro		endif_yield_neon, lbl
	.ifnb		\lbl
	b		\lbl
	.else
	b		.Lyield_out_\@
	.endif
	.previous
.Lyield_out_\@ :
	.endm

#endif	/* __ASM_ASSEMBLER_H */