/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 *  Note:  there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values...  Be aware!
 */

#include <linux/init.h>

#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#ifndef CONFIG_MULTI_IRQ_HANDLER
#include <mach/entry-macro.S>
#endif
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>
#include <asm/system_info.h>

#include "entry-header.S"
#include <asm/entry-macro-multi.S>
#include <asm/probes.h>

/*
 * Interrupt handling.
 */
	.macro	irq_handler
#ifdef CONFIG_MULTI_IRQ_HANDLER
	ldr	r1, =handle_arch_irq
	mov	r0, sp
	badr	lr, 9997f
	ldr	pc, [r1]
#else
	arch_irq_handler_default
#endif
9997:
	.endm
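
/*
 * With CONFIG_MULTI_IRQ_HANDLER, the macro above dispatches through the
 * handle_arch_irq pointer defined at the end of this file.  Illustrative
 * sketch (not code from this file) of how platform init code installs
 * its root handler; my_soc_handle_irq is a hypothetical name:
 *
 *	static void __init my_soc_init_irq(void)
 *	{
 *		set_handle_irq(my_soc_handle_irq);
 *	}
 *
 * where the handler decodes the interrupt controller state and would
 * typically call handle_IRQ(irqnr, regs) for each pending interrupt.
 */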

	.macro	pabt_helper
	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
#ifdef MULTI_PABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_PABT_FUNC]
#else
	bl	CPU_PABORT_HANDLER
#endif
	.endm

	.macro	dabt_helper

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - pt_regs
	@  r4 - aborted context pc
	@  r5 - aborted context psr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_DABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_DABT_FUNC]
#else
	bl	CPU_DABORT_HANDLER
#endif
	.endm

#ifdef CONFIG_KPROBES
	.section	.kprobes.text,"ax",%progbits
#else
	.text
#endif

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
	mov	r1, #\reason
	.endm

__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
	inv_entry BAD_DATA
	b	common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
	inv_entry BAD_IRQ
	b	common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
	inv_entry BAD_UNDEFINSTR

	@
	@ XXX fall through to common_invalid
	@

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
	zero_fp

	ldmia	r0, {r4 - r6}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"

	mov	r0, sp
	b	bad_mode
ENDPROC(__und_invalid)

/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif

	.macro	svc_entry, stack_hole=0, trace=1, uaccess=1
 UNWIND(.fnstart		)
 UNWIND(.save {r0 - pc}		)
	sub	sp, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 SPFIX(	mov	r0, sp		)
 SPFIX(	tst	r0, #4		)	@ test original stack alignment
 SPFIX(	ldr	r0, [sp]	)	@ restored
#else
 SPFIX(	tst	sp, #4		)
#endif
 SPFIX(	subeq	sp, sp, #4	)
	stmia	sp, {r1 - r12}

	ldmia	r0, {r3 - r5}
	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""      ""       ""
	add	r2, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
 SPFIX(	addeq	r2, r2, #4	)
	str	r3, [sp, #-4]!		@ save the "real" r0 copied
					@ from the exception stack

	mov	r3, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - sp_svc
	@  r3 - lr_svc
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r7, {r2 - r6}

	uaccess_save r0
	.if \uaccess
	uaccess_disable r0
	.endif

	.if \trace
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	.endif
	.endm

	.align	5
__dabt_svc:
	svc_entry uaccess=0
	mov	r2, sp
	dabt_helper
 THUMB(	ldr	r5, [sp, #S_PSR]	)	@ potentially updated CPSR
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__dabt_svc)

	.align	5
__irq_svc:
	svc_entry
	irq_handler

#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
#endif

	svc_exit r5, irq = 1			@ return from exception
 UNWIND(.fnend		)
ENDPROC(__irq_svc)

	.ltorg

#ifdef CONFIG_PREEMPT
svc_preempt:
	mov	r8, lr
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new task's TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	reteq	r8				@ go again
	b	1b
#endif

__und_fault:
	@ Correct the PC such that it is pointing at the instruction
	@ which caused the fault.  If the faulting instruction was ARM
	@ the PC will be pointing at the next instruction, and we have
	@ to subtract 4.  Otherwise, it is Thumb, and the PC will be
	@ pointing at the second half of the Thumb instruction.  We
	@ have to subtract 2.
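	@ Worked example (illustrative): an undefined ARM instruction at
	@ 0x8000 traps with the saved PC at 0x8004; the caller passes
	@ r1 == 4 and the code below rewinds regs->ARM_pc to 0x8000
	@ before do_undefinstr runs.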
	ldr	r2, [r0, #S_PC]
	sub	r2, r2, r1
	str	r2, [r0, #S_PC]
	b	do_undefinstr
ENDPROC(__und_fault)

	.align	5
__und_svc:
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to
	@ the saved context.
	svc_entry MAX_STACK_SIZE
#else
	svc_entry
#endif
	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	@  r0 - instruction
	@
#ifndef CONFIG_THUMB2_KERNEL
	ldr	r0, [r4, #-4]
#else
	mov	r1, #2
	ldrh	r0, [r4, #-2]			@ Thumb instruction at LR - 2
	cmp	r0, #0xe800			@ 32-bit instruction if xx >= 0
	blo	__und_svc_fault
	ldrh	r9, [r4]			@ bottom 16 bits
	add	r4, r4, #2
	str	r4, [sp, #S_PC]
	orr	r0, r9, r0, lsl #16
#endif
	badr	r9, __und_svc_finish
	mov	r2, r4
	bl	call_fpe

	mov	r1, #4				@ PC correction to apply
__und_svc_fault:
	mov	r0, sp				@ struct pt_regs *regs
	bl	__und_fault

__und_svc_finish:
	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__und_svc)

	.align	5
__pabt_svc:
	svc_entry
	mov	r2, sp				@ regs
	pabt_helper
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__pabt_svc)

	.align	5
__fiq_svc:
	svc_entry trace=0
	mov	r0, sp				@ struct pt_regs *regs
	bl	handle_fiq_as_nmi
	svc_exit_via_fiq
 UNWIND(.fnend		)
ENDPROC(__fiq_svc)

	.align	5
.LCcralign:
	.word	cr_alignment
#ifdef MULTI_DABORT
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter

/*
 * Abort mode handlers
 */

@
@ Taking a FIQ in abort mode is similar to taking a FIQ in SVC mode
@ and reuses the same macros. However in abort mode we must also
@ save/restore lr_abt and spsr_abt to make nested aborts safe.
@
	.align 5
__fiq_abt:
	svc_entry trace=0

 ARM(	msr	cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )
	mov	r1, lr		@ Save lr_abt
	mrs	r2, spsr	@ Save spsr_abt, abort is now safe
 ARM(	msr	cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )
	stmfd	sp!, {r1 - r2}

	add	r0, sp, #8			@ struct pt_regs *regs
	bl	handle_fiq_as_nmi

	ldmfd	sp!, {r1 - r2}
 ARM(	msr	cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )
	mov	lr, r1		@ Restore lr_abt, abort is unsafe
	msr	spsr_cxsf, r2	@ Restore spsr_abt
 ARM(	msr	cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )

	svc_exit_via_fiq
 UNWIND(.fnend		)
ENDPROC(__fiq_abt)

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so S_FRAME_SIZE should be too
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

	.macro	usr_entry, trace=1, uaccess=1
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)

 ATRAP(	mrc	p15, 0, r7, c1, c0, 0)
 ATRAP(	ldr	r8, .LCcralign)

	ldmia	r0, {r3 - r5}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""     ""        ""

	str	r3, [sp]		@ save the "real" r0 copied
					@ from the exception stack

 ATRAP(	ldr	r8, [r8, #0])

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
	stmia	r0, {r4 - r6}
 ARM(	stmdb	r0, {sp, lr}^			)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)

	.if \uaccess
	uaccess_disable ip
	.endif

	@ Enable the alignment trap while in kernel mode
 ATRAP(	teq	r8, r7)
 ATRAP( mcrne	p15, 0, r8, c1, c0, 0)

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp

	.if	\trace
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	ct_user_exit save = 0
	.endif
	.endm

	.macro	kuser_cmpxchg_check
#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
	cmp	r4, #TASK_SIZE
	blhs	kuser_cmpxchg64_fixup
#endif
#endif
	.endm

	.align	5
__dabt_usr:
	usr_entry uaccess=0
	kuser_cmpxchg_check
	mov	r2, sp
	dabt_helper
	b	ret_from_exception
 UNWIND(.fnend		)
ENDPROC(__dabt_usr)

	.align	5
__irq_usr:
	usr_entry
	kuser_cmpxchg_check
	irq_handler
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user_from_irq
 UNWIND(.fnend		)
ENDPROC(__irq_usr)

	.ltorg

	.align	5
__und_usr:
	usr_entry uaccess=0

	mov	r2, r4
	mov	r3, r5

	@ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
	@      faulting instruction depending on Thumb mode.
	@ r3 = regs->ARM_cpsr
	@
	@ The emulation code returns using r9 if it has emulated the
	@ instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	badr	r9, ret_from_exception

	@ IRQs must be enabled before attempting to read the instruction from
	@ user space since that could cause a page/translation fault if the
	@ page table was modified by another CPU.
	enable_irq

	tst	r3, #PSR_T_BIT			@ Thumb mode?
	bne	__und_usr_thumb
	sub	r4, r2, #4			@ ARM instr at LR - 4
1:	ldrt	r0, [r4]
 ARM_BE8(rev	r0, r0)				@ little endian instruction

	uaccess_disable ip

	@ r0 = 32-bit ARM instruction which caused the exception
	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
	@ r4 = PC value for the faulting instruction
	@ lr = 32-bit undefined instruction function
	badr	lr, __und_usr_fault_32
	b	call_fpe

__und_usr_thumb:
	@ Thumb instruction
	sub	r4, r2, #2			@ First half of thumb instr at LR - 2
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
/*
 * Thumb-2 instruction handling.  Note that because pre-v6 and >= v6 platforms
 * can never be supported in a single kernel, this code is not applicable at
 * all when __LINUX_ARM_ARCH__ < 6.  This allows simplifying assumptions to be
 * made about .arch directives.
 */
#if __LINUX_ARM_ARCH__ < 7
/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
#define NEED_CPU_ARCHITECTURE
	ldr	r5, .LCcpu_architecture
	ldr	r5, [r5]
	cmp	r5, #CPU_ARCH_ARMv7
	blo	__und_usr_fault_16		@ 16bit undefined instruction
/*
 * The following code won't get run unless the running CPU really is v7, so
 * coding round the lack of ldrht on older arches is pointless.  Temporarily
 * override the assembler target arch with the minimum required instead:
 */
	.arch	armv6t2
#endif
2:	ldrht	r5, [r4]
ARM_BE8(rev16	r5, r5)				@ little endian instruction
	cmp	r5, #0xe800			@ 32bit instruction if xx != 0
	blo	__und_usr_fault_16_pan		@ 16bit undefined instruction
3:	ldrht	r0, [r2]
ARM_BE8(rev16	r0, r0)				@ little endian instruction
	uaccess_disable ip
	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
	str	r2, [sp, #S_PC]			@ it's a 2x16bit instr, update
	orr	r0, r0, r5, lsl #16
	badr	lr, __und_usr_fault_32
	@ r0 = the two 16-bit Thumb instructions which caused the exception
	@ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
	@ r4 = PC value for the first 16-bit Thumb instruction
	@ lr = 32bit undefined instruction function

#if __LINUX_ARM_ARCH__ < 7
/* If the target arch was overridden, change it back: */
#ifdef CONFIG_CPU_32v6K
	.arch	armv6k
#else
	.arch	armv6
#endif
#endif /* __LINUX_ARM_ARCH__ < 7 */
#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
	b	__und_usr_fault_16
#endif
 UNWIND(.fnend)
ENDPROC(__und_usr)

/*
 * The out of line fixup for the ldrt instructions above.
 */
	.pushsection .text.fixup, "ax"
	.align	2
4:	str     r4, [sp, #S_PC]			@ retry current instruction
	ret	r9
	.popsection
	.pushsection __ex_table,"a"
	.long	1b, 4b
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
	.long	2b, 4b
	.long	3b, 4b
#endif
	.popsection

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here. Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them. Plus, there are
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
 *  r2  = PC value to resume execution after successful emulation
 *  r9  = normal "successful" return address
 *  r10 = this thread's thread_info structure
 *  lr  = unrecognised instruction return address
 * IRQs enabled, FIQs enabled.
 */
	@
	@ Fall-through from Thumb-2 __und_usr
	@
#ifdef CONFIG_NEON
	get_thread_info r10			@ get current thread
	adr	r6, .LCneon_thumb_opcodes
	b	2f
#endif
call_fpe:
	get_thread_info r10			@ get current thread
#ifdef CONFIG_NEON
	adr	r6, .LCneon_arm_opcodes
2:	ldr	r5, [r6], #4			@ mask value
	ldr	r7, [r6], #4			@ opcode bits matching in mask
	cmp	r5, #0				@ end mask?
	beq	1f
	and	r8, r0, r5
	cmp	r8, r7				@ NEON instruction?
	bne	2b
	mov	r7, #1
	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
	b	do_vfp				@ let VFP handler handle this
1:
#endif
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
	reteq	lr
	and	r8, r0, #0x00000f00		@ mask out CP number
	mov	r7, #1
	add	r6, r10, r8, lsr #8		@ add used_cp[] array offset first
	strb	r7, [r6, #TI_USED_CP]		@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
 ARM(	add	pc, pc, r8, lsr #6	)
 THUMB(	lsr	r8, r8, #6		)
 THUMB(	add	pc, r8			)
	nop

	ret.w	lr				@ CP#0
	W(b)	do_fpe				@ CP#1 (FPE)
	W(b)	do_fpe				@ CP#2 (FPE)
	ret.w	lr				@ CP#3
#ifdef CONFIG_CRUNCH
	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
#else
	ret.w	lr				@ CP#4
	ret.w	lr				@ CP#5
	ret.w	lr				@ CP#6
#endif
	ret.w	lr				@ CP#7
	ret.w	lr				@ CP#8
	ret.w	lr				@ CP#9
#ifdef CONFIG_VFP
	W(b)	do_vfp				@ CP#10 (VFP)
	W(b)	do_vfp				@ CP#11 (VFP)
#else
	ret.w	lr				@ CP#10 (VFP)
	ret.w	lr				@ CP#11 (VFP)
#endif
	ret.w	lr				@ CP#12
	ret.w	lr				@ CP#13
	ret.w	lr				@ CP#14 (Debug)
	ret.w	lr				@ CP#15 (Control)

#ifdef NEED_CPU_ARCHITECTURE
	.align	2
.LCcpu_architecture:
	.word	__cpu_architecture
#endif

#ifdef CONFIG_NEON
	.align	6

.LCneon_arm_opcodes:
	.word	0xfe000000			@ mask
	.word	0xf2000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf4000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode

.LCneon_thumb_opcodes:
	.word	0xef000000			@ mask
	.word	0xef000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf9000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode
#endif

do_fpe:
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

	.pushsection .data
ENTRY(fp_enter)
	.word	no_fp
	.popsection

ENTRY(no_fp)
	ret	lr
ENDPROC(no_fp)

__und_usr_fault_32:
	mov	r1, #4
	b	1f
__und_usr_fault_16_pan:
	uaccess_disable ip
__und_usr_fault_16:
	mov	r1, #2
1:	mov	r0, sp
	badr	lr, ret_from_exception
	b	__und_fault
ENDPROC(__und_usr_fault_32)
ENDPROC(__und_usr_fault_16)

	.align	5
__pabt_usr:
	usr_entry
	mov	r2, sp				@ regs
	pabt_helper
 UNWIND(.fnend		)
	/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
 UNWIND(.fnend		)
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)

	.align	5
__fiq_usr:
	usr_entry trace=0
	kuser_cmpxchg_check
	mov	r0, sp				@ struct pt_regs *regs
	bl	handle_fiq_as_nmi
	get_thread_info tsk
	restore_user_regs fast = 0, offset = 0
 UNWIND(.fnend		)
ENDPROC(__fiq_usr)

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	add	ip, r1, #TI_CPU_SAVE
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
	ldr	r4, [r2, #TI_TP_VALUE]
	ldr	r5, [r2, #TI_TP_VALUE + 4]
#ifdef CONFIG_CPU_USE_DOMAINS
	mrc	p15, 0, r6, c3, c0, 0		@ Get domain register
	str	r6, [r1, #TI_CPU_DOMAIN]	@ Save old domain register
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	switch_tls r1, r4, r5, r3, r7
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	ldr	r7, [r2, #TI_TASK]
	ldr	r8, =__stack_chk_guard
	ldr	r7, [r7, #TSK_STACK_CANARY]
#endif
#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	mov	r5, r0
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	str	r7, [r8]
#endif
 THUMB(	mov	ip, r4			   )
	mov	r0, r5
 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 THUMB(	ldr	sp, [ip], #4		   )
 THUMB(	ldr	pc, [ip]		   )
 UNWIND(.fnend		)
ENDPROC(__switch_to)

	__INIT

/*
 * User helpers.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
 */
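
/*
 * Illustrative userspace view of these helpers, along the lines of the
 * examples in Documentation/arm/kernel_user_helpers.txt (a sketch, not
 * an ABI definition): each helper sits at a fixed address in the vector
 * page and is reached through an indirect call.  The number of helpers
 * the running kernel provides can be checked first, e.g.:
 *
 *	int32_t version = *(int32_t *)0xffff0ffc;
 *	if (version < 2)
 *		take a fallback path;	(cmpxchg needs version >= 2)
 *
 * Per-helper C-level signatures are sketched in comments next to each
 * helper below.
 */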
 THUMB(	.arm	)

	.macro	usr_ret, reg
#ifdef CONFIG_ARM_THUMB
	bx	\reg
#else
	ret	\reg
#endif
	.endm

	.macro	kuser_pad, sym, size
	.if	(. - \sym) & 3
	.rept	4 - (. - \sym) & 3
	.byte	0
	.endr
	.endif
	.rept	(\size - (. - \sym)) / 4
	.word	0xe7fddef1
	.endr
	.endm
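
/*
 * The 0xe7fddef1 fill word above appears to be an encoding from the
 * architecturally undefined instruction space, so a stray jump into the
 * padding traps rather than falling through to the next helper.  Worked
 * example: "kuser_pad __kuser_memory_barrier, 32" pads that helper's few
 * instructions out to its full 32-byte slot with this poison value.
 */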

#ifdef CONFIG_KUSER_HELPERS
	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

/*
 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
 */

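/*
 * Illustrative C-level signature for this helper (cf. the examples in
 * Documentation/arm/kernel_user_helpers.txt):
 *
 *	typedef int (__kuser_cmpxchg64_t)(const int64_t *oldval,
 *					  const int64_t *newval,
 *					  volatile int64_t *ptr);
 *	#define __kuser_cmpxchg64 (*(__kuser_cmpxchg64_t *)0xffff0f60)
 *
 * Returns 0 (with C set) if *ptr was atomically changed from *oldval to
 * *newval, non-zero (with C clear) otherwise.
 */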
__kuser_cmpxchg64:				@ 0xffff0f60

#if defined(CONFIG_CPU_32v6K)

	stmfd	sp!, {r4, r5, r6, r7}
	ldrd	r4, r5, [r0]			@ load old val
	ldrd	r6, r7, [r1]			@ load new val
	smp_dmb	arm
1:	ldrexd	r0, r1, [r2]			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
	strexdeq r3, r6, r7, [r2]		@ store newval if eq
	teqeq	r3, #1				@ success?
	beq	1b				@ if no then retry
	smp_dmb	arm
	rsbs	r0, r3, #0			@ set returned val and C flag
	ldmfd	sp!, {r4, r5, r6, r7}
	usr_ret	lr

#elif !defined(CONFIG_SMP)

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg64
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle of
	 * the critical sequence.  The same strategy as for cmpxchg is used.
	 */
	stmfd	sp!, {r4, r5, r6, lr}
	ldmia	r0, {r4, r5}			@ load old val
	ldmia	r1, {r6, lr}			@ load new val
1:	ldmia	r2, {r0, r1}			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
2:	stmeqia	r2, {r6, lr}			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	ldmfd	sp!, {r4, r5, r6, pc}

	.text
kuser_cmpxchg64_fixup:
	@ Called from kuser_cmpxchg_fixup.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
#if __LINUX_ARM_ARCH__ < 6
	bcc	kuser_cmpxchg32_fixup
#endif
	ret	lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else
#error "incoherent kernel configuration"
#endif

	kuser_pad __kuser_cmpxchg64, 64

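/*
 * Illustrative C-level signature (cf. kernel_user_helpers.txt):
 *
 *	typedef void (__kuser_dmb_t)(void);
 *	#define __kuser_dmb (*(__kuser_dmb_t *)0xffff0fa0)
 */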
__kuser_memory_barrier:				@ 0xffff0fa0
	smp_dmb	arm
	usr_ret	lr

	kuser_pad __kuser_memory_barrier, 32

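/*
 * Illustrative C-level signature (cf. kernel_user_helpers.txt):
 *
 *	typedef int (__kuser_cmpxchg_t)(int oldval, int newval,
 *					volatile int *ptr);
 *	#define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)
 *
 * Returns 0 (with C set) if *ptr was atomically changed from oldval to
 * newval, non-zero (with C clear) otherwise.
 */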
__kuser_cmpxchg:				@ 0xffff0fc0

#if __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg_fixup).
	 */
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr

	.text
kuser_cmpxchg32_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
	ret	lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else

	smp_dmb	arm
1:	ldrex	r3, [r2]
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
	teqeq	r3, #1
	beq	1b
	rsbs	r0, r3, #0
	/* beware -- each __kuser slot must be 8 instructions max */
	ALT_SMP(b	__kuser_memory_barrier)
	ALT_UP(usr_ret	lr)

#endif

	kuser_pad __kuser_cmpxchg, 32

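/*
 * Illustrative C-level signature (cf. kernel_user_helpers.txt):
 *
 *	typedef void * (__kuser_get_tls_t)(void);
 *	#define __kuser_get_tls (*(__kuser_get_tls_t *)0xffff0fe0)
 */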
__kuser_get_tls:				@ 0xffff0fe0
	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
	usr_ret	lr
	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
	kuser_pad __kuser_get_tls, 16
	.rep	3
	.word	0			@ 0xffff0ff0 software TLS value, then
	.endr				@ pad up to __kuser_helper_version

__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:

#endif

 THUMB(	.thumb	)

/*
 * Vector stubs.
 *
 * This code is copied to 0xffff1000 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not exceed
 * a page size.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
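
/*
 * Worked example of the mode arithmetic in the stub below: entering
 * vector_irq, cpsr.M holds IRQ_MODE (0x12).  IRQ_MODE ^ SVC_MODE is
 * 0x01, so eor'ing cpsr with it yields SVC_MODE (0x13), while
 * PSR_ISETSTATE similarly adjusts the Thumb bit to match the kernel's
 * instruction set.  The result is written to spsr so that the final
 * "movs pc, lr" switches into SVC mode.
 */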
	.macro	vector_stub, name, mode, correction=0
	.align	5

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia	sp, {r0, lr}		@ save r0, lr
	mrs	lr, spsr
	str	lr, [sp, #8]		@ save spsr

	@
	@ Prepare for SVC32 mode.  IRQs remain disabled.
	@
	mrs	r0, cpsr
	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
	msr	spsr_cxsf, r0

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
 THUMB(	adr	r0, 1f			)
 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
	mov	r0, sp
 ARM(	ldr	lr, [pc, lr, lsl #2]	)
	movs	pc, lr			@ branch to handler in SVC mode
ENDPROC(vector_\name)

	.align	2
	@ handler addresses follow this label
1:
	.endm

	.section .stubs, "ax", %progbits
__stubs_start:
	@ This must be the first word
	.word	vector_swi

vector_rst:
 ARM(	swi	SYS_ERROR0	)
 THUMB(	svc	#0		)
 THUMB(	nop			)
	b	vector_und

/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0 (USR_26 / USR_32)
	.long	__pabt_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3 (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0 (USR_26 / USR_32)
	.long	__und_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3 (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f

	.align	5

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn

/*=============================================================================
 * FIQ "NMI" handler
 *-----------------------------------------------------------------------------
 * Handle a FIQ using the SVC stack, allowing FIQ to act like an NMI on
 * x86 systems.
 */
	vector_stub	fiq, FIQ_MODE, 4

	.long	__fiq_usr			@  0  (USR_26 / USR_32)
	.long	__fiq_svc			@  1  (FIQ_26 / FIQ_32)
	.long	__fiq_svc			@  2  (IRQ_26 / IRQ_32)
	.long	__fiq_svc			@  3  (SVC_26 / SVC_32)
	.long	__fiq_svc			@  4
	.long	__fiq_svc			@  5
	.long	__fiq_svc			@  6
	.long	__fiq_abt			@  7
	.long	__fiq_svc			@  8
	.long	__fiq_svc			@  9
	.long	__fiq_svc			@  a
	.long	__fiq_svc			@  b
	.long	__fiq_svc			@  c
	.long	__fiq_svc			@  d
	.long	__fiq_svc			@  e
	.long	__fiq_svc			@  f

	.globl	vector_fiq_offset
	.equ	vector_fiq_offset, vector_fiq

	.section .vectors, "ax", %progbits
__vectors_start:
	W(b)	vector_rst
	W(b)	vector_und
	W(ldr)	pc, __vectors_start + 0x1000
	W(b)	vector_pabt
	W(b)	vector_dabt
	W(b)	vector_addrexcptn
	W(b)	vector_irq
	W(b)	vector_fiq

	.data

	.globl	cr_alignment
cr_alignment:
	.space	4

#ifdef CONFIG_MULTI_IRQ_HANDLER
	.globl	handle_arch_irq
handle_arch_irq:
	.space	4
#endif