/* -*- mode: asm -*-
 *
 *  linux/arch/m68k/kernel/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file README.legal in the main directory of this archive
 * for more details.
 *
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 *
 */

/*
 * entry.S  contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 */

/*
 * 12/03/96 Jes: Currently we only support m68k single-cpu systems, so
 *               all pointers that used to be 'current' are now entry
 *               number 0 in the 'current_set' list.
 *
 *  6/05/00 RZ:	 added writeback completion after return from sighandler
 *		 for 68040
 */

#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/traps.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>

.globl system_call, buserr, trap, resume
.globl sys_call_table
.globl __sys_fork, __sys_clone, __sys_vfork
.globl bad_interrupt
.globl auto_irqhandler_fixup
.globl user_irqvec_fixup

.text
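/*
 * Note on the fork/clone/vfork wrappers below (added commentary, based on
 * the usual m68k layout of struct switch_stack: %d6-%d7/%a3-%a6 plus the
 * saved return address, i.e. SWITCH_STACK_SIZE covering 28 bytes):
 *
 * SAVE_SWITCH_STACK pushes the six callee-saved registers (24 bytes) so
 * the full register set sits on the stack for the C implementation.  The
 * wrappers deliberately do not RESTORE_SWITCH_STACK; the lea simply drops
 * the saved registers (and, for the clone variants, the extra pointer
 * argument), leaving the original return address for the rts.  The
 * pea %sp@(SWITCH_STACK_SIZE) in the clone wrappers therefore pushes a
 * pointer to the saved pt_regs that m68k_clone()/m68k_clone3() expect.
 */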
ENTRY(__sys_fork)
	SAVE_SWITCH_STACK
	jbsr	sys_fork
	lea     %sp@(24),%sp
	rts

ENTRY(__sys_clone)
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	jbsr	m68k_clone
	lea     %sp@(28),%sp
	rts

ENTRY(__sys_vfork)
	SAVE_SWITCH_STACK
	jbsr	sys_vfork
	lea     %sp@(24),%sp
	rts

ENTRY(__sys_clone3)
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	jbsr	m68k_clone3
	lea	%sp@(28),%sp
	rts

ENTRY(sys_sigreturn)
	SAVE_SWITCH_STACK
	movel	%sp,%sp@-		  | switch_stack pointer
	pea	%sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer
	jbsr	do_sigreturn
	addql	#8,%sp
	RESTORE_SWITCH_STACK
	rts

ENTRY(sys_rt_sigreturn)
	SAVE_SWITCH_STACK
	movel	%sp,%sp@-		  | switch_stack pointer
	pea	%sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer
	jbsr	do_rt_sigreturn
	addql	#8,%sp
	RESTORE_SWITCH_STACK
	rts

ENTRY(buserr)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		| stack frame pointer argument
	jbsr	buserr_c
	addql	#4,%sp
	jra	ret_from_exception

ENTRY(trap)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		| stack frame pointer argument
	jbsr	trap_c
	addql	#4,%sp
	jra	ret_from_exception

	| After a fork we jump here directly from resume,
	| so that %d1 contains the previous task
	| schedule_tail now used regardless of CONFIG_SMP
ENTRY(ret_from_fork)
	movel	%d1,%sp@-
	jsr	schedule_tail
	addql	#4,%sp
	jra	ret_from_exception

ENTRY(ret_from_kernel_thread)
	| a3 contains the kernel thread payload, d7 - its argument
	movel	%d1,%sp@-
	jsr	schedule_tail
	movel	%d7,(%sp)
	jsr	%a3@
	addql	#4,%sp
	jra	ret_from_exception

#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)

#ifdef TRAP_DBG_INTERRUPT

.globl dbginterrupt
ENTRY(dbginterrupt)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		/* stack frame pointer argument */
	jsr	dbginterrupt_c
	addql	#4,%sp
	jra	ret_from_exception
#endif

ENTRY(reschedule)
	/* save top of frame */
	pea	%sp@
	jbsr	set_esp0
	addql	#4,%sp
	pea	ret_from_exception
	jmp	schedule

ENTRY(ret_from_user_signal)
	moveq #__NR_sigreturn,%d0
	trap #0

ENTRY(ret_from_user_rt_signal)
	movel #__NR_rt_sigreturn,%d0
	trap #0

#else

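/*
 * Syscall tracing entry path (added commentary): we get here from
 * system_call when the trace bit is set in the thread_info flags.  The
 * subql #4,%sp reserves a dummy return-address slot so that, together
 * with SAVE_SWITCH_STACK, the tracer sees a complete switch_stack in
 * front of pt_regs; the slot is dropped again after the restore.  The
 * saved %d0 is preloaded with -ENOSYS so strace sees a sensible value if
 * the call never runs, and if %d0 (presumably the tracer's return value)
 * comes back as -1 the syscall is skipped; the addql #1/jeq pair is just
 * a shorter cmpil #-1, as noted below.
 */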
do_trace_entry:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
	subql	#4,%sp
	SAVE_SWITCH_STACK
	jbsr	syscall_trace
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	addql	#1,%d0			| optimization for cmpil #-1,%d0
	jeq	ret_from_syscall
	movel	%sp@(PT_OFF_ORIG_D0),%d0
	cmpl	#NR_syscalls,%d0
	jcs	syscall
	jra	ret_from_syscall
badsys:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)
	jra	ret_from_syscall

do_trace_exit:
	subql	#4,%sp
	SAVE_SWITCH_STACK
	jbsr	syscall_trace
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jra	.Lret_from_exception

ENTRY(ret_from_signal)
	movel	%curptr@(TASK_STACK),%a1
	tstb	%a1@(TINFO_FLAGS+2)
	jge	1f
	jbsr	syscall_trace
1:	RESTORE_SWITCH_STACK
	addql	#4,%sp
/* on 68040 complete pending writebacks if any */
#ifdef CONFIG_M68040
	bfextu	%sp@(PT_OFF_FORMATVEC){#0,#4},%d0
	subql	#7,%d0				| bus error frame ?
	jbne	1f
	movel	%sp,%sp@-
	jbsr	berr_040cleanup
	addql	#4,%sp
1:
#endif
	jra	.Lret_from_exception

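/*
 * System call entry (added commentary): SAVE_ALL_SYS builds the pt_regs
 * frame and the syscall number arrives in %d0.  After the trace and
 * range checks the handler is fetched from sys_call_table with a
 * memory-indirect jbsr, i.e. roughly "call sys_call_table[%d0]", and its
 * return value is stored back into the saved %d0 slot of pt_regs.
 */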
ENTRY(system_call)
	SAVE_ALL_SYS

	GET_CURRENT(%d1)
	movel	%d1,%a1

	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)

	| syscall trace?
	tstb	%a1@(TINFO_FLAGS+2)
	jmi	do_trace_entry
	cmpl	#NR_syscalls,%d0
	jcc	badsys
syscall:
	jbsr	@(sys_call_table,%d0:l:4)@(0)
	movel	%d0,%sp@(PT_OFF_D0)	| save the return value
ret_from_syscall:
	|oriw	#0x0700,%sr
	movel	%curptr@(TASK_STACK),%a1
	movew	%a1@(TINFO_FLAGS+2),%d0
	jne	syscall_exit_work
1:	RESTORE_ALL

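/*
 * syscall_exit_work dispatches on the low word of the thread_info flags
 * by shifting it (added commentary; this relies on the usual m68k TIF_*
 * bit layout: TIF_SYSCALL_TRACE in bit 15, TIF_DELAYED_TRACE in bit 14,
 * TIF_NEED_RESCHED in bit 7 and the signal-related flags below that).
 * The first lslw moves the trace flag into the carry and the delayed
 * trace flag into the sign bit; the further shift by 8 discards
 * TIF_NEED_RESCHED, so anything still non-zero means signal work and a
 * set TIF_NEED_RESCHED alone falls through to schedule.
 */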
syscall_exit_work:
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1b			| if so, skip resched, signals
	lslw	#1,%d0
	jcs	do_trace_exit
	jmi	do_delayed_trace
	lslw	#8,%d0
	jne	do_signal_return
	pea	resume_userspace
	jra	schedule


ENTRY(ret_from_exception)
.Lret_from_exception:
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1f			| if so, skip resched, signals
	| only allow interrupts when we are really the last one on the
	| kernel stack, otherwise stack overflow can occur during
	| heavy interrupt load
	andw	#ALLOWINT,%sr

resume_userspace:
	movel	%curptr@(TASK_STACK),%a1
	moveb	%a1@(TINFO_FLAGS+3),%d0
	jne	exit_work
1:	RESTORE_ALL

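/*
 * Same idea as at syscall_exit_work, but on the low flag byte loaded
 * from TINFO_FLAGS+3: the lslb discards TIF_NEED_RESCHED, so anything
 * left means pending signal/notify work (added commentary, same TIF_*
 * layout assumption as above).
 */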
exit_work:
	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)
	lslb	#1,%d0
	jne	do_signal_return
	pea	resume_userspace
	jra	schedule


do_signal_return:
	|andw	#ALLOWINT,%sr
	subql	#4,%sp			| dummy return address
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	bsrl	do_notify_resume
	addql	#4,%sp
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jbra	resume_userspace

do_delayed_trace:
	bclr	#7,%sp@(PT_OFF_SR)	| clear trace bit in SR
	pea	1			| send SIGTRAP
	movel	%curptr,%sp@-
	pea	LSIGTRAP
	jbsr	send_sig
	addql	#8,%sp
	addql	#4,%sp
	jbra	resume_userspace


/* This is the main interrupt handler for autovector interrupts */

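/*
 * auto_irqhandler_fixup and user_irqvec_fixup below label instruction
 * operands rather than instructions (". + 2" skips the opcode word):
 * the jsr's absolute target and the subw's immediate, respectively.
 * Added commentary: presumably the platform interrupt setup code patches
 * the IRQ dispatch target and the user vector base through these labels
 * at boot time.
 */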
ENTRY(auto_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
					|  put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
	subw	#VEC_SPUR,%d0

	movel	%sp,%sp@-
	movel	%d0,%sp@-		|  put vector # on stack
auto_irqhandler_fixup = . + 2
	jsr	do_IRQ			|  process the IRQ
	addql	#8,%sp			|  pop parameters off stack
	jra	ret_from_exception

/* Handler for user defined interrupt vectors */

ENTRY(user_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
					|  put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
user_irqvec_fixup = . + 2
	subw	#VEC_USER,%d0

	movel	%sp,%sp@-
	movel	%d0,%sp@-		|  put vector # on stack
	jsr	do_IRQ			|  process the IRQ
	addql	#8,%sp			|  pop parameters off stack
	jra	ret_from_exception

/* Handler for uninitialized and spurious interrupts */

ENTRY(bad_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)

	movel	%sp,%sp@-
	jsr	handle_badint
	addql	#4,%sp
	jra	ret_from_exception

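/*
 * resume() is the low-level task switch called from switch_to(): it
 * saves the status register, fs (sfc/dfc), usp, the callee-saved
 * registers and, when present, the FPU state of the outgoing task, then
 * switches %curptr and the kernel stack and restores the same state for
 * the incoming task.  (Added summary of the code below.)
 */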
resume:
	/*
	 * Beware - when entering resume, prev (the current task) is
	 * in a0, next (the new task) is in a1, so don't change these
	 * registers until their contents are no longer needed.
	 */

	/* save sr */
	movew	%sr,%a0@(TASK_THREAD+THREAD_SR)

	/* save fs (sfc,%dfc) (may be pointing to kernel memory) */
	movec	%sfc,%d0
	movew	%d0,%a0@(TASK_THREAD+THREAD_FS)

	/* save usp */
	/* it is better to use a movel here instead of a movew 8*) */
	movec	%usp,%d0
	movel	%d0,%a0@(TASK_THREAD+THREAD_USP)

	/* save non-scratch registers on stack */
	SAVE_SWITCH_STACK

	/* save current kernel stack pointer */
	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP)

	/* save floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl	m68k_fputype
	jeq	3f
#endif
	fsave	%a0@(TASK_THREAD+THREAD_FPSTATE)

#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE)
	jeq	3f
#endif
2:	fmovemx	%fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
	fmoveml	%fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
3:
#endif	/* CONFIG_M68KFPU_EMU_ONLY */
	/* Return previous task in %d1 */
	movel	%curptr,%d1

	/* switch to new task (a1 contains new task) */
	movel	%a1,%curptr

	/* restore floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl	m68k_fputype
	jeq	4f
#endif
#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE)
	jeq	3f
#endif
2:	fmovemx	%a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
	fmoveml	%a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
3:	frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
4:
#endif	/* CONFIG_M68KFPU_EMU_ONLY */

	/* restore the kernel stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp

	/* restore non-scratch registers */
	RESTORE_SWITCH_STACK

	/* restore user stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_USP),%a0
	movel	%a0,%usp

	/* restore fs (sfc,%dfc) */
	movew	%a1@(TASK_THREAD+THREAD_FS),%a0
	movec	%a0,%sfc
	movec	%a0,%dfc

	/* restore status register */
	movew	%a1@(TASK_THREAD+THREAD_SR),%sr

	rts

#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
