1/*
2 *  arch/s390/kernel/entry64.S
3 *    S390 low-level entry points.
4 *
5 *    Copyright (C) IBM Corp. 1999,2006
6 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
7 *		 Hartmut Penner (hp@de.ibm.com),
8 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
9 *		 Heiko Carstens <heiko.carstens@de.ibm.com>
10 */
11
12#include <linux/sys.h>
13#include <linux/linkage.h>
14#include <linux/init.h>
15#include <asm/cache.h>
16#include <asm/lowcore.h>
17#include <asm/errno.h>
18#include <asm/ptrace.h>
19#include <asm/thread_info.h>
20#include <asm/asm-offsets.h>
21#include <asm/unistd.h>
22#include <asm/page.h>
23
24/*
25 * Stack layout for the system_call stack entry.
26 * The first few entries are identical to the user_regs_struct.
27 */
28SP_PTREGS    =	STACK_FRAME_OVERHEAD
29SP_ARGS      =	STACK_FRAME_OVERHEAD + __PT_ARGS
30SP_PSW	     =	STACK_FRAME_OVERHEAD + __PT_PSW
31SP_R0	     =	STACK_FRAME_OVERHEAD + __PT_GPRS
32SP_R1	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 8
33SP_R2	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 16
34SP_R3	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 24
35SP_R4	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 32
36SP_R5	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 40
37SP_R6	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 48
38SP_R7	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 56
39SP_R8	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 64
40SP_R9	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 72
41SP_R10	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 80
42SP_R11	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 88
43SP_R12	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 96
44SP_R13	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 104
45SP_R14	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 112
46SP_R15	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 120
47SP_ORIG_R2   =	STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
48SP_ILC	     =	STACK_FRAME_OVERHEAD + __PT_ILC
49SP_SVCNR      =	STACK_FRAME_OVERHEAD + __PT_SVCNR
50SP_SIZE      =	STACK_FRAME_OVERHEAD + __PT_SIZE
51
52STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
53STACK_SIZE  = 1 << STACK_SHIFT
54
55_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
56		 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP )
57_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
58		 _TIF_MCCK_PENDING)
59
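#
# BASED(name) addresses a constant in the literal pool relative to %r13,
# which the entry code keeps pointing at system_call (see SAVE_ALL_BASE).
#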
60#define BASED(name) name-system_call(%r13)
61
62#ifdef CONFIG_TRACE_IRQFLAGS
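# Note: basr %r2,%r0 does not branch; it only stores the address of the
# next instruction in %r2, which is passed as the caller ip to the
# trace_hardirqs_*_caller functions.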
63	.macro	TRACE_IRQS_ON
64	 basr	%r2,%r0
65	 brasl	%r14,trace_hardirqs_on_caller
66	.endm
67
68	.macro	TRACE_IRQS_OFF
69	 basr	%r2,%r0
70	 brasl	%r14,trace_hardirqs_off_caller
71	.endm
72
73	.macro TRACE_IRQS_CHECK
74	basr	%r2,%r0
75	tm	SP_PSW(%r15),0x03	# irqs enabled?
76	jz	0f
77	brasl	%r14,trace_hardirqs_on_caller
78	j	1f
790:	brasl	%r14,trace_hardirqs_off_caller
801:
81	.endm
82#else
83#define TRACE_IRQS_ON
84#define TRACE_IRQS_OFF
85#define TRACE_IRQS_CHECK
86#endif
87
88#ifdef CONFIG_LOCKDEP
89	.macro	LOCKDEP_SYS_EXIT
90	tm	SP_PSW+1(%r15),0x01	# returning to user ?
91	jz	0f
92	brasl	%r14,lockdep_sys_exit
930:
94	.endm
95#else
96#define LOCKDEP_SYS_EXIT
97#endif
98
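#
# UPDATE_VTIME adds the elapsed time between two lowcore timer snapshots to
# an accumulator, i.e. \lc_sum += \lc_from - \lc_to (cpu time accounting).
#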
99	.macro	UPDATE_VTIME lc_from,lc_to,lc_sum
100	lg	%r10,\lc_from
101	slg	%r10,\lc_to
102	alg	%r10,\lc_sum
103	stg	%r10,\lc_sum
104	.endm
105
106/*
107 * Register usage in interrupt handlers:
108 *    R9  - pointer to current task structure
109 *    R13 - pointer to literal pool
110 *    R14 - return register for function calls
111 *    R15 - kernel stack pointer
112 */
113
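#
# Entry helper macros: SAVE_ALL_BASE stores %r12-%r15 in the lowcore save
# area and loads the literal pool pointer into %r13. The SAVE_ALL_SVC,
# SAVE_ALL_SYNC and SAVE_ALL_ASYNC variants then switch to the appropriate
# kernel or async stack, and CREATE_STACK_FRAME builds the pt_regs area on
# that stack.
#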
114	.macro	SAVE_ALL_BASE savearea
115	stmg	%r12,%r15,\savearea
116	larl	%r13,system_call
117	.endm
118
119	.macro	SAVE_ALL_SVC psworg,savearea
120	la	%r12,\psworg
121	lg	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
122	.endm
123
124	.macro	SAVE_ALL_SYNC psworg,savearea
125	la	%r12,\psworg
126	tm	\psworg+1,0x01		# test problem state bit
127	jz	2f			# skip stack setup
128	lg	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
129#ifdef CONFIG_CHECK_STACK
130	j	3f
1312:	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
132	jz	stack_overflow
1333:
134#endif
1352:
136	.endm
137
138	.macro	SAVE_ALL_ASYNC psworg,savearea
139	la	%r12,\psworg
140	tm	\psworg+1,0x01		# test problem state bit
141	jnz	1f			# from user -> load kernel stack
142	clc	\psworg+8(8),BASED(.Lcritical_end)
143	jhe	0f
144	clc	\psworg+8(8),BASED(.Lcritical_start)
145	jl	0f
146	brasl	%r14,cleanup_critical
147	tm	1(%r12),0x01		# retest problem state after cleanup
148	jnz	1f
1490:	lg	%r14,__LC_ASYNC_STACK	# are we already on the async. stack ?
150	slgr	%r14,%r15
151	srag	%r14,%r14,STACK_SHIFT
152	jz	2f
1531:	lg	%r15,__LC_ASYNC_STACK	# load async stack
154#ifdef CONFIG_CHECK_STACK
155	j	3f
1562:	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
157	jz	stack_overflow
1583:
159#endif
1602:
161	.endm
162
163	.macro	CREATE_STACK_FRAME psworg,savearea
164	aghi	%r15,-SP_SIZE		# make room for registers & psw
165	mvc	SP_PSW(16,%r15),0(%r12)	# move user PSW to stack
166	stg	%r2,SP_ORIG_R2(%r15)	# store original content of gpr 2
167	icm	%r12,3,__LC_SVC_ILC
168	stmg	%r0,%r11,SP_R0(%r15)	# store gprs %r0-%r11 to kernel stack
169	st	%r12,SP_SVCNR(%r15)
170	mvc	SP_R12(32,%r15),\savearea # move %r12-%r15 to stack
171	la	%r12,0
172	stg	%r12,__SF_BACKCHAIN(%r15)
173	.endm
174
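#
# RESTORE_ALL copies the saved PSW from the stack frame back to the lowcore,
# reloads the user registers, records the exit CPU timer (also copied to the
# per-cpu vdso data) and returns with lpswe. For asynchronous returns
# (\sync=0) the wait state bit is cleared first.
#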
175	.macro	RESTORE_ALL psworg,sync
176	mvc	\psworg(16),SP_PSW(%r15) # move user PSW to lowcore
177	.if !\sync
178	ni	\psworg+1,0xfd		# clear wait state bit
179	.endif
180	lg	%r14,__LC_VDSO_PER_CPU
181	lmg	%r0,%r13,SP_R0(%r15)	# load gprs 0-13 of user
182	stpt	__LC_EXIT_TIMER
183	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
184	lmg	%r14,%r15,SP_R14(%r15)	# load gprs 14-15 of user
185	lpswe	\psworg			# back to caller
186	.endm
187
188/*
189 * Scheduler resume function, called by switch_to
190 *  gpr2 = (task_struct *) prev
191 *  gpr3 = (task_struct *) next
192 * Returns:
193 *  gpr2 = prev
194 */
195	.globl	__switch_to
196__switch_to:
197	tm	__THREAD_per+4(%r3),0xe8 # is the new process using per ?
198	jz	__switch_to_noper		# if not we're fine
199	stctg	%c9,%c11,__SF_EMPTY(%r15)# We are using per stuff
200	clc	__THREAD_per(24,%r3),__SF_EMPTY(%r15)
201	je	__switch_to_noper	     # we got away without bashing TLB's
202	lctlg	%c9,%c11,__THREAD_per(%r3)	# Nope we didn't
203__switch_to_noper:
204	lg	%r4,__THREAD_info(%r2)		    # get thread_info of prev
205	tm	__TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending?
206	jz	__switch_to_no_mcck
207	ni	__TI_flags+7(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
208	lg	%r4,__THREAD_info(%r3)		    # get thread_info of next
209	oi	__TI_flags+7(%r4),_TIF_MCCK_PENDING # set it in next
210__switch_to_no_mcck:
211	stmg	%r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task
212	stg	%r15,__THREAD_ksp(%r2)	# store kernel stack to prev->tss.ksp
213	lg	%r15,__THREAD_ksp(%r3)	# load kernel stack from next->tss.ksp
214	lmg	%r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task
215	stg	%r3,__LC_CURRENT	# __LC_CURRENT = current task struct
216	lctl	%c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
217	lg	%r3,__THREAD_info(%r3)	# load thread_info from task struct
218	stg	%r3,__LC_THREAD_INFO
219	aghi	%r3,STACK_SIZE
220	stg	%r3,__LC_KERNEL_STACK	# __LC_KERNEL_STACK = new kernel stack
221	br	%r14
222
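#
# Code between __critical_start and __critical_end must not be interrupted
# at arbitrary points without fixup; when an asynchronous interrupt or a
# machine check hits this range, cleanup_critical (below) is called to bring
# the interrupted context back to a restartable state.
#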
223__critical_start:
224/*
225 * SVC interrupt handler routine. System calls are synchronous events and
226 * are executed with interrupts enabled.
227 */
228
229	.globl	system_call
230system_call:
231	stpt	__LC_SYNC_ENTER_TIMER
232sysc_saveall:
233	SAVE_ALL_BASE __LC_SAVE_AREA
234	SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
235	CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
236	llgh	%r7,__LC_SVC_INT_CODE	# get svc number from lowcore
237sysc_vtime:
238	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
239sysc_stime:
240	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
241sysc_update:
242	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
243sysc_do_svc:
244	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
245	ltgr	%r7,%r7		# test for svc 0
246	jnz	sysc_nr_ok
247	# svc 0: system call number in %r1
248	cl	%r1,BASED(.Lnr_syscalls)
249	jnl	sysc_nr_ok
250	lgfr	%r7,%r1 	# clear high word in r1
251sysc_nr_ok:
252	mvc	SP_ARGS(8,%r15),SP_R7(%r15)
253sysc_do_restart:
254	sth	%r7,SP_SVCNR(%r15)
255	sllg	%r7,%r7,2	# svc number * 4
256	larl	%r10,sys_call_table
257#ifdef CONFIG_COMPAT
258	tm	__TI_flags+5(%r9),(_TIF_31BIT>>16)  # running in 31 bit mode ?
259	jno	sysc_noemu
260	larl	%r10,sys_call_table_emu  # use 31 bit emulation system calls
261sysc_noemu:
262#endif
263	tm	__TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
264	lgf	%r8,0(%r7,%r10) # load address of system call routine
265	jnz	sysc_tracesys
266	basr	%r14,%r8	# call sys_xxxx
267	stg	%r2,SP_R2(%r15) # store return value (change R2 on stack)
268
269sysc_return:
270	tm	__TI_flags+7(%r9),_TIF_WORK_SVC
271	jnz	sysc_work	# there is work to do (signals etc.)
272sysc_restore:
273#ifdef CONFIG_TRACE_IRQFLAGS
274	larl	%r1,sysc_restore_trace_psw
275	lpswe	0(%r1)
276sysc_restore_trace:
277	TRACE_IRQS_CHECK
278	LOCKDEP_SYS_EXIT
279#endif
280sysc_leave:
281	RESTORE_ALL __LC_RETURN_PSW,1
282sysc_done:
283
284#ifdef CONFIG_TRACE_IRQFLAGS
285	.align	8
286	.globl sysc_restore_trace_psw
287sysc_restore_trace_psw:
288	.quad	0, sysc_restore_trace
289#endif
290
291#
292# recheck if there is more work to do
293#
294sysc_work_loop:
295	tm	__TI_flags+7(%r9),_TIF_WORK_SVC
296	jz	sysc_restore	  # there is no work to do
297#
298# One of the work bits is on. Find out which one.
299#
300sysc_work:
301	tm	SP_PSW+1(%r15),0x01	# returning to user ?
302	jno	sysc_restore
303	tm	__TI_flags+7(%r9),_TIF_MCCK_PENDING
304	jo	sysc_mcck_pending
305	tm	__TI_flags+7(%r9),_TIF_NEED_RESCHED
306	jo	sysc_reschedule
307	tm	__TI_flags+7(%r9),_TIF_SIGPENDING
308	jnz	sysc_sigpending
309	tm	__TI_flags+7(%r9),_TIF_NOTIFY_RESUME
310	jnz	sysc_notify_resume
311	tm	__TI_flags+7(%r9),_TIF_RESTART_SVC
312	jo	sysc_restart
313	tm	__TI_flags+7(%r9),_TIF_SINGLE_STEP
314	jo	sysc_singlestep
315	j	sysc_restore
316sysc_work_done:
317
318#
319# _TIF_NEED_RESCHED is set, call schedule
320#
321sysc_reschedule:
322	larl	%r14,sysc_work_loop
323	jg	schedule	# return point is sysc_return
324
325#
326# _TIF_MCCK_PENDING is set, call handler
327#
328sysc_mcck_pending:
329	larl	%r14,sysc_work_loop
330	jg	s390_handle_mcck	# TIF bit will be cleared by handler
331
332#
333# _TIF_SIGPENDING is set, call do_signal
334#
335sysc_sigpending:
336	ni	__TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
337	la	%r2,SP_PTREGS(%r15)	# load pt_regs
338	brasl	%r14,do_signal		# call do_signal
339	tm	__TI_flags+7(%r9),_TIF_RESTART_SVC
340	jo	sysc_restart
341	tm	__TI_flags+7(%r9),_TIF_SINGLE_STEP
342	jo	sysc_singlestep
343	j	sysc_work_loop
344
345#
346# _TIF_NOTIFY_RESUME is set, call do_notify_resume
347#
348sysc_notify_resume:
349	la	%r2,SP_PTREGS(%r15)	# load pt_regs
350	larl	%r14,sysc_work_loop
351	jg	do_notify_resume	# call do_notify_resume
352
353#
354# _TIF_RESTART_SVC is set, set up registers and restart svc
355#
356sysc_restart:
357	ni	__TI_flags+7(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
358	lg	%r7,SP_R2(%r15)		# load new svc number
359	mvc	SP_R2(8,%r15),SP_ORIG_R2(%r15) # restore first argument
360	lmg	%r2,%r6,SP_R2(%r15)	# load svc arguments
361	j	sysc_do_restart 	# restart svc
362
363#
364# _TIF_SINGLE_STEP is set, call do_single_step
365#
366sysc_singlestep:
367	ni	__TI_flags+7(%r9),255-_TIF_SINGLE_STEP	# clear TIF_SINGLE_STEP
368	xc	SP_SVCNR(2,%r15),SP_SVCNR(%r15)		# clear svc number
369	la	%r2,SP_PTREGS(%r15)	# address of register-save area
370	larl	%r14,sysc_return	# load adr. of system return
371	jg	do_single_step		# branch to do_single_step
372
373#
374# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
375# and after the system call
376#
377sysc_tracesys:
378	la	%r2,SP_PTREGS(%r15)	# load pt_regs
379	la	%r3,0
380	srl	%r7,2
381	stg	%r7,SP_R2(%r15)
382	brasl	%r14,do_syscall_trace_enter
383	lghi	%r0,NR_syscalls
384	clgr	%r0,%r2
385	jnh	sysc_tracenogo
386	sllg	%r7,%r2,2		# svc number *4
387	lgf	%r8,0(%r7,%r10)
388sysc_tracego:
389	lmg	%r3,%r6,SP_R3(%r15)
390	lg	%r2,SP_ORIG_R2(%r15)
391	basr	%r14,%r8		# call sys_xxx
392	stg	%r2,SP_R2(%r15)		# store return value
393sysc_tracenogo:
394	tm	__TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
395	jz	sysc_return
396	la	%r2,SP_PTREGS(%r15)	# load pt_regs
397	larl	%r14,sysc_return	# return point is sysc_return
398	jg	do_syscall_trace_exit
399
400#
401# a new process exits the kernel with ret_from_fork
402#
403	.globl	ret_from_fork
404ret_from_fork:
405	lg	%r13,__LC_SVC_NEW_PSW+8
406	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
407	tm	SP_PSW+1(%r15),0x01	# forking a kernel thread ?
408	jo	0f
409	stg	%r15,SP_R15(%r15)	# store stack pointer for new kthread
4100:	brasl	%r14,schedule_tail
411	TRACE_IRQS_ON
412	stosm	24(%r15),0x03		# reenable interrupts
413	j	sysc_tracenogo
414
415#
416# The kernel_execve function needs to deal with a pt_regs that is not
417# at the usual place
418#
419	.globl	kernel_execve
420kernel_execve:
421	stmg	%r12,%r15,96(%r15)
422	lgr	%r14,%r15
423	aghi	%r15,-SP_SIZE
424	stg	%r14,__SF_BACKCHAIN(%r15)
425	la	%r12,SP_PTREGS(%r15)
426	xc	0(__PT_SIZE,%r12),0(%r12)
427	lgr	%r5,%r12
428	brasl	%r14,do_execve
429	ltgfr	%r2,%r2
430	je	0f
431	aghi	%r15,SP_SIZE
432	lmg	%r12,%r15,96(%r15)
433	br	%r14
434	# execve succeeded.
4350:	stnsm	__SF_EMPTY(%r15),0xfc	# disable interrupts
436	lg	%r15,__LC_KERNEL_STACK	# load ksp
437	aghi	%r15,-SP_SIZE		# make room for registers & psw
438	lg	%r13,__LC_SVC_NEW_PSW+8
439	lg	%r9,__LC_THREAD_INFO
440	mvc	SP_PTREGS(__PT_SIZE,%r15),0(%r12)	# copy pt_regs
441	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
442	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
443	brasl	%r14,execve_tail
444	j	sysc_return
445
446/*
447 * Program check handler routine
448 */
449
450	.globl	pgm_check_handler
451pgm_check_handler:
452/*
453 * First we need to check for a special case:
454 * Single stepping an instruction that disables the PER event mask will
455 * cause a PER event AFTER the mask has been set. Example: SVC or LPSW.
456 * For a single stepped SVC the program check handler gets control after
457 * the SVC new PSW has been loaded. But we want to execute the SVC first and
458 * then handle the PER event. Therefore we update the SVC old PSW to point
459 * to the pgm_check_handler and branch to the SVC handler after checking
460 * whether we have to load the kernel stack register.
461 * For every other possible cause for PER event without the PER mask set
462 * we just ignore the PER event (FIXME: is there anything we have to do
463 * for LPSW?).
464 */
465	stpt	__LC_SYNC_ENTER_TIMER
466	SAVE_ALL_BASE __LC_SAVE_AREA
467	tm	__LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
468	jnz	pgm_per 		 # got per exception -> special case
469	SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
470	CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
471	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
472	jz	pgm_no_vtime
473	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
474	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
475	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
476pgm_no_vtime:
477	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
478	mvc	SP_ARGS(8,%r15),__LC_LAST_BREAK
479	TRACE_IRQS_OFF
480	lgf	%r3,__LC_PGM_ILC	# load program interruption code
481	lghi	%r8,0x7f
482	ngr	%r8,%r3
483pgm_do_call:
484	sll	%r8,3
485	larl	%r1,pgm_check_table
486	lg	%r1,0(%r8,%r1)		# load address of handler routine
487	la	%r2,SP_PTREGS(%r15)	# address of register-save area
488	larl	%r14,sysc_return
489	br	%r1			# branch to interrupt-handler
490
491#
492# handle per exception
493#
494pgm_per:
495	tm	__LC_PGM_OLD_PSW,0x40	# test if per event recording is on
496	jnz	pgm_per_std		# ok, normal per event from user space
497# OK, it's one of the special cases; now we need to find out which one
498	clc	__LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW
499	je	pgm_svcper
500# no interesting special case, ignore PER event
501	lmg	%r12,%r15,__LC_SAVE_AREA
502	lpswe	__LC_PGM_OLD_PSW
503
504#
505# Normal per exception
506#
507pgm_per_std:
508	SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
509	CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
510	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
511	jz	pgm_no_vtime2
512	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
513	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
514	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
515pgm_no_vtime2:
516	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
517	TRACE_IRQS_OFF
518	lg	%r1,__TI_task(%r9)
519	tm	SP_PSW+1(%r15),0x01	# kernel per event ?
520	jz	kernel_per
521	mvc	__THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
522	mvc	__THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
523	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
524	oi	__TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
525	lgf	%r3,__LC_PGM_ILC	# load program interruption code
526	lghi	%r8,0x7f
527	ngr	%r8,%r3			# clear per-event-bit and ilc
528	je	sysc_return
529	j	pgm_do_call
530
531#
532# it was a single stepped SVC that is causing all the trouble
533#
534pgm_svcper:
535	SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
536	CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
537	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
538	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
539	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
540	llgh	%r7,__LC_SVC_INT_CODE	# get svc number from lowcore
541	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
542	lg	%r1,__TI_task(%r9)
543	mvc	__THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
544	mvc	__THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
545	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
546	oi	__TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
547	TRACE_IRQS_ON
548	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
549	j	sysc_do_svc
550
551#
552# The PER event was raised in kernel mode, must be kprobes
553#
554kernel_per:
555	xc	SP_SVCNR(2,%r15),SP_SVCNR(%r15)	# clear svc number
556	la	%r2,SP_PTREGS(%r15)	# address of register-save area
557	larl	%r14,sysc_restore	# load adr. of system ret, no work
558	jg	do_single_step		# branch to do_single_step
559
560/*
561 * IO interrupt handler routine
562 */
563	.globl io_int_handler
564io_int_handler:
565	stck	__LC_INT_CLOCK
566	stpt	__LC_ASYNC_ENTER_TIMER
567	SAVE_ALL_BASE __LC_SAVE_AREA+32
568	SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+32
569	CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+32
570	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
571	jz	io_no_vtime
572	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
573	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
574	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
575io_no_vtime:
576	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
577	TRACE_IRQS_OFF
578	la	%r2,SP_PTREGS(%r15)	# address of register-save area
579	brasl	%r14,do_IRQ		# call standard irq handler
580io_return:
581	tm	__TI_flags+7(%r9),_TIF_WORK_INT
582	jnz	io_work 		# there is work to do (signals etc.)
583io_restore:
584#ifdef CONFIG_TRACE_IRQFLAGS
585	larl	%r1,io_restore_trace_psw
586	lpswe	0(%r1)
587io_restore_trace:
588	TRACE_IRQS_CHECK
589	LOCKDEP_SYS_EXIT
590#endif
591io_leave:
592	RESTORE_ALL __LC_RETURN_PSW,0
593io_done:
594
595#ifdef CONFIG_TRACE_IRQFLAGS
596	.align	8
597	.globl io_restore_trace_psw
598io_restore_trace_psw:
599	.quad	0, io_restore_trace
600#endif
601
602#
603# There is work to do. Check whether we return to user space; if we return
604# to kernel code that was interrupted inside SIE, leave SIE first.
605#
606io_work:
607	tm	SP_PSW+1(%r15),0x01	# returning to user ?
608#ifndef CONFIG_PREEMPT
609#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
610	jnz	io_work_user		# yes -> no need to check for SIE
611	la	%r1, BASED(sie_opcode)	# we return to kernel here
612	lg	%r2, SP_PSW+8(%r15)
613	clc	0(2,%r1), 0(%r2)	# is current instruction = SIE?
614	jne	io_restore		# no-> return to kernel
615	lg	%r1, SP_PSW+8(%r15)	# yes-> add 4 bytes to leave SIE
616	aghi	%r1, 4
617	stg	%r1, SP_PSW+8(%r15)
618	j	io_restore		# return to kernel
619#else
620	jno	io_restore		# no-> skip resched & signal
621#endif
622#else
623	jnz	io_work_user		# yes -> do resched & signal
624#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
625	la	%r1, BASED(sie_opcode)
626	lg	%r2, SP_PSW+8(%r15)
627	clc	0(2,%r1), 0(%r2)	# is current instruction = SIE?
628	jne	0f			# no -> leave PSW alone
629	lg	%r1, SP_PSW+8(%r15)	# yes-> add 4 bytes to leave SIE
630	aghi	%r1, 4
631	stg	%r1, SP_PSW+8(%r15)
6320:
633#endif
634	# check for preemptive scheduling
635	icm	%r0,15,__TI_precount(%r9)
636	jnz	io_restore		# preemption is disabled
637	# switch to kernel stack
638	lg	%r1,SP_R15(%r15)
639	aghi	%r1,-SP_SIZE
640	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
641	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
642	lgr	%r15,%r1
643io_resume_loop:
644	tm	__TI_flags+7(%r9),_TIF_NEED_RESCHED
645	jno	io_restore
646	larl	%r14,io_resume_loop
647	jg	preempt_schedule_irq
648#endif
649
650io_work_user:
651	lg	%r1,__LC_KERNEL_STACK
652	aghi	%r1,-SP_SIZE
653	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
654	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
655	lgr	%r15,%r1
656#
657# One of the work bits is on. Find out which one.
658# Checked are: _TIF_MCCK_PENDING, _TIF_NEED_RESCHED, _TIF_SIGPENDING
659#	       and _TIF_NOTIFY_RESUME
660#
661io_work_loop:
662	tm	__TI_flags+7(%r9),_TIF_MCCK_PENDING
663	jo	io_mcck_pending
664	tm	__TI_flags+7(%r9),_TIF_NEED_RESCHED
665	jo	io_reschedule
666	tm	__TI_flags+7(%r9),_TIF_SIGPENDING
667	jnz	io_sigpending
668	tm	__TI_flags+7(%r9),_TIF_NOTIFY_RESUME
669	jnz	io_notify_resume
670	j	io_restore
671io_work_done:
672
673#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
674sie_opcode:
675	.long 0xb2140000
676#endif
677
678#
679# _TIF_MCCK_PENDING is set, call handler
680#
681io_mcck_pending:
682	brasl	%r14,s390_handle_mcck	# TIF bit will be cleared by handler
683	j	io_work_loop
684
685#
686# _TIF_NEED_RESCHED is set, call schedule
687#
688io_reschedule:
689	TRACE_IRQS_ON
690	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
691	brasl	%r14,schedule		# call scheduler
692	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
693	TRACE_IRQS_OFF
694	tm	__TI_flags+7(%r9),_TIF_WORK_INT
695	jz	io_restore		# there is no work to do
696	j	io_work_loop
697
698#
699# _TIF_SIGPENDING is set, call do_signal
700#
701io_sigpending:
702	TRACE_IRQS_ON
703	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
704	la	%r2,SP_PTREGS(%r15)	# load pt_regs
705	brasl	%r14,do_signal		# call do_signal
706	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
707	TRACE_IRQS_OFF
708	j	io_work_loop
709
710#
711# _TIF_NOTIFY_RESUME is set, call do_notify_resume
712#
713io_notify_resume:
714	TRACE_IRQS_ON
715	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
716	la	%r2,SP_PTREGS(%r15)	# load pt_regs
717	brasl	%r14,do_notify_resume	# call do_notify_resume
718	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
719	TRACE_IRQS_OFF
720	j	io_work_loop
721
722/*
723 * External interrupt handler routine
724 */
725	.globl	ext_int_handler
726ext_int_handler:
727	stck	__LC_INT_CLOCK
728	stpt	__LC_ASYNC_ENTER_TIMER
729	SAVE_ALL_BASE __LC_SAVE_AREA+32
730	SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32
731	CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32
732	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
733	jz	ext_no_vtime
734	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
735	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
736	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
737ext_no_vtime:
738	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
739	TRACE_IRQS_OFF
740	la	%r2,SP_PTREGS(%r15)	# address of register-save area
741	llgh	%r3,__LC_EXT_INT_CODE	# get interruption code
742	brasl	%r14,do_extint
743	j	io_return
744
745__critical_end:
746
747/*
748 * Machine check handler routines
749 */
750	.globl mcck_int_handler
751mcck_int_handler:
752	stck	__LC_INT_CLOCK
753	la	%r1,4095		# revalidate r1
754	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# revalidate cpu timer
755	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
756	SAVE_ALL_BASE __LC_SAVE_AREA+64
757	la	%r12,__LC_MCK_OLD_PSW
758	tm	__LC_MCCK_CODE,0x80	# system damage?
759	jo	mcck_int_main		# yes -> rest of mcck code invalid
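#
# The machine check may have clobbered the CPU timer. If the stored CPU
# timer value in the machine check save area is flagged invalid, the timer
# is reprogrammed from the most recently stored of the sync/async entry,
# exit and last-update snapshots (the CPU timer counts down, so that is the
# smallest of the four values).
#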
760	la	%r14,4095
761	mvc	__LC_SAVE_AREA+104(8),__LC_ASYNC_ENTER_TIMER
762	mvc	__LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r14)
763	tm	__LC_MCCK_CODE+5,0x02	# stored cpu timer value valid?
764	jo	1f
765	la	%r14,__LC_SYNC_ENTER_TIMER
766	clc	0(8,%r14),__LC_ASYNC_ENTER_TIMER
767	jl	0f
768	la	%r14,__LC_ASYNC_ENTER_TIMER
7690:	clc	0(8,%r14),__LC_EXIT_TIMER
770	jl	0f
771	la	%r14,__LC_EXIT_TIMER
7720:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
773	jl	0f
774	la	%r14,__LC_LAST_UPDATE_TIMER
7750:	spt	0(%r14)
776	mvc	__LC_ASYNC_ENTER_TIMER(8),0(%r14)
7771:	tm	__LC_MCCK_CODE+2,0x09	# mwp + ia of old psw valid?
778	jno	mcck_int_main		# no -> skip cleanup critical
779	tm	__LC_MCK_OLD_PSW+1,0x01 # test problem state bit
780	jnz	mcck_int_main		# from user -> load kernel stack
781	clc	__LC_MCK_OLD_PSW+8(8),BASED(.Lcritical_end)
782	jhe	mcck_int_main
783	clc	__LC_MCK_OLD_PSW+8(8),BASED(.Lcritical_start)
784	jl	mcck_int_main
785	brasl	%r14,cleanup_critical
786mcck_int_main:
787	lg	%r14,__LC_PANIC_STACK	# are we already on the panic stack?
788	slgr	%r14,%r15
789	srag	%r14,%r14,PAGE_SHIFT
790	jz	0f
791	lg	%r15,__LC_PANIC_STACK	# load panic stack
7920:	CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+64
793	tm	__LC_MCCK_CODE+2,0x08	# mwp of old psw valid?
794	jno	mcck_no_vtime		# no -> no timer update
795	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
796	jz	mcck_no_vtime
797	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
798	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
799	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
800mcck_no_vtime:
801	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
802	la	%r2,SP_PTREGS(%r15)	# load pt_regs
803	brasl	%r14,s390_do_machine_check
804	tm	SP_PSW+1(%r15),0x01	# returning to user ?
805	jno	mcck_return
806	lg	%r1,__LC_KERNEL_STACK	# switch to kernel stack
807	aghi	%r1,-SP_SIZE
808	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
809	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
810	lgr	%r15,%r1
811	stosm	__SF_EMPTY(%r15),0x04	# turn dat on
812	tm	__TI_flags+7(%r9),_TIF_MCCK_PENDING
813	jno	mcck_return
814	TRACE_IRQS_OFF
815	brasl	%r14,s390_handle_mcck
816	TRACE_IRQS_ON
817mcck_return:
818	mvc	__LC_RETURN_MCCK_PSW(16),SP_PSW(%r15) # move return PSW
819	ni	__LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
820	lmg	%r0,%r15,SP_R0(%r15)	# load gprs 0-15
821	mvc	__LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+104
822	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
823	jno	0f
824	stpt	__LC_EXIT_TIMER
8250:	lpswe	__LC_RETURN_MCCK_PSW	# back to caller
826
827/*
828 * Restart interruption handler, kick starter for additional CPUs
829 */
830#ifdef CONFIG_SMP
831	__CPUINIT
832	.globl restart_int_handler
833restart_int_handler:
834	lg	%r15,__LC_SAVE_AREA+120 # load ksp
835	lghi	%r10,__LC_CREGS_SAVE_AREA
836	lctlg	%c0,%c15,0(%r10) # get new ctl regs
837	lghi	%r10,__LC_AREGS_SAVE_AREA
838	lam	%a0,%a15,0(%r10)
839	lmg	%r6,%r15,__SF_GPRS(%r15) # load registers from clone
840	stosm	__SF_EMPTY(%r15),0x04	# now we can turn dat on
841	jg	start_secondary
842	.previous
843#else
844/*
845 * If we do not run with SMP enabled, let the new CPU crash ...
846 */
847	.globl restart_int_handler
848restart_int_handler:
849	basr	%r1,0
850restart_base:
851	lpswe	restart_crash-restart_base(%r1)
852	.align 8
853restart_crash:
854	.long  0x000a0000,0x00000000,0x00000000,0x00000000
855restart_go:
856#endif
857
858#ifdef CONFIG_CHECK_STACK
859/*
860 * The synchronous or the asynchronous stack overflowed. We are dead.
861 * No need to properly save the registers, we are going to panic anyway.
862 * Set up a pt_regs so that show_trace can provide a good call trace.
863 */
864stack_overflow:
865	lg	%r15,__LC_PANIC_STACK	# change to panic stack
866	aghi	%r15,-SP_SIZE
867	mvc	SP_PSW(16,%r15),0(%r12)	# move user PSW to stack
868	stmg	%r0,%r11,SP_R0(%r15)	# store gprs %r0-%r11 to kernel stack
869	la	%r1,__LC_SAVE_AREA
870	chi	%r12,__LC_SVC_OLD_PSW
871	je	0f
872	chi	%r12,__LC_PGM_OLD_PSW
873	je	0f
874	la	%r1,__LC_SAVE_AREA+32
8750:	mvc	SP_R12(32,%r15),0(%r1)	# move %r12-%r15 to stack
876	mvc	SP_ARGS(8,%r15),__LC_LAST_BREAK
877	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain
878	la	%r2,SP_PTREGS(%r15)	# load pt_regs
879	jg	kernel_stack_overflow
880#endif
881
882cleanup_table_system_call:
883	.quad	system_call, sysc_do_svc
884cleanup_table_sysc_return:
885	.quad	sysc_return, sysc_leave
886cleanup_table_sysc_leave:
887	.quad	sysc_leave, sysc_done
888cleanup_table_sysc_work_loop:
889	.quad	sysc_work_loop, sysc_work_done
890cleanup_table_io_return:
891	.quad	io_return, io_leave
892cleanup_table_io_leave:
893	.quad	io_leave, io_done
894cleanup_table_io_work_loop:
895	.quad	io_work_loop, io_work_done
896
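#
# cleanup_critical: an interrupt or machine check hit inside the critical
# section. %r12 points to the old PSW; its instruction address is matched
# against the address ranges in the cleanup tables above, and the
# corresponding cleanup routine brings the interrupted context to a
# restartable state, leaving the PSW to resume with at __LC_RETURN_PSW.
#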
897cleanup_critical:
898	clc	8(8,%r12),BASED(cleanup_table_system_call)
899	jl	0f
900	clc	8(8,%r12),BASED(cleanup_table_system_call+8)
901	jl	cleanup_system_call
9020:
903	clc	8(8,%r12),BASED(cleanup_table_sysc_return)
904	jl	0f
905	clc	8(8,%r12),BASED(cleanup_table_sysc_return+8)
906	jl	cleanup_sysc_return
9070:
908	clc	8(8,%r12),BASED(cleanup_table_sysc_leave)
909	jl	0f
910	clc	8(8,%r12),BASED(cleanup_table_sysc_leave+8)
911	jl	cleanup_sysc_leave
9120:
913	clc	8(8,%r12),BASED(cleanup_table_sysc_work_loop)
914	jl	0f
915	clc	8(8,%r12),BASED(cleanup_table_sysc_work_loop+8)
916	jl	cleanup_sysc_return
9170:
918	clc	8(8,%r12),BASED(cleanup_table_io_return)
919	jl	0f
920	clc	8(8,%r12),BASED(cleanup_table_io_return+8)
921	jl	cleanup_io_return
9220:
923	clc	8(8,%r12),BASED(cleanup_table_io_leave)
924	jl	0f
925	clc	8(8,%r12),BASED(cleanup_table_io_leave+8)
926	jl	cleanup_io_leave
9270:
928	clc	8(8,%r12),BASED(cleanup_table_io_work_loop)
929	jl	0f
930	clc	8(8,%r12),BASED(cleanup_table_io_work_loop+8)
931	jl	cleanup_io_return
9320:
933	br	%r14
934
935cleanup_system_call:
936	mvc	__LC_RETURN_PSW(16),0(%r12)
937	cghi	%r12,__LC_MCK_OLD_PSW
938	je	0f
939	la	%r12,__LC_SAVE_AREA+32
940	j	1f
9410:	la	%r12,__LC_SAVE_AREA+64
9421:
943	clc	__LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8)
944	jh	0f
945	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
9460:	clc	__LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16)
947	jhe	cleanup_vtime
948	clc	__LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn)
949	jh	0f
950	mvc	__LC_SAVE_AREA(32),0(%r12)
9510:	stg	%r13,8(%r12)
952	stg	%r12,__LC_SAVE_AREA+96	# argh
953	SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
954	CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
955	lg	%r12,__LC_SAVE_AREA+96	# argh
956	stg	%r15,24(%r12)
957	llgh	%r7,__LC_SVC_INT_CODE
958cleanup_vtime:
959	clc	__LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24)
960	jhe	cleanup_stime
961	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
962cleanup_stime:
963	clc	__LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+32)
964	jh	cleanup_update
965	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
966cleanup_update:
967	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
968	mvc	__LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8)
969	la	%r12,__LC_RETURN_PSW
970	br	%r14
971cleanup_system_call_insn:
972	.quad	sysc_saveall
973	.quad	system_call
974	.quad	sysc_vtime
975	.quad	sysc_stime
976	.quad	sysc_update
977
978cleanup_sysc_return:
979	mvc	__LC_RETURN_PSW(8),0(%r12)
980	mvc	__LC_RETURN_PSW+8(8),BASED(cleanup_table_sysc_return)
981	la	%r12,__LC_RETURN_PSW
982	br	%r14
983
984cleanup_sysc_leave:
985	clc	8(8,%r12),BASED(cleanup_sysc_leave_insn)
986	je	3f
987	clc	8(8,%r12),BASED(cleanup_sysc_leave_insn+8)
988	jhe	0f
989	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
9900:	mvc	__LC_RETURN_PSW(16),SP_PSW(%r15)
991	cghi	%r12,__LC_MCK_OLD_PSW
992	jne	1f
993	mvc	__LC_SAVE_AREA+64(32),SP_R12(%r15)
994	j	2f
9951:	mvc	__LC_SAVE_AREA+32(32),SP_R12(%r15)
9962:	lmg	%r0,%r11,SP_R0(%r15)
997	lg	%r15,SP_R15(%r15)
9983:	la	%r12,__LC_RETURN_PSW
999	br	%r14
1000cleanup_sysc_leave_insn:
1001	.quad	sysc_done - 4
1002	.quad	sysc_done - 16
1003
1004cleanup_io_return:
1005	mvc	__LC_RETURN_PSW(8),0(%r12)
1006	mvc	__LC_RETURN_PSW+8(8),BASED(cleanup_table_io_work_loop)
1007	la	%r12,__LC_RETURN_PSW
1008	br	%r14
1009
1010cleanup_io_leave:
1011	clc	8(8,%r12),BASED(cleanup_io_leave_insn)
1012	je	3f
1013	clc	8(8,%r12),BASED(cleanup_io_leave_insn+8)
1014	jhe	0f
1015	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
10160:	mvc	__LC_RETURN_PSW(16),SP_PSW(%r15)
1017	cghi	%r12,__LC_MCK_OLD_PSW
1018	jne	1f
1019	mvc	__LC_SAVE_AREA+64(32),SP_R12(%r15)
1020	j	2f
10211:	mvc	__LC_SAVE_AREA+32(32),SP_R12(%r15)
10222:	lmg	%r0,%r11,SP_R0(%r15)
1023	lg	%r15,SP_R15(%r15)
10243:	la	%r12,__LC_RETURN_PSW
1025	br	%r14
1026cleanup_io_leave_insn:
1027	.quad	io_done - 4
1028	.quad	io_done - 16
1029
1030/*
1031 * Integer constants
1032 */
1033		.align	4
1034.Lconst:
1035.Lnr_syscalls:	.long	NR_syscalls
1036.L0x0130:	.short	0x130
1037.L0x0140:	.short	0x140
1038.L0x0150:	.short	0x150
1039.L0x0160:	.short	0x160
1040.L0x0170:	.short	0x170
1041.Lcritical_start:
1042		.quad	__critical_start
1043.Lcritical_end:
1044		.quad	__critical_end
1045
1046		.section .rodata, "a"
1047#define SYSCALL(esa,esame,emu)	.long esame
1048sys_call_table:
1049#include "syscalls.S"
1050#undef SYSCALL
1051
1052#ifdef CONFIG_COMPAT
1053
1054#define SYSCALL(esa,esame,emu)	.long emu
1055sys_call_table_emu:
1056#include "syscalls.S"
1057#undef SYSCALL
1058#endif
1059