/* SPDX-License-Identifier: GPL-2.0 */
/*
 *    S390 low-level entry points.
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *		 Hartmut Penner (hp@de.ibm.com),
 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/alternative-asm.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/ctl_reg.h>
#include <asm/dwarf.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/irq.h>
#include <asm/vx-insn.h>
#include <asm/setup.h>
#include <asm/nmi.h>
#include <asm/export.h>
#include <asm/nospec-insn.h>

__PT_R0      =	__PT_GPRS
__PT_R1      =	__PT_GPRS + 8
__PT_R2      =	__PT_GPRS + 16
__PT_R3      =	__PT_GPRS + 24
__PT_R4      =	__PT_GPRS + 32
__PT_R5      =	__PT_GPRS + 40
__PT_R6      =	__PT_GPRS + 48
__PT_R7      =	__PT_GPRS + 56
__PT_R8      =	__PT_GPRS + 64
__PT_R9      =	__PT_GPRS + 72
__PT_R10     =	__PT_GPRS + 80
__PT_R11     =	__PT_GPRS + 88
__PT_R12     =	__PT_GPRS + 96
__PT_R13     =	__PT_GPRS + 104
__PT_R14     =	__PT_GPRS + 112
__PT_R15     =	__PT_GPRS + 120

STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
STACK_SIZE  = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
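# Worked example (illustrative, assuming 4KB pages and THREAD_SIZE_ORDER = 2):
# STACK_SHIFT = 12 + 2 = 14, so each kernel stack is STACK_SIZE = 16KB.
# STACK_INIT is the offset of the initial stack pointer: just below the top
# of the stack, leaving room for one empty stack frame plus a struct pt_regs.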

_TIF_WORK	= (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
		   _TIF_UPROBE | _TIF_GUARDED_STORAGE | _TIF_PATCH_PENDING | \
		   _TIF_NOTIFY_SIGNAL)
_TIF_TRACE	= (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
		   _TIF_SYSCALL_TRACEPOINT)
_CIF_WORK	= (_CIF_ASCE_PRIMARY | _CIF_ASCE_SECONDARY | _CIF_FPU)
_PIF_WORK	= (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)

_LPP_OFFSET	= __LC_LPP

	.macro	TRACE_IRQS_ON
#ifdef CONFIG_TRACE_IRQFLAGS
	basr	%r2,%r0
	brasl	%r14,trace_hardirqs_on_caller
#endif
	.endm

	.macro	TRACE_IRQS_OFF
#ifdef CONFIG_TRACE_IRQFLAGS
	basr	%r2,%r0
	brasl	%r14,trace_hardirqs_off_caller
#endif
	.endm

	.macro	LOCKDEP_SYS_EXIT
#ifdef CONFIG_LOCKDEP
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jz	.+10
	brasl	%r14,lockdep_sys_exit
#endif
	.endm

	.macro	CHECK_STACK savearea
#ifdef CONFIG_CHECK_STACK
	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
	lghi	%r14,\savearea
	jz	stack_overflow
#endif
	.endm
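# How the CHECK_STACK test works (illustrative, e.g. 16KB stacks with a
# 256-byte guard): STACK_SIZE - CONFIG_STACK_GUARD masks the stack-offset
# bits above the guard area, so "tml" yields CC0 (taken by jz) only when all
# of those bits of %r15 are zero, i.e. the stack pointer has run down into
# the guard area at the bottom of the stack.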

	.macro	CHECK_VMAP_STACK savearea,oklabel
#ifdef CONFIG_VMAP_STACK
	lgr	%r14,%r15
	nill	%r14,0x10000 - STACK_SIZE
	oill	%r14,STACK_INIT
	clg	%r14,__LC_KERNEL_STACK
	je	\oklabel
	clg	%r14,__LC_ASYNC_STACK
	je	\oklabel
	clg	%r14,__LC_NODAT_STACK
	je	\oklabel
	clg	%r14,__LC_RESTART_STACK
	je	\oklabel
	lghi	%r14,\savearea
	j	stack_overflow
#else
	j	\oklabel
#endif
	.endm
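# A sketch of the address reconstruction above: "nill" rounds %r15 down to
# the stack base (0x10000 - STACK_SIZE is the low-halfword mask with the
# stack-offset bits cleared), and "oill" then ORs in STACK_INIT to form the
# canonical initial stack pointer, which is compared against each of the
# known per-cpu stacks.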

	.macro	SWITCH_ASYNC savearea,timer,clock
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	4f
#if IS_ENABLED(CONFIG_KVM)
	lgr	%r14,%r9
	larl	%r13,.Lsie_gmap
	slgr	%r14,%r13
	lghi	%r13,.Lsie_done - .Lsie_gmap
	clgr	%r14,%r13
	jhe	0f
	lghi	%r11,\savearea		# inside critical section, do cleanup
	brasl	%r14,.Lcleanup_sie
#endif
0:	larl	%r13,.Lpsw_idle_exit
	cgr	%r13,%r9
	jne	3f

	larl	%r1,smp_cpu_mtid
	llgf	%r1,0(%r1)
	ltgr	%r1,%r1
	jz	2f			# no SMT, skip mt_cycles calculation
	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15)
	larl	%r3,mt_cycles
	ag	%r3,__LC_PERCPU_OFFSET
	la	%r4,__SF_EMPTY+16(%r15)
1:	lg	%r0,0(%r3)
	slg	%r0,0(%r4)
	alg	%r0,64(%r4)
	stg	%r0,0(%r3)
	la	%r3,8(%r3)
	la	%r4,8(%r4)
	brct	%r1,1b

2:	mvc	__CLOCK_IDLE_EXIT(8,%r2), \clock
	mvc	__TIMER_IDLE_EXIT(8,%r2), \timer
	# account system time going idle
	ni	__LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT

	lg	%r13,__LC_STEAL_TIMER
	alg	%r13,__CLOCK_IDLE_ENTER(%r2)
	slg	%r13,__LC_LAST_UPDATE_CLOCK
	stg	%r13,__LC_STEAL_TIMER

	mvc	__LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)

	lg	%r13,__LC_SYSTEM_TIMER
	alg	%r13,__LC_LAST_UPDATE_TIMER
	slg	%r13,__TIMER_IDLE_ENTER(%r2)
	stg	%r13,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)

	nihh	%r8,0xfcfd		# clear wait state and irq bits
3:	lg	%r14,__LC_ASYNC_STACK	# are we already on the target stack?
	slgr	%r14,%r15
	srag	%r14,%r14,STACK_SHIFT
	jnz	5f
	CHECK_STACK \savearea
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	6f
4:	UPDATE_VTIME %r14,%r15,\timer
	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
5:	lg	%r15,__LC_ASYNC_STACK	# load async stack
6:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	.endm
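# In short (a descriptive summary): interrupts that arrive from user space
# (4:) get user-time accounting via UPDATE_VTIME plus the BPENTER handling;
# interrupts that hit the SIE critical section run .Lcleanup_sie first;
# interrupts that hit .Lpsw_idle_exit fold the idle period into the clock
# and timer accounting; and the stack is switched to __LC_ASYNC_STACK only
# if %r15 is not already on it.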

	.macro UPDATE_VTIME w1,w2,enter_timer
	lg	\w1,__LC_EXIT_TIMER
	lg	\w2,__LC_LAST_UPDATE_TIMER
	slg	\w1,\enter_timer
	slg	\w2,__LC_EXIT_TIMER
	alg	\w1,__LC_USER_TIMER
	alg	\w2,__LC_SYSTEM_TIMER
	stg	\w1,__LC_USER_TIMER
	stg	\w2,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),\enter_timer
	.endm
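# The arithmetic relies on the CPU timer counting down, so between an
# earlier sample A and a later sample B the elapsed time is A - B.
# A sketch of the accounting above:
#   USER_TIMER   += EXIT_TIMER - \enter_timer  (last kernel exit -> this entry)
#   SYSTEM_TIMER += LAST_UPDATE_TIMER - EXIT_TIMER
# and LAST_UPDATE_TIMER is then advanced to this entry's timer value.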

	.macro RESTORE_SM_CLEAR_PER
	stg	%r8,__LC_RETURN_PSW
	ni	__LC_RETURN_PSW,0xbf
	ssm	__LC_RETURN_PSW
	.endm

	.macro ENABLE_INTS
	stosm	__SF_EMPTY(%r15),3
	.endm

	.macro ENABLE_INTS_TRACE
	TRACE_IRQS_ON
	ENABLE_INTS
	.endm

	.macro DISABLE_INTS
	stnsm	__SF_EMPTY(%r15),0xfc
	.endm

	.macro DISABLE_INTS_TRACE
	DISABLE_INTS
	TRACE_IRQS_OFF
	.endm

	.macro STCK savearea
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
	.insn	s,0xb27c0000,\savearea		# store clock fast
#else
	.insn	s,0xb2050000,\savearea		# store clock
#endif
	.endm

	/*
	 * The TSTMSK macro generates a test-under-mask instruction by
	 * calculating the memory offset for the specified mask value.
	 * Mask value can be any constant.  The macro shifts the mask
	 * value to calculate the memory offset for the test-under-mask
	 * instruction.
	 */
	.macro TSTMSK addr, mask, size=8, bytepos=0
		.if (\bytepos < \size) && (\mask >> 8)
			.if (\mask & 0xff)
				.error "Mask exceeds byte boundary"
			.endif
			TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
			.exitm
		.endif
		.ifeq \mask
			.error "Mask must not be zero"
		.endif
		off = \size - \bytepos - 1
		tm	off+\addr, \mask
	.endm
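# Example expansion (illustrative): for a mask that fits into the lowest
# byte of an 8-byte field, such as
#	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
# the recursion stops at bytepos=0 and the macro emits the single
#	tm	7+__LC_CPU_FLAGS,_CIF_FPU
# while a mask in a higher byte recurses with \mask >> 8 and a smaller
# resulting offset.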

	.macro BPOFF
	ALTERNATIVE "", ".long 0xb2e8c000", 82
	.endm

	.macro BPON
	ALTERNATIVE "", ".long 0xb2e8d000", 82
	.endm

	.macro BPENTER tif_ptr,tif_mask
	ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .long 0xb2e8d000", \
		    "", 82
	.endm

	.macro BPEXIT tif_ptr,tif_mask
	TSTMSK	\tif_ptr,\tif_mask
	ALTERNATIVE "jz .+8;  .long 0xb2e8c000", \
		    "jnz .+8; .long 0xb2e8d000", 82
	.endm
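# Note on the alternatives above (a descriptive aside): facility bit 82 is
# used here as a software-controlled switch that the nospec setup code sets
# or clears depending on the "nobp" mitigation state, so each BP* macro is
# patched at boot between the branch-prediction control sequence and a
# (near) no-op.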

	GEN_BR_THUNK %r9
	GEN_BR_THUNK %r14
	GEN_BR_THUNK %r14,%r11

	.section .kprobes.text, "ax"
.Ldummy:
	/*
	 * This nop exists only to prevent __switch_to from starting at
	 * the beginning of the kprobes text section. In that case we would
	 * have several symbols at the same address, and e.g. objdump would
	 * take an arbitrary symbol name when disassembling this code.
	 * With the added nop in between, the __switch_to symbol is unique
	 * again.
	 */
	nop	0

ENTRY(__bpon)
	.globl __bpon
	BPON
	BR_EX	%r14
ENDPROC(__bpon)

/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
ENTRY(__switch_to)
	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	lghi	%r4,__TASK_stack
	lghi	%r1,__TASK_thread
	llill	%r5,STACK_INIT
	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
	lg	%r15,0(%r4,%r3)			# start of kernel stack of next
	agr	%r15,%r5			# end of kernel stack of next
	stg	%r3,__LC_CURRENT		# store task struct of next
	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
	aghi	%r3,__TASK_pid
	mvc	__LC_CURRENT_PID(4,%r0),0(%r3)	# store pid of next
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
	BR_EX	%r14
ENDPROC(__switch_to)

#if IS_ENABLED(CONFIG_KVM)
/*
 * sie64a calling convention:
 * %r2 pointer to sie control block
 * %r3 guest register save area
 */
ENTRY(sie64a)
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	lg	%r12,__LC_CURRENT
	stg	%r2,__SF_SIE_CONTROL(%r15)	# save control block pointer
	stg	%r3,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
	xc	__SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU		# load guest fp/vx registers ?
	jno	.Lsie_load_guest_gprs
	brasl	%r14,load_fpu_regs		# load guest fp/vx regs
.Lsie_load_guest_gprs:
	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
	lg	%r14,__LC_GMAP			# get gmap pointer
	ltgr	%r14,%r14
	jz	.Lsie_gmap
	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
.Lsie_gmap:
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),3		# last exit...
	jnz	.Lsie_skip
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsie_skip			# exit if fp/vx regs changed
	BPEXIT	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_entry:
	sie	0(%r14)
	BPOFF
	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_skip:
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
.Lsie_done:
# some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
# Other instructions between sie64a and .Lsie_done should not cause program
# interrupts. So let's use 3 nops as a landing pad for all possible rewinds.
# See also .Lcleanup_sie
.Lrewind_pad6:
	nopr	7
.Lrewind_pad4:
	nopr	7
.Lrewind_pad2:
	nopr	7
	.globl sie_exit
sie_exit:
	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	xgr	%r0,%r0				# clear guest registers to
	xgr	%r1,%r1				# prevent speculative use
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_SIE_REASON(%r15)	# return exit reason code
	BR_EX	%r14
.Lsie_fault:
	lghi	%r14,-EFAULT
	stg	%r14,__SF_SIE_REASON(%r15)	# set exit reason code
	j	sie_exit

	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
	EX_TABLE(sie_exit,.Lsie_fault)
ENDPROC(sie64a)
EXPORT_SYMBOL(sie64a)
EXPORT_SYMBOL(sie_exit)
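# Typical C-side use (a sketch; the KVM code in arch/s390/kvm is the
# authoritative caller):
#	rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
# rc is the reason code stored in __SF_SIE_REASON: 0 on a regular SIE exit,
# or -EFAULT when one of the EX_TABLE entries above routes a fault to
# .Lsie_fault.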
#endif

/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are entered with interrupts disabled.
 */

ENTRY(system_call)
	stpt	__LC_SYNC_ENTER_TIMER
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	BPOFF
	lg	%r12,__LC_CURRENT
	lghi	%r14,_PIF_SYSCALL
.Lsysc_per:
	lghi	%r13,__TASK_thread
	lg	%r15,__LC_KERNEL_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
	UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	mvc	__PT_PSW(16,%r11),__LC_SVC_OLD_PSW
	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
	stg	%r14,__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	ENABLE_INTS
.Lsysc_do_svc:
	# clear user controlled register to prevent speculative use
	xgr	%r0,%r0
	# load address of system call table
	lg	%r10,__THREAD_sysc_table(%r13,%r12)
	llgh	%r8,__PT_INT_CODE+2(%r11)
	slag	%r8,%r8,3			# shift and test for svc 0
	jnz	.Lsysc_nr_ok
	# svc 0: system call number in %r1
	llgfr	%r1,%r1				# clear high word in r1
	sth	%r1,__PT_INT_CODE+2(%r11)
	cghi	%r1,NR_syscalls
	jnl	.Lsysc_nr_ok
	slag	%r8,%r1,3
.Lsysc_nr_ok:
	stg	%r2,__PT_ORIG_GPR2(%r11)
	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
	lg	%r9,0(%r8,%r10)			# get system call address
	TSTMSK	__TI_flags(%r12),_TIF_TRACE
	jnz	.Lsysc_tracesys
	BASR_EX	%r14,%r9			# call sys_xxxx
	stg	%r2,__PT_R2(%r11)		# store return value

.Lsysc_return:
#ifdef CONFIG_DEBUG_RSEQ
	lgr	%r2,%r11
	brasl	%r14,rseq_syscall
#endif
	LOCKDEP_SYS_EXIT
.Lsysc_tif:
	DISABLE_INTS
	TSTMSK	__PT_FLAGS(%r11),_PIF_WORK
	jnz	.Lsysc_work
	TSTMSK	__TI_flags(%r12),_TIF_WORK
	jnz	.Lsysc_work			# check for work
	TSTMSK	__LC_CPU_FLAGS,(_CIF_WORK-_CIF_FPU)
	jnz	.Lsysc_work
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
.Lsysc_restore:
	DISABLE_INTS
	TSTMSK	__LC_CPU_FLAGS, _CIF_FPU
	jz	.Lsysc_skip_fpu
	brasl	%r14,load_fpu_regs
.Lsysc_skip_fpu:
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	stpt	__LC_EXIT_TIMER
	lmg	%r0,%r15,__PT_R0(%r11)
	b	__LC_RETURN_LPSWE

#
# One of the work bits is on. Find out which one.
#
.Lsysc_work:
	ENABLE_INTS
	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
	jo	.Lsysc_reschedule
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
	jo	.Lsysc_syscall_restart
#ifdef CONFIG_UPROBES
	TSTMSK	__TI_flags(%r12),_TIF_UPROBE
	jo	.Lsysc_uprobe_notify
#endif
	TSTMSK	__TI_flags(%r12),_TIF_GUARDED_STORAGE
	jo	.Lsysc_guarded_storage
	TSTMSK	__PT_FLAGS(%r11),_PIF_PER_TRAP
	jo	.Lsysc_singlestep
#ifdef CONFIG_LIVEPATCH
	TSTMSK	__TI_flags(%r12),_TIF_PATCH_PENDING
	jo	.Lsysc_patch_pending	# handle live patching just before
					# signals and possible syscall restart
#endif
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
	jo	.Lsysc_syscall_restart
	TSTMSK	__TI_flags(%r12),(_TIF_SIGPENDING|_TIF_NOTIFY_SIGNAL)
	jnz	.Lsysc_sigpending
	TSTMSK	__TI_flags(%r12),_TIF_NOTIFY_RESUME
	jo	.Lsysc_notify_resume
	TSTMSK	__LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
	jnz	.Lsysc_asce
	j	.Lsysc_return

#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lsysc_reschedule:
	larl	%r14,.Lsysc_return
	jg	schedule

#
# _CIF_ASCE_PRIMARY and/or _CIF_ASCE_SECONDARY set, load user space asce
#
.Lsysc_asce:
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE_SECONDARY
	lctlg	%c7,%c7,__LC_VDSO_ASCE		# load secondary asce
	TSTMSK	__LC_CPU_FLAGS,_CIF_ASCE_PRIMARY
	jz	.Lsysc_return
#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
	tm	__LC_STFLE_FAC_LIST+3,0x10	# has MVCOS ?
	jnz	.Lsysc_set_fs_fixup
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	j	.Lsysc_return
.Lsysc_set_fs_fixup:
#endif
	larl	%r14,.Lsysc_return
	jg	set_fs_fixup


#
# _TIF_SIGPENDING is set, call do_signal
#
.Lsysc_sigpending:
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_signal
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL
	jno	.Lsysc_return
.Lsysc_do_syscall:
	lghi	%r13,__TASK_thread
	lmg	%r2,%r7,__PT_R2(%r11)	# load svc arguments
	lghi	%r1,0			# svc 0 returns -ENOSYS
	j	.Lsysc_do_svc

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lsysc_notify_resume:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_notify_resume

#
# _TIF_UPROBE is set, call uprobe_notify_resume
#
#ifdef CONFIG_UPROBES
.Lsysc_uprobe_notify:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	uprobe_notify_resume
#endif

#
# _TIF_GUARDED_STORAGE is set, call guarded_storage_load
#
.Lsysc_guarded_storage:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	gs_load_bc_cb
#
# _TIF_PATCH_PENDING is set, call klp_update_patch_state
#
#ifdef CONFIG_LIVEPATCH
.Lsysc_patch_pending:
	lg	%r2,__LC_CURRENT	# pass pointer to task struct
	larl	%r14,.Lsysc_return
	jg	klp_update_patch_state
#endif

#
# _PIF_PER_TRAP is set, call do_per_trap
#
.Lsysc_singlestep:
	ni	__PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_per_trap

#
# _PIF_SYSCALL_RESTART is set, repeat the current system call
#
.Lsysc_syscall_restart:
	ni	__PT_FLAGS+7(%r11),255-_PIF_SYSCALL_RESTART
	lmg	%r1,%r7,__PT_R1(%r11)	# load svc arguments
	lg	%r2,__PT_ORIG_GPR2(%r11)
	j	.Lsysc_do_svc

#
# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
# and after the system call
#
.Lsysc_tracesys:
	lgr	%r2,%r11		# pass pointer to pt_regs
	la	%r3,0
	llgh	%r0,__PT_INT_CODE+2(%r11)
	stg	%r0,__PT_R2(%r11)
	brasl	%r14,do_syscall_trace_enter
	lghi	%r0,NR_syscalls
	clgr	%r0,%r2
	jnh	.Lsysc_tracenogo
	sllg	%r8,%r2,3
	lg	%r9,0(%r8,%r10)
	lmg	%r3,%r7,__PT_R3(%r11)
	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
	lg	%r2,__PT_ORIG_GPR2(%r11)
	BASR_EX	%r14,%r9		# call sys_xxx
	stg	%r2,__PT_R2(%r11)	# store return value
.Lsysc_tracenogo:
	TSTMSK	__TI_flags(%r12),_TIF_TRACE
	jz	.Lsysc_return
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_syscall_trace_exit
ENDPROC(system_call)

#
# a new process exits the kernel with ret_from_fork
#
ENTRY(ret_from_fork)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	lg	%r12,__LC_CURRENT
	brasl	%r14,schedule_tail
	tm	__PT_PSW+1(%r11),0x01	# forking a kernel thread ?
	jne	.Lsysc_tracenogo
	# it's a kernel thread
	lmg	%r9,%r10,__PT_R9(%r11)	# load gprs
	la	%r2,0(%r10)
	BASR_EX	%r14,%r9
	j	.Lsysc_tracenogo
ENDPROC(ret_from_fork)

ENTRY(kernel_thread_starter)
	la	%r2,0(%r10)
	BASR_EX	%r14,%r9
	j	.Lsysc_tracenogo
ENDPROC(kernel_thread_starter)

/*
 * Program check handler routine
 */

ENTRY(pgm_check_handler)
	stpt	__LC_SYNC_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	lg	%r10,__LC_LAST_BREAK
	srag	%r11,%r10,12
	jnz	0f
	/* if __LC_LAST_BREAK is < 4096, it contains one of
	 * the lpswe addresses in lowcore. Set it to 1 (initial state)
	 * to prevent leaking that address to userspace.
	 */
	lghi	%r10,1
0:	lg	%r12,__LC_CURRENT
	lghi	%r11,0
	lmg	%r8,%r9,__LC_PGM_OLD_PSW
	tmhh	%r8,0x0001		# test problem state bit
	jnz	3f			# -> fault in user space
#if IS_ENABLED(CONFIG_KVM)
	# cleanup critical section for program checks in sie64a
	lgr	%r14,%r9
	larl	%r13,.Lsie_gmap
	slgr	%r14,%r13
	lghi	%r13,.Lsie_done - .Lsie_gmap
	clgr	%r14,%r13
	jhe	1f
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	larl	%r9,sie_exit			# skip forward to sie_exit
	lghi	%r11,_PIF_GUEST_FAULT
#endif
1:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
	jnz	2f			# -> enabled, can't be a double fault
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jnz	.Lpgm_svcper		# -> single stepped svc
2:	CHECK_STACK __LC_SAVE_AREA_SYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	# CHECK_VMAP_STACK branches to stack_overflow or 5f
	CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,5f
3:	UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
	lg	%r15,__LC_KERNEL_STACK
	lgr	%r14,%r12
	aghi	%r14,__TASK_thread	# pointer to thread_struct
	lghi	%r13,__LC_PGM_TDB
	tm	__LC_PGM_ILC+2,0x02	# check for transaction abort
	jz	4f
	mvc	__THREAD_trap_tdb(256,%r14),0(%r13)
4:	stg	%r10,__THREAD_last_break(%r14)
5:	lgr	%r13,%r11
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_INT_CODE(4,%r11),__LC_PGM_ILC
	mvc	__PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
	stg	%r13,__PT_FLAGS(%r11)
	stg	%r10,__PT_ARGS(%r11)
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jz	6f
	tmhh	%r8,0x0001		# kernel per event ?
	jz	.Lpgm_kprobe
	oi	__PT_FLAGS+7(%r11),_PIF_PER_TRAP
	mvc	__THREAD_per_address(8,%r14),__LC_PER_ADDRESS
	mvc	__THREAD_per_cause(2,%r14),__LC_PER_CODE
	mvc	__THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
6:	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	RESTORE_SM_CLEAR_PER
	larl	%r1,pgm_check_table
	llgh	%r10,__PT_INT_CODE+2(%r11)
	nill	%r10,0x007f
	sll	%r10,3
	je	.Lpgm_return
	lg	%r9,0(%r10,%r1)		# load address of handler routine
	lgr	%r2,%r11		# pass pointer to pt_regs
	BASR_EX	%r14,%r9		# branch to interrupt-handler
.Lpgm_return:
	LOCKDEP_SYS_EXIT
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	.Lsysc_restore
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL
	jo	.Lsysc_do_syscall
	j	.Lsysc_tif

#
# PER event in supervisor state, must be kprobes
#
.Lpgm_kprobe:
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	RESTORE_SM_CLEAR_PER
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_per_trap
	j	.Lpgm_return

#
# single stepped system call
#
.Lpgm_svcper:
	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
	larl	%r14,.Lsysc_per
	stg	%r14,__LC_RETURN_PSW+8
	lghi	%r14,_PIF_SYSCALL | _PIF_PER_TRAP
	lpswe	__LC_RETURN_PSW		# branch to .Lsysc_per
ENDPROC(pgm_check_handler)

/*
 * IO interrupt handler routine
 */
ENTRY(io_int_handler)
	STCK	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r12,__LC_CURRENT
	lmg	%r8,%r9,__LC_IO_OLD_PSW
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER,__LC_INT_CLOCK
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	TSTMSK	__LC_CPU_FLAGS,_CIF_IGNORE_IRQ
	jo	.Lio_restore
	TRACE_IRQS_OFF
.Lio_loop:
	lgr	%r2,%r11		# pass pointer to pt_regs
	lghi	%r3,IO_INTERRUPT
	tm	__PT_INT_CODE+8(%r11),0x80	# adapter interrupt ?
	jz	.Lio_call
	lghi	%r3,THIN_INTERRUPT
.Lio_call:
	brasl	%r14,do_IRQ
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_LPAR
	jz	.Lio_return
	tpi	0
	jz	.Lio_return
	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
	j	.Lio_loop
.Lio_return:
	LOCKDEP_SYS_EXIT
	TSTMSK	__TI_flags(%r12),_TIF_WORK
	jnz	.Lio_work		# there is work to do (signals etc.)
	TSTMSK	__LC_CPU_FLAGS,_CIF_WORK
	jnz	.Lio_work
.Lio_restore:
	TRACE_IRQS_ON
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	.Lio_exit_kernel
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
.Lio_exit_kernel:
	lmg	%r0,%r15,__PT_R0(%r11)
	b	__LC_RETURN_LPSWE
.Lio_done:

#
# There is work to do, find out in which context we have been interrupted:
# 1) if we return to user space we can do all _TIF_WORK work
# 2) if we return to kernel code and kvm is enabled check if we need to
#    modify the psw to leave SIE
# 3) if we return to kernel code and preemptive scheduling is enabled check
#    the preemption counter and if it is zero call preempt_schedule_irq
# Before any work can be done, a switch to the kernel stack is required.
#
.Lio_work:
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jo	.Lio_work_user		# yes -> do resched & signal
#ifdef CONFIG_PREEMPTION
	# check for preemptive scheduling
	icm	%r0,15,__LC_PREEMPT_COUNT
	jnz	.Lio_restore		# preemption is disabled
	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
	jno	.Lio_restore
	# switch to kernel stack
	lg	%r1,__PT_R15(%r11)
	aghi	%r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1
	brasl	%r14,preempt_schedule_irq
	j	.Lio_return
#else
	j	.Lio_restore
#endif

#
# Need to do work before returning to userspace, switch to kernel stack
#
.Lio_work_user:
	lg	%r1,__LC_KERNEL_STACK
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1

#
# One of the work bits is on. Find out which one.
#
	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
	jo	.Lio_reschedule
#ifdef CONFIG_LIVEPATCH
	TSTMSK	__TI_flags(%r12),_TIF_PATCH_PENDING
	jo	.Lio_patch_pending
#endif
	TSTMSK	__TI_flags(%r12),(_TIF_SIGPENDING|_TIF_NOTIFY_SIGNAL)
	jnz	.Lio_sigpending
	TSTMSK	__TI_flags(%r12),_TIF_NOTIFY_RESUME
	jo	.Lio_notify_resume
	TSTMSK	__TI_flags(%r12),_TIF_GUARDED_STORAGE
	jo	.Lio_guarded_storage
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lio_vxrs
	TSTMSK	__LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
	jnz	.Lio_asce
	j	.Lio_return

#
# _CIF_ASCE_PRIMARY and/or _CIF_ASCE_SECONDARY set, load user space asce
#
.Lio_asce:
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE_SECONDARY
	lctlg	%c7,%c7,__LC_VDSO_ASCE		# load secondary asce
	TSTMSK	__LC_CPU_FLAGS,_CIF_ASCE_PRIMARY
	jz	.Lio_return
#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
	tm	__LC_STFLE_FAC_LIST+3,0x10	# has MVCOS ?
	jnz	.Lio_set_fs_fixup
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	j	.Lio_return
.Lio_set_fs_fixup:
#endif
	larl	%r14,.Lio_return
	jg	set_fs_fixup

#
# _CIF_FPU is set, restore floating-point controls and floating-point registers.
#
.Lio_vxrs:
	larl	%r14,.Lio_return
	jg	load_fpu_regs

#
# _TIF_GUARDED_STORAGE is set, call guarded_storage_load
#
.Lio_guarded_storage:
	ENABLE_INTS_TRACE
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,gs_load_bc_cb
	DISABLE_INTS_TRACE
	j	.Lio_return

#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lio_reschedule:
	ENABLE_INTS_TRACE
	brasl	%r14,schedule		# call scheduler
	DISABLE_INTS_TRACE
	j	.Lio_return

#
# _TIF_PATCH_PENDING is set, call klp_update_patch_state
#
#ifdef CONFIG_LIVEPATCH
.Lio_patch_pending:
	lg	%r2,__LC_CURRENT	# pass pointer to task struct
	larl	%r14,.Lio_return
	jg	klp_update_patch_state
#endif

#
# _TIF_SIGPENDING or _TIF_NOTIFY_SIGNAL is set, call do_signal
#
.Lio_sigpending:
	ENABLE_INTS_TRACE
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_signal
	DISABLE_INTS_TRACE
	j	.Lio_return

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lio_notify_resume:
	ENABLE_INTS_TRACE
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_notify_resume
	DISABLE_INTS_TRACE
	j	.Lio_return
ENDPROC(io_int_handler)

/*
 * External interrupt handler routine
 */
ENTRY(ext_int_handler)
	STCK	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r12,__LC_CURRENT
	lmg	%r8,%r9,__LC_EXT_OLD_PSW
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER,__LC_INT_CLOCK
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	lghi	%r1,__LC_EXT_PARAMS2
	mvc	__PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
	mvc	__PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
	mvc	__PT_INT_PARM_LONG(8,%r11),0(%r1)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	TSTMSK	__LC_CPU_FLAGS,_CIF_IGNORE_IRQ
	jo	.Lio_restore
	TRACE_IRQS_OFF
	lgr	%r2,%r11		# pass pointer to pt_regs
	lghi	%r3,EXT_INTERRUPT
	brasl	%r14,do_IRQ
	j	.Lio_return
ENDPROC(ext_int_handler)

/*
 * Load idle PSW.
 */
ENTRY(psw_idle)
	stg	%r14,(__SF_GPRS+8*8)(%r15)
	stg	%r3,__SF_EMPTY(%r15)
	larl	%r1,.Lpsw_idle_exit
	stg	%r1,__SF_EMPTY+8(%r15)
	larl	%r1,smp_cpu_mtid
	llgf	%r1,0(%r1)
	ltgr	%r1,%r1
	jz	.Lpsw_idle_stcctm
	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
.Lpsw_idle_stcctm:
	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
	BPON
	STCK	__CLOCK_IDLE_ENTER(%r2)
	stpt	__TIMER_IDLE_ENTER(%r2)
	lpswe	__SF_EMPTY(%r15)
.Lpsw_idle_exit:
	BR_EX	%r14
ENDPROC(psw_idle)
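# C-side view (a sketch, assuming the usual prototype in the idle code):
#	void psw_idle(struct s390_idle_data *data, unsigned long psw_mask);
# %r2 points to the idle data block used for __CLOCK_IDLE_ENTER and
# __TIMER_IDLE_ENTER above, %r3 is the enabled-wait PSW mask that lpswe
# loads together with the .Lpsw_idle_exit continuation address.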

/*
 * Store floating-point controls and floating-point or vector registers
 * depending on whether the vector facility is available. A critical
 * section cleanup assures that the registers are stored even if
 * interrupted for some other work. The CIF_FPU flag is set to trigger a
 * lazy restore of the register contents at return from io or a system
 * call.
 */
ENTRY(save_fpu_regs)
	stnsm	__SF_EMPTY(%r15),0xfc
	lg	%r2,__LC_CURRENT
	aghi	%r2,__TASK_thread
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsave_fpu_regs_exit
	stfpc	__THREAD_FPU_fpc(%r2)
	lg	%r3,__THREAD_FPU_regs(%r2)
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	jz	.Lsave_fpu_regs_fp	  # no -> store FP regs
	VSTM	%v0,%v15,0,%r3		  # vstm 0,15,0(3)
	VSTM	%v16,%v31,256,%r3	  # vstm 16,31,256(3)
	j	.Lsave_fpu_regs_done	  # -> set CIF_FPU flag
.Lsave_fpu_regs_fp:
	std	0,0(%r3)
	std	1,8(%r3)
	std	2,16(%r3)
	std	3,24(%r3)
	std	4,32(%r3)
	std	5,40(%r3)
	std	6,48(%r3)
	std	7,56(%r3)
	std	8,64(%r3)
	std	9,72(%r3)
	std	10,80(%r3)
	std	11,88(%r3)
	std	12,96(%r3)
	std	13,104(%r3)
	std	14,112(%r3)
	std	15,120(%r3)
.Lsave_fpu_regs_done:
	oi	__LC_CPU_FLAGS+7,_CIF_FPU
.Lsave_fpu_regs_exit:
	ssm	__SF_EMPTY(%r15)
	BR_EX	%r14
.Lsave_fpu_regs_end:
ENDPROC(save_fpu_regs)
EXPORT_SYMBOL(save_fpu_regs)
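# Usage note (a sketch): save_fpu_regs takes no arguments and acts on
# "current", so C callers simply do
#	save_fpu_regs();
# before touching the task's FP/VX state; the _CIF_FPU flag set above then
# triggers the lazy reload on the next return to user space.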

/*
 * Load floating-point controls and floating-point or vector registers.
 * A critical section cleanup assures that the register contents are
 * loaded even if interrupted for some other work.
 *
 * There are special calling conventions to fit into sysc and io return work:
 *	%r15:	<kernel stack>
 * The function requires:
 *	%r4	(scratch register, clobbered)
 */
load_fpu_regs:
	stnsm	__SF_EMPTY(%r15),0xfc
	lg	%r4,__LC_CURRENT
	aghi	%r4,__TASK_thread
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jno	.Lload_fpu_regs_exit
	lfpc	__THREAD_FPU_fpc(%r4)
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
	jz	.Lload_fpu_regs_fp		# -> no VX, load FP regs
	VLM	%v0,%v15,0,%r4
	VLM	%v16,%v31,256,%r4
	j	.Lload_fpu_regs_done
.Lload_fpu_regs_fp:
	ld	0,0(%r4)
	ld	1,8(%r4)
	ld	2,16(%r4)
	ld	3,24(%r4)
	ld	4,32(%r4)
	ld	5,40(%r4)
	ld	6,48(%r4)
	ld	7,56(%r4)
	ld	8,64(%r4)
	ld	9,72(%r4)
	ld	10,80(%r4)
	ld	11,88(%r4)
	ld	12,96(%r4)
	ld	13,104(%r4)
	ld	14,112(%r4)
	ld	15,120(%r4)
.Lload_fpu_regs_done:
	ni	__LC_CPU_FLAGS+7,255-_CIF_FPU
.Lload_fpu_regs_exit:
	ssm	__SF_EMPTY(%r15)
	BR_EX	%r14
.Lload_fpu_regs_end:
ENDPROC(load_fpu_regs)

/*
 * Machine check handler routines
 */
ENTRY(mcck_int_handler)
	STCK	__LC_MCCK_CLOCK
	BPOFF
	la	%r1,4095		# validate r1
	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# validate cpu timer
	sckc	__LC_CLOCK_COMPARATOR			# validate comparator
	lam	%a0,%a15,__LC_AREGS_SAVE_AREA-4095(%r1) # validate acrs
	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
	lg	%r12,__LC_CURRENT
	lmg	%r8,%r9,__LC_MCK_OLD_PSW
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CR_VALID
	jno	.Lmcck_panic		# control registers invalid -> panic
	la	%r14,4095
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs
	ptlb
	lg	%r11,__LC_MCESAD-4095(%r14) # extended machine check save area
	nill	%r11,0xfc00		# MCESA_ORIGIN_MASK
	TSTMSK	__LC_CREGS_SAVE_AREA+16-4095(%r14),CR2_GUARDED_STORAGE
	jno	0f
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_GS_VALID
	jno	0f
	.insn	 rxy,0xe3000000004d,0,__MCESA_GS_SAVE_AREA(%r11) # LGSC
0:	l	%r14,__LC_FP_CREG_SAVE_AREA-4095(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_FC_VALID
	jo	0f
	sr	%r14,%r14
0:	sfpc	%r14
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	jo	0f
	lghi	%r14,__LC_FPREGS_SAVE_AREA
	ld	%f0,0(%r14)
	ld	%f1,8(%r14)
	ld	%f2,16(%r14)
	ld	%f3,24(%r14)
	ld	%f4,32(%r14)
	ld	%f5,40(%r14)
	ld	%f6,48(%r14)
	ld	%f7,56(%r14)
	ld	%f8,64(%r14)
	ld	%f9,72(%r14)
	ld	%f10,80(%r14)
	ld	%f11,88(%r14)
	ld	%f12,96(%r14)
	ld	%f13,104(%r14)
	ld	%f14,112(%r14)
	ld	%f15,120(%r14)
	j	1f
0:	VLM	%v0,%v15,0,%r11
	VLM	%v16,%v31,256,%r11
1:	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
	jo	3f
	la	%r14,__LC_SYNC_ENTER_TIMER
	clc	0(8,%r14),__LC_ASYNC_ENTER_TIMER
	jl	0f
	la	%r14,__LC_ASYNC_ENTER_TIMER
0:	clc	0(8,%r14),__LC_EXIT_TIMER
	jl	1f
	la	%r14,__LC_EXIT_TIMER
1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
	jl	2f
	la	%r14,__LC_LAST_UPDATE_TIMER
2:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
3:	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
	jno	.Lmcck_panic
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	4f
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
	jno	.Lmcck_panic
4:	ssm	__LC_PGM_NEW_PSW	# turn dat on, keep irqs off
	SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER,__LC_MCCK_CLOCK
.Lmcck_skip:
	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),0(%r14)
	stmg	%r8,%r9,__PT_PSW(%r11)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,s390_do_machine_check
	cghi	%r2,0
	je	.Lmcck_return
	lg	%r1,__LC_KERNEL_STACK	# switch to kernel stack
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1
	TRACE_IRQS_OFF
	brasl	%r14,s390_handle_mcck
	TRACE_IRQS_ON
.Lmcck_return:
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
	jno	0f
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
0:	lmg	%r11,%r15,__PT_R11(%r11)
	b	__LC_RETURN_MCCK_LPSWE

.Lmcck_panic:
	lg	%r15,__LC_NODAT_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	j	.Lmcck_skip
ENDPROC(mcck_int_handler)

#
# PSW restart interrupt handler
#
ENTRY(restart_int_handler)
	ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
	stg	%r15,__LC_SAVE_AREA_RESTART
	lg	%r15,__LC_RESTART_STACK
	xc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	mvc	STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
	mvc	STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
	lg	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
	lg	%r2,__LC_RESTART_DATA
	lg	%r3,__LC_RESTART_SOURCE
	ltgr	%r3,%r3				# test source cpu address
	jm	1f				# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
	brc	10,0b				# wait for status stored
1:	basr	%r14,%r1			# call function
	stap	__SF_EMPTY(%r15)		# store cpu address
	llgh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
	brc	2,2b
3:	j	3b
ENDPROC(restart_int_handler)

	.section .kprobes.text, "ax"

#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Setup a pt_regs so that show_trace can provide a good call trace.
 */
ENTRY(stack_overflow)
	lg	%r15,__LC_NODAT_STACK	# change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)
	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	jg	kernel_stack_overflow
ENDPROC(stack_overflow)
#endif

#if IS_ENABLED(CONFIG_KVM)
.Lcleanup_sie:
	cghi	%r11,__LC_SAVE_AREA_ASYNC	# is this a normal interrupt?
	je	1f
	larl	%r13,.Lsie_entry
	slgr	%r9,%r13
	lghi	%r13,.Lsie_skip - .Lsie_entry
	clgr	%r9,%r13
	jh	1f
	oi	__LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
1:	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
	lg	%r9,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	larl	%r9,sie_exit			# skip forward to sie_exit
	BR_EX	%r14,%r11

#endif
	.section .rodata, "a"
#define SYSCALL(esame,emu)	.quad __s390x_ ## esame
	.globl	sys_call_table
sys_call_table:
#include "asm/syscall_table.h"
#undef SYSCALL
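# Illustrative expansion (hypothetical entry): a line such as
#	SYSCALL(sys_read,compat_sys_read)
# in asm/syscall_table.h emits ".quad __s390x_sys_read" into the table
# above, and ".quad __s390_compat_sys_read" into the emulation table below.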

#ifdef CONFIG_COMPAT

#define SYSCALL(esame,emu)	.quad __s390_ ## emu
	.globl	sys_call_table_emu
sys_call_table_emu:
#include "asm/syscall_table.h"
#undef SYSCALL
#endif