/* SPDX-License-Identifier: GPL-2.0 */
/*
 * S390 low-level entry points.
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *		 Hartmut Penner (hp@de.ibm.com),
 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/alternative-asm.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/ctl_reg.h>
#include <asm/dwarf.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/irq.h>
#include <asm/vx-insn.h>
#include <asm/setup.h>
#include <asm/nmi.h>
#include <asm/export.h>
#include <asm/nospec-insn.h>

__PT_R0	     =	__PT_GPRS
__PT_R1	     =	__PT_GPRS + 8
__PT_R2	     =	__PT_GPRS + 16
__PT_R3	     =	__PT_GPRS + 24
__PT_R4	     =	__PT_GPRS + 32
__PT_R5	     =	__PT_GPRS + 40
__PT_R6	     =	__PT_GPRS + 48
__PT_R7	     =	__PT_GPRS + 56
__PT_R8	     =	__PT_GPRS + 64
__PT_R9	     =	__PT_GPRS + 72
__PT_R10     =	__PT_GPRS + 80
__PT_R11     =	__PT_GPRS + 88
__PT_R12     =	__PT_GPRS + 96
__PT_R13     =	__PT_GPRS + 104
__PT_R14     =	__PT_GPRS + 112
__PT_R15     =	__PT_GPRS + 120

STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
STACK_SIZE  = 1 << STACK_SHIFT
STACK_INIT  = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE

_TIF_WORK	= (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
		   _TIF_UPROBE | _TIF_GUARDED_STORAGE | _TIF_PATCH_PENDING)
_TIF_TRACE	= (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
		   _TIF_SYSCALL_TRACEPOINT)
_CIF_WORK	= (_CIF_MCCK_PENDING | _CIF_ASCE_PRIMARY | \
		   _CIF_ASCE_SECONDARY | _CIF_FPU)
_PIF_WORK	= (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)

_LPP_OFFSET	= __LC_LPP

#define BASED(name) name-cleanup_critical(%r13)

	.macro	TRACE_IRQS_ON
#ifdef CONFIG_TRACE_IRQFLAGS
	basr	%r2,%r0
	brasl	%r14,trace_hardirqs_on_caller
#endif
	.endm

	.macro	TRACE_IRQS_OFF
#ifdef CONFIG_TRACE_IRQFLAGS
	basr	%r2,%r0
	brasl	%r14,trace_hardirqs_off_caller
#endif
	.endm

	.macro	LOCKDEP_SYS_EXIT
#ifdef CONFIG_LOCKDEP
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jz	.+10
	brasl	%r14,lockdep_sys_exit
#endif
	.endm

	.macro	CHECK_STACK savearea
#ifdef CONFIG_CHECK_STACK
	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
	lghi	%r14,\savearea
	jz	stack_overflow
#endif
	.endm

	.macro	CHECK_VMAP_STACK savearea,oklabel
#ifdef CONFIG_VMAP_STACK
	lgr	%r14,%r15
	nill	%r14,0x10000 - STACK_SIZE
	oill	%r14,STACK_INIT
	clg	%r14,__LC_KERNEL_STACK
	je	\oklabel
	clg	%r14,__LC_ASYNC_STACK
	je	\oklabel
	clg	%r14,__LC_NODAT_STACK
	je	\oklabel
	clg	%r14,__LC_RESTART_STACK
	je	\oklabel
	lghi	%r14,\savearea
	j	stack_overflow
#else
	j	\oklabel
#endif
	.endm

	.macro	SWITCH_ASYNC savearea,timer
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	2f
	lgr	%r14,%r9
	cghi	%r14,__LC_RETURN_LPSWE
	je	0f
	slg	%r14,BASED(.Lcritical_start)
	clg	%r14,BASED(.Lcritical_length)
	jhe	1f
0:
	lghi	%r11,\savearea		# inside critical section, do cleanup
	brasl	%r14,cleanup_critical
	tmhh	%r8,0x0001		# retest problem state after cleanup
	jnz	2f
1:	lg	%r14,__LC_ASYNC_STACK	# are we already on the target stack?
	slgr	%r14,%r15
	srag	%r14,%r14,STACK_SHIFT
	jnz	3f
	CHECK_STACK \savearea
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	4f
2:	UPDATE_VTIME %r14,%r15,\timer
	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
3:	lg	%r15,__LC_ASYNC_STACK	# load async stack
4:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	.endm

	.macro	UPDATE_VTIME w1,w2,enter_timer
	lg	\w1,__LC_EXIT_TIMER
	lg	\w2,__LC_LAST_UPDATE_TIMER
	slg	\w1,\enter_timer
	slg	\w2,__LC_EXIT_TIMER
	alg	\w1,__LC_USER_TIMER
	alg	\w2,__LC_SYSTEM_TIMER
	stg	\w1,__LC_USER_TIMER
	stg	\w2,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),\enter_timer
	.endm
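#
# SWITCH_ASYNC in short: an interrupt from user space takes the 2f path
# (vtime accounting, branch prediction re-enable) and always switches to
# the async stack. An interrupt from kernel mode first checks whether the
# critical section between .L__critical_start and .L__critical_end was
# hit; if so, cleanup_critical repairs the interrupted sequence before
# the problem-state test is redone. The srag by STACK_SHIFT detects
# whether %r15 already points into the async stack, in which case only a
# new stack frame is allocated.
#
# UPDATE_VTIME exploits the fact that the CPU timer counts down, so
# "older value - newer value" is the elapsed time:
#	__LC_USER_TIMER   += __LC_EXIT_TIMER - \enter_timer
#	__LC_SYSTEM_TIMER += __LC_LAST_UPDATE_TIMER - __LC_EXIT_TIMER
#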
	.macro	REENABLE_IRQS
	stg	%r8,__LC_RETURN_PSW
	ni	__LC_RETURN_PSW,0xbf
	ssm	__LC_RETURN_PSW
	.endm

	.macro	STCK savearea
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
	.insn	s,0xb27c0000,\savearea		# store clock fast
#else
	.insn	s,0xb2050000,\savearea		# store clock
#endif
	.endm

	/*
	 * The TSTMSK macro generates a test-under-mask instruction by
	 * calculating the memory offset for the specified mask value.
	 * Mask value can be any constant. The macro shifts the mask
	 * value to calculate the memory offset for the test-under-mask
	 * instruction.
	 */
	.macro	TSTMSK addr, mask, size=8, bytepos=0
	.if (\bytepos < \size) && (\mask >> 8)
	.if (\mask & 0xff)
	.error "Mask exceeds byte boundary"
	.endif
	TSTMSK	\addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
	.exitm
	.endif
	.ifeq \mask
	.error "Mask must not be zero"
	.endif
	off = \size - \bytepos - 1
	tm	off+\addr, \mask
	.endm
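#
# Example: _CIF_FPU lives in the last byte of the 8-byte __LC_CPU_FLAGS
# field, so
#
#	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
#
# does not recurse (mask >> 8 is zero) and emits
#
#	tm	7+__LC_CPU_FLAGS,_CIF_FPU	# off = 8 - 0 - 1
#
# A mask with bits in a higher byte recurses with "mask >> 8" and
# "bytepos + 1" until the mask fits the addressed byte.
#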
	.macro	BPOFF
	ALTERNATIVE "", ".long 0xb2e8c000", 82
	.endm

	.macro	BPON
	ALTERNATIVE "", ".long 0xb2e8d000", 82
	.endm

	.macro	BPENTER tif_ptr,tif_mask
	ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .long 0xb2e8d000", \
		    "", 82
	.endm

	.macro	BPEXIT tif_ptr,tif_mask
	TSTMSK	\tif_ptr,\tif_mask
	ALTERNATIVE "jz .+8; .long 0xb2e8c000", \
		    "jnz .+8; .long 0xb2e8d000", 82
	.endm

	GEN_BR_THUNK %r9
	GEN_BR_THUNK %r14
	GEN_BR_THUNK %r14,%r11
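#
# Note: the .long constants 0xb2e8c000 / 0xb2e8d000 hand-encode a
# branch-prediction control instruction (opcode 0xb2e8, presumably PPA) so
# that the file also assembles with older binutils that do not know the
# mnemonic. All four macros are patched in via ALTERNATIVE keyed to
# facility bit 82, so they cost nothing on machines without the facility.
# The GEN_BR_THUNK lines above generate the expoline thunks used by the
# BR_EX/BASR_EX indirect-branch macros throughout this file.
#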
	.section .kprobes.text, "ax"
.Ldummy:
	/*
	 * This nop exists only in order to avoid that __switch_to starts at
	 * the beginning of the kprobes text section. In that case we would
	 * have several symbols at the same address. E.g. objdump would take
	 * an arbitrary symbol name when disassembling this code.
	 * With the added nop in between the __switch_to symbol is unique
	 * again.
	 */
	nop	0

ENTRY(__bpon)
	.globl __bpon
	BPON
	BR_EX	%r14
ENDPROC(__bpon)

/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
ENTRY(__switch_to)
	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	lghi	%r4,__TASK_stack
	lghi	%r1,__TASK_thread
	llill	%r5,STACK_INIT
	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
	lg	%r15,0(%r4,%r3)			# start of kernel stack of next
	agr	%r15,%r5			# end of kernel stack of next
	stg	%r3,__LC_CURRENT		# store task struct of next
	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
	aghi	%r3,__TASK_pid
	mvc	__LC_CURRENT_PID(4,%r0),0(%r3)	# store pid of next
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
	BR_EX	%r14
ENDPROC(__switch_to)

.L__critical_start:

#if IS_ENABLED(CONFIG_KVM)
/*
 * sie64a calling convention:
 * %r2 pointer to sie control block
 * %r3 guest register save area
 */
ENTRY(sie64a)
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	lg	%r12,__LC_CURRENT
	stg	%r2,__SF_SIE_CONTROL(%r15)	# save control block pointer
	stg	%r3,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
	xc	__SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU		# load guest fp/vx registers ?
	jno	.Lsie_load_guest_gprs
	brasl	%r14,load_fpu_regs		# load guest fp/vx regs
.Lsie_load_guest_gprs:
	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
	lg	%r14,__LC_GMAP			# get gmap pointer
	ltgr	%r14,%r14
	jz	.Lsie_gmap
	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
.Lsie_gmap:
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),3		# last exit...
	jnz	.Lsie_skip
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsie_skip			# exit if fp/vx regs changed
	BPEXIT	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_entry:
	sie	0(%r14)
.Lsie_exit:
	BPOFF
	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_skip:
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
.Lsie_done:
# Some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
# Other instructions between sie64a and .Lsie_done should not cause program
# interrupts. So let's use 3 nops as a landing pad for all possible rewinds.
# See also .Lcleanup_sie
.Lrewind_pad6:
	nopr	7
.Lrewind_pad4:
	nopr	7
.Lrewind_pad2:
	nopr	7
	.globl sie_exit
sie_exit:
	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	xgr	%r0,%r0				# clear guest registers to
	xgr	%r1,%r1				# prevent speculative use
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_SIE_REASON(%r15)	# return exit reason code
	BR_EX	%r14
.Lsie_fault:
	lghi	%r14,-EFAULT
	stg	%r14,__SF_SIE_REASON(%r15)	# set exit reason code
	j	sie_exit

	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
	EX_TABLE(sie_exit,.Lsie_fault)
ENDPROC(sie64a)
EXPORT_SYMBOL(sie64a)
EXPORT_SYMBOL(sie_exit)
#endif
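#
# sie64a in a nutshell: the host registers are parked in the stack frame
# and guest gprs 0-13 are loaded from the save area passed in %r3 before
# the sie instruction enters the guest at .Lsie_entry. On any exit - an
# interception, a host interrupt, or a program check rewound onto the nop
# landing pad - the guest gprs are written back, the host registers are
# restored, and the reason code from __SF_SIE_REASON is returned in %r2
# (0, or -EFAULT if a fault was routed through .Lsie_fault).
#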
/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are executed with interrupts enabled.
 */

ENTRY(system_call)
	stpt	__LC_SYNC_ENTER_TIMER
.Lsysc_stmg:
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	BPOFF
	lg	%r12,__LC_CURRENT
	lghi	%r13,__TASK_thread
	lghi	%r14,_PIF_SYSCALL
.Lsysc_per:
	lg	%r15,__LC_KERNEL_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
.Lsysc_vtime:
	UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	mvc	__PT_PSW(16,%r11),__LC_SVC_OLD_PSW
	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
	stg	%r14,__PT_FLAGS(%r11)
.Lsysc_do_svc:
	# clear user controlled register to prevent speculative use
	xgr	%r0,%r0
	# load address of system call table
	lg	%r10,__THREAD_sysc_table(%r13,%r12)
	llgh	%r8,__PT_INT_CODE+2(%r11)
	slag	%r8,%r8,3			# shift and test for svc 0
	jnz	.Lsysc_nr_ok
	# svc 0: system call number in %r1
	llgfr	%r1,%r1				# clear high word in r1
	sth	%r1,__PT_INT_CODE+2(%r11)
	cghi	%r1,NR_syscalls
	jnl	.Lsysc_nr_ok
	slag	%r8,%r1,3
.Lsysc_nr_ok:
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stg	%r2,__PT_ORIG_GPR2(%r11)
	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
	lg	%r9,0(%r8,%r10)			# get system call address
	TSTMSK	__TI_flags(%r12),_TIF_TRACE
	jnz	.Lsysc_tracesys
	BASR_EX	%r14,%r9			# call sys_xxxx
	stg	%r2,__PT_R2(%r11)		# store return value

.Lsysc_return:
#ifdef CONFIG_DEBUG_RSEQ
	lgr	%r2,%r11
	brasl	%r14,rseq_syscall
#endif
	LOCKDEP_SYS_EXIT
.Lsysc_tif:
	TSTMSK	__PT_FLAGS(%r11),_PIF_WORK
	jnz	.Lsysc_work
	TSTMSK	__TI_flags(%r12),_TIF_WORK
	jnz	.Lsysc_work			# check for work
	TSTMSK	__LC_CPU_FLAGS,_CIF_WORK
	jnz	.Lsysc_work
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
.Lsysc_restore:
	lg	%r14,__LC_VDSO_PER_CPU
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
.Lsysc_exit_timer:
	stpt	__LC_EXIT_TIMER
	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
	lmg	%r11,%r15,__PT_R11(%r11)
	b	__LC_RETURN_LPSWE(%r0)
.Lsysc_done:
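#
# The exit sequence between .Lsysc_restore and .Lsysc_done is part of the
# critical section: if an interrupt or machine check hits it,
# .Lcleanup_sysc_restore re-runs the exit timer update and rebuilds
# __LC_RETURN_PSW and the save area from pt_regs, so the interrupted
# return can simply be restarted. The final branch goes through the lpswe
# instruction at __LC_RETURN_LPSWE in lowcore.
#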
#
# One of the work bits is on. Find out which one.
#
.Lsysc_work:
	TSTMSK	__LC_CPU_FLAGS,_CIF_MCCK_PENDING
	jo	.Lsysc_mcck_pending
	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
	jo	.Lsysc_reschedule
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
	jo	.Lsysc_syscall_restart
#ifdef CONFIG_UPROBES
	TSTMSK	__TI_flags(%r12),_TIF_UPROBE
	jo	.Lsysc_uprobe_notify
#endif
	TSTMSK	__TI_flags(%r12),_TIF_GUARDED_STORAGE
	jo	.Lsysc_guarded_storage
	TSTMSK	__PT_FLAGS(%r11),_PIF_PER_TRAP
	jo	.Lsysc_singlestep
#ifdef CONFIG_LIVEPATCH
	TSTMSK	__TI_flags(%r12),_TIF_PATCH_PENDING
	jo	.Lsysc_patch_pending	# handle live patching just before
					# signals and possible syscall restart
#endif
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
	jo	.Lsysc_syscall_restart
	TSTMSK	__TI_flags(%r12),_TIF_SIGPENDING
	jo	.Lsysc_sigpending
	TSTMSK	__TI_flags(%r12),_TIF_NOTIFY_RESUME
	jo	.Lsysc_notify_resume
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsysc_vxrs
	TSTMSK	__LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
	jnz	.Lsysc_asce
	j	.Lsysc_return		# beware of critical section cleanup

#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lsysc_reschedule:
	larl	%r14,.Lsysc_return
	jg	schedule

#
# _CIF_MCCK_PENDING is set, call handler
#
.Lsysc_mcck_pending:
	larl	%r14,.Lsysc_return
	jg	s390_handle_mcck	# TIF bit will be cleared by handler

#
# _CIF_ASCE_PRIMARY and/or _CIF_ASCE_SECONDARY set, load user space asce
#
.Lsysc_asce:
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE_SECONDARY
	lctlg	%c7,%c7,__LC_VDSO_ASCE		# load secondary asce
	TSTMSK	__LC_CPU_FLAGS,_CIF_ASCE_PRIMARY
	jz	.Lsysc_return
#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
	tm	__LC_STFLE_FAC_LIST+3,0x10	# has MVCOS ?
	jnz	.Lsysc_set_fs_fixup
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	j	.Lsysc_return
.Lsysc_set_fs_fixup:
#endif
	larl	%r14,.Lsysc_return
	jg	set_fs_fixup

#
# CIF_FPU is set, restore floating-point controls and floating-point registers.
#
.Lsysc_vxrs:
	larl	%r14,.Lsysc_return
	jg	load_fpu_regs

#
# _TIF_SIGPENDING is set, call do_signal
#
.Lsysc_sigpending:
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_signal
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL
	jno	.Lsysc_return
.Lsysc_do_syscall:
	lghi	%r13,__TASK_thread
	lmg	%r2,%r7,__PT_R2(%r11)	# load svc arguments
	lghi	%r1,0			# svc 0 returns -ENOSYS
	j	.Lsysc_do_svc

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lsysc_notify_resume:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_notify_resume

#
# _TIF_UPROBE is set, call uprobe_notify_resume
#
#ifdef CONFIG_UPROBES
.Lsysc_uprobe_notify:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	uprobe_notify_resume
#endif

#
# _TIF_GUARDED_STORAGE is set, call guarded_storage_load
#
.Lsysc_guarded_storage:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	gs_load_bc_cb
#
# _TIF_PATCH_PENDING is set, call klp_update_patch_state
#
#ifdef CONFIG_LIVEPATCH
.Lsysc_patch_pending:
	lg	%r2,__LC_CURRENT	# pass pointer to task struct
	larl	%r14,.Lsysc_return
	jg	klp_update_patch_state
#endif

#
# _PIF_PER_TRAP is set, call do_per_trap
#
.Lsysc_singlestep:
	ni	__PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_per_trap

#
# _PIF_SYSCALL_RESTART is set, repeat the current system call
#
.Lsysc_syscall_restart:
	ni	__PT_FLAGS+7(%r11),255-_PIF_SYSCALL_RESTART
	lmg	%r1,%r7,__PT_R1(%r11)	# load svc arguments
	lg	%r2,__PT_ORIG_GPR2(%r11)
	j	.Lsysc_do_svc

#
# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
# and after the system call
#
.Lsysc_tracesys:
	lgr	%r2,%r11		# pass pointer to pt_regs
	la	%r3,0
	llgh	%r0,__PT_INT_CODE+2(%r11)
	stg	%r0,__PT_R2(%r11)
	brasl	%r14,do_syscall_trace_enter
	lghi	%r0,NR_syscalls
	clgr	%r0,%r2
	jnh	.Lsysc_tracenogo
	sllg	%r8,%r2,3
	lg	%r9,0(%r8,%r10)
.Lsysc_tracego:
	lmg	%r3,%r7,__PT_R3(%r11)
	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
	lg	%r2,__PT_ORIG_GPR2(%r11)
	BASR_EX	%r14,%r9		# call sys_xxx
	stg	%r2,__PT_R2(%r11)	# store return value
.Lsysc_tracenogo:
	TSTMSK	__TI_flags(%r12),_TIF_TRACE
	jz	.Lsysc_return
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_syscall_trace_exit
ENDPROC(system_call)

#
# a new process exits the kernel with ret_from_fork
#
ENTRY(ret_from_fork)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	lg	%r12,__LC_CURRENT
	brasl	%r14,schedule_tail
	TRACE_IRQS_ON
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	tm	__PT_PSW+1(%r11),0x01	# forking a kernel thread ?
	jne	.Lsysc_tracenogo
	# it's a kernel thread
	lmg	%r9,%r10,__PT_R9(%r11)	# load gprs
	la	%r2,0(%r10)
	BASR_EX	%r14,%r9
	j	.Lsysc_tracenogo
ENDPROC(ret_from_fork)

ENTRY(kernel_thread_starter)
	la	%r2,0(%r10)
	BASR_EX	%r14,%r9
	j	.Lsysc_tracenogo
ENDPROC(kernel_thread_starter)
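#
# For a kernel thread, copy_thread (arch/s390/kernel/process.c) prepares
# the child's pt_regs so that gpr 9 holds the thread function and gpr 10
# its argument; both ret_from_fork and kernel_thread_starter therefore
# only have to move the argument to %r2 and branch via %r9.
#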
/*
 * Program check handler routine
 */

ENTRY(pgm_check_handler)
	stpt	__LC_SYNC_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	lg	%r10,__LC_LAST_BREAK
	srag	%r11,%r10,12
	jnz	0f
	/* if __LC_LAST_BREAK is < 4096, it contains one of
	 * the lpswe addresses in lowcore. Set it to 1 (initial state)
	 * to prevent leaking that address to userspace.
	 */
	lghi	%r10,1
0:	lg	%r12,__LC_CURRENT
	lghi	%r11,0
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_PGM_OLD_PSW
	tmhh	%r8,0x0001		# test problem state bit
	jnz	3f			# -> fault in user space
#if IS_ENABLED(CONFIG_KVM)
	# cleanup critical section for program checks in sie64a
	lgr	%r14,%r9
	slg	%r14,BASED(.Lsie_critical_start)
	clg	%r14,BASED(.Lsie_critical_length)
	jhe	1f
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	larl	%r9,sie_exit			# skip forward to sie_exit
	lghi	%r11,_PIF_GUEST_FAULT
#endif
1:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
	jnz	2f			# -> enabled, can't be a double fault
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jnz	.Lpgm_svcper		# -> single stepped svc
2:	CHECK_STACK __LC_SAVE_AREA_SYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	# CHECK_VMAP_STACK branches to stack_overflow or 5f
	CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,5f
3:	UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
	lg	%r15,__LC_KERNEL_STACK
	lgr	%r14,%r12
	aghi	%r14,__TASK_thread	# pointer to thread_struct
	lghi	%r13,__LC_PGM_TDB
	tm	__LC_PGM_ILC+2,0x02	# check for transaction abort
	jz	4f
	mvc	__THREAD_trap_tdb(256,%r14),0(%r13)
4:	stg	%r10,__THREAD_last_break(%r14)
5:	lgr	%r13,%r11
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_INT_CODE(4,%r11),__LC_PGM_ILC
	mvc	__PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
	stg	%r13,__PT_FLAGS(%r11)
	stg	%r10,__PT_ARGS(%r11)
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jz	6f
	tmhh	%r8,0x0001		# kernel per event ?
	jz	.Lpgm_kprobe
	oi	__PT_FLAGS+7(%r11),_PIF_PER_TRAP
	mvc	__THREAD_per_address(8,%r14),__LC_PER_ADDRESS
	mvc	__THREAD_per_cause(2,%r14),__LC_PER_CODE
	mvc	__THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
6:	REENABLE_IRQS
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	larl	%r1,pgm_check_table
	llgh	%r10,__PT_INT_CODE+2(%r11)
	nill	%r10,0x007f
	sll	%r10,3
	je	.Lpgm_return
	lg	%r9,0(%r10,%r1)		# load address of handler routine
	lgr	%r2,%r11		# pass pointer to pt_regs
	BASR_EX	%r14,%r9		# branch to interrupt-handler
.Lpgm_return:
	LOCKDEP_SYS_EXIT
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	.Lsysc_restore
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL
	jo	.Lsysc_do_syscall
	j	.Lsysc_tif

#
# PER event in supervisor state, must be kprobes
#
.Lpgm_kprobe:
	REENABLE_IRQS
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_per_trap
	j	.Lpgm_return

#
# single stepped system call
#
.Lpgm_svcper:
	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
	lghi	%r13,__TASK_thread
	larl	%r14,.Lsysc_per
	stg	%r14,__LC_RETURN_PSW+8
	lghi	%r14,_PIF_SYSCALL | _PIF_PER_TRAP
	lpswe	__LC_RETURN_PSW		# branch to .Lsysc_per and enable irqs
ENDPROC(pgm_check_handler)
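#
# Summary of the paths above: a fault in user space takes the 3f path
# (vtime update, switch to the kernel stack), a fault in kernel mode stays
# on the current stack after the overflow checks, and a fault inside the
# sie64a critical section is rewound to sie_exit with _PIF_GUEST_FAULT
# set. PER events in the kernel go to do_per_trap via .Lpgm_kprobe, user
# PER events set _PIF_PER_TRAP, and a single-stepped svc is replayed
# through .Lsysc_per.
#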
/*
 * IO interrupt handler routine
 */
ENTRY(io_int_handler)
	STCK	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r12,__LC_CURRENT
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_IO_OLD_PSW
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	TSTMSK	__LC_CPU_FLAGS,_CIF_IGNORE_IRQ
	jo	.Lio_restore
	TRACE_IRQS_OFF
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
.Lio_loop:
	lgr	%r2,%r11		# pass pointer to pt_regs
	lghi	%r3,IO_INTERRUPT
	tm	__PT_INT_CODE+8(%r11),0x80	# adapter interrupt ?
	jz	.Lio_call
	lghi	%r3,THIN_INTERRUPT
.Lio_call:
	brasl	%r14,do_IRQ
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_LPAR
	jz	.Lio_return
	tpi	0
	jz	.Lio_return
	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
	j	.Lio_loop
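#
# On LPAR the tpi above polls for further pending I/O interrupts: as long
# as one is pending its interruption code is copied to pt_regs and the
# handler loops back to .Lio_loop, draining queued interrupts without
# taking a fresh interrupt for each one. Under a hypervisor this rescan
# is skipped.
#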
.Lio_return:
	LOCKDEP_SYS_EXIT
	TRACE_IRQS_ON
.Lio_tif:
	TSTMSK	__TI_flags(%r12),_TIF_WORK
	jnz	.Lio_work		# there is work to do (signals etc.)
	TSTMSK	__LC_CPU_FLAGS,_CIF_WORK
	jnz	.Lio_work
.Lio_restore:
	lg	%r14,__LC_VDSO_PER_CPU
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	.Lio_exit_kernel
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
.Lio_exit_timer:
	stpt	__LC_EXIT_TIMER
	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
.Lio_exit_kernel:
	lmg	%r11,%r15,__PT_R11(%r11)
	b	__LC_RETURN_LPSWE(%r0)
.Lio_done:

#
# There is work to do, find out in which context we have been interrupted:
# 1) if we return to user space we can do all _TIF_WORK work
# 2) if we return to kernel code and kvm is enabled check if we need to
#    modify the psw to leave SIE
# 3) if we return to kernel code and preemptive scheduling is enabled check
#    the preemption counter and if it is zero call preempt_schedule_irq
# Before any work can be done, a switch to the kernel stack is required.
#
.Lio_work:
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jo	.Lio_work_user		# yes -> do resched & signal
#ifdef CONFIG_PREEMPT
	# check for preemptive scheduling
	icm	%r0,15,__LC_PREEMPT_COUNT
	jnz	.Lio_restore		# preemption is disabled
	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
	jno	.Lio_restore
	# switch to kernel stack
	lg	%r1,__PT_R15(%r11)
	aghi	%r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1
	# TRACE_IRQS_ON already done at .Lio_return, call
	# TRACE_IRQS_OFF to keep things symmetrical
	TRACE_IRQS_OFF
	brasl	%r14,preempt_schedule_irq
	j	.Lio_return
#else
	j	.Lio_restore
#endif

#
# Need to do work before returning to userspace, switch to kernel stack
#
.Lio_work_user:
	lg	%r1,__LC_KERNEL_STACK
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1

#
# One of the work bits is on. Find out which one.
#
.Lio_work_tif:
	TSTMSK	__LC_CPU_FLAGS,_CIF_MCCK_PENDING
	jo	.Lio_mcck_pending
	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
	jo	.Lio_reschedule
#ifdef CONFIG_LIVEPATCH
	TSTMSK	__TI_flags(%r12),_TIF_PATCH_PENDING
	jo	.Lio_patch_pending
#endif
	TSTMSK	__TI_flags(%r12),_TIF_SIGPENDING
	jo	.Lio_sigpending
	TSTMSK	__TI_flags(%r12),_TIF_NOTIFY_RESUME
	jo	.Lio_notify_resume
	TSTMSK	__TI_flags(%r12),_TIF_GUARDED_STORAGE
	jo	.Lio_guarded_storage
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lio_vxrs
	TSTMSK	__LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
	jnz	.Lio_asce
	j	.Lio_return		# beware of critical section cleanup

#
# _CIF_MCCK_PENDING is set, call handler
#
.Lio_mcck_pending:
	# TRACE_IRQS_ON already done at .Lio_return
	brasl	%r14,s390_handle_mcck	# TIF bit will be cleared by handler
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _CIF_ASCE_PRIMARY and/or _CIF_ASCE_SECONDARY set, load user space asce
#
.Lio_asce:
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE_SECONDARY
	lctlg	%c7,%c7,__LC_VDSO_ASCE		# load secondary asce
	TSTMSK	__LC_CPU_FLAGS,_CIF_ASCE_PRIMARY
	jz	.Lio_return
#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
	tm	__LC_STFLE_FAC_LIST+3,0x10	# has MVCOS ?
	jnz	.Lio_set_fs_fixup
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	j	.Lio_return
.Lio_set_fs_fixup:
#endif
	larl	%r14,.Lio_return
	jg	set_fs_fixup

#
# CIF_FPU is set, restore floating-point controls and floating-point registers.
#
.Lio_vxrs:
	larl	%r14,.Lio_return
	jg	load_fpu_regs

#
# _TIF_GUARDED_STORAGE is set, call guarded_storage_load
#
.Lio_guarded_storage:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,gs_load_bc_cb
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lio_reschedule:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	brasl	%r14,schedule		# call scheduler
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _TIF_PATCH_PENDING is set, call klp_update_patch_state
#
#ifdef CONFIG_LIVEPATCH
.Lio_patch_pending:
	lg	%r2,__LC_CURRENT	# pass pointer to task struct
	larl	%r14,.Lio_return
	jg	klp_update_patch_state
#endif

#
# _TIF_SIGPENDING is set, call do_signal
#
.Lio_sigpending:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_signal
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lio_notify_resume:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_notify_resume
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return
ENDPROC(io_int_handler)
/*
 * External interrupt handler routine
 */
ENTRY(ext_int_handler)
	STCK	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r12,__LC_CURRENT
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_EXT_OLD_PSW
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	lghi	%r1,__LC_EXT_PARAMS2
	mvc	__PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
	mvc	__PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
	mvc	__PT_INT_PARM_LONG(8,%r11),0(%r1)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	TSTMSK	__LC_CPU_FLAGS,_CIF_IGNORE_IRQ
	jo	.Lio_restore
	TRACE_IRQS_OFF
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	lghi	%r3,EXT_INTERRUPT
	brasl	%r14,do_IRQ
	j	.Lio_return
ENDPROC(ext_int_handler)

/*
 * Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
 */
ENTRY(psw_idle)
	stg	%r14,(__SF_GPRS+8*8)(%r15)
	stg	%r3,__SF_EMPTY(%r15)
	larl	%r1,.Lpsw_idle_lpsw+4
	stg	%r1,__SF_EMPTY+8(%r15)
	larl	%r1,smp_cpu_mtid
	llgf	%r1,0(%r1)
	ltgr	%r1,%r1
	jz	.Lpsw_idle_stcctm
	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
.Lpsw_idle_stcctm:
	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
	BPON
	STCK	__CLOCK_IDLE_ENTER(%r2)
	stpt	__TIMER_IDLE_ENTER(%r2)
.Lpsw_idle_lpsw:
	lpswe	__SF_EMPTY(%r15)
	BR_EX	%r14
.Lpsw_idle_end:
ENDPROC(psw_idle)
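#
# psw_idle builds the wait PSW in the stack frame: the mask passed in %r3
# goes to __SF_EMPTY, the continuation address .Lpsw_idle_lpsw+4 to
# __SF_EMPTY+8, so the lpswe drops into enabled wait and "returns" right
# behind itself. The interrupt that ends the wait is funneled through
# .Lcleanup_idle, which accounts the idle time - the "second half" noted
# above. The hand-coded .insn rsy,0xeb0000000017 is stcctm, which samples
# the multithreading cycle counter sets when SMT is active.
#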
/*
 * Store floating-point controls and floating-point or vector registers,
 * depending on whether the vector facility is available. A critical section
 * cleanup assures that the registers are stored even if interrupted for
 * some other work. The CIF_FPU flag is set to trigger a lazy restore
 * of the register contents at return from io or a system call.
 */
ENTRY(save_fpu_regs)
	lg	%r2,__LC_CURRENT
	aghi	%r2,__TASK_thread
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsave_fpu_regs_exit
	stfpc	__THREAD_FPU_fpc(%r2)
	lg	%r3,__THREAD_FPU_regs(%r2)
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	jz	.Lsave_fpu_regs_fp	# no -> store FP regs
	VSTM	%v0,%v15,0,%r3		# vstm 0,15,0(3)
	VSTM	%v16,%v31,256,%r3	# vstm 16,31,256(3)
	j	.Lsave_fpu_regs_done	# -> set CIF_FPU flag
.Lsave_fpu_regs_fp:
	std	0,0(%r3)
	std	1,8(%r3)
	std	2,16(%r3)
	std	3,24(%r3)
	std	4,32(%r3)
	std	5,40(%r3)
	std	6,48(%r3)
	std	7,56(%r3)
	std	8,64(%r3)
	std	9,72(%r3)
	std	10,80(%r3)
	std	11,88(%r3)
	std	12,96(%r3)
	std	13,104(%r3)
	std	14,112(%r3)
	std	15,120(%r3)
.Lsave_fpu_regs_done:
	oi	__LC_CPU_FLAGS+7,_CIF_FPU
.Lsave_fpu_regs_exit:
	BR_EX	%r14
.Lsave_fpu_regs_end:
ENDPROC(save_fpu_regs)
EXPORT_SYMBOL(save_fpu_regs)

/*
 * Load floating-point controls and floating-point or vector registers.
 * A critical section cleanup assures that the register contents are
 * loaded even if interrupted for some other work.
 *
 * There is a special calling convention to fit into sysc and io return work:
 *	%r15:	<kernel stack>
 * The function requires:
 *	%r4
 */
load_fpu_regs:
	lg	%r4,__LC_CURRENT
	aghi	%r4,__TASK_thread
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jno	.Lload_fpu_regs_exit
	lfpc	__THREAD_FPU_fpc(%r4)
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
	jz	.Lload_fpu_regs_fp		# -> no VX, load FP regs
	VLM	%v0,%v15,0,%r4
	VLM	%v16,%v31,256,%r4
	j	.Lload_fpu_regs_done
.Lload_fpu_regs_fp:
	ld	0,0(%r4)
	ld	1,8(%r4)
	ld	2,16(%r4)
	ld	3,24(%r4)
	ld	4,32(%r4)
	ld	5,40(%r4)
	ld	6,48(%r4)
	ld	7,56(%r4)
	ld	8,64(%r4)
	ld	9,72(%r4)
	ld	10,80(%r4)
	ld	11,88(%r4)
	ld	12,96(%r4)
	ld	13,104(%r4)
	ld	14,112(%r4)
	ld	15,120(%r4)
.Lload_fpu_regs_done:
	ni	__LC_CPU_FLAGS+7,255-_CIF_FPU
.Lload_fpu_regs_exit:
	BR_EX	%r14
.Lload_fpu_regs_end:
ENDPROC(load_fpu_regs)
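#
# Lazy FPU protocol in short: save_fpu_regs is called before the kernel
# itself touches fp/vx registers (or before entering SIE); it saves the
# user registers to thread->fpu and sets CIF_FPU. On the way back to user
# space the work-bit loops (.Lsysc_vxrs/.Lio_vxrs) call load_fpu_regs,
# which reloads the registers and clears CIF_FPU. A set CIF_FPU therefore
# always means "the user register contents are in memory, not in the
# hardware registers".
#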
.L__critical_end:

/*
 * Machine check handler routines
 */
ENTRY(mcck_int_handler)
	STCK	__LC_MCCK_CLOCK
	BPOFF
	la	%r1,4095		# validate r1
	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# validate cpu timer
	sckc	__LC_CLOCK_COMPARATOR			# validate comparator
	lam	%a0,%a15,__LC_AREGS_SAVE_AREA-4095(%r1)	# validate acrs
	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
	lg	%r12,__LC_CURRENT
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_MCK_OLD_PSW
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CR_VALID
	jno	.Lmcck_panic		# control registers invalid -> panic
	la	%r14,4095
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs
	ptlb
	lg	%r11,__LC_MCESAD-4095(%r14) # extended machine check save area
	nill	%r11,0xfc00		# MCESA_ORIGIN_MASK
	TSTMSK	__LC_CREGS_SAVE_AREA+16-4095(%r14),CR2_GUARDED_STORAGE
	jno	0f
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_GS_VALID
	jno	0f
	.insn	rxy,0xe3000000004d,0,__MCESA_GS_SAVE_AREA(%r11) # LGSC
0:	l	%r14,__LC_FP_CREG_SAVE_AREA-4095(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_FC_VALID
	jo	0f
	sr	%r14,%r14
0:	sfpc	%r14
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	jo	0f
	lghi	%r14,__LC_FPREGS_SAVE_AREA
	ld	%f0,0(%r14)
	ld	%f1,8(%r14)
	ld	%f2,16(%r14)
	ld	%f3,24(%r14)
	ld	%f4,32(%r14)
	ld	%f5,40(%r14)
	ld	%f6,48(%r14)
	ld	%f7,56(%r14)
	ld	%f8,64(%r14)
	ld	%f9,72(%r14)
	ld	%f10,80(%r14)
	ld	%f11,88(%r14)
	ld	%f12,96(%r14)
	ld	%f13,104(%r14)
	ld	%f14,112(%r14)
	ld	%f15,120(%r14)
	j	1f
0:	VLM	%v0,%v15,0,%r11
	VLM	%v16,%v31,256,%r11
1:	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
	jo	3f
	la	%r14,__LC_SYNC_ENTER_TIMER
	clc	0(8,%r14),__LC_ASYNC_ENTER_TIMER
	jl	0f
	la	%r14,__LC_ASYNC_ENTER_TIMER
0:	clc	0(8,%r14),__LC_EXIT_TIMER
	jl	1f
	la	%r14,__LC_EXIT_TIMER
1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
	jl	2f
	la	%r14,__LC_LAST_UPDATE_TIMER
2:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
3:	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
	jno	.Lmcck_panic
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	4f
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
	jno	.Lmcck_panic
4:	ssm	__LC_PGM_NEW_PSW	# turn dat on, keep irqs off
	SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER
.Lmcck_skip:
	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),0(%r14)
	stmg	%r8,%r9,__PT_PSW(%r11)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,s390_do_machine_check
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	.Lmcck_return
	lg	%r1,__LC_KERNEL_STACK	# switch to kernel stack
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1
	TSTMSK	__LC_CPU_FLAGS,_CIF_MCCK_PENDING
	jno	.Lmcck_return
	TRACE_IRQS_OFF
	brasl	%r14,s390_handle_mcck
	TRACE_IRQS_ON
.Lmcck_return:
	lg	%r14,__LC_VDSO_PER_CPU
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
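#
# A machine check can arrive with most of the architected state in an
# unknown condition, which is why the handler above first revalidates it
# from the fixed lowcore save areas (the "validate" comments) and consults
# __LC_MCCK_CODE: damage that cannot be repaired panics via .Lmcck_panic
# on the nodat stack, everything else is rebuilt so that
# s390_do_machine_check runs on a proper stack, and the handler leaves
# through the dedicated __LC_RETURN_MCCK_LPSWE slot.
#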
	jno	0f
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
0:	lmg	%r11,%r15,__PT_R11(%r11)
	b	__LC_RETURN_MCCK_LPSWE

.Lmcck_panic:
	lg	%r15,__LC_NODAT_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	j	.Lmcck_skip
ENDPROC(mcck_int_handler)

#
# PSW restart interrupt handler
#
ENTRY(restart_int_handler)
	ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
	stg	%r15,__LC_SAVE_AREA_RESTART
	lg	%r15,__LC_RESTART_STACK
	xc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	mvc	STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
	mvc	STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
	lg	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
	lg	%r2,__LC_RESTART_DATA
	lg	%r3,__LC_RESTART_SOURCE
	ltgr	%r3,%r3				# test source cpu address
	jm	1f				# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
	brc	10,0b				# wait for status stored
1:	basr	%r14,%r1			# call function
	stap	__SF_EMPTY(%r15)		# store cpu address
	llgh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
	brc	2,2b
3:	j	3b
ENDPROC(restart_int_handler)

	.section .kprobes.text, "ax"

#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Set up a pt_regs so that show_trace can provide a good call trace.
 */
ENTRY(stack_overflow)
	lg	%r15,__LC_NODAT_STACK	# change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)
	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	jg	kernel_stack_overflow
ENDPROC(stack_overflow)
#endif

ENTRY(cleanup_critical)
	cghi	%r9,__LC_RETURN_LPSWE
	je	.Lcleanup_lpswe
#if IS_ENABLED(CONFIG_KVM)
	clg	%r9,BASED(.Lcleanup_table_sie)	# .Lsie_gmap
	jl	0f
	clg	%r9,BASED(.Lcleanup_table_sie+8)# .Lsie_done
	jl	.Lcleanup_sie
#endif
	clg	%r9,BASED(.Lcleanup_table)	# system_call
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+8)	# .Lsysc_do_svc
	jl	.Lcleanup_system_call
	clg	%r9,BASED(.Lcleanup_table+16)	# .Lsysc_tif
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+24)	# .Lsysc_restore
	jl	.Lcleanup_sysc_tif
	clg	%r9,BASED(.Lcleanup_table+32)	# .Lsysc_done
	jl	.Lcleanup_sysc_restore
	clg	%r9,BASED(.Lcleanup_table+40)	# .Lio_tif
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+48)	# .Lio_restore
	jl	.Lcleanup_io_tif
	clg	%r9,BASED(.Lcleanup_table+56)	# .Lio_done
	jl	.Lcleanup_io_restore
	clg	%r9,BASED(.Lcleanup_table+64)	# psw_idle
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+72)	# .Lpsw_idle_end
	jl	.Lcleanup_idle
	clg	%r9,BASED(.Lcleanup_table+80)	# save_fpu_regs
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+88)	# .Lsave_fpu_regs_end
	jl	.Lcleanup_save_fpu_regs
	clg	%r9,BASED(.Lcleanup_table+96)	# load_fpu_regs
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+104)	# .Lload_fpu_regs_end
	jl	.Lcleanup_load_fpu_regs
0:	BR_EX	%r14,%r11
ENDPROC(cleanup_critical)
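#
# cleanup_critical is entered with %r9 holding the interrupted address.
# The compare chain above walks the sorted label pairs of .Lcleanup_table
# as half-open ranges; for example .Lsysc_restore <= %r9 < .Lsysc_done
# selects .Lcleanup_sysc_restore. Each cleanup routine either finishes or
# rolls back the interrupted sequence and returns a safe restart address
# in %r9.
#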
	.align	8
.Lcleanup_table:
	.quad	system_call
	.quad	.Lsysc_do_svc
	.quad	.Lsysc_tif
	.quad	.Lsysc_restore
	.quad	.Lsysc_done
	.quad	.Lio_tif
	.quad	.Lio_restore
	.quad	.Lio_done
	.quad	psw_idle
	.quad	.Lpsw_idle_end
	.quad	save_fpu_regs
	.quad	.Lsave_fpu_regs_end
	.quad	load_fpu_regs
	.quad	.Lload_fpu_regs_end

#if IS_ENABLED(CONFIG_KVM)
.Lcleanup_table_sie:
	.quad	.Lsie_gmap
	.quad	.Lsie_done

.Lcleanup_sie:
	cghi	%r11,__LC_SAVE_AREA_ASYNC	# Is this in normal interrupt?
	je	1f
	slg	%r9,BASED(.Lsie_crit_mcck_start)
	clg	%r9,BASED(.Lsie_crit_mcck_length)
	jh	1f
	oi	__LC_CPU_FLAGS+7,_CIF_MCCK_GUEST
1:	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
	lg	%r9,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	larl	%r9,sie_exit			# skip forward to sie_exit
	BR_EX	%r14,%r11
#endif

.Lcleanup_system_call:
	# check if stpt has been executed
	clg	%r9,BASED(.Lcleanup_system_call_insn)
	jh	0f
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
	cghi	%r11,__LC_SAVE_AREA_ASYNC
	je	0f
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
0:	# check if stmg has been executed
	clg	%r9,BASED(.Lcleanup_system_call_insn+8)
	jh	0f
	mvc	__LC_SAVE_AREA_SYNC(64),0(%r11)
0:	# check if base register setup + TIF bit load has been done
	clg	%r9,BASED(.Lcleanup_system_call_insn+16)
	jhe	0f
	# set up saved register r12 task struct pointer
	stg	%r12,32(%r11)
	# set up saved register r13 __TASK_thread offset
	mvc	40(8,%r11),BASED(.Lcleanup_system_call_const)
0:	# check if the user time update has been done
	clg	%r9,BASED(.Lcleanup_system_call_insn+24)
	jh	0f
	lg	%r15,__LC_EXIT_TIMER
	slg	%r15,__LC_SYNC_ENTER_TIMER
	alg	%r15,__LC_USER_TIMER
	stg	%r15,__LC_USER_TIMER
0:	# check if the system time update has been done
	clg	%r9,BASED(.Lcleanup_system_call_insn+32)
	jh	0f
	lg	%r15,__LC_LAST_UPDATE_TIMER
	slg	%r15,__LC_EXIT_TIMER
	alg	%r15,__LC_SYSTEM_TIMER
	stg	%r15,__LC_SYSTEM_TIMER
0:	# update accounting time stamp
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
	# set up saved register r11
	lg	%r15,__LC_KERNEL_STACK
	la	%r9,STACK_FRAME_OVERHEAD(%r15)
	stg	%r9,24(%r11)		# r11 pt_regs pointer
	# fill pt_regs
	mvc	__PT_R8(64,%r9),__LC_SAVE_AREA_SYNC
	stmg	%r0,%r7,__PT_R0(%r9)
	mvc	__PT_PSW(16,%r9),__LC_SVC_OLD_PSW
	mvc	__PT_INT_CODE(4,%r9),__LC_SVC_ILC
	xc	__PT_FLAGS(8,%r9),__PT_FLAGS(%r9)
	mvi	__PT_FLAGS+7(%r9),_PIF_SYSCALL
	# setup saved register r15
	stg	%r15,56(%r11)		# r15 stack pointer
	# set new psw address and exit
	larl	%r9,.Lsysc_do_svc
	BR_EX	%r14,%r11
.Lcleanup_system_call_insn:
	.quad	system_call
	.quad	.Lsysc_stmg
	.quad	.Lsysc_per
	.quad	.Lsysc_vtime+36
	.quad	.Lsysc_vtime+42
.Lcleanup_system_call_const:
	.quad	__TASK_thread
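#
# .Lcleanup_system_call is a forward repair: comparing %r9 against the
# addresses recorded in .Lcleanup_system_call_insn tells how far the
# interrupted system_call entry got, the missing steps (timer updates,
# register save, pt_regs setup) are executed here, and %r9 is set to
# .Lsysc_do_svc so that execution resumes as if the entry had completed
# normally.
#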
.Lcleanup_sysc_tif:
	larl	%r9,.Lsysc_tif
	BR_EX	%r14,%r11

.Lcleanup_sysc_restore:
	# check if stpt has been executed
	clg	%r9,BASED(.Lcleanup_sysc_restore_insn)
	jh	0f
	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
	cghi	%r11,__LC_SAVE_AREA_ASYNC
	je	0f
	mvc	__LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
0:	clg	%r9,BASED(.Lcleanup_sysc_restore_insn+8)
	je	1f
	lg	%r9,24(%r11)		# get saved pointer to pt_regs
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
	mvc	0(64,%r11),__PT_R8(%r9)
	lmg	%r0,%r7,__PT_R0(%r9)
.Lcleanup_lpswe:
1:	lmg	%r8,%r9,__LC_RETURN_PSW
	BR_EX	%r14,%r11
.Lcleanup_sysc_restore_insn:
	.quad	.Lsysc_exit_timer
	.quad	.Lsysc_done - 4

.Lcleanup_io_tif:
	larl	%r9,.Lio_tif
	BR_EX	%r14,%r11

.Lcleanup_io_restore:
	# check if stpt has been executed
	clg	%r9,BASED(.Lcleanup_io_restore_insn)
	jh	0f
	mvc	__LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
0:	clg	%r9,BASED(.Lcleanup_io_restore_insn+8)
	je	1f
	lg	%r9,24(%r11)		# get saved r11 pointer to pt_regs
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
	mvc	0(64,%r11),__PT_R8(%r9)
	lmg	%r0,%r7,__PT_R0(%r9)
1:	lmg	%r8,%r9,__LC_RETURN_PSW
	BR_EX	%r14,%r11
.Lcleanup_io_restore_insn:
	.quad	.Lio_exit_timer
	.quad	.Lio_done - 4

.Lcleanup_idle:
	ni	__LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT
	# copy interrupt clock & cpu timer
	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
	cghi	%r11,__LC_SAVE_AREA_ASYNC
	je	0f
	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
0:	# check if stck & stpt have been executed
	clg	%r9,BASED(.Lcleanup_idle_insn)
	jhe	1f
	mvc	__CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
	mvc	__TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
1:	# calculate idle cycles
	clg	%r9,BASED(.Lcleanup_idle_insn)
	jl	3f
	larl	%r1,smp_cpu_mtid
	llgf	%r1,0(%r1)
	ltgr	%r1,%r1
	jz	3f
	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15)
	larl	%r3,mt_cycles
	ag	%r3,__LC_PERCPU_OFFSET
	la	%r4,__SF_EMPTY+16(%r15)
2:	lg	%r0,0(%r3)
	slg	%r0,0(%r4)
	alg	%r0,64(%r4)
	stg	%r0,0(%r3)
	la	%r3,8(%r3)
	la	%r4,8(%r4)
	brct	%r1,2b
3:	# account system time going idle
	lg	%r9,__LC_STEAL_TIMER
	alg	%r9,__CLOCK_IDLE_ENTER(%r2)
	slg	%r9,__LC_LAST_UPDATE_CLOCK
	stg	%r9,__LC_STEAL_TIMER
	mvc	__LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
	lg	%r9,__LC_SYSTEM_TIMER
	alg	%r9,__LC_LAST_UPDATE_TIMER
	slg	%r9,__TIMER_IDLE_ENTER(%r2)
	stg	%r9,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
	# prepare return psw
	nihh	%r8,0xfcfd		# clear irq & wait state bits
	lg	%r9,48(%r11)		# return from psw_idle
	BR_EX	%r14,%r11
.Lcleanup_idle_insn:
	.quad	.Lpsw_idle_lpsw

.Lcleanup_save_fpu_regs:
	larl	%r9,save_fpu_regs
	BR_EX	%r14,%r11

.Lcleanup_load_fpu_regs:
	larl	%r9,load_fpu_regs
	BR_EX	%r14,%r11

/*
 * Integer constants
 */
	.align	8
.Lcritical_start:
	.quad	.L__critical_start
.Lcritical_length:
	.quad	.L__critical_end - .L__critical_start
#if IS_ENABLED(CONFIG_KVM)
.Lsie_critical_start:
	.quad	.Lsie_gmap
.Lsie_critical_length:
	.quad	.Lsie_done - .Lsie_gmap
.Lsie_crit_mcck_start:
	.quad	.Lsie_entry
.Lsie_crit_mcck_length:
	.quad	.Lsie_skip - .Lsie_entry
#endif

	.section .rodata, "a"
	.balign	8
#define SYSCALL(esame,emu)	.quad __s390x_ ## esame
	.globl	sys_call_table
sys_call_table:
#include "asm/syscall_table.h"
#undef SYSCALL
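#
# asm/syscall_table.h is generated from the syscall.tbl description and
# consists purely of SYSCALL(...) lines, so redefining the macro selects
# which column becomes the table entry. A (hypothetical) line like
#
#	SYSCALL(sys_read,compat_sys_read)
#
# expands to ".quad __s390x_sys_read" here and, with the compat definition
# below, to ".quad __s390_compat_sys_read" for the 31-bit emulation table.
#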
#ifdef CONFIG_COMPAT

#define SYSCALL(esame,emu)	.quad __s390_ ## emu
	.globl	sys_call_table_emu
sys_call_table_emu:
#include "asm/syscall_table.h"
#undef SYSCALL
#endif