1/* SPDX-License-Identifier: GPL-2.0 */ 2/* 3 * S390 low-level entry points. 4 * 5 * Copyright IBM Corp. 1999, 2012 6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 7 * Hartmut Penner (hp@de.ibm.com), 8 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), 9 * Heiko Carstens <heiko.carstens@de.ibm.com> 10 */ 11 12#include <linux/init.h> 13#include <linux/linkage.h> 14#include <asm/alternative-asm.h> 15#include <asm/processor.h> 16#include <asm/cache.h> 17#include <asm/dwarf.h> 18#include <asm/errno.h> 19#include <asm/ptrace.h> 20#include <asm/thread_info.h> 21#include <asm/asm-offsets.h> 22#include <asm/unistd.h> 23#include <asm/page.h> 24#include <asm/sigp.h> 25#include <asm/irq.h> 26#include <asm/vx-insn.h> 27#include <asm/setup.h> 28#include <asm/nmi.h> 29#include <asm/export.h> 30#include <asm/nospec-insn.h> 31 32__PT_R0 = __PT_GPRS 33__PT_R1 = __PT_GPRS + 8 34__PT_R2 = __PT_GPRS + 16 35__PT_R3 = __PT_GPRS + 24 36__PT_R4 = __PT_GPRS + 32 37__PT_R5 = __PT_GPRS + 40 38__PT_R6 = __PT_GPRS + 48 39__PT_R7 = __PT_GPRS + 56 40__PT_R8 = __PT_GPRS + 64 41__PT_R9 = __PT_GPRS + 72 42__PT_R10 = __PT_GPRS + 80 43__PT_R11 = __PT_GPRS + 88 44__PT_R12 = __PT_GPRS + 96 45__PT_R13 = __PT_GPRS + 104 46__PT_R14 = __PT_GPRS + 112 47__PT_R15 = __PT_GPRS + 120 48 49STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER 50STACK_SIZE = 1 << STACK_SHIFT 51STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE 52 53_LPP_OFFSET = __LC_LPP 54 55 .macro CHECK_STACK savearea 56#ifdef CONFIG_CHECK_STACK 57 tml %r15,STACK_SIZE - CONFIG_STACK_GUARD 58 lghi %r14,\savearea 59 jz stack_overflow 60#endif 61 .endm 62 63 .macro CHECK_VMAP_STACK savearea,oklabel 64#ifdef CONFIG_VMAP_STACK 65 lgr %r14,%r15 66 nill %r14,0x10000 - STACK_SIZE 67 oill %r14,STACK_INIT 68 clg %r14,__LC_KERNEL_STACK 69 je \oklabel 70 clg %r14,__LC_ASYNC_STACK 71 je \oklabel 72 clg %r14,__LC_MCCK_STACK 73 je \oklabel 74 clg %r14,__LC_NODAT_STACK 75 je \oklabel 76 clg %r14,__LC_RESTART_STACK 77 je \oklabel 78 lghi %r14,\savearea 79 j stack_overflow 80#else 81 j \oklabel 82#endif 83 .endm 84 85 .macro STCK savearea 86 ALTERNATIVE ".insn s,0xb2050000,\savearea", \ 87 ".insn s,0xb27c0000,\savearea", 25 88 .endm 89 90 /* 91 * The TSTMSK macro generates a test-under-mask instruction by 92 * calculating the memory offset for the specified mask value. 93 * Mask value can be any constant. The macro shifts the mask 94 * value to calculate the memory offset for the test-under-mask 95 * instruction. 96 */ 97 .macro TSTMSK addr, mask, size=8, bytepos=0 98 .if (\bytepos < \size) && (\mask >> 8) 99 .if (\mask & 0xff) 100 .error "Mask exceeds byte boundary" 101 .endif 102 TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)" 103 .exitm 104 .endif 105 .ifeq \mask 106 .error "Mask must not be zero" 107 .endif 108 off = \size - \bytepos - 1 109 tm off+\addr, \mask 110 .endm 111 112 .macro BPOFF 113 ALTERNATIVE "", ".long 0xb2e8c000", 82 114 .endm 115 116 .macro BPON 117 ALTERNATIVE "", ".long 0xb2e8d000", 82 118 .endm 119 120 .macro BPENTER tif_ptr,tif_mask 121 ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .long 0xb2e8d000", \ 122 "", 82 123 .endm 124 125 .macro BPEXIT tif_ptr,tif_mask 126 TSTMSK \tif_ptr,\tif_mask 127 ALTERNATIVE "jz .+8; .long 0xb2e8c000", \ 128 "jnz .+8; .long 0xb2e8d000", 82 129 .endm 130 131 /* 132 * The CHKSTG macro jumps to the provided label in case the 133 * machine check interruption code reports one of unrecoverable 134 * storage errors: 135 * - Storage error uncorrected 136 * - Storage key error uncorrected 137 * - Storage degradation with Failing-storage-address validity 138 */ 139 .macro CHKSTG errlabel 140 TSTMSK __LC_MCCK_CODE,(MCCK_CODE_STG_ERROR|MCCK_CODE_STG_KEY_ERROR) 141 jnz \errlabel 142 TSTMSK __LC_MCCK_CODE,MCCK_CODE_STG_DEGRAD 143 jz .Loklabel\@ 144 TSTMSK __LC_MCCK_CODE,MCCK_CODE_STG_FAIL_ADDR 145 jnz \errlabel 146.Loklabel\@: 147 .endm 148 149#if IS_ENABLED(CONFIG_KVM) 150 /* 151 * The OUTSIDE macro jumps to the provided label in case the value 152 * in the provided register is outside of the provided range. The 153 * macro is useful for checking whether a PSW stored in a register 154 * pair points inside or outside of a block of instructions. 155 * @reg: register to check 156 * @start: start of the range 157 * @end: end of the range 158 * @outside_label: jump here if @reg is outside of [@start..@end) 159 */ 160 .macro OUTSIDE reg,start,end,outside_label 161 lgr %r14,\reg 162 larl %r13,\start 163 slgr %r14,%r13 164 lghi %r13,\end - \start 165 clgr %r14,%r13 166 jhe \outside_label 167 .endm 168 169 .macro SIEEXIT 170 lg %r9,__SF_SIE_CONTROL(%r15) # get control block pointer 171 ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE 172 lctlg %c1,%c1,__LC_KERNEL_ASCE # load primary asce 173 larl %r9,sie_exit # skip forward to sie_exit 174 .endm 175#endif 176 177 GEN_BR_THUNK %r14 178 GEN_BR_THUNK %r14,%r13 179 180 .section .kprobes.text, "ax" 181.Ldummy: 182 /* 183 * This nop exists only in order to avoid that __bpon starts at 184 * the beginning of the kprobes text section. In that case we would 185 * have several symbols at the same address. E.g. objdump would take 186 * an arbitrary symbol name when disassembling this code. 187 * With the added nop in between the __bpon symbol is unique 188 * again. 189 */ 190 nop 0 191 192ENTRY(__bpon) 193 .globl __bpon 194 BPON 195 BR_EX %r14 196ENDPROC(__bpon) 197 198/* 199 * Scheduler resume function, called by switch_to 200 * gpr2 = (task_struct *) prev 201 * gpr3 = (task_struct *) next 202 * Returns: 203 * gpr2 = prev 204 */ 205ENTRY(__switch_to) 206 stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task 207 lghi %r4,__TASK_stack 208 lghi %r1,__TASK_thread 209 llill %r5,STACK_INIT 210 stg %r15,__THREAD_ksp(%r1,%r2) # store kernel stack of prev 211 lg %r15,0(%r4,%r3) # start of kernel stack of next 212 agr %r15,%r5 # end of kernel stack of next 213 stg %r3,__LC_CURRENT # store task struct of next 214 stg %r15,__LC_KERNEL_STACK # store end of kernel stack 215 lg %r15,__THREAD_ksp(%r1,%r3) # load kernel stack of next 216 aghi %r3,__TASK_pid 217 mvc __LC_CURRENT_PID(4,%r0),0(%r3) # store pid of next 218 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task 219 ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40 220 BR_EX %r14 221ENDPROC(__switch_to) 222 223#if IS_ENABLED(CONFIG_KVM) 224/* 225 * sie64a calling convention: 226 * %r2 pointer to sie control block 227 * %r3 guest register save area 228 */ 229ENTRY(sie64a) 230 stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers 231 lg %r12,__LC_CURRENT 232 stg %r2,__SF_SIE_CONTROL(%r15) # save control block pointer 233 stg %r3,__SF_SIE_SAVEAREA(%r15) # save guest register save area 234 xc __SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0 235 mvc __SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags 236 lmg %r0,%r13,0(%r3) # load guest gprs 0-13 237 lg %r14,__LC_GMAP # get gmap pointer 238 ltgr %r14,%r14 239 jz .Lsie_gmap 240 lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce 241.Lsie_gmap: 242 lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer 243 oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now 244 tm __SIE_PROG20+3(%r14),3 # last exit... 245 jnz .Lsie_skip 246 TSTMSK __LC_CPU_FLAGS,_CIF_FPU 247 jo .Lsie_skip # exit if fp/vx regs changed 248 BPEXIT __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST) 249.Lsie_entry: 250 sie 0(%r14) 251# Let the next instruction be NOP to avoid triggering a machine check 252# and handling it in a guest as result of the instruction execution. 253 nopr 7 254.Lsie_leave: 255 BPOFF 256 BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST) 257.Lsie_skip: 258 ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE 259 lctlg %c1,%c1,__LC_KERNEL_ASCE # load primary asce 260.Lsie_done: 261# some program checks are suppressing. C code (e.g. do_protection_exception) 262# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There 263# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable. 264# Other instructions between sie64a and .Lsie_done should not cause program 265# interrupts. So lets use 3 nops as a landing pad for all possible rewinds. 266.Lrewind_pad6: 267 nopr 7 268.Lrewind_pad4: 269 nopr 7 270.Lrewind_pad2: 271 nopr 7 272 .globl sie_exit 273sie_exit: 274 lg %r14,__SF_SIE_SAVEAREA(%r15) # load guest register save area 275 stmg %r0,%r13,0(%r14) # save guest gprs 0-13 276 xgr %r0,%r0 # clear guest registers to 277 xgr %r1,%r1 # prevent speculative use 278 xgr %r3,%r3 279 xgr %r4,%r4 280 xgr %r5,%r5 281 lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers 282 lg %r2,__SF_SIE_REASON(%r15) # return exit reason code 283 BR_EX %r14 284.Lsie_fault: 285 lghi %r14,-EFAULT 286 stg %r14,__SF_SIE_REASON(%r15) # set exit reason code 287 j sie_exit 288 289 EX_TABLE(.Lrewind_pad6,.Lsie_fault) 290 EX_TABLE(.Lrewind_pad4,.Lsie_fault) 291 EX_TABLE(.Lrewind_pad2,.Lsie_fault) 292 EX_TABLE(sie_exit,.Lsie_fault) 293ENDPROC(sie64a) 294EXPORT_SYMBOL(sie64a) 295EXPORT_SYMBOL(sie_exit) 296#endif 297 298/* 299 * SVC interrupt handler routine. System calls are synchronous events and 300 * are entered with interrupts disabled. 301 */ 302 303ENTRY(system_call) 304 stpt __LC_SYS_ENTER_TIMER 305 stmg %r8,%r15,__LC_SAVE_AREA_SYNC 306 BPOFF 307 lghi %r14,0 308.Lsysc_per: 309 lctlg %c1,%c1,__LC_KERNEL_ASCE 310 lg %r12,__LC_CURRENT 311 lg %r15,__LC_KERNEL_STACK 312 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 313 stmg %r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15) 314 BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP 315 # clear user controlled register to prevent speculative use 316 xgr %r0,%r0 317 xgr %r1,%r1 318 xgr %r4,%r4 319 xgr %r5,%r5 320 xgr %r6,%r6 321 xgr %r7,%r7 322 xgr %r8,%r8 323 xgr %r9,%r9 324 xgr %r10,%r10 325 xgr %r11,%r11 326 la %r2,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs 327 mvc __PT_R8(64,%r2),__LC_SAVE_AREA_SYNC 328 lgr %r3,%r14 329 brasl %r14,__do_syscall 330 lctlg %c1,%c1,__LC_USER_ASCE 331 mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15) 332 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP 333 lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15) 334 stpt __LC_EXIT_TIMER 335 b __LC_RETURN_LPSWE 336ENDPROC(system_call) 337 338# 339# a new process exits the kernel with ret_from_fork 340# 341ENTRY(ret_from_fork) 342 lgr %r3,%r11 343 brasl %r14,__ret_from_fork 344 lctlg %c1,%c1,__LC_USER_ASCE 345 mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15) 346 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP 347 lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15) 348 stpt __LC_EXIT_TIMER 349 b __LC_RETURN_LPSWE 350ENDPROC(ret_from_fork) 351 352/* 353 * Program check handler routine 354 */ 355 356ENTRY(pgm_check_handler) 357 stpt __LC_SYS_ENTER_TIMER 358 BPOFF 359 stmg %r8,%r15,__LC_SAVE_AREA_SYNC 360 lg %r12,__LC_CURRENT 361 lghi %r10,0 362 lmg %r8,%r9,__LC_PGM_OLD_PSW 363 tmhh %r8,0x0001 # coming from user space? 364 jno .Lpgm_skip_asce 365 lctlg %c1,%c1,__LC_KERNEL_ASCE 366 j 3f # -> fault in user space 367.Lpgm_skip_asce: 368#if IS_ENABLED(CONFIG_KVM) 369 # cleanup critical section for program checks in sie64a 370 OUTSIDE %r9,.Lsie_gmap,.Lsie_done,1f 371 SIEEXIT 372 lghi %r10,_PIF_GUEST_FAULT 373#endif 3741: tmhh %r8,0x4000 # PER bit set in old PSW ? 375 jnz 2f # -> enabled, can't be a double fault 376 tm __LC_PGM_ILC+3,0x80 # check for per exception 377 jnz .Lpgm_svcper # -> single stepped svc 3782: CHECK_STACK __LC_SAVE_AREA_SYNC 379 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 380 # CHECK_VMAP_STACK branches to stack_overflow or 4f 381 CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f 3823: BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP 383 lg %r15,__LC_KERNEL_STACK 3844: la %r11,STACK_FRAME_OVERHEAD(%r15) 385 stg %r10,__PT_FLAGS(%r11) 386 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 387 stmg %r0,%r7,__PT_R0(%r11) 388 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC 389 stmg %r8,%r9,__PT_PSW(%r11) 390 391 # clear user controlled registers to prevent speculative use 392 xgr %r0,%r0 393 xgr %r1,%r1 394 xgr %r3,%r3 395 xgr %r4,%r4 396 xgr %r5,%r5 397 xgr %r6,%r6 398 xgr %r7,%r7 399 lgr %r2,%r11 400 brasl %r14,__do_pgm_check 401 tmhh %r8,0x0001 # returning to user space? 402 jno .Lpgm_exit_kernel 403 lctlg %c1,%c1,__LC_USER_ASCE 404 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP 405 stpt __LC_EXIT_TIMER 406.Lpgm_exit_kernel: 407 mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15) 408 lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15) 409 b __LC_RETURN_LPSWE 410 411# 412# single stepped system call 413# 414.Lpgm_svcper: 415 mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW 416 larl %r14,.Lsysc_per 417 stg %r14,__LC_RETURN_PSW+8 418 lghi %r14,1 419 lpswe __LC_RETURN_PSW # branch to .Lsysc_per 420ENDPROC(pgm_check_handler) 421 422/* 423 * Interrupt handler macro used for external and IO interrupts. 424 */ 425.macro INT_HANDLER name,lc_old_psw,handler 426ENTRY(\name) 427 STCK __LC_INT_CLOCK 428 stpt __LC_SYS_ENTER_TIMER 429 BPOFF 430 stmg %r8,%r15,__LC_SAVE_AREA_ASYNC 431 lg %r12,__LC_CURRENT 432 lmg %r8,%r9,\lc_old_psw 433 tmhh %r8,0x0001 # interrupting from user ? 434 jnz 1f 435#if IS_ENABLED(CONFIG_KVM) 436 OUTSIDE %r9,.Lsie_gmap,.Lsie_done,0f 437 BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST) 438 SIEEXIT 439#endif 4400: CHECK_STACK __LC_SAVE_AREA_ASYNC 441 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 442 j 2f 4431: BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP 444 lctlg %c1,%c1,__LC_KERNEL_ASCE 445 lg %r15,__LC_KERNEL_STACK 4462: xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 447 la %r11,STACK_FRAME_OVERHEAD(%r15) 448 stmg %r0,%r7,__PT_R0(%r11) 449 # clear user controlled registers to prevent speculative use 450 xgr %r0,%r0 451 xgr %r1,%r1 452 xgr %r3,%r3 453 xgr %r4,%r4 454 xgr %r5,%r5 455 xgr %r6,%r6 456 xgr %r7,%r7 457 xgr %r10,%r10 458 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) 459 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC 460 stmg %r8,%r9,__PT_PSW(%r11) 461 tm %r8,0x0001 # coming from user space? 462 jno 1f 463 lctlg %c1,%c1,__LC_KERNEL_ASCE 4641: lgr %r2,%r11 # pass pointer to pt_regs 465 brasl %r14,\handler 466 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) 467 tmhh %r8,0x0001 # returning to user ? 468 jno 2f 469 lctlg %c1,%c1,__LC_USER_ASCE 470 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP 471 stpt __LC_EXIT_TIMER 4722: lmg %r0,%r15,__PT_R0(%r11) 473 b __LC_RETURN_LPSWE 474ENDPROC(\name) 475.endm 476 477INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq 478INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq 479 480/* 481 * Load idle PSW. 482 */ 483ENTRY(psw_idle) 484 stg %r14,(__SF_GPRS+8*8)(%r15) 485 stg %r3,__SF_EMPTY(%r15) 486 larl %r1,psw_idle_exit 487 stg %r1,__SF_EMPTY+8(%r15) 488 larl %r1,smp_cpu_mtid 489 llgf %r1,0(%r1) 490 ltgr %r1,%r1 491 jz .Lpsw_idle_stcctm 492 .insn rsy,0xeb0000000017,%r1,5,__MT_CYCLES_ENTER(%r2) 493.Lpsw_idle_stcctm: 494 oi __LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT 495 BPON 496 STCK __CLOCK_IDLE_ENTER(%r2) 497 stpt __TIMER_IDLE_ENTER(%r2) 498 lpswe __SF_EMPTY(%r15) 499.globl psw_idle_exit 500psw_idle_exit: 501 BR_EX %r14 502ENDPROC(psw_idle) 503 504/* 505 * Machine check handler routines 506 */ 507ENTRY(mcck_int_handler) 508 STCK __LC_MCCK_CLOCK 509 BPOFF 510 la %r1,4095 # validate r1 511 spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # validate cpu timer 512 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs 513 lg %r12,__LC_CURRENT 514 lmg %r8,%r9,__LC_MCK_OLD_PSW 515 TSTMSK __LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE 516 jo .Lmcck_panic # yes -> rest of mcck code invalid 517 TSTMSK __LC_MCCK_CODE,MCCK_CODE_CR_VALID 518 jno .Lmcck_panic # control registers invalid -> panic 519 la %r14,4095 520 lctlg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs 521 ptlb 522 lghi %r14,__LC_CPU_TIMER_SAVE_AREA 523 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) 524 TSTMSK __LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID 525 jo 3f 526 la %r14,__LC_SYS_ENTER_TIMER 527 clc 0(8,%r14),__LC_EXIT_TIMER 528 jl 1f 529 la %r14,__LC_EXIT_TIMER 5301: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER 531 jl 2f 532 la %r14,__LC_LAST_UPDATE_TIMER 5332: spt 0(%r14) 534 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) 5353: TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID 536 jno .Lmcck_panic 537 tmhh %r8,0x0001 # interrupting from user ? 538 jnz 6f 539 TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID 540 jno .Lmcck_panic 541#if IS_ENABLED(CONFIG_KVM) 542 OUTSIDE %r9,.Lsie_gmap,.Lsie_done,6f 543 OUTSIDE %r9,.Lsie_entry,.Lsie_leave,4f 544 oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST 545 j 5f 5464: CHKSTG .Lmcck_panic 5475: larl %r14,.Lstosm_tmp 548 stosm 0(%r14),0x04 # turn dat on, keep irqs off 549 BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST) 550 SIEEXIT 551 j .Lmcck_stack 552#endif 5536: CHKSTG .Lmcck_panic 554 larl %r14,.Lstosm_tmp 555 stosm 0(%r14),0x04 # turn dat on, keep irqs off 556 tmhh %r8,0x0001 # interrupting from user ? 557 jz .Lmcck_stack 558 BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP 559.Lmcck_stack: 560 lg %r15,__LC_MCCK_STACK 561 la %r11,STACK_FRAME_OVERHEAD(%r15) 562 stctg %c1,%c1,__PT_CR1(%r11) 563 lctlg %c1,%c1,__LC_KERNEL_ASCE 564 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 565 lghi %r14,__LC_GPREGS_SAVE_AREA+64 566 stmg %r0,%r7,__PT_R0(%r11) 567 # clear user controlled registers to prevent speculative use 568 xgr %r0,%r0 569 xgr %r1,%r1 570 xgr %r3,%r3 571 xgr %r4,%r4 572 xgr %r5,%r5 573 xgr %r6,%r6 574 xgr %r7,%r7 575 xgr %r10,%r10 576 mvc __PT_R8(64,%r11),0(%r14) 577 stmg %r8,%r9,__PT_PSW(%r11) 578 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) 579 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 580 lgr %r2,%r11 # pass pointer to pt_regs 581 brasl %r14,s390_do_machine_check 582 cghi %r2,0 583 je .Lmcck_return 584 lg %r1,__LC_KERNEL_STACK # switch to kernel stack 585 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) 586 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) 587 la %r11,STACK_FRAME_OVERHEAD(%r1) 588 lgr %r15,%r1 589 brasl %r14,s390_handle_mcck 590.Lmcck_return: 591 lctlg %c1,%c1,__PT_CR1(%r11) 592 lmg %r0,%r10,__PT_R0(%r11) 593 mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW 594 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? 595 jno 0f 596 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP 597 stpt __LC_EXIT_TIMER 5980: lmg %r11,%r15,__PT_R11(%r11) 599 b __LC_RETURN_MCCK_LPSWE 600 601.Lmcck_panic: 602 /* 603 * Iterate over all possible CPU addresses in the range 0..0xffff 604 * and stop each CPU using signal processor. Use compare and swap 605 * to allow just one CPU-stopper and prevent concurrent CPUs from 606 * stopping each other while leaving the others running. 607 */ 608 lhi %r5,0 609 lhi %r6,1 610 larl %r7,.Lstop_lock 611 cs %r5,%r6,0(%r7) # single CPU-stopper only 612 jnz 4f 613 larl %r7,.Lthis_cpu 614 stap 0(%r7) # this CPU address 615 lh %r4,0(%r7) 616 nilh %r4,0 617 lhi %r0,1 618 sll %r0,16 # CPU counter 619 lhi %r3,0 # next CPU address 6200: cr %r3,%r4 621 je 2f 6221: sigp %r1,%r3,SIGP_STOP # stop next CPU 623 brc SIGP_CC_BUSY,1b 6242: ahi %r3,1 625 brct %r0,0b 6263: sigp %r1,%r4,SIGP_STOP # stop this CPU 627 brc SIGP_CC_BUSY,3b 6284: j 4b 629ENDPROC(mcck_int_handler) 630 631ENTRY(restart_int_handler) 632 ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40 633 stg %r15,__LC_SAVE_AREA_RESTART 634 TSTMSK __LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4 635 jz 0f 636 la %r15,4095 637 lctlg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r15) 6380: larl %r15,.Lstosm_tmp 639 stosm 0(%r15),0x04 # turn dat on, keep irqs off 640 lg %r15,__LC_RESTART_STACK 641 xc STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15) 642 stmg %r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15) 643 mvc STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART 644 mvc STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW 645 xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15) 646 lg %r1,__LC_RESTART_FN # load fn, parm & source cpu 647 lg %r2,__LC_RESTART_DATA 648 lgf %r3,__LC_RESTART_SOURCE 649 ltgr %r3,%r3 # test source cpu address 650 jm 1f # negative -> skip source stop 6510: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu 652 brc 10,0b # wait for status stored 6531: basr %r14,%r1 # call function 654 stap __SF_EMPTY(%r15) # store cpu address 655 llgh %r3,__SF_EMPTY(%r15) 6562: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu 657 brc 2,2b 6583: j 3b 659ENDPROC(restart_int_handler) 660 661 .section .kprobes.text, "ax" 662 663#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK) 664/* 665 * The synchronous or the asynchronous stack overflowed. We are dead. 666 * No need to properly save the registers, we are going to panic anyway. 667 * Setup a pt_regs so that show_trace can provide a good call trace. 668 */ 669ENTRY(stack_overflow) 670 lg %r15,__LC_NODAT_STACK # change to panic stack 671 la %r11,STACK_FRAME_OVERHEAD(%r15) 672 stmg %r0,%r7,__PT_R0(%r11) 673 stmg %r8,%r9,__PT_PSW(%r11) 674 mvc __PT_R8(64,%r11),0(%r14) 675 stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2 676 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 677 lgr %r2,%r11 # pass pointer to pt_regs 678 jg kernel_stack_overflow 679ENDPROC(stack_overflow) 680#endif 681 682 .section .data, "aw" 683 .align 4 684.Lstop_lock: .long 0 685.Lthis_cpu: .short 0 686.Lstosm_tmp: .byte 0 687 .section .rodata, "a" 688#define SYSCALL(esame,emu) .quad __s390x_ ## esame 689 .globl sys_call_table 690sys_call_table: 691#include "asm/syscall_table.h" 692#undef SYSCALL 693 694#ifdef CONFIG_COMPAT 695 696#define SYSCALL(esame,emu) .quad __s390_ ## emu 697 .globl sys_call_table_emu 698sys_call_table_emu: 699#include "asm/syscall_table.h" 700#undef SYSCALL 701#endif 702