// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *		 Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <linux/kfence.h>
#include <asm/asm-extable.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/gmap.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include <asm/uv.h>
#include "../kernel/entry.h"

#define __FAIL_ADDR_MASK -4096L

/*
 * Allocate private vm_fault_reason from top.  Please make sure it won't
 * collide with vm_fault_reason.
 */
#define VM_FAULT_BADCONTEXT	((__force vm_fault_t)0x80000000)
#define VM_FAULT_BADMAP		((__force vm_fault_t)0x40000000)
#define VM_FAULT_BADACCESS	((__force vm_fault_t)0x20000000)
#define VM_FAULT_SIGNAL		((__force vm_fault_t)0x10000000)
#define VM_FAULT_PFAULT		((__force vm_fault_t)0x8000000)

enum fault_type {
	KERNEL_FAULT,
	USER_FAULT,
	GMAP_FAULT,
};

static unsigned long store_indication __read_mostly;

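/*
 * With facility 75 (access-exception fetch/store indication) installed,
 * TEID bits 52-53 tell whether the failing access was a fetch or a
 * store.  Cache the corresponding mask (0xc00) so that fault_is_write()
 * can test those bits cheaply.
 */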
static int __init fault_init(void)
{
	if (test_facility(75))
		store_indication = 0xc00;
	return 0;
}
early_initcall(fault_init);

/*
 * Find out which address space caused the exception.
 */
static enum fault_type get_fault_type(struct pt_regs *regs)
{
	unsigned long trans_exc_code;

	trans_exc_code = regs->int_parm_long & 3;
	if (likely(trans_exc_code == 0)) {
		/* primary space exception */
		if (user_mode(regs))
			return USER_FAULT;
		if (!IS_ENABLED(CONFIG_PGSTE))
			return KERNEL_FAULT;
		if (test_pt_regs_flag(regs, PIF_GUEST_FAULT))
			return GMAP_FAULT;
		return KERNEL_FAULT;
	}
	if (trans_exc_code == 2)
		return USER_FAULT;
	if (trans_exc_code == 1) {
		/* access register mode, not used in the kernel */
		return USER_FAULT;
	}
	/* home space exception -> access via kernel ASCE */
	return KERNEL_FAULT;
}

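/*
 * The failing address is reported in the translation-exception
 * identification (TEID); its low-order bits carry status flags and are
 * masked off here.
 */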
static unsigned long get_fault_address(struct pt_regs *regs)
{
	unsigned long trans_exc_code = regs->int_parm_long;

	return trans_exc_code & __FAIL_ADDR_MASK;
}

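/*
 * Evaluate the fetch/store indication bits in the TEID.  Without
 * facility 75 store_indication is zero and this always returns false,
 * in which case do_exception() relies on the access type passed in by
 * the first-level handler.
 */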
static bool fault_is_write(struct pt_regs *regs)
{
	unsigned long trans_exc_code = regs->int_parm_long;

	return (trans_exc_code & store_indication) == 0x400;
}

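/*
 * Probe whether @p can be read without faulting, so that
 * dump_pagetable() can safely follow possibly invalid table origins.
 */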
static int bad_address(void *p)
{
	unsigned long dummy;

	return get_kernel_nofault(dummy, (unsigned long *)p);
}

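/*
 * Walk the page table for @address starting from @asce and print the
 * entry at each level, stopping early at invalid or large entries.
 */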
static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long *table = __va(asce & _ASCE_ORIGIN);

	pr_alert("AS:%016lx ", asce);
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R1:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = __va(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION2:
		table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R2:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = __va(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION3:
		table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R3:%016lx ", *table);
		if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
			goto out;
		table = __va(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_SEGMENT:
		table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("S:%016lx ", *table);
		if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
			goto out;
		table = __va(*table & _SEGMENT_ENTRY_ORIGIN);
	}
	table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
	if (bad_address(table))
		goto bad;
	pr_cont("P:%016lx ", *table);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}

static void dump_fault_info(struct pt_regs *regs)
{
	unsigned long asce;

	pr_alert("Failing address: %016lx TEID: %016lx\n",
		 regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
	pr_alert("Fault in ");
	switch (regs->int_parm_long & 3) {
	case 3:
		pr_cont("home space ");
		break;
	case 2:
		pr_cont("secondary space ");
		break;
	case 1:
		pr_cont("access register ");
		break;
	case 0:
		pr_cont("primary space ");
		break;
	}
	pr_cont("mode while using ");
	switch (get_fault_type(regs)) {
	case USER_FAULT:
		asce = S390_lowcore.user_asce;
		pr_cont("user ");
		break;
	case GMAP_FAULT:
		asce = ((struct gmap *) S390_lowcore.gmap)->asce;
		pr_cont("gmap ");
		break;
	case KERNEL_FAULT:
		asce = S390_lowcore.kernel_asce;
		pr_cont("kernel ");
		break;
	default:
		unreachable();
	}
	pr_cont("ASCE.\n");
	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
}

int show_unhandled_signals = 1;

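/*
 * Print rate-limited diagnostics for a fault that is about to be turned
 * into a signal for an unhandled user-space access.
 */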
void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
	       regs->int_code & 0xffff, regs->int_code >> 17);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr);
	printk(KERN_CONT "\n");
	if (is_mm_fault)
		dump_fault_info(regs);
	show_regs(regs);
}

/*
 * Send SIGSEGV to task. This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
	report_user_fault(regs, SIGSEGV, 1);
	force_sig_fault(SIGSEGV, si_code,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}

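/*
 * Handle a fault that cannot be resolved in user context: try an
 * exception-table fixup first, give KFENCE a chance to claim kernel
 * faults, and otherwise oops.
 */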
static noinline void do_no_context(struct pt_regs *regs, vm_fault_t fault)
{
	enum fault_type fault_type;
	unsigned long address;
	bool is_write;

	if (fixup_exception(regs))
		return;
	fault_type = get_fault_type(regs);
	if ((fault_type == KERNEL_FAULT) && (fault == VM_FAULT_BADCONTEXT)) {
		address = get_fault_address(regs);
		is_write = fault_is_write(regs);
		if (kfence_handle_page_fault(address, is_write, regs))
			return;
	}
	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (fault_type == KERNEL_FAULT)
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " in virtual kernel address space\n");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " in virtual user address space\n");
	dump_fault_info(regs);
	die(regs, "Oops");
}

static noinline void do_low_address(struct pt_regs *regs)
{
	/*
	 * Low-address protection hit in kernel mode means
	 * NULL pointer write access in kernel mode.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Low-address protection hit in user mode 'cannot happen'. */
		die(regs, "Low-address protection");
	}

	do_no_context(regs, VM_FAULT_BADACCESS);
}

static noinline void do_sigbus(struct pt_regs *regs)
{
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	force_sig_fault(SIGBUS, BUS_ADRERR,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}

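/*
 * Translate a vm_fault_t - including the private reasons defined at the
 * top of this file - into the appropriate signal for user space or
 * error handling for the kernel.
 */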
static noinline void do_fault_error(struct pt_regs *regs, vm_fault_t fault)
{
	int si_code;

	switch (fault) {
	case VM_FAULT_BADACCESS:
	case VM_FAULT_BADMAP:
		/* Bad memory access. Check if it is kernel or user space. */
		if (user_mode(regs)) {
			/* User mode accesses just cause a SIGSEGV */
			si_code = (fault == VM_FAULT_BADMAP) ?
				SEGV_MAPERR : SEGV_ACCERR;
			do_sigsegv(regs, si_code);
			break;
		}
		fallthrough;
	case VM_FAULT_BADCONTEXT:
	case VM_FAULT_PFAULT:
		do_no_context(regs, fault);
		break;
	case VM_FAULT_SIGNAL:
		if (!user_mode(regs))
			do_no_context(regs, fault);
		break;
	default: /* fault & VM_FAULT_ERROR */
		if (fault & VM_FAULT_OOM) {
			if (!user_mode(regs))
				do_no_context(regs, fault);
			else
				pagefault_out_of_memory();
		} else if (fault & VM_FAULT_SIGSEGV) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs, fault);
			else
				do_sigsegv(regs, SEGV_MAPERR);
		} else if (fault & (VM_FAULT_SIGBUS | VM_FAULT_HWPOISON)) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs, fault);
			else
				do_sigbus(regs);
		} else {
			pr_emerg("Unexpected fault flags: %08x\n", fault);
			BUG();
		}
		break;
	}
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
{
	struct gmap *gmap;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum fault_type type;
	unsigned long address;
	unsigned int flags;
	vm_fault_t fault;
	bool is_write;

	tsk = current;
	/*
	 * The instruction that caused the program check has
	 * been nullified. Don't signal single step via SIGTRAP.
	 */
	clear_thread_flag(TIF_PER_TRAP);

	if (kprobe_page_fault(regs, 14))
		return 0;

	mm = tsk->mm;
	address = get_fault_address(regs);
	is_write = fault_is_write(regs);

	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a
	 * user context.
	 */
	fault = VM_FAULT_BADCONTEXT;
	type = get_fault_type(regs);
	switch (type) {
	case KERNEL_FAULT:
		goto out;
	case USER_FAULT:
	case GMAP_FAULT:
		if (faulthandler_disabled() || !mm)
			goto out;
		break;
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (is_write)
		access = VM_WRITE;
	if (access == VM_WRITE)
		flags |= FAULT_FLAG_WRITE;
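
	/*
	 * Try the lockless per-VMA fault path first for user faults; any
	 * failure here falls back to taking the mmap read lock below.
	 */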
	if (!(flags & FAULT_FLAG_USER))
		goto lock_mmap;
	vma = lock_vma_under_rcu(mm, address);
	if (!vma)
		goto lock_mmap;
	if (!(vma->vm_flags & access)) {
		vma_end_read(vma);
		goto lock_mmap;
	}
	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		vma_end_read(vma);
	if (!(fault & VM_FAULT_RETRY)) {
		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
		if (likely(!(fault & VM_FAULT_ERROR)))
			fault = 0;
		goto out;
	}
	count_vm_vma_lock_event(VMA_LOCK_RETRY);
	if (fault & VM_FAULT_MAJOR)
		flags |= FAULT_FLAG_TRIED;

	/* Quick path to respond to signals */
	if (fault_signal_pending(fault, regs)) {
		fault = VM_FAULT_SIGNAL;
		goto out;
	}
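	/* Slow path: resolve the fault under the mmap read lock. */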
lock_mmap:
	mmap_read_lock(mm);

	gmap = NULL;
	if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
		gmap = (struct gmap *) S390_lowcore.gmap;
		current->thread.gmap_addr = address;
		current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
		current->thread.gmap_int_code = regs->int_code & 0xffff;
		address = __gmap_translate(gmap, address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (gmap->pfault_enabled)
			flags |= FAULT_FLAG_RETRY_NOWAIT;
	}

retry:
	fault = VM_FAULT_BADMAP;
	vma = find_vma(mm, address);
	if (!vma)
		goto out_up;

	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_up;
		vma = expand_stack(mm, address);
		if (!vma)
			goto out;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
	fault = VM_FAULT_BADACCESS;
	if (unlikely(!(vma->vm_flags & access)))
		goto out_up;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);
	if (fault_signal_pending(fault, regs)) {
		fault = VM_FAULT_SIGNAL;
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_up;
		goto out;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED) {
		if (gmap) {
			mmap_read_lock(mm);
			goto out_gmap;
		}
		fault = 0;
		goto out;
	}

	if (unlikely(fault & VM_FAULT_ERROR))
		goto out_up;

	if (fault & VM_FAULT_RETRY) {
		if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
		    (flags & FAULT_FLAG_RETRY_NOWAIT)) {
			/*
			 * FAULT_FLAG_RETRY_NOWAIT has been set, mmap_lock has
			 * not been released
			 */
			current->thread.gmap_pfault = 1;
			fault = VM_FAULT_PFAULT;
			goto out_up;
		}
		flags &= ~FAULT_FLAG_RETRY_NOWAIT;
		flags |= FAULT_FLAG_TRIED;
		mmap_read_lock(mm);
		goto retry;
	}
out_gmap:
	if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
		address = __gmap_link(gmap, current->thread.gmap_addr,
				      address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			goto out_up;
		}
	}
	fault = 0;
out_up:
	mmap_read_unlock(mm);
out:
	return fault;
}

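/*
 * First-level handler for protection exceptions (interruption code 04
 * in the list above do_exception()).
 */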
void do_protection_exception(struct pt_regs *regs)
{
	unsigned long trans_exc_code;
	int access;
	vm_fault_t fault;

	trans_exc_code = regs->int_parm_long;
	/*
	 * Protection exceptions are suppressing, decrement psw address.
	 * The exception to this rule are aborted transactions, for these
	 * the PSW already points to the correct location.
	 */
	if (!(regs->int_code & 0x200))
		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
	/*
	 * Check for low-address protection.  This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!(trans_exc_code & 4))) {
		do_low_address(regs);
		return;
	}
	if (unlikely(MACHINE_HAS_NX && (trans_exc_code & 0x80))) {
		regs->int_parm_long = (trans_exc_code & ~PAGE_MASK) |
				      (regs->psw.addr & PAGE_MASK);
		access = VM_EXEC;
		fault = VM_FAULT_BADACCESS;
	} else {
		access = VM_WRITE;
		fault = do_exception(regs, access);
	}
	if (unlikely(fault))
		do_fault_error(regs, fault);
}
NOKPROBE_SYMBOL(do_protection_exception);

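/*
 * First-level handler for DAT exceptions (see the interruption-code
 * list above do_exception()); any access type in VM_ACCESS_FLAGS is
 * acceptable unless the TEID indicates a store.
 */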
void do_dat_exception(struct pt_regs *regs)
{
	int access;
	vm_fault_t fault;

	access = VM_ACCESS_FLAGS;
	fault = do_exception(regs, access);
	if (unlikely(fault))
		do_fault_error(regs, fault);
}
NOKPROBE_SYMBOL(do_dat_exception);

#if IS_ENABLED(CONFIG_PGSTE)

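/*
 * Handle a secure storage access exception: make the page accessible
 * to the host again, or raise SIGSEGV/oops if the access was invalid.
 */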
void do_secure_storage_access(struct pt_regs *regs)
{
	unsigned long addr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct page *page;
	struct gmap *gmap;
	int rc;

	/*
	 * bit 61 tells us if the address is valid, if it's not we
	 * have a major problem and should stop the kernel or send a
	 * SIGSEGV to the process. Unfortunately bit 61 is not
	 * reliable without the misc UV feature so we need to check
	 * for that as well.
	 */
	if (uv_has_feature(BIT_UV_FEAT_MISC) &&
	    !test_bit_inv(61, &regs->int_parm_long)) {
		/*
		 * When this happens, userspace did something that it
		 * was not supposed to do, e.g. branching into secure
		 * memory. Trigger a segmentation fault.
		 */
		if (user_mode(regs)) {
			send_sig(SIGSEGV, current, 0);
			return;
		}

		/*
		 * The kernel should never run into this case and we
		 * have no way out of this situation.
		 */
		panic("Unexpected PGM 0x3d with TEID bit 61=0");
	}

	switch (get_fault_type(regs)) {
	case GMAP_FAULT:
		mm = current->mm;
		gmap = (struct gmap *)S390_lowcore.gmap;
		mmap_read_lock(mm);
		addr = __gmap_translate(gmap, addr);
		mmap_read_unlock(mm);
		if (IS_ERR_VALUE(addr)) {
			do_fault_error(regs, VM_FAULT_BADMAP);
			break;
		}
		fallthrough;
	case USER_FAULT:
		mm = current->mm;
		mmap_read_lock(mm);
		vma = find_vma(mm, addr);
		if (!vma) {
			mmap_read_unlock(mm);
			do_fault_error(regs, VM_FAULT_BADMAP);
			break;
		}
		page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
		if (IS_ERR_OR_NULL(page)) {
			mmap_read_unlock(mm);
			break;
		}
		if (arch_make_page_accessible(page))
			send_sig(SIGSEGV, current, 0);
		put_page(page);
		mmap_read_unlock(mm);
		break;
	case KERNEL_FAULT:
		page = phys_to_page(addr);
		if (unlikely(!try_get_page(page)))
			break;
		rc = arch_make_page_accessible(page);
		put_page(page);
		if (rc)
			BUG();
		break;
	default:
		do_fault_error(regs, VM_FAULT_BADMAP);
		WARN_ON_ONCE(1);
	}
}
NOKPROBE_SYMBOL(do_secure_storage_access);

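/*
 * A protected guest tripped over a page that is still mapped
 * non-secure; convert it to secure so the guest can retry the access.
 */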
void do_non_secure_storage_access(struct pt_regs *regs)
{
	unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;

	if (get_fault_type(regs) != GMAP_FAULT) {
		do_fault_error(regs, VM_FAULT_BADMAP);
		WARN_ON_ONCE(1);
		return;
	}

	if (gmap_convert_to_secure(gmap, gaddr) == -EINVAL)
		send_sig(SIGSEGV, current, 0);
}
NOKPROBE_SYMBOL(do_non_secure_storage_access);

void do_secure_storage_violation(struct pt_regs *regs)
{
	unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;

	/*
	 * If the VM has been rebooted, its address space might still contain
	 * secure pages from the previous boot.
	 * Clear the page so it can be reused.
	 */
	if (!gmap_destroy_page(gmap, gaddr))
		return;
	/*
	 * Either KVM messed up the secure guest mapping or the same
	 * page is mapped into multiple secure guests.
	 *
	 * This exception is only triggered when a guest 2 is running
	 * and can therefore never occur in kernel context.
	 */
	printk_ratelimited(KERN_WARNING
			   "Secure storage violation in task: %s, pid %d\n",
			   current->comm, current->pid);
	send_sig(SIGSEGV, current, 0);
}

#endif /* CONFIG_PGSTE */