// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Modified by Cort Dougan and Paul Mackerras.
 *
 *  Modified for PPC64 by Dave Engebretsen (engebret@ibm.com)
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/extable.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <linux/hugetlb.h>
#include <linux/uaccess.h>
#include <linux/kfence.h>
#include <linux/pkeys.h>

#include <asm/firmware.h>
#include <asm/interrupt.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/siginfo.h>
#include <asm/debug.h>
#include <asm/kup.h>
#include <asm/inst.h>


/*
 * do_page_fault error handling helpers
 */

static int
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long address, int si_code)
{
	/*
	 * If we are in kernel mode, bail out with a SEGV, this will
	 * be caught by the assembly which will restore the non-volatile
	 * registers before calling bad_page_fault()
	 */
	if (!user_mode(regs))
		return SIGSEGV;

	_exception(SIGSEGV, regs, si_code, address);

	return 0;
}

static noinline int bad_area_nosemaphore(struct pt_regs *regs, unsigned long address)
{
	return __bad_area_nosemaphore(regs, address, SEGV_MAPERR);
}

static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
	mmap_read_unlock(mm);

	return __bad_area_nosemaphore(regs, address, si_code);
}

static noinline int bad_area(struct pt_regs *regs, unsigned long address)
{
	return __bad_area(regs, address, SEGV_MAPERR);
}

static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address,
				    struct vm_area_struct *vma)
{
	struct mm_struct *mm = current->mm;
	int pkey;

	/*
	 * We don't try to fetch the pkey from the page table because reading
	 * the page table without locking doesn't guarantee a stable pte value.
	 * Hence the pkey value that we return to userspace can be different
	 * from the pkey that actually caused the access error.
	 *
	 * It does *not* guarantee that the VMA we find here
	 * was the one that we faulted on.
	 *
	 * 1. T1   : mprotect_key(foo, PAGE_SIZE, pkey=4);
	 * 2. T1   : set AMR to deny access to pkey=4, touches page
	 * 3. T1   : faults...
	 * 4.    T2: mprotect_key(foo, PAGE_SIZE, pkey=5);
	 * 5. T1   : enters fault handler, takes mmap_lock, etc...
	 * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
	 *	     faulted on a pte with its pkey=4.
	 */
	pkey = vma_pkey(vma);

	mmap_read_unlock(mm);

	/*
	 * If we are in kernel mode, bail out with a SEGV, this will
	 * be caught by the assembly which will restore the non-volatile
	 * registers before calling bad_page_fault()
	 */
	if (!user_mode(regs))
		return SIGSEGV;

	_exception_pkey(regs, address, pkey);

	return 0;
}
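
/*
 * Illustrative userspace sketch (not kernel code): what the SEGV path set up
 * by bad_access_pkey() looks like from the other side. Assumes a CPU, kernel
 * and libc with memory-protection-key support (pkey_alloc() etc., glibc
 * wrappers); the kernel delivers SIGSEGV with si_code == SEGV_PKUERR.
 * Guarded with "#if 0" so it stays out of this file's build.
 */
#if 0
#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static void segv_handler(int sig, siginfo_t *si, void *ctx)
{
	/* For a pkey fault the kernel sets si_code to SEGV_PKUERR. */
	fprintf(stderr, "SIGSEGV at %p, si_code=%d (SEGV_PKUERR=%d)\n",
		si->si_addr, si->si_code, SEGV_PKUERR);
	_exit(0);
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = segv_handler,
				.sa_flags = SA_SIGINFO };
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	int pkey;

	sigaction(SIGSEGV, &sa, NULL);
	pkey = pkey_alloc(0, PKEY_DISABLE_ACCESS);	/* deny all access */
	pkey_mprotect(p, 4096, PROT_READ | PROT_WRITE, pkey);
	*p = 1;		/* pte is writable, but the pkey forbids it: faults */
	return 1;	/* not reached */
}
#endif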

static noinline int bad_access(struct pt_regs *regs, unsigned long address)
{
	return __bad_area(regs, address, SEGV_ACCERR);
}

static int do_sigbus(struct pt_regs *regs, unsigned long address,
		     vm_fault_t fault)
{
	if (!user_mode(regs))
		return SIGBUS;

	current->thread.trap_nr = BUS_ADRERR;
#ifdef CONFIG_MEMORY_FAILURE
	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
		unsigned int lsb = 0; /* shut up gcc */

		pr_err("MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
			current->comm, current->pid, address);

		if (fault & VM_FAULT_HWPOISON_LARGE)
			lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
		if (fault & VM_FAULT_HWPOISON)
			lsb = PAGE_SHIFT;

		force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb);
		return 0;
	}

#endif
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
	return 0;
}
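
/*
 * Illustrative userspace sketch (not kernel code): the easiest way to observe
 * the BUS_ADRERR signal sent by do_sigbus() is to touch a file-backed mapping
 * beyond end-of-file; the kernel delivers SIGBUS with si_code == BUS_ADRERR
 * and si_addr pointing at the faulting address. Guarded with "#if 0" so it
 * stays out of this file's build.
 */
#if 0
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static void bus_handler(int sig, siginfo_t *si, void *ctx)
{
	fprintf(stderr, "SIGBUS at %p, si_code=%d (BUS_ADRERR=%d)\n",
		si->si_addr, si->si_code, BUS_ADRERR);
	_exit(0);
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = bus_handler,
				.sa_flags = SA_SIGINFO };
	int fd = open("/tmp/sigbus-demo", O_RDWR | O_CREAT | O_TRUNC, 0600);
	char *p;

	sigaction(SIGBUS, &sa, NULL);
	ftruncate(fd, 4096);			/* one page of file backing */
	p = mmap(NULL, 2 * 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	p[4096] = 1;	/* second page has no backing: SIGBUS/BUS_ADRERR */
	return 1;	/* not reached */
}
#endif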

static int mm_fault_error(struct pt_regs *regs, unsigned long addr,
				vm_fault_t fault)
{
	/*
	 * Kernel page fault interrupted by SIGKILL. We have no reason to
	 * continue processing.
	 */
	if (fatal_signal_pending(current) && !user_mode(regs))
		return SIGKILL;

	/* Out of memory */
	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, or some other thing happened to us that
		 * made us unable to handle the page fault gracefully.
		 */
		if (!user_mode(regs))
			return SIGSEGV;
		pagefault_out_of_memory();
	} else {
		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
			     VM_FAULT_HWPOISON_LARGE))
			return do_sigbus(regs, addr, fault);
		else if (fault & VM_FAULT_SIGSEGV)
			return bad_area_nosemaphore(regs, addr);
		else
			BUG();
	}
	return 0;
}

/* Is this a bad kernel fault ? */
static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code,
			     unsigned long address, bool is_write)
{
	int is_exec = TRAP(regs) == INTERRUPT_INST_STORAGE;

	if (is_exec) {
		pr_crit_ratelimited("kernel tried to execute %s page (%lx) - exploit attempt? (uid: %d)\n",
				    address >= TASK_SIZE ? "exec-protected" : "user",
				    address,
				    from_kuid(&init_user_ns, current_uid()));

		// Kernel exec fault is always bad
		return true;
	}

	// Kernel fault on kernel address is bad
	if (address >= TASK_SIZE)
		return true;

	// Read/write fault blocked by KUAP is bad, it can never succeed.
	if (bad_kuap_fault(regs, address, is_write)) {
		pr_crit_ratelimited("Kernel attempted to %s user page (%lx) - exploit attempt? (uid: %d)\n",
				    is_write ? "write" : "read", address,
				    from_kuid(&init_user_ns, current_uid()));

		// Fault on user outside of certain regions (eg. copy_tofrom_user()) is bad
		if (!search_exception_tables(regs->nip))
			return true;

		// Read/write fault in a valid region (the exception table search passed
		// above), but blocked by KUAP is bad, it can never succeed.
		return WARN(true, "Bug: %s fault blocked by KUAP!", is_write ? "Write" : "Read");
	}

	// What's left? Kernel fault on user and allowed by KUAP in the faulting context.
	return false;
}

static bool access_pkey_error(bool is_write, bool is_exec, bool is_pkey,
			      struct vm_area_struct *vma)
{
	/*
	 * Make sure to check the VMA so that we do not perform
	 * faults just to hit a pkey fault as soon as we fill in a
	 * page. Only called for current mm, hence foreign == 0
	 */
	if (!arch_vma_access_permitted(vma, is_write, is_exec, 0))
		return true;

	return false;
}

static bool access_error(bool is_write, bool is_exec, struct vm_area_struct *vma)
{
	/*
	 * Allow execution from readable areas if the MMU does not
	 * provide separate controls over reading and executing.
	 *
	 * Note: That code used not to be enabled for 4xx/BookE.
	 * It is now, as I/D cache coherency for these is done at
	 * set_pte_at() time and I see no reason why the test
	 * below wouldn't be valid on those processors. This -may-
	 * break programs compiled with a really old ABI though.
	 */
	if (is_exec) {
		return !(vma->vm_flags & VM_EXEC) &&
			(cpu_has_feature(CPU_FTR_NOEXECUTE) ||
			 !(vma->vm_flags & (VM_READ | VM_WRITE)));
	}

	if (is_write) {
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return true;
		return false;
	}

	if (unlikely(!vma_is_accessible(vma)))
		return true;
	/*
	 * We should ideally do the vma pkey access check here. But in the
	 * fault path, handle_mm_fault() also does the same check. To avoid
	 * these multiple checks, we skip it here and handle access error due
	 * to pkeys later.
	 */
	return false;
}
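
/*
 * Illustrative userspace sketch (not kernel code): the is_write branch of
 * access_error() is what distinguishes "mapped but wrong permissions"
 * (SIGSEGV with si_code SEGV_ACCERR, via bad_access()) from "not mapped at
 * all" (SEGV_MAPERR, via bad_area()). Writing to a read-only anonymous
 * mapping demonstrates the former. Guarded with "#if 0" so it stays out of
 * this file's build.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static void segv_handler(int sig, siginfo_t *si, void *ctx)
{
	fprintf(stderr, "SIGSEGV at %p: %s\n", si->si_addr,
		si->si_code == SEGV_ACCERR ? "SEGV_ACCERR (bad permissions)"
					   : "SEGV_MAPERR (not mapped)");
	_exit(0);
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = segv_handler,
				.sa_flags = SA_SIGINFO };
	char *p = mmap(NULL, 4096, PROT_READ,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	sigaction(SIGSEGV, &sa, NULL);
	*p = 1;		/* vma exists but lacks VM_WRITE: SEGV_ACCERR */
	return 1;	/* not reached */
}
#endif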

#ifdef CONFIG_PPC_SMLPAR
static inline void cmo_account_page_fault(void)
{
	if (firmware_has_feature(FW_FEATURE_CMO)) {
		u32 page_ins;

		preempt_disable();
		page_ins = be32_to_cpu(get_lppaca()->page_ins);
		page_ins += 1 << PAGE_FACTOR;
		get_lppaca()->page_ins = cpu_to_be32(page_ins);
		preempt_enable();
	}
}
#else
static inline void cmo_account_page_fault(void) { }
#endif /* CONFIG_PPC_SMLPAR */

static void sanity_check_fault(bool is_write, bool is_user,
			       unsigned long error_code, unsigned long address)
{
	/*
	 * Userspace trying to access kernel address, we get PROTFAULT for that.
	 */
	if (is_user && address >= TASK_SIZE) {
		if ((long)address == -1)
			return;

		pr_crit_ratelimited("%s[%d]: User access of kernel address (%lx) - exploit attempt? (uid: %d)\n",
				   current->comm, current->pid, address,
				   from_kuid(&init_user_ns, current_uid()));
		return;
	}

	if (!IS_ENABLED(CONFIG_PPC_BOOK3S))
		return;

	/*
	 * For hash translation mode, we should never get a
	 * PROTFAULT. Any update to the pte that reduces access will result in
	 * us removing the hash page table entry, thus resulting in a
	 * DSISR_NOHPTE fault instead of DSISR_PROTFAULT.
	 *
	 * A pte update to relax the access will not result in a hash page table
	 * entry invalidate and hence can result in DSISR_PROTFAULT.
	 * ptep_set_access_flags() doesn't do a hpte flush. This is why we have
	 * the special !is_write in the below conditional.
	 *
	 * For platforms that don't support a coherent icache but do support a
	 * per-page noexec bit, we set things up so that the D/I cache sync is
	 * done via a fault. But that is handled by the low level hash fault
	 * code (hash_page_do_lazy_icache()) and we should not reach here in
	 * such a case.
	 *
	 * For a wrong access that can result in PROTFAULT, the vma->vm_flags
	 * check in access_error() should handle it, and hence we should fall
	 * through to the bad_area handling correctly.
	 *
	 * For embedded CPUs with per-page exec support but no coherent icache
	 * we do get PROTFAULT, and we handle the D/I cache sync in set_pte_at()
	 * while taking the noexec/prot fault. Hence the WARN_ON is conditional
	 * on the server MMU.
	 *
	 * For radix, we can get a prot fault in the autonuma case, because the
	 * radix page table will have the pages marked noaccess for user.
	 */
	if (radix_enabled() || is_write)
		return;

	WARN_ON_ONCE(error_code & DSISR_PROTFAULT);
}

/*
 * Define the correct "is_write" bit in error_code based
 * on the processor family
 */
#if (defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
#define page_fault_is_write(__err)	((__err) & ESR_DST)
#else
#define page_fault_is_write(__err)	((__err) & DSISR_ISSTORE)
#endif

#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
#define page_fault_is_bad(__err)	(0)
#elif defined(CONFIG_PPC_8xx)
#define page_fault_is_bad(__err)	((__err) & DSISR_NOEXEC_OR_G)
#elif defined(CONFIG_PPC64)
#define page_fault_is_bad(__err)	((__err) & DSISR_BAD_FAULT_64S)
#else
#define page_fault_is_bad(__err)	((__err) & DSISR_BAD_FAULT_32S)
#endif

/*
 * For 600- and 800-family processors, the error_code parameter is DSISR
 * for a data fault, SRR1 for an instruction fault.
 * For 400-family processors the error_code parameter is ESR for a data fault,
 * 0 for an instruction fault.
 * For 64-bit processors, the error_code parameter is DSISR for a data access
 * fault, SRR1 & 0x08000000 for an instruction access fault.
 *
 * The return value is 0 if the fault was handled, or the signal
 * number if this is a kernel fault that can't be handled here.
 */
static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	struct vm_area_struct * vma;
	struct mm_struct *mm = current->mm;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	int is_exec = TRAP(regs) == INTERRUPT_INST_STORAGE;
	int is_user = user_mode(regs);
	int is_write = page_fault_is_write(error_code);
	vm_fault_t fault, major = 0;
	bool kprobe_fault = kprobe_page_fault(regs, 11);
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
	struct vm_area_struct pvma;
	unsigned long seq;
#endif

	if (unlikely(debugger_fault_handler(regs) || kprobe_fault))
		return 0;

	if (unlikely(page_fault_is_bad(error_code))) {
		if (is_user) {
			_exception(SIGBUS, regs, BUS_OBJERR, address);
			return 0;
		}
		return SIGBUS;
	}

	/* Additional sanity check(s) */
	sanity_check_fault(is_write, is_user, error_code, address);

	/*
	 * The kernel should never take an execute fault nor should it
	 * take a page fault to a kernel address or a page fault to a user
	 * address outside of dedicated places
	 */
	if (unlikely(!is_user && bad_kernel_fault(regs, error_code, address, is_write))) {
		if (kfence_handle_page_fault(address, is_write, regs))
			return 0;

		return SIGSEGV;
	}

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in a region with pagefaults disabled then we must not take the fault
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		if (is_user)
			printk_ratelimited(KERN_ERR "Page fault in user mode"
					   " with faulthandler_disabled()=%d"
					   " mm=%p\n",
					   faulthandler_disabled(), mm);
		return bad_area_nosemaphore(regs, address);
	}

	interrupt_cond_local_irq_enable(regs);

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * We want to do this outside mmap_lock, because reading code around nip
	 * can result in fault, which will cause a deadlock when called with
	 * mmap_lock held
	 */
	if (is_user)
		flags |= FAULT_FLAG_USER;
	if (is_write)
		flags |= FAULT_FLAG_WRITE;
	if (is_exec)
		flags |= FAULT_FLAG_INSTRUCTION;

#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
	/*
	 * No need to try speculative faults for kernel or
	 * single threaded user space.
	 */
	if (!(flags & FAULT_FLAG_USER) || atomic_read(&mm->mm_users) == 1)
		goto no_spf;

	count_vm_event(SPF_ATTEMPT);
	seq = mmap_seq_read_start(mm);
	if (seq & 1) {
		count_vm_spf_event(SPF_ABORT_ODD);
		goto spf_abort;
	}
	vma = get_vma(mm, address);
	if (!vma) {
		count_vm_spf_event(SPF_ABORT_UNMAPPED);
		goto spf_abort;
	}
	if (!vma_can_speculate(vma, flags)) {
		put_vma(vma);
		count_vm_spf_event(SPF_ABORT_NO_SPECULATE);
		goto spf_abort;
	}
	pvma = *vma;
	if (!mmap_seq_read_check(mm, seq, SPF_ABORT_VMA_COPY)) {
		put_vma(vma);
		goto spf_abort;
	}
#ifdef CONFIG_PPC_MEM_KEYS
	if (unlikely(access_pkey_error(is_write, is_exec,
				       (error_code & DSISR_KEYFAULT), &pvma))) {
		put_vma(vma);
		count_vm_spf_event(SPF_ABORT_ACCESS_ERROR);
		goto spf_abort;
	}
#endif /* CONFIG_PPC_MEM_KEYS */
	if (unlikely(access_error(is_write, is_exec, &pvma))) {
		put_vma(vma);
		count_vm_spf_event(SPF_ABORT_ACCESS_ERROR);
		goto spf_abort;
	}
	fault = do_handle_mm_fault(&pvma, address,
			flags | FAULT_FLAG_SPECULATIVE, seq, regs);
	put_vma(vma);
	major |= fault & VM_FAULT_MAJOR;

	if (fault_signal_pending(fault, regs))
		return user_mode(regs) ? 0 : SIGBUS;
	if (!(fault & VM_FAULT_RETRY))
		goto done;

spf_abort:
	count_vm_event(SPF_ABORT);
no_spf:

#endif	/* CONFIG_SPECULATIVE_PAGE_FAULT */

	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_lock
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (unlikely(!mmap_read_trylock(mm))) {
		if (!is_user && !search_exception_tables(regs->nip))
			return bad_area_nosemaphore(regs, address);

retry:
		mmap_read_lock(mm);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case we'll have missed the might_sleep() from
		 * down_read():
		 */
		might_sleep();
	}

	vma = find_vma(mm, address);
	if (unlikely(!vma))
		return bad_area(regs, address);

	if (unlikely(vma->vm_start > address)) {
		if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
			return bad_area(regs, address);

		if (unlikely(expand_stack(vma, address)))
			return bad_area(regs, address);
	}

	if (unlikely(access_pkey_error(is_write, is_exec,
				       (error_code & DSISR_KEYFAULT), vma)))
		return bad_access_pkey(regs, address, vma);

	if (unlikely(access_error(is_write, is_exec, vma)))
		return bad_access(regs, address);

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

	major |= fault & VM_FAULT_MAJOR;

	if (fault_signal_pending(fault, regs))
		return user_mode(regs) ? 0 : SIGBUS;

	/*
	 * Handle the retry right now, the mmap_lock has been released in that
	 * case.
	 */
	if (unlikely(fault & VM_FAULT_RETRY)) {
		if (flags & FAULT_FLAG_ALLOW_RETRY) {
			flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}

	mmap_read_unlock(current->mm);
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
done:
#endif

	if (unlikely(fault & VM_FAULT_ERROR))
		return mm_fault_error(regs, address, fault);

	/*
	 * Major/minor page fault accounting.
	 */
	if (major)
		cmo_account_page_fault();

	return 0;
}
NOKPROBE_SYMBOL(___do_page_fault);
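
/*
 * Illustrative userspace sketch (not kernel code): the perf_sw_event() call
 * in ___do_page_fault() is what feeds the PERF_COUNT_SW_PAGE_FAULTS software
 * counter. A minimal self-observation sketch using the raw perf_event_open(2)
 * syscall (there is no glibc wrapper), assuming perf events are enabled.
 * Guarded with "#if 0" so it stays out of this file's build.
 */
#if 0
#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;
	char *p;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_PAGE_FAULTS;
	attr.disabled = 1;

	/* Count page faults of this task on any CPU. */
	fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	p = mmap(NULL, 16 * 4096, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	for (int i = 0; i < 16; i++)
		p[i * 4096] = 1;	/* one minor fault per touched page */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	read(fd, &count, sizeof(count));
	printf("page faults: %lld\n", count);
	return 0;
}
#endif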

static __always_inline void __do_page_fault(struct pt_regs *regs)
{
	long err;

	err = ___do_page_fault(regs, regs->dar, regs->dsisr);
	if (unlikely(err))
		bad_page_fault(regs, err);
}

DEFINE_INTERRUPT_HANDLER(do_page_fault)
{
	__do_page_fault(regs);
}

#ifdef CONFIG_PPC_BOOK3S_64
/* Same as do_page_fault but interrupt entry has already run in do_hash_fault */
void hash__do_page_fault(struct pt_regs *regs)
{
	__do_page_fault(regs);
}
NOKPROBE_SYMBOL(hash__do_page_fault);
#endif

/*
 * bad_page_fault is called when we have a bad access from the kernel.
 * It is called from the DSI and ISI handlers in head.S and from some
 * of the procedures in traps.c.
 */
static void __bad_page_fault(struct pt_regs *regs, int sig)
{
	int is_write = page_fault_is_write(regs->dsisr);
	const char *msg;

	/* kernel has accessed a bad area */

	if (regs->dar < PAGE_SIZE)
		msg = "Kernel NULL pointer dereference";
	else
		msg = "Unable to handle kernel data access";

	switch (TRAP(regs)) {
	case INTERRUPT_DATA_STORAGE:
	case INTERRUPT_H_DATA_STORAGE:
		pr_alert("BUG: %s on %s at 0x%08lx\n", msg,
			 is_write ? "write" : "read", regs->dar);
		break;
	case INTERRUPT_DATA_SEGMENT:
		pr_alert("BUG: %s at 0x%08lx\n", msg, regs->dar);
		break;
	case INTERRUPT_INST_STORAGE:
	case INTERRUPT_INST_SEGMENT:
		pr_alert("BUG: Unable to handle kernel instruction fetch%s",
			 regs->nip < PAGE_SIZE ? " (NULL pointer?)\n" : "\n");
		break;
	case INTERRUPT_ALIGNMENT:
		pr_alert("BUG: Unable to handle kernel unaligned access at 0x%08lx\n",
			 regs->dar);
		break;
	default:
		pr_alert("BUG: Unable to handle unknown paging fault at 0x%08lx\n",
			 regs->dar);
		break;
	}
	printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
		regs->nip);

	if (task_stack_end_corrupted(current))
		printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");

	die("Kernel access of bad area", regs, sig);
}

void bad_page_fault(struct pt_regs *regs, int sig)
{
	const struct exception_table_entry *entry;

	/* Are we prepared to handle this fault?  */
	entry = search_exception_tables(instruction_pointer(regs));
	if (entry)
		instruction_pointer_set(regs, extable_fixup(entry));
	else
		__bad_page_fault(regs, sig);
}
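
/*
 * Illustrative sketch (not part of the fault path): the exception-table
 * fixup in bad_page_fault() is what lets uaccess helpers fail gracefully
 * instead of oopsing. When copy_from_user() faults on a bad user pointer,
 * the fault lands in a region listed in the exception table, nip is
 * redirected to the fixup, and the helper simply reports uncopied bytes.
 * Guarded with "#if 0" so it stays out of this file's build.
 */
#if 0
#include <linux/errno.h>
#include <linux/uaccess.h>

static long read_user_word(const void __user *uptr, unsigned long *out)
{
	/*
	 * If uptr is unmapped, the resulting kernel page fault is fixed up
	 * via the exception table and copy_from_user() returns the number
	 * of bytes it could not copy, rather than crashing the kernel.
	 */
	if (copy_from_user(out, uptr, sizeof(*out)))
		return -EFAULT;
	return 0;
}
#endif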

#ifdef CONFIG_PPC_BOOK3S_64
DEFINE_INTERRUPT_HANDLER(do_bad_page_fault_segv)
{
	bad_page_fault(regs, SIGSEGV);
}
#endif