/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include "../kernel/entry.h"

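/*
 * The low-level handlers pass the translation-exception identification
 * (TEID) in regs->int_parm_long. __FAIL_ADDR_MASK extracts the failing
 * address from it; the low bits carry status such as the address-space
 * indication. __SUBCODE_MASK and __PF_RES_FIELD are used by the pfault
 * code at the end of this file.
 */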
#ifndef CONFIG_64BIT
#define __FAIL_ADDR_MASK 0x7ffff000
#define __SUBCODE_MASK 0x0200
#define __PF_RES_FIELD 0ULL
#else /* CONFIG_64BIT */
#define __FAIL_ADDR_MASK -4096L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL
#endif /* CONFIG_64BIT */

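/*
 * Fault results private to this file. They start above the generic
 * VM_FAULT_* flags from <linux/mm.h> so the two ranges cannot collide.
 */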
#define VM_FAULT_BADCONTEXT	0x010000
#define VM_FAULT_BADMAP		0x020000
#define VM_FAULT_BADACCESS	0x040000
#define VM_FAULT_SIGNAL		0x080000
#define VM_FAULT_PFAULT		0x100000

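/*
 * Mask for the fetch/store-indication bits of the TEID. It is only set
 * (by fault_init() below, 64-bit only) if the machine provides the
 * corresponding facility; do_exception() then recognizes store accesses
 * by (trans_exc_code & store_indication) == 0x400 and sets
 * FAULT_FLAG_WRITE.
 */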
static unsigned long store_indication __read_mostly;

#ifdef CONFIG_64BIT
static int __init fault_init(void)
{
	if (test_facility(75))
		store_indication = 0xc00;
	return 0;
}
early_initcall(fault_init);
#endif

static inline int notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}
	return ret;
}


/*
 * Unlock any spinlocks which will prevent us from getting the
 * message out.
 */
void bust_spinlocks(int yes)
{
	if (yes) {
		oops_in_progress = 1;
	} else {
		int loglevel_save = console_loglevel;
		console_unblank();
		oops_in_progress = 0;
		/*
		 * OK, the message is on the console.  Now we call printk()
		 * without oops_in_progress set so that printk will give klogd
		 * a poke.  Hold onto your hats...
		 */
		console_loglevel = 15;
		printk(" ");
		console_loglevel = loglevel_save;
	}
}

/*
 * Returns the address space associated with the fault.
 * Returns 0 for kernel space and 1 for user space.
 */
static inline int user_space_fault(struct pt_regs *regs)
{
	unsigned long trans_exc_code;

	/*
	 * The lowest two bits of the translation exception
	 * identification indicate which paging table was used.
	 */
	trans_exc_code = regs->int_parm_long & 3;
	if (trans_exc_code == 3) /* home space -> kernel */
		return 0;
	if (user_mode(regs))
		return 1;
	if (trans_exc_code == 2) /* secondary space -> set_fs */
		return current->thread.mm_segment.ar4;
	if (current->flags & PF_VCPU)
		return 1;
	return 0;
}

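/* Returns non-zero if the kernel address cannot safely be read. */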
static int bad_address(void *p)
{
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}

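/*
 * Walk the page table that the address-space-control element (asce)
 * points to and print one entry per translation level for the given
 * address, stopping at invalid or large entries.
 */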
#ifdef CONFIG_64BIT
static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long *table = __va(asce & PAGE_MASK);

	pr_alert("AS:%016lx ", asce);
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table = table + ((address >> 53) & 0x7ff);
		if (bad_address(table))
			goto bad;
		pr_cont("R1:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION2:
		table = table + ((address >> 42) & 0x7ff);
		if (bad_address(table))
			goto bad;
		pr_cont("R2:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION3:
		table = table + ((address >> 31) & 0x7ff);
		if (bad_address(table))
			goto bad;
		pr_cont("R3:%016lx ", *table);
		if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_SEGMENT:
		table = table + ((address >> 20) & 0x7ff);
		if (bad_address(table))
			goto bad;
		pr_cont("S:%016lx ", *table);
		if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
	}
	table = table + ((address >> 12) & 0xff);
	if (bad_address(table))
		goto bad;
	pr_cont("P:%016lx ", *table);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}

#else /* CONFIG_64BIT */

static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long *table = __va(asce & PAGE_MASK);

	pr_alert("AS:%08lx ", asce);
	table = table + ((address >> 20) & 0x7ff);
	if (bad_address(table))
		goto bad;
	pr_cont("S:%08lx ", *table);
	if (*table & _SEGMENT_ENTRY_INVALID)
		goto out;
	table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
	table = table + ((address >> 12) & 0xff);
	if (bad_address(table))
		goto bad;
	pr_cont("P:%08lx ", *table);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}

#endif /* CONFIG_64BIT */

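/*
 * Print which address space the fault occurred in, which ASCE was
 * in use, and the page table walk for the failing address.
 */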
static void dump_fault_info(struct pt_regs *regs)
{
	unsigned long asce;

	pr_alert("Fault in ");
	switch (regs->int_parm_long & 3) {
	case 3:
		pr_cont("home space ");
		break;
	case 2:
		pr_cont("secondary space ");
		break;
	case 1:
		pr_cont("access register ");
		break;
	case 0:
		pr_cont("primary space ");
		break;
	}
	pr_cont("mode while using ");
	if (!user_space_fault(regs)) {
		asce = S390_lowcore.kernel_asce;
		pr_cont("kernel ");
	}
#ifdef CONFIG_PGSTE
	else if ((current->flags & PF_VCPU) && S390_lowcore.gmap) {
		struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
		asce = gmap->asce;
		pr_cont("gmap ");
	}
#endif
	else {
		asce = S390_lowcore.user_asce;
		pr_cont("user ");
	}
	pr_cont("ASCE.\n");
	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
}

static inline void report_user_fault(struct pt_regs *regs, long signr)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk(KERN_ALERT "User process fault: interruption code 0x%X ",
	       regs->int_code);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
	printk(KERN_CONT "\n");
	printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n",
	       regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
	dump_fault_info(regs);
	show_regs(regs);
}

/*
 * Send SIGSEGV to task.  This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
	struct siginfo si;

	report_user_fault(regs, SIGSEGV);
	si.si_signo = SIGSEGV;
	si.si_code = si_code;
	si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
	force_sig_info(SIGSEGV, &si, current);
}

static noinline void do_no_context(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;
	unsigned long address;

	/* Are we prepared to handle this kernel fault?  */
	fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
	if (fixup) {
		regs->psw.addr = extable_fixup(fixup) | PSW_ADDR_AMODE;
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	address = regs->int_parm_long & __FAIL_ADDR_MASK;
	if (!user_space_fault(regs))
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " in virtual kernel address space\n");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " in virtual user address space\n");
	printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n",
	       regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
	dump_fault_info(regs);
	die(regs, "Oops");
	do_exit(SIGKILL);
}

static noinline void do_low_address(struct pt_regs *regs)
{
	/* Low-address protection hit in kernel mode means
	   NULL pointer write access in kernel mode.  */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Low-address protection hit in user mode 'cannot happen'. */
		die(regs, "Low-address protection");
		do_exit(SIGKILL);
	}

	do_no_context(regs);
}

static noinline void do_sigbus(struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	struct siginfo si;

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	si.si_signo = SIGBUS;
	si.si_errno = 0;
	si.si_code = BUS_ADRERR;
	si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
	force_sig_info(SIGBUS, &si, tsk);
}

static noinline void do_fault_error(struct pt_regs *regs, int fault)
{
	int si_code;

	switch (fault) {
	case VM_FAULT_BADACCESS:
	case VM_FAULT_BADMAP:
		/* Bad memory access. Check if it is kernel or user space. */
		if (user_mode(regs)) {
			/* User mode accesses just cause a SIGSEGV */
			si_code = (fault == VM_FAULT_BADMAP) ?
				SEGV_MAPERR : SEGV_ACCERR;
			do_sigsegv(regs, si_code);
			return;
		}
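		/* fallthrough: a bad access in kernel mode is handled
		 * like a bad context */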
	case VM_FAULT_BADCONTEXT:
	case VM_FAULT_PFAULT:
		do_no_context(regs);
		break;
	case VM_FAULT_SIGNAL:
		if (!user_mode(regs))
			do_no_context(regs);
		break;
	default: /* fault & VM_FAULT_ERROR */
		if (fault & VM_FAULT_OOM) {
			if (!user_mode(regs))
				do_no_context(regs);
			else
				pagefault_out_of_memory();
		} else if (fault & VM_FAULT_SIGSEGV) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigsegv(regs, SEGV_MAPERR);
		} else if (fault & VM_FAULT_SIGBUS) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigbus(regs);
		} else
			BUG();
		break;
	}
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static inline int do_exception(struct pt_regs *regs, int access)
{
#ifdef CONFIG_PGSTE
	struct gmap *gmap;
#endif
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long trans_exc_code;
	unsigned long address;
	unsigned int flags;
	int fault;

	tsk = current;
	/*
	 * The instruction that caused the program check has
	 * been nullified. Don't signal single step via SIGTRAP.
	 */
	clear_pt_regs_flag(regs, PIF_PER_TRAP);

	if (notify_page_fault(regs))
		return 0;

	mm = tsk->mm;
	trans_exc_code = regs->int_parm_long;

	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a
	 * user context.
	 */
	fault = VM_FAULT_BADCONTEXT;
	if (unlikely(!user_space_fault(regs) || in_atomic() || !mm))
		goto out;

	address = trans_exc_code & __FAIL_ADDR_MASK;
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
		flags |= FAULT_FLAG_WRITE;
	down_read(&mm->mmap_sem);

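	/*
	 * If a KVM guest caused the fault (PF_VCPU), the faulting address
	 * is a guest address and has to be translated to the host address
	 * space via the guest mapping (gmap) first.
	 */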
#ifdef CONFIG_PGSTE
	gmap = (current->flags & PF_VCPU) ?
		(struct gmap *) S390_lowcore.gmap : NULL;
	if (gmap) {
		current->thread.gmap_addr = address;
		address = __gmap_translate(gmap, address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (gmap->pfault_enabled)
			flags |= FAULT_FLAG_RETRY_NOWAIT;
	}
#endif

retry:
	fault = VM_FAULT_BADMAP;
	vma = find_vma(mm, address);
	if (!vma)
		goto out_up;

	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_up;
		if (expand_stack(vma, address))
			goto out_up;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
	fault = VM_FAULT_BADACCESS;
	if (unlikely(!(vma->vm_flags & access)))
		goto out_up;

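	/* Round the fault address down to the start of the huge page. */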
	if (is_vm_hugetlb_page(vma))
		address &= HPAGE_MASK;
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);
	/* No reason to continue if interrupted by SIGKILL. */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
		fault = VM_FAULT_SIGNAL;
		goto out;
	}
	if (unlikely(fault & VM_FAULT_ERROR))
		goto out_up;

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
#ifdef CONFIG_PGSTE
			if (gmap && (flags & FAULT_FLAG_RETRY_NOWAIT)) {
				/* FAULT_FLAG_RETRY_NOWAIT has been set,
				 * mmap_sem has not been released */
				current->thread.gmap_pfault = 1;
				fault = VM_FAULT_PFAULT;
				goto out_up;
			}
#endif
			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation. */
			flags &= ~(FAULT_FLAG_ALLOW_RETRY |
				   FAULT_FLAG_RETRY_NOWAIT);
			flags |= FAULT_FLAG_TRIED;
			down_read(&mm->mmap_sem);
			goto retry;
		}
	}
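	/*
	 * The host fault has been resolved; link the resolved host
	 * address back into the guest mapping (gmap).
	 */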
#ifdef CONFIG_PGSTE
	if (gmap) {
		address = __gmap_link(gmap, current->thread.gmap_addr,
				      address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			goto out_up;
		}
	}
#endif
	fault = 0;
out_up:
	up_read(&mm->mmap_sem);
out:
	return fault;
}

void __kprobes do_protection_exception(struct pt_regs *regs)
{
	unsigned long trans_exc_code;
	int fault;

	trans_exc_code = regs->int_parm_long;
	/*
	 * Protection exceptions are suppressing, decrement psw address.
	 * The exception to this rule are aborted transactions, for these
	 * the PSW already points to the correct location.
	 */
	if (!(regs->int_code & 0x200))
		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
	/*
	 * Check for low-address protection.  This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!(trans_exc_code & 4))) {
		do_low_address(regs);
		return;
	}
	fault = do_exception(regs, VM_WRITE);
	if (unlikely(fault))
		do_fault_error(regs, fault);
}

void __kprobes do_dat_exception(struct pt_regs *regs)
{
	int access, fault;

	access = VM_READ | VM_EXEC | VM_WRITE;
	fault = do_exception(regs, access);
	if (unlikely(fault))
		do_fault_error(regs, fault);
}

#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page fault routines. With pseudo page faults the
 * hypervisor can tell the guest that a page is currently not backed;
 * instead of busy-waiting for the page, the guest puts the faulting
 * task to sleep and schedules something else until the completion
 * interrupt arrives.
 */
static int pfault_disable;

static int __init nopfault(char *str)
{
	pfault_disable = 1;
	return 1;
}

__setup("nopfault", nopfault);

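/*
 * Parameter block for diagnose 0x258: function code 0 (pfault_init)
 * enables the pseudo-page-fault handshake, function code 1
 * (pfault_fini) disables it again. The token delivered with the pfault
 * interrupts is the pid of the affected task, taken from the lowcore
 * field named by refgaddr; pfault_interrupt() uses it to find the task
 * to wake up.
 */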
struct pfault_refbk {
	u16 refdiagc;
	u16 reffcode;
	u16 refdwlen;
	u16 refversn;
	u64 refgaddr;
	u64 refselmk;
	u64 refcmpmk;
	u64 reserved;
} __attribute__ ((packed, aligned(8)));

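/*
 * Enable the pseudo-page-fault handshake via diag 0x258, function
 * code 0. If the diagnose is not available, the exception table entry
 * resumes execution at label 1 and rc is set to 8.
 */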
int pfault_init(void)
{
	struct pfault_refbk refbk = {
		.refdiagc = 0x258,
		.reffcode = 0,
		.refdwlen = 5,
		.refversn = 2,
		.refgaddr = __LC_CURRENT_PID,
		.refselmk = 1ULL << 48,
		.refcmpmk = 1ULL << 48,
		.reserved = __PF_RES_FIELD };
	int rc;

	if (pfault_disable)
		return -1;
	asm volatile(
		"	diag	%1,%0,0x258\n"
		"0:	j	2f\n"
		"1:	la	%0,8\n"
		"2:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
	return rc;
}

void pfault_fini(void)
{
	struct pfault_refbk refbk = {
		.refdiagc = 0x258,
		.reffcode = 1,
		.refdwlen = 5,
		.refversn = 2,
	};

	if (pfault_disable)
		return;
	asm volatile(
		"	diag	%0,0,0x258\n"
		"0:\n"
		EX_TABLE(0b,0b)
		: : "a" (&refbk), "m" (refbk) : "cc");
}

static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);

static void pfault_interrupt(struct ext_code ext_code,
			     unsigned int param32, unsigned long param64)
{
	struct task_struct *tsk;
	__u16 subcode;
	pid_t pid;

	/*
	 * Get the external interruption subcode & pfault
	 * initial/completion signal bit. VM stores this
	 * in the 'cpu address' field associated with the
	 * external interrupt.
	 */
	subcode = ext_code.subcode;
	if ((subcode & 0xff00) != __SUBCODE_MASK)
		return;
	inc_irq_stat(IRQEXT_PFL);
	/* Get the token (= pid of the affected task). */
	pid = sizeof(void *) == 4 ? param32 : param64;
	rcu_read_lock();
	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return;
	spin_lock(&pfault_lock);
	if (subcode & 0x0080) {
		/* signal bit is set -> a page has been swapped in by VM */
		if (tsk->thread.pfault_wait == 1) {
			/* Initial interrupt was faster than the completion
			 * interrupt. pfault_wait is valid. Set pfault_wait
			 * back to zero and wake up the process. This can
			 * safely be done because the task is still sleeping
			 * and can't produce new pfaults. */
			tsk->thread.pfault_wait = 0;
			list_del(&tsk->thread.list);
			wake_up_process(tsk);
			put_task_struct(tsk);
		} else {
			/* Completion interrupt was faster than initial
			 * interrupt. Set pfault_wait to -1 so the initial
			 * interrupt doesn't put the task to sleep.
			 * If the task is not running, ignore the completion
			 * interrupt since it must be a leftover of a PFAULT
			 * CANCEL operation which didn't remove all pending
			 * completion interrupts. */
			if (tsk->state == TASK_RUNNING)
				tsk->thread.pfault_wait = -1;
		}
	} else {
		/* signal bit not set -> a real page is missing. */
		if (WARN_ON_ONCE(tsk != current))
			goto out;
		if (tsk->thread.pfault_wait == 1) {
			/* Already on the list with a reference: put to sleep */
			__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
		} else if (tsk->thread.pfault_wait == -1) {
			/* Completion interrupt was faster than the initial
			 * interrupt (pfault_wait == -1). Set pfault_wait
			 * back to zero and exit. */
			tsk->thread.pfault_wait = 0;
		} else {
			/* Initial interrupt arrived before completion
			 * interrupt. Let the task sleep.
			 * An extra task reference is needed since a different
			 * cpu may set the task state to TASK_RUNNING again
			 * before the scheduler is reached. */
			get_task_struct(tsk);
			tsk->thread.pfault_wait = 1;
			list_add(&tsk->thread.list, &pfault_list);
			__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
		}
	}
out:
	spin_unlock(&pfault_lock);
	put_task_struct(tsk);
}

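/*
 * When a cpu goes away, wake up every task that is still waiting for a
 * pfault completion interrupt; once the cpu is gone such an interrupt
 * may never arrive, and the task would sleep forever.
 */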
static int pfault_cpu_notify(struct notifier_block *self, unsigned long action,
			     void *hcpu)
{
	struct thread_struct *thread, *next;
	struct task_struct *tsk;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DEAD:
		spin_lock_irq(&pfault_lock);
		list_for_each_entry_safe(thread, next, &pfault_list, list) {
			thread->pfault_wait = 0;
			list_del(&thread->list);
			tsk = container_of(thread, struct task_struct, thread);
			wake_up_process(tsk);
			put_task_struct(tsk);
		}
		spin_unlock_irq(&pfault_lock);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static int __init pfault_irq_init(void)
{
	int rc;

	rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
	if (rc)
		goto out_extint;
	rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
	if (rc)
		goto out_pfault;
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	hotcpu_notifier(pfault_cpu_notify, 0);
	return 0;

out_pfault:
	unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
out_extint:
	pfault_disable = 1;
	return rc;
}
early_initcall(pfault_irq_init);

#endif /* CONFIG_PFAULT */