// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/signal.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/version.h>
#include <linux/vt_kern.h>
#include <linux/extable.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/kprobes.h>

#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/traps.h>
#include <asm/page.h>

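/*
 * fixup_exception() - recover from a fault in kernel code that has an
 * exception table entry. If the faulting instruction is covered by an
 * entry, redirect execution to its recorded fixup and report success.
 */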
int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	fixup = search_exception_tables(instruction_pointer(regs));
	if (fixup) {
		regs->pc = fixup->nextinsn;

		return 1;
	}

	return 0;
}

/*
 * This routine handles page faults. It determines the address and the
 * problem, then hands the fault off to the appropriate routine.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
			      unsigned long mmu_meh)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int si_code;
	vm_fault_t fault;
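	/*
	 * mmu_meh carries the faulting virtual address in its upper bits;
	 * masking with PAGE_MASK yields the page-aligned fault address.
	 */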
	unsigned long address = mmu_meh & PAGE_MASK;

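	/*
	 * Give kprobes the first chance to handle a fault taken while a
	 * probe is active (e.g. from a probe handler itself).
	 */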
	if (kprobe_page_fault(regs, tsk->thread.trap_no))
		return;

	si_code = SEGV_MAPERR;

#ifndef CONFIG_CPU_HAS_TLBI
	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(address >= VMALLOC_START) &&
	    unlikely(address <= VMALLOC_END)) {
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		unsigned long pgd_base;

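		/*
		 * get_pgd() returns the physical base of the page directory
		 * currently installed in the MMU; convert it to a kernel
		 * virtual address before indexing into it.
		 */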
		pgd_base = (unsigned long)__va(get_pgd());
		pgd = (pgd_t *)pgd_base + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

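		/*
		 * The pud level is folded into the pgd here, so a pgd
		 * entry can be reinterpreted as a pud entry directly.
		 */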
		pud = (pud_t *)pgd;
		pud_k = (pud_t *)pgd_k;
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;
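		/* The kernel mapping is in place; the TLB refill can now succeed. */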
		return;
	}
#endif

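	/* Account this fault to the perf software event counters. */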
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	/*
	 * If we're in an interrupt, have pagefaults disabled or have no
	 * user context, we must not take the fault..
	 */
	if (faulthandler_disabled() || !mm)
		goto bad_area_nosemaphore;

	mmap_read_lock(mm);
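	/*
	 * find_vma() returns the first vma ending above the address; if
	 * the address lies below its start, it is only valid when the
	 * vma is a stack that can grow down far enough to cover it.
	 */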
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	si_code = SEGV_ACCERR;

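	/* Check that the access type is permitted by the vma's protections. */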
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (unlikely(!vma_is_accessible(vma)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, write ? FAULT_FLAG_WRITE : 0,
				regs);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		BUG();
	}
	mmap_read_unlock(mm);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	mmap_read_unlock(mm);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
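		/* Record which trap caused the fault in the thread struct. */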
		tsk->thread.trap_no = trap_no(regs);
		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
		return;
	}

no_context:
	tsk->thread.trap_no = trap_no(regs);

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel paging request at virtual "
		 "address 0x%08lx, pc: 0x%08lx\n", address, regs->pc);
	die(regs, "Oops");

out_of_memory:
	tsk->thread.trap_no = trap_no(regs);

	/* Drop mmap_lock before the OOM kill or the userspace retry. */
	mmap_read_unlock(mm);

	/*
	 * We ran out of memory, call the OOM killer, and return to
	 * userspace (which will retry the fault, or kill us if we got
	 * oom-killed).
	 */
	pagefault_out_of_memory();
	return;

do_sigbus:
	tsk->thread.trap_no = trap_no(regs);

	mmap_read_unlock(mm);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;

	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
}