// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */


#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/kfence.h>

#include <asm/ptrace.h>
#include <asm/tlbflush.h>

#include "../kernel/head.h"

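/*
 * Report an "Unable to handle kernel ..." oops for a fault the kernel
 * cannot recover from, then kill the current task.
 */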
static void die_kernel_fault(const char *msg, unsigned long addr,
		struct pt_regs *regs)
{
	bust_spinlocks(1);

	pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n", msg,
		addr);

	bust_spinlocks(0);
	die(regs, "Oops");
	make_task_dead(SIGKILL);
}

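/*
 * Handle a fault taken in kernel mode: try an exception table fixup first;
 * otherwise classify the address (NULL pointer dereference, KFENCE report,
 * or plain bad paging request) and oops.
 */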
static inline void no_context(struct pt_regs *regs, unsigned long addr)
{
	const char *msg;

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (addr < PAGE_SIZE)
		msg = "NULL pointer dereference";
	else {
		if (kfence_handle_page_fault(addr, regs->cause == EXC_STORE_PAGE_FAULT, regs))
			return;

		msg = "paging request";
	}

	die_kernel_fault(msg, addr, regs);
}

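/*
 * Turn a VM_FAULT_ERROR result from handle_mm_fault() into an OOM kill,
 * a SIGBUS, or a kernel oops, depending on the error bit and on whether
 * the fault came from user mode.
 */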
static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
{
	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we got
		 * oom-killed).
		 */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		pagefault_out_of_memory();
		return;
	} else if (fault & VM_FAULT_SIGBUS) {
		/* Kernel mode? Handle exceptions or die */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		do_trap(regs, SIGBUS, BUS_ADRERR, addr);
		return;
	}
	BUG();
}

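/*
 * The access was not covered by a valid VMA or failed the permission check:
 * drop the mmap lock, then deliver SIGSEGV for user mode or fall back to
 * no_context() for kernel mode.
 */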
static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
{
	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
	mmap_read_unlock(mm);
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr);
		return;
	}

	no_context(regs, addr);
}

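/*
 * Resolve a kernel-space fault in the vmalloc region by copying the missing
 * entries from the reference page table, init_mm.pgd, into the page table
 * that is currently active.
 */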
static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
{
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	p4d_t *p4d, *p4d_k;
	pmd_t *pmd, *pmd_k;
	pte_t *pte_k;
	int index;
	unsigned long pfn;

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs))
		return do_trap(regs, SIGSEGV, code, addr);

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "tsk->active_mm->pgd" here.
	 * We might be inside an interrupt in the middle
	 * of a task switch.
	 */
	index = pgd_index(addr);
	pfn = csr_read(CSR_SATP) & SATP_PPN;
	pgd = (pgd_t *)pfn_to_virt(pfn) + index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k)) {
		no_context(regs, addr);
		return;
	}
	set_pgd(pgd, *pgd_k);

	p4d = p4d_offset(pgd, addr);
	p4d_k = p4d_offset(pgd_k, addr);
	if (!p4d_present(*p4d_k)) {
		no_context(regs, addr);
		return;
	}

	pud = pud_offset(p4d, addr);
	pud_k = pud_offset(p4d_k, addr);
	if (!pud_present(*pud_k)) {
		no_context(regs, addr);
		return;
	}

	/*
	 * Since the vmalloc area is global, it is unnecessary
	 * to copy individual PTEs
	 */
	pmd = pmd_offset(pud, addr);
	pmd_k = pmd_offset(pud_k, addr);
	if (!pmd_present(*pmd_k)) {
		no_context(regs, addr);
		return;
	}
	set_pmd(pmd, *pmd_k);

	/*
	 * Make sure the actual PTE exists as well to
	 * catch kernel vmalloc-area accesses to non-mapped
	 * addresses. If we don't do this, this will just
	 * silently loop forever.
	 */
	pte_k = pte_offset_kernel(pmd_k, addr);
	if (!pte_present(*pte_k)) {
		no_context(regs, addr);
		return;
	}

	/*
	 * The kernel assumes that TLBs don't cache invalid
	 * entries, but in RISC-V, SFENCE.VMA specifies an
	 * ordering constraint, not a cache flush; it is
	 * necessary even after writing invalid entries.
	 */
	local_flush_tlb_page(addr);
}

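/*
 * Return true if the faulting access is not permitted by the VMA's
 * protection flags: instruction fetch needs VM_EXEC, a load needs VM_READ
 * (or VM_WRITE, since write implies read), and a store needs VM_WRITE.
 */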
static inline bool access_error(unsigned long cause, struct vm_area_struct *vma)
{
	switch (cause) {
	case EXC_INST_PAGE_FAULT:
		if (!(vma->vm_flags & VM_EXEC)) {
			return true;
		}
		break;
	case EXC_LOAD_PAGE_FAULT:
		/* Write implies read */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE))) {
			return true;
		}
		break;
	case EXC_STORE_PAGE_FAULT:
		if (!(vma->vm_flags & VM_WRITE)) {
			return true;
		}
		break;
	default:
		panic("%s: unhandled cause %lu", __func__, cause);
	}
	return false;
}

/*
 * This routine handles page faults. It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr, cause;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	int code = SEGV_MAPERR;
	vm_fault_t fault;

	cause = regs->cause;
	addr = regs->badaddr;

	tsk = current;
	mm = tsk->mm;

	if (kprobe_page_fault(regs, cause))
		return;

	/*
	 * Fault-in kernel-space virtual memory on-demand.
	 * The 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END))) {
		vmalloc_fault(regs, code, addr);
		return;
	}

#ifdef CONFIG_64BIT
	/*
	 * Modules in 64bit kernels lie in their own virtual region which is not
	 * in the vmalloc region, but dealing with page faults in this region
	 * or the vmalloc region amounts to doing the same thing: checking that
	 * the mapping exists in init_mm.pgd and updating user page table, so
	 * just use vmalloc_fault.
	 */
	if (unlikely(addr >= MODULES_VADDR && addr < MODULES_END)) {
		vmalloc_fault(regs, code, addr);
		return;
	}
#endif
	/* Enable interrupts if they were enabled in the parent context. */
	if (likely(regs->status & SR_PIE))
		local_irq_enable();

	/*
	 * If we're in an interrupt, have no user context, or are running
	 * in an atomic region, then we must not take the fault.
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		tsk->thread.bad_cause = cause;
		no_context(regs, addr);
		return;
	}

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

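	/*
	 * Kernel-mode accesses to user addresses are only legitimate inside
	 * the uaccess helpers, which set SR_SUM around the access; if SUM is
	 * clear, either an exception table fixup applies or this is a bug.
	 */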
	if (!user_mode(regs) && addr < TASK_SIZE && unlikely(!(regs->status & SR_SUM))) {
		if (fixup_exception(regs))
			return;

		die_kernel_fault("access to user memory without uaccess routines", addr, regs);
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

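	/* Translate the exception cause into the matching fault flag. */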
	if (cause == EXC_STORE_PAGE_FAULT)
		flags |= FAULT_FLAG_WRITE;
	else if (cause == EXC_INST_PAGE_FAULT)
		flags |= FAULT_FLAG_INSTRUCTION;
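	/*
	 * Look up the VMA covering the faulting address under the mmap read
	 * lock; if handle_mm_fault() asks for a retry below, we come back
	 * here and repeat the lookup.
	 */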
retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, addr);
	if (unlikely(!vma)) {
		tsk->thread.bad_cause = cause;
		bad_area(regs, mm, code, addr);
		return;
	}
	if (likely(vma->vm_start <= addr))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		tsk->thread.bad_cause = cause;
		bad_area(regs, mm, code, addr);
		return;
	}
	if (unlikely(expand_stack(vma, addr))) {
		tsk->thread.bad_cause = cause;
		bad_area(regs, mm, code, addr);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	code = SEGV_ACCERR;

	if (unlikely(access_error(cause, vma))) {
		tsk->thread.bad_cause = cause;
		bad_area(regs, mm, code, addr);
		return;
	}

	/*
	 * If for any reason at all we could not handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, addr, flags, regs);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_lock because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if (fault_signal_pending(fault, regs))
		return;

	if (unlikely((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY))) {
		flags |= FAULT_FLAG_TRIED;

		/*
		 * No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */
		goto retry;
	}

	mmap_read_unlock(mm);

	if (unlikely(fault & VM_FAULT_ERROR)) {
		tsk->thread.bad_cause = cause;
		mm_fault_error(regs, addr, fault);
		return;
	}
	return;
}
NOKPROBE_SYMBOL(do_page_fault);