// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */

#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/uaccess.h>

#include <asm/ptrace.h>
#include <asm/tlbflush.h>

#include "../kernel/head.h"

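/*
 * Handle a fault for which there is no valid user context: try an
 * exception fixup first, otherwise oops and kill the task.
 */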
static inline void no_context(struct pt_regs *regs, unsigned long addr)
{
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n",
		(addr < PAGE_SIZE) ? "NULL pointer dereference" :
		"paging request", addr);
	die(regs, "Oops");
	make_task_dead(SIGKILL);
}

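/* Handle the VM_FAULT_ERROR result bits returned by handle_mm_fault(). */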
static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
{
	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed).
		 */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		pagefault_out_of_memory();
		return;
	} else if (fault & VM_FAULT_SIGBUS) {
		/* Kernel mode? Handle exceptions or die */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		do_trap(regs, SIGBUS, BUS_ADRERR, addr);
		return;
	}
	BUG();
}

static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
{
	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
	mmap_read_unlock(mm);
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr);
		return;
	}

	no_context(regs, addr);
}

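/*
 * Faults in the vmalloc (and, on 64-bit, modules) region are resolved by
 * copying the relevant entries from init_mm's reference page table into
 * the active page table; no user mapping is involved.
 */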
static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
{
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	p4d_t *p4d, *p4d_k;
	pmd_t *pmd, *pmd_k;
	pte_t *pte_k;
	int index;
	unsigned long pfn;

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs))
		return do_trap(regs, SIGSEGV, code, addr);

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "tsk->active_mm->pgd" here.
	 * We might be inside an interrupt in the middle
	 * of a task switch.
	 */
	index = pgd_index(addr);
	pfn = csr_read(CSR_SATP) & SATP_PPN;
	pgd = (pgd_t *)pfn_to_virt(pfn) + index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k)) {
		no_context(regs, addr);
		return;
	}
	set_pgd(pgd, *pgd_k);

	p4d = p4d_offset(pgd, addr);
	p4d_k = p4d_offset(pgd_k, addr);
	if (!p4d_present(*p4d_k)) {
		no_context(regs, addr);
		return;
	}

	pud = pud_offset(p4d, addr);
	pud_k = pud_offset(p4d_k, addr);
	if (!pud_present(*pud_k)) {
		no_context(regs, addr);
		return;
	}

	/*
	 * Since the vmalloc area is global, it is unnecessary
	 * to copy individual PTEs
	 */
	pmd = pmd_offset(pud, addr);
	pmd_k = pmd_offset(pud_k, addr);
	if (!pmd_present(*pmd_k)) {
		no_context(regs, addr);
		return;
	}
	set_pmd(pmd, *pmd_k);

	/*
	 * Make sure the actual PTE exists as well to
	 * catch kernel vmalloc-area accesses to non-mapped
	 * addresses. If we don't do this, this will just
	 * silently loop forever.
	 */
	pte_k = pte_offset_kernel(pmd_k, addr);
	if (!pte_present(*pte_k)) {
		no_context(regs, addr);
		return;
	}

	/*
	 * The kernel assumes that TLBs don't cache invalid
	 * entries, but in RISC-V, SFENCE.VMA specifies an
	 * ordering constraint, not a cache flush; it is
	 * necessary even after writing invalid entries.
	 */
	local_flush_tlb_page(addr);
}

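/* Check whether the VMA's permissions allow the kind of access that faulted. */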
static inline bool access_error(unsigned long cause, struct vm_area_struct *vma)
{
	switch (cause) {
	case EXC_INST_PAGE_FAULT:
		if (!(vma->vm_flags & VM_EXEC))
			return true;
		break;
	case EXC_LOAD_PAGE_FAULT:
		/* Write implies read */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
			return true;
		break;
	case EXC_STORE_PAGE_FAULT:
		if (!(vma->vm_flags & VM_WRITE))
			return true;
		break;
	default:
		panic("%s: unhandled cause %lu", __func__, cause);
	}
	return false;
}

/*
 * This routine handles page faults.  It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr, cause;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	int code = SEGV_MAPERR;
	vm_fault_t fault;

	cause = regs->cause;
	addr = regs->badaddr;

	tsk = current;
	mm = tsk->mm;

	/*
	 * Fault-in kernel-space virtual memory on-demand.
	 * The 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END))) {
		vmalloc_fault(regs, code, addr);
		return;
	}

#ifdef CONFIG_64BIT
	/*
	 * Modules in 64-bit kernels lie in their own virtual region which is
	 * not in the vmalloc region, but dealing with page faults in this
	 * region or the vmalloc region amounts to doing the same thing:
	 * checking that the mapping exists in init_mm.pgd and updating the
	 * user page table, so just use vmalloc_fault.
	 */
	if (unlikely(addr >= MODULES_VADDR && addr < MODULES_END)) {
		vmalloc_fault(regs, code, addr);
		return;
	}
#endif
	/* Enable interrupts if they were enabled in the parent context. */
	if (likely(regs->status & SR_PIE))
		local_irq_enable();

	/*
	 * If we're in an interrupt, have no user context, or are running
	 * in an atomic region, then we must not take the fault.
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		no_context(regs, addr);
		return;
	}

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

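	/* Translate the trap cause into the fault flags handle_mm_fault() expects. */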
	if (cause == EXC_STORE_PAGE_FAULT)
		flags |= FAULT_FLAG_WRITE;
	else if (cause == EXC_INST_PAGE_FAULT)
		flags |= FAULT_FLAG_INSTRUCTION;
retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, addr);
	if (unlikely(!vma)) {
		bad_area(regs, mm, code, addr);
		return;
	}
	if (likely(vma->vm_start <= addr))
		goto good_area;
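	/*
	 * The address is below the nearest VMA: only a stack VMA marked
	 * VM_GROWSDOWN may be expanded to cover it.
	 */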
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, mm, code, addr);
		return;
	}
	if (unlikely(expand_stack(vma, addr))) {
		bad_area(regs, mm, code, addr);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	code = SEGV_ACCERR;

	if (unlikely(access_error(cause, vma))) {
		bad_area(regs, mm, code, addr);
		return;
	}

	/*
	 * If for any reason at all we could not handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, addr, flags, regs);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_lock because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if (fault_signal_pending(fault, regs))
		return;

	if (unlikely((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY))) {
		flags |= FAULT_FLAG_TRIED;

		/*
		 * No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */
		goto retry;
	}

	mmap_read_unlock(mm);

	if (unlikely(fault & VM_FAULT_ERROR)) {
		mm_fault_error(regs, addr, fault);
		return;
	}
}