/*
 * arch/sh/mm/tlb-flush_64.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Richard Curnow (/proc/tlb, bug fixes)
 * Copyright (C) 2003  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/signal.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>

extern void die(const char *, struct pt_regs *, long);

#define PFLAG(val, flag)	(((val) & (flag)) ? #flag : "")
#define PPROT(flag)		PFLAG(pgprot_val(prot), flag)

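/*
 * PPROT(_PAGE_READ), for example, expands to
 *   ((pgprot_val(prot) & _PAGE_READ) ? "_PAGE_READ" : "")
 * so flags that are clear print as empty strings.  For a hypothetical
 * read/write user mapping the output might look like:
 *
 *   prot is 0x0000016c
 *   _PAGE_READ _PAGE_WRITE _PAGE_USER
 */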
static inline void print_prots(pgprot_t prot)
{
	printk("prot is 0x%08lx\n", pgprot_val(prot));

	printk("%s %s %s %s %s\n", PPROT(_PAGE_SHARED), PPROT(_PAGE_READ),
	       PPROT(_PAGE_EXECUTE), PPROT(_PAGE_WRITE), PPROT(_PAGE_USER));
}

static inline void print_vma(struct vm_area_struct *vma)
{
	printk("vma start 0x%08lx\n", vma->vm_start);
	printk("vma end   0x%08lx\n", vma->vm_end);

	print_prots(vma->vm_page_prot);
	printk("vm_flags 0x%08lx\n", vma->vm_flags);
}

static inline void print_task(struct task_struct *tsk)
{
	printk("Task pid %d\n", task_pid_nr(tsk));
}

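/*
 * Walk the software page tables for @address in @mm and return a
 * pointer to the pte if a present translation exists, or NULL if any
 * level of the walk comes up empty.  Called below with mmap_sem held
 * for read.
 */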
static pte_t *lookup_pte(struct mm_struct *mm, unsigned long address)
{
	pgd_t *dir;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	dir = pgd_offset(mm, address);
	if (pgd_none(*dir))
		return NULL;

	pud = pud_offset(dir, address);
	if (pud_none(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	pte = pte_offset_kernel(pmd, address);
	entry = *pte;
	if (pte_none(entry) || !pte_present(entry))
		return NULL;

	return pte;
}

/*
 * This routine handles page faults.  It determines the address and the
 * problem, then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
			      unsigned long textaccess, unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	const struct exception_table_entry *fixup;
	pte_t *pte;
	int fault;

	/* SIM
	 * Note this is now called with interrupts still disabled.
	 * This is to cope with being called for a missing I/O port
	 * address with interrupts disabled.  This should be fixed as
	 * soon as we have a better 'fast path' miss handler.
	 *
	 * Also take care how you try to debug this stuff.  For example,
	 * writing debug data to a port which you have just faulted on
	 * is not going to work.
	 */

	tsk = current;
	mm = tsk->mm;

	/* Not an IO address, so reenable interrupts */
	local_irq_enable();

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (in_atomic() || !mm)
		goto no_context;

	/* TLB misses upon some cache flushes get done under cli() */
	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);

	if (!vma) {
#ifdef DEBUG_FAULT
		print_task(tsk);
		printk("%s:%d fault, address is 0x%08lx PC %016Lx textaccess %ld writeaccess %ld\n",
		       __func__, __LINE__,
		       address, regs->pc, textaccess, writeaccess);
		show_regs(regs);
#endif
		goto bad_area;
	}
	if (vma->vm_start <= address)
		goto good_area;

	if (!(vma->vm_flags & VM_GROWSDOWN)) {
#ifdef DEBUG_FAULT
		print_task(tsk);
		printk("%s:%d fault, address is 0x%08lx PC %016Lx textaccess %ld writeaccess %ld\n",
		       __func__, __LINE__,
		       address, regs->pc, textaccess, writeaccess);
		show_regs(regs);

		print_vma(vma);
#endif
		goto bad_area;
	}
	if (expand_stack(vma, address)) {
#ifdef DEBUG_FAULT
		print_task(tsk);
		printk("%s:%d fault, address is 0x%08lx PC %016Lx textaccess %ld writeaccess %ld\n",
		       __func__, __LINE__,
		       address, regs->pc, textaccess, writeaccess);
		show_regs(regs);
#endif
		goto bad_area;
	}
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it.
 */
good_area:
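	/*
	 * Map the access type onto the VM flag it requires: an
	 * instruction fetch needs VM_EXEC, a write needs VM_WRITE,
	 * anything else needs VM_READ.
	 */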
	if (textaccess) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else {
		if (writeaccess) {
			if (!(vma->vm_flags & VM_WRITE))
				goto bad_area;
		} else {
			if (!(vma->vm_flags & VM_READ))
				goto bad_area;
		}
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
survive:
	fault = handle_mm_fault(mm, vma, address, writeaccess);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

	/* If we get here, the page fault has been handled.  Do the TLB refill
	   now from the newly-set-up PTE, to avoid having to fault again right
	   away on the same instruction. */
	pte = lookup_pte(mm, address);
	if (!pte) {
		/* From empirical evidence, we can get here, due to
		   !pte_present(pte).  (e.g. if a swap-in occurs, and the page
		   is swapped back out again before the process that wanted it
		   gets rescheduled?) */
		goto no_pte;
	}

	__do_tlb_refill(address, textaccess, pte);

no_pte:
	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map.
 * Fix it, but check if it's kernel or user first.
 */
bad_area:
#ifdef DEBUG_FAULT
	printk("fault:bad area\n");
#endif
	up_read(&mm->mmap_sem);

	if (user_mode(regs)) {
		static int count;
		siginfo_t info;

		if (count < 4) {
			/* This is really to help debug faults when starting
			 * usermode, so only need a few */
			count++;
			printk("user mode bad_area address=%08lx pid=%d (%s) pc=%08lx\n",
				address, task_pid_nr(current), current->comm,
				(unsigned long) regs->pc);
#if 0
			show_regs(regs);
#endif
		}
		if (is_global_init(tsk))
			panic("INIT had user mode bad_area\n");

		tsk->thread.address = address;
		tsk->thread.error_code = writeaccess;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code = SEGV_MAPERR;
		info.si_addr = (void *) address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
#ifdef DEBUG_FAULT
	printk("fault:No context\n");
#endif
	/* Are we prepared to handle this kernel fault? */
	fixup = search_exception_tables(regs->pc);
	if (fixup) {
		regs->pc = fixup->fixup;
		return;
	}

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n", address);
	printk(KERN_ALERT "pc = %08Lx%08Lx\n", regs->pc >> 32, regs->pc & 0xffffffff);
	die("Oops", regs, writeaccess);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	printk("fault:Out of memory\n");
	up_read(&mm->mmap_sem);
	if (is_global_init(current)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_group_exit(SIGKILL);
	goto no_context;

do_sigbus:
	printk("fault:Do sigbus\n");
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	tsk->thread.address = address;
	tsk->thread.error_code = writeaccess;
	tsk->thread.trap_no = 14;
	force_sig(SIGBUS, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
}

void update_mmu_cache(struct vm_area_struct *vma,
		      unsigned long address, pte_t pte)
{
	/*
	 * This appears to get called once for every pte entry that gets
	 * established => I don't think it's efficient to try refilling the
	 * TLBs with the pages - some may never get accessed at all.  Also,
	 * for executable pages, it is impossible to determine reliably here
	 * which TLB they should be mapped into (or both even).
	 *
	 * So, just do nothing here and handle faults on demand.  In the
	 * TLBMISS handling case, the refill is now done anyway after the pte
	 * has been fixed up, so that deals with most useful cases.
	 */
}

void local_flush_tlb_one(unsigned long asid, unsigned long page)
{
	unsigned long long match, pteh = 0, lpage;
	unsigned long tlb;

	/*
	 * Sign-extend based on neff.
	 */
	lpage = (page & NEFF_SIGN) ? (page | NEFF_MASK) : page;
	match = (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
	match |= lpage;

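	/*
	 * match now holds the exact PTEH image a live translation for
	 * (asid, page) would carry, so each configured slot can be
	 * tested with a single equality compare.
	 */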
	for_each_itlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb) );

		if (pteh == match) {
			__flush_tlb_slot(tlb);
			break;
		}
	}

	for_each_dtlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb) );

		if (pteh == match) {
			__flush_tlb_slot(tlb);
			break;
		}
	}
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	unsigned long flags;

	if (vma->vm_mm) {
		page &= PAGE_MASK;
		local_irq_save(flags);
		local_flush_tlb_one(get_asid(), page);
		local_irq_restore(flags);
	}
}

void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	unsigned long flags;
	unsigned long long match, pteh = 0, pteh_epn, pteh_low;
	unsigned long tlb;
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm;

	mm = vma->vm_mm;
	if (cpu_context(cpu, mm) == NO_CONTEXT)
		return;

	local_irq_save(flags);

	start &= PAGE_MASK;
	end &= PAGE_MASK;

	match = (cpu_asid(cpu, mm) << PTEH_ASID_SHIFT) | PTEH_VALID;

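	/*
	 * Each PTEH is split into its effective page number (the
	 * page-aligned address bits) and its low ASID/valid bits; a
	 * slot is flushed when the low bits match this mm's pattern
	 * and the EPN falls within [start, end].
	 */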
	/* Flush ITLB */
	for_each_itlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb) );

		pteh_epn = pteh & PAGE_MASK;
		pteh_low = pteh & ~PAGE_MASK;

		if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
			__flush_tlb_slot(tlb);
	}

	/* Flush DTLB */
	for_each_dtlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb) );

		pteh_epn = pteh & PAGE_MASK;
		pteh_low = pteh & ~PAGE_MASK;

		if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
			__flush_tlb_slot(tlb);
	}

	local_irq_restore(flags);
}

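/*
 * Rather than walking individual TLB slots, drop this mm's context so
 * that its next activation allocates a fresh ASID; stale entries tagged
 * with the old ASID can then never match and are recycled naturally.
 */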
void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) == NO_CONTEXT)
		return;

	local_irq_save(flags);

	cpu_context(cpu, mm) = NO_CONTEXT;
	if (mm == current->mm)
		activate_context(mm, cpu);

	local_irq_restore(flags);
}

void local_flush_tlb_all(void)
{
	/* Invalidate all, including shared pages, excluding fixed TLBs */
	unsigned long flags, tlb;

	local_irq_save(flags);

	/* Flush each ITLB entry */
	for_each_itlb_entry(tlb)
		__flush_tlb_slot(tlb);

	/* Flush each DTLB entry */
	for_each_dtlb_entry(tlb)
		__flush_tlb_slot(tlb);

	local_irq_restore(flags);
}

void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* FIXME: Optimize this later.. */
	flush_tlb_all();
}
476