/*
 * arch/xtensa/mm/tlb.c
 *
 * Logic that manipulates the Xtensa MMU.  Derived from MIPS.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2003 Tensilica Inc.
 *
 * Joe Taylor
 * Chris Zankel	<chris@zankel.net>
 * Marc Gauthier
 */

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/system.h>
#include <asm/cacheflush.h>

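/* Invalidate every auto-refill entry of the instruction and data TLBs.
 * The entry argument handed to the invalidate routines combines the way
 * number in the low bits with the set index shifted up by PAGE_SHIFT;
 * a single isync is issued after all entries have been dropped.
 */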
static inline void __flush_itlb_all (void)
{
	int w, i;

	for (w = 0; w < ITLB_ARF_WAYS; w++) {
		for (i = 0; i < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); i++) {
			int e = w + (i << PAGE_SHIFT);
			invalidate_itlb_entry_no_isync(e);
		}
	}
	asm volatile ("isync\n");
}

static inline void __flush_dtlb_all (void)
{
	int w, i;

	for (w = 0; w < DTLB_ARF_WAYS; w++) {
		for (i = 0; i < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); i++) {
			int e = w + (i << PAGE_SHIFT);
			invalidate_dtlb_entry_no_isync(e);
		}
	}
	asm volatile ("isync\n");
}


void flush_tlb_all (void)
{
	__flush_itlb_all();
	__flush_dtlb_all();
}

/* If mm is current, we simply assign the current task a new ASID, thus
 * invalidating all previous tlb entries. If mm is someone else's user mapping,
 * we invalidate the context, so that when that user mapping is swapped in,
 * a new context will be assigned to it.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm) {
		unsigned long flags;
		local_irq_save(flags);
		__get_new_mmu_context(mm);
		__load_mmu_context(mm);
		local_irq_restore(flags);
	}
	else
		mm->context = 0;
}

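/* Total number of auto-refill entries in each TLB; _TLB_ENTRIES is the
 * larger of the two counts.
 */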
#define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)
#define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)
#if _ITLB_ENTRIES > _DTLB_ENTRIES
# define _TLB_ENTRIES _ITLB_ENTRIES
#else
# define _TLB_ENTRIES _DTLB_ENTRIES
#endif

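/* Flush a range of pages. If the range covers no more than _TLB_ENTRIES
 * pages, invalidate each page individually under the mm's address-space ID;
 * otherwise fall back to flushing the whole address space via flush_tlb_mm().
 */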
void flush_tlb_range (struct vm_area_struct *vma,
		      unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;

	if (mm->context == NO_CONTEXT)
		return;

#if 0
	printk("[tlbrange<%02lx,%08lx,%08lx>]\n",
			(unsigned long)mm->context, start, end);
#endif
	local_irq_save(flags);

	if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) {
		int oldpid = get_rasid_register();
		set_rasid_register (ASID_INSERT(mm->context));
		start &= PAGE_MASK;
		if (vma->vm_flags & VM_EXEC)
			while (start < end) {
				invalidate_itlb_mapping(start);
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}
		else
			while (start < end) {
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}

		set_rasid_register(oldpid);
	} else {
		flush_tlb_mm(mm);
	}
	local_irq_restore(flags);
}

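/* Flush the TLB entries for a single page: probe the DTLB (and the ITLB
 * for executable mappings) for the given virtual address and drop any
 * matching entry, preserving the RASID register across the operation.
 */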
void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;
	int oldpid;

	if (mm->context == NO_CONTEXT)
		return;

	local_irq_save(flags);

	oldpid = get_rasid_register();

	if (vma->vm_flags & VM_EXEC)
		invalidate_itlb_mapping(page);
	invalidate_dtlb_mapping(page);

	set_rasid_register(oldpid);

	local_irq_restore(flags);
}