/*
 * arch/xtensa/mm/tlb.c
 *
 * Logic that manipulates the Xtensa MMU.  Derived from MIPS.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2003 Tensilica Inc.
 *
 * Joe Taylor
 * Chris Zankel <chris@zankel.net>
 * Marc Gauthier
 */

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

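/*
 * Walk every entry of the instruction TLB's auto-refill ways and
 * invalidate it.  The index passed to the invalidate instruction encodes
 * the way in the low bits and the entry number at PAGE_SHIFT and above,
 * so each (way, entry) pair is visited exactly once.  A single isync at
 * the end is cheaper than one per entry and suffices to order the
 * invalidations against subsequent fetches.  __flush_dtlb_all() below is
 * the same loop for the data TLB.
 */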
static inline void __flush_itlb_all(void)
{
	int w, i;

	for (w = 0; w < ITLB_ARF_WAYS; w++) {
		for (i = 0; i < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); i++) {
			int e = w + (i << PAGE_SHIFT);
			invalidate_itlb_entry_no_isync(e);
		}
	}
	asm volatile ("isync\n");
}

static inline void __flush_dtlb_all(void)
{
	int w, i;

	for (w = 0; w < DTLB_ARF_WAYS; w++) {
		for (i = 0; i < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); i++) {
			int e = w + (i << PAGE_SHIFT);
			invalidate_dtlb_entry_no_isync(e);
		}
	}
	asm volatile ("isync\n");
}

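/* Invalidate every auto-refill entry in both TLBs on this CPU. */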
void local_flush_tlb_all(void)
{
	__flush_itlb_all();
	__flush_dtlb_all();
}

/* If mm is current, we simply assign the current task a new ASID, thus,
 * invalidating all previous tlb entries. If mm is someone else's user mapping,
 * we invalidate the context; when that user mapping is next swapped in,
 * a new context will be assigned to it.
 */

void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu = smp_processor_id();

	if (mm == current->active_mm) {
		unsigned long flags;
		local_irq_save(flags);
		mm->context.asid[cpu] = NO_CONTEXT;
		activate_context(mm, cpu);
		local_irq_restore(flags);
	} else {
		mm->context.asid[cpu] = NO_CONTEXT;
		mm->context.cpu = -1;
	}
}

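/*
 * _TLB_ENTRIES is the number of auto-refill entries of the larger of the
 * two TLBs.  It bounds the ranged flushes below: once a range spans more
 * pages than that, invalidating page by page cannot be cheaper than
 * simply dropping the whole context.
 */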
#define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)
#define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)
#if _ITLB_ENTRIES > _DTLB_ENTRIES
# define _TLB_ENTRIES _ITLB_ENTRIES
#else
# define _TLB_ENTRIES _DTLB_ENTRIES
#endif

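/*
 * Flush a user address range on this CPU.  For small ranges the entries
 * are dropped page by page under the mm's ASID (temporarily installed in
 * the RASID register); for anything larger than _TLB_ENTRIES pages it is
 * cheaper to invalidate the whole context via local_flush_tlb_mm().
 */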
void local_flush_tlb_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	int cpu = smp_processor_id();
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;

	if (mm->context.asid[cpu] == NO_CONTEXT)
		return;

	pr_debug("[tlbrange<%02lx,%08lx,%08lx>]\n",
		 (unsigned long)mm->context.asid[cpu], start, end);
	local_irq_save(flags);

	if (end - start + (PAGE_SIZE - 1) <= _TLB_ENTRIES << PAGE_SHIFT) {
		int oldpid = get_rasid_register();

		set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
		start &= PAGE_MASK;
		if (vma->vm_flags & VM_EXEC)
			while (start < end) {
				invalidate_itlb_mapping(start);
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}
		else
			while (start < end) {
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}

		set_rasid_register(oldpid);
	} else {
		local_flush_tlb_mm(mm);
	}
	local_irq_restore(flags);
}

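/*
 * Flush the TLB entries for a single user page on this CPU.  "page" is
 * the virtual address of the page, looked up under vma->vm_mm's ASID.
 * The DTLB entry is always dropped; the ITLB entry only for executable
 * mappings.
 */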
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;
	int oldpid;

	if (mm->context.asid[cpu] == NO_CONTEXT)
		return;

	local_irq_save(flags);

	oldpid = get_rasid_register();
	set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));

	if (vma->vm_flags & VM_EXEC)
		invalidate_itlb_mapping(page);
	invalidate_dtlb_mapping(page);

	set_rasid_register(oldpid);

	local_irq_restore(flags);
}

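/*
 * Flush a kernel virtual range on this CPU.  Kernel mappings use the
 * kernel ASID, so no RASID juggling is needed; ranges outside
 * TASK_SIZE..PAGE_OFFSET, or larger than _TLB_ENTRIES pages, fall back
 * to a full flush.
 */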
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	if (end > start && start >= TASK_SIZE && end <= PAGE_OFFSET &&
	    end - start < _TLB_ENTRIES << PAGE_SHIFT) {
		start &= PAGE_MASK;
		while (start < end) {
			invalidate_itlb_mapping(start);
			invalidate_dtlb_mapping(start);
			start += PAGE_SIZE;
		}
	} else {
		local_flush_tlb_all();
	}
}
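
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * after tearing down kernel mappings for a region, e.g. in an ioremap-
 * style unmap path, the stale translations would be dropped with
 *
 *	local_flush_tlb_kernel_range(addr, addr + size);
 *
 * where addr and size delimit the region that was just unmapped.
 */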

#ifdef CONFIG_DEBUG_TLB_SANITY

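/*
 * Walk the current task's page table (falling back to active_mm for
 * kernel threads) and return the raw PTE value for vaddr, or 0 if any
 * level of the walk is missing or bad.
 */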
static unsigned get_pte_for_vaddr(unsigned vaddr)
{
	struct task_struct *task = get_current();
	struct mm_struct *mm = task->mm;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned int pteval;

	if (!mm)
		mm = task->active_mm;
	pgd = pgd_offset(mm, vaddr);
	if (pgd_none_or_clear_bad(pgd))
		return 0;
	p4d = p4d_offset(pgd, vaddr);
	if (p4d_none_or_clear_bad(p4d))
		return 0;
	pud = pud_offset(p4d, vaddr);
	if (pud_none_or_clear_bad(pud))
		return 0;
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none_or_clear_bad(pmd))
		return 0;
	pte = pte_offset_map(pmd, vaddr);
	if (!pte)
		return 0;
	pteval = pte_val(*pte);
	pte_unmap(pte);	/* drop the mapping taken by pte_offset_map() */
	return pteval;
}

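/* Severity flags returned by check_tlb_entry() and OR-ed together by
 * check_tlb_sanity().
 */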
enum {
	TLB_SUSPICIOUS = 1,
	TLB_INSANE = 2,
};

static void tlb_insane(void)
{
	BUG_ON(1);
}

static void tlb_suspicious(void)
{
	WARN_ON(1);
}

/*
 * Check that TLB entries with kernel ASID (1) have kernel VMA (>= TASK_SIZE),
 * and TLB entries with user ASID (>=4) have VMA < TASK_SIZE.
 *
 * Check that valid TLB entries either have the same PA as the PTE, or that
 * the PTE is marked as non-present.  A non-present PTE together with a page
 * that has a non-zero refcount and a zero mapcount is normal for a batched
 * TLB flush operation.  A zero refcount means that the page was freed
 * prematurely.  A non-zero mapcount is unusual, but does not necessarily
 * mean an error; it is therefore only flagged as suspicious.
 */
static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
{
	unsigned tlbidx = w | (e << PAGE_SHIFT);
	unsigned r0 = dtlb ?
		read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
	unsigned r1 = dtlb ?
		read_dtlb_translation(tlbidx) : read_itlb_translation(tlbidx);
	unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
	unsigned pte = get_pte_for_vaddr(vpn);
	unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
	unsigned tlb_asid = r0 & ASID_MASK;
	bool kernel = tlb_asid == 1;
	int rc = 0;

	if (tlb_asid > 0 && ((vpn < TASK_SIZE) == kernel)) {
		pr_err("%cTLB: way: %u, entry: %u, VPN %08x in %s PTE\n",
		       dtlb ? 'D' : 'I', w, e, vpn,
		       kernel ? "kernel" : "user");
		rc |= TLB_INSANE;
	}

	if (tlb_asid == mm_asid) {
		if ((pte ^ r1) & PAGE_MASK) {
			pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
			       dtlb ? 'D' : 'I', w, e, r0, r1, pte);
			if (pte == 0 || !pte_present(__pte(pte))) {
				struct page *p = pfn_to_page(r1 >> PAGE_SHIFT);
				pr_err("page refcount: %d, mapcount: %d\n",
				       page_count(p),
				       page_mapcount(p));
				if (!page_count(p))
					rc |= TLB_INSANE;
				else if (page_mapcount(p))
					rc |= TLB_SUSPICIOUS;
			} else {
				rc |= TLB_INSANE;
			}
		}
	}
	return rc;
}

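/*
 * Scan every auto-refill entry of both TLBs with interrupts disabled;
 * BUG on an insane entry, WARN on a merely suspicious one.  A sketch of
 * a (hypothetical) debugging call site, right after a batched flush:
 *
 *	local_flush_tlb_all();
 *	check_tlb_sanity();
 */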
void check_tlb_sanity(void)
{
	unsigned long flags;
	unsigned w, e;
	int bug = 0;

	local_irq_save(flags);
	for (w = 0; w < DTLB_ARF_WAYS; ++w)
		for (e = 0; e < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); ++e)
			bug |= check_tlb_entry(w, e, true);
	for (w = 0; w < ITLB_ARF_WAYS; ++w)
		for (e = 0; e < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); ++e)
			bug |= check_tlb_entry(w, e, false);
	if (bug & TLB_INSANE)
		tlb_insane();
	if (bug & TLB_SUSPICIOUS)
		tlb_suspicious();
	local_irq_restore(flags);
}

#endif /* CONFIG_DEBUG_TLB_SANITY */