/*
 * AVR32 TLB operations
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/mm.h>

#include <asm/mmu_context.h>

/* TODO: Get the correct number from the CONFIG1 system register */
#define NR_TLB_ENTRIES	32

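/*
 * Read the DTLB entry at @index into TLBEHI/TLBELO via the tlbr
 * instruction and print its fields. MMUCR and TLBEHI are saved and
 * restored so the lookup does not disturb the current MMU state.
 */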
static void show_dtlb_entry(unsigned int index)
{
	u32 tlbehi, tlbehi_save, tlbelo, mmucr, mmucr_save;
	unsigned long flags;

	local_irq_save(flags);
	mmucr_save = sysreg_read(MMUCR);
	tlbehi_save = sysreg_read(TLBEHI);
	mmucr = SYSREG_BFINS(DRP, index, mmucr_save);
	sysreg_write(MMUCR, mmucr);

	__builtin_tlbr();
	cpu_sync_pipeline();

	tlbehi = sysreg_read(TLBEHI);
	tlbelo = sysreg_read(TLBELO);

	printk("%2u: %c %c %02x %05x %05x %o %o %c %c %c %c\n",
	       index,
	       SYSREG_BFEXT(TLBEHI_V, tlbehi) ? '1' : '0',
	       SYSREG_BFEXT(G, tlbelo) ? '1' : '0',
	       SYSREG_BFEXT(ASID, tlbehi),
	       SYSREG_BFEXT(VPN, tlbehi) >> 2,
	       SYSREG_BFEXT(PFN, tlbelo) >> 2,
	       SYSREG_BFEXT(AP, tlbelo),
	       SYSREG_BFEXT(SZ, tlbelo),
	       SYSREG_BFEXT(TLBELO_C, tlbelo) ? 'C' : ' ',
	       SYSREG_BFEXT(B, tlbelo) ? 'B' : ' ',
	       SYSREG_BFEXT(W, tlbelo) ? 'W' : ' ',
	       SYSREG_BFEXT(TLBELO_D, tlbelo) ? 'D' : ' ');

	sysreg_write(MMUCR, mmucr_save);
	sysreg_write(TLBEHI, tlbehi_save);
	cpu_sync_pipeline();
	local_irq_restore(flags);
}

void dump_dtlb(void)
{
	unsigned int i;

	printk("ID V G ASID VPN PFN AP SZ C B W D\n");
	for (i = 0; i < NR_TLB_ENTRIES; i++)
		show_dtlb_entry(i);
}

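/*
 * Load the hardware DTLB with a mapping for @address described by @pte.
 * The entry is probed first (tlbs); if no matching entry exists, a
 * not-recently-accessed slot is chosen from TLBARLO before the entry
 * is written with tlbw.
 */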
static void update_dtlb(unsigned long address, pte_t pte)
{
	u32 tlbehi;
	u32 mmucr;

	/*
	 * We're not changing the ASID here, so no need to flush the
	 * pipeline.
	 */
	tlbehi = sysreg_read(TLBEHI);
	tlbehi = SYSREG_BF(ASID, SYSREG_BFEXT(ASID, tlbehi));
	tlbehi |= address & MMU_VPN_MASK;
	tlbehi |= SYSREG_BIT(TLBEHI_V);
	sysreg_write(TLBEHI, tlbehi);

	/* Does this mapping already exist? */
	__builtin_tlbs();
	mmucr = sysreg_read(MMUCR);

	if (mmucr & SYSREG_BIT(MMUCR_N)) {
		/* Not found -- pick a not-recently-accessed entry */
		unsigned int rp;
		u32 tlbar = sysreg_read(TLBARLO);

		rp = 32 - fls(tlbar);
		if (rp == 32) {
			rp = 0;
			sysreg_write(TLBARLO, -1L);
		}

		mmucr = SYSREG_BFINS(DRP, rp, mmucr);
		sysreg_write(MMUCR, mmucr);
	}

	sysreg_write(TLBELO, pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK);

	/* Let's go */
	__builtin_tlbw();
}

void update_mmu_cache(struct vm_area_struct *vma,
		      unsigned long address, pte_t *ptep)
{
	unsigned long flags;

	/* ptrace may call this routine */
	if (vma && current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);
	update_dtlb(address, *ptep);
	local_irq_restore(flags);
}

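/*
 * Invalidate a single (asid, page) translation: search for it with
 * tlbs and, if it is present, write the entry back with the valid bit
 * clear and mark it as not recently accessed in TLBARLO.
 */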
static void __flush_tlb_page(unsigned long asid, unsigned long page)
{
	u32 mmucr, tlbehi;

	/*
	 * Caller is responsible for masking out non-PFN bits in page
	 * and changing the current ASID if necessary. This means that
	 * we don't need to flush the pipeline after writing TLBEHI.
	 */
	tlbehi = page | asid;
	sysreg_write(TLBEHI, tlbehi);

	__builtin_tlbs();
	mmucr = sysreg_read(MMUCR);

	if (!(mmucr & SYSREG_BIT(MMUCR_N))) {
		unsigned int entry;
		u32 tlbarlo;

		/* Clear the "valid" bit */
		sysreg_write(TLBEHI, tlbehi);

		/* mark the entry as "not accessed" */
		entry = SYSREG_BFEXT(DRP, mmucr);
		tlbarlo = sysreg_read(TLBARLO);
		tlbarlo |= (0x80000000UL >> entry);
		sysreg_write(TLBARLO, tlbarlo);

		/* update the entry with valid bit clear */
		__builtin_tlbw();
	}
}

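/*
 * Flush the TLB entry for a single user page. If the page belongs to
 * an mm other than the current one, temporarily switch to that mm's
 * ASID so the search in __flush_tlb_page() matches the right entry.
 */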
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) {
		unsigned long flags, asid;
		unsigned long saved_asid = MMU_NO_ASID;

		asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
		page &= PAGE_MASK;

		local_irq_save(flags);
		if (vma->vm_mm != current->mm) {
			saved_asid = get_asid();
			set_asid(asid);
		}

		__flush_tlb_page(asid, page);

		if (saved_asid != MMU_NO_ASID)
			set_asid(saved_asid);
		local_irq_restore(flags);
	}
}

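/*
 * Flush all TLB entries covering the user address range [start, end)
 * in @vma. Large ranges are handled by dropping the mm's context
 * (forcing a new ASID) instead of flushing page by page.
 */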
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		unsigned long flags;
		int size;

		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;

		if (size > (MMU_DTLB_ENTRIES / 4)) { /* Too many entries to flush */
			mm->context = NO_CONTEXT;
			if (mm == current->mm)
				activate_context(mm);
		} else {
			unsigned long asid;
			unsigned long saved_asid;

			asid = mm->context & MMU_CONTEXT_ASID_MASK;
			saved_asid = MMU_NO_ASID;

			start &= PAGE_MASK;
			end += (PAGE_SIZE - 1);
			end &= PAGE_MASK;

			if (mm != current->mm) {
				saved_asid = get_asid();
				set_asid(asid);
			}

			while (start < end) {
				__flush_tlb_page(asid, start);
				start += PAGE_SIZE;
			}
			if (saved_asid != MMU_NO_ASID)
				set_asid(saved_asid);
		}
		local_irq_restore(flags);
	}
}

/*
 * This function depends on the pages to be flushed having the G
 * (global) bit set in their pte. This is true for all
 * PAGE_KERNEL(_RO) pages.
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;
	int size;

	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (size > (MMU_DTLB_ENTRIES / 4)) { /* Too many entries to flush */
		flush_tlb_all();
	} else {
		unsigned long asid;

		local_irq_save(flags);
		asid = get_asid();

		start &= PAGE_MASK;
		end += (PAGE_SIZE - 1);
		end &= PAGE_MASK;

		while (start < end) {
			__flush_tlb_page(asid, start);
			start += PAGE_SIZE;
		}
		local_irq_restore(flags);
	}
}

void flush_tlb_mm(struct mm_struct *mm)
{
	/* Invalidate all TLB entries of this process by getting a new ASID */
	if (mm->context != NO_CONTEXT) {
		unsigned long flags;

		local_irq_save(flags);
		mm->context = NO_CONTEXT;
		if (mm == current->mm)
			activate_context(mm);
		local_irq_restore(flags);
	}
}

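/* Invalidate the entire TLB by setting the "invalidate all" bit in MMUCR */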
void flush_tlb_all(void)
{
	unsigned long flags;

	local_irq_save(flags);
	sysreg_write(MMUCR, sysreg_read(MMUCR) | SYSREG_BIT(MMUCR_I));
	local_irq_restore(flags);
}

#ifdef CONFIG_PROC_FS

#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>

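/*
 * seq_file iterator for /proc/tlb: walk the NR_TLB_ENTRIES DTLB slots,
 * using a static index as the iterator cursor.
 */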
static void *tlb_start(struct seq_file *tlb, loff_t *pos)
{
	static unsigned long tlb_index;

	if (*pos >= NR_TLB_ENTRIES)
		return NULL;

	tlb_index = 0;
	return &tlb_index;
}

static void *tlb_next(struct seq_file *tlb, void *v, loff_t *pos)
{
	unsigned long *index = v;

	if (*index >= NR_TLB_ENTRIES - 1)
		return NULL;

	++*pos;
	++*index;
	return index;
}

static void tlb_stop(struct seq_file *tlb, void *v)
{

}

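/*
 * Read one DTLB entry with interrupts disabled, restore MMUCR/TLBEHI,
 * then print its fields through the seq_file.
 */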
static int tlb_show(struct seq_file *tlb, void *v)
{
	unsigned int tlbehi, tlbehi_save, tlbelo, mmucr, mmucr_save;
	unsigned long flags;
	unsigned long *index = v;

	if (*index == 0)
		seq_puts(tlb, "ID V G ASID VPN PFN AP SZ C B W D\n");

	BUG_ON(*index >= NR_TLB_ENTRIES);

	local_irq_save(flags);
	mmucr_save = sysreg_read(MMUCR);
	tlbehi_save = sysreg_read(TLBEHI);
	mmucr = SYSREG_BFINS(DRP, *index, mmucr_save);
	sysreg_write(MMUCR, mmucr);

	/* TLBR might change the ASID */
	__builtin_tlbr();
	cpu_sync_pipeline();

	tlbehi = sysreg_read(TLBEHI);
	tlbelo = sysreg_read(TLBELO);

	sysreg_write(MMUCR, mmucr_save);
	sysreg_write(TLBEHI, tlbehi_save);
	cpu_sync_pipeline();
	local_irq_restore(flags);

	seq_printf(tlb, "%2lu: %c %c %02x %05x %05x %o %o %c %c %c %c\n",
		   *index,
		   SYSREG_BFEXT(TLBEHI_V, tlbehi) ? '1' : '0',
		   SYSREG_BFEXT(G, tlbelo) ? '1' : '0',
		   SYSREG_BFEXT(ASID, tlbehi),
		   SYSREG_BFEXT(VPN, tlbehi) >> 2,
		   SYSREG_BFEXT(PFN, tlbelo) >> 2,
		   SYSREG_BFEXT(AP, tlbelo),
		   SYSREG_BFEXT(SZ, tlbelo),
		   SYSREG_BFEXT(TLBELO_C, tlbelo) ? '1' : '0',
		   SYSREG_BFEXT(B, tlbelo) ? '1' : '0',
		   SYSREG_BFEXT(W, tlbelo) ? '1' : '0',
		   SYSREG_BFEXT(TLBELO_D, tlbelo) ? '1' : '0');

	return 0;
}

static const struct seq_operations tlb_ops = {
	.start = tlb_start,
	.next = tlb_next,
	.stop = tlb_stop,
	.show = tlb_show,
};

static int tlb_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &tlb_ops);
}

static const struct file_operations proc_tlb_operations = {
	.open = tlb_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int __init proctlb_init(void)
{
	proc_create("tlb", 0, NULL, &proc_tlb_operations);
	return 0;
}
late_initcall(proctlb_init);
#endif /* CONFIG_PROC_FS */