/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _M68K_TLBFLUSH_H
#define _M68K_TLBFLUSH_H

#ifdef CONFIG_MMU
#ifndef CONFIG_SUN3

#include <asm/current.h>
#include <asm/mcfmmu.h>

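/*
 * Flush the ATC entry for a single kernel page. ColdFire has no
 * per-page flush here and clears the non-locked TLB entries instead.
 */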
static inline void flush_tlb_kernel_page(void *addr)
{
	if (CPU_IS_COLDFIRE) {
		mmu_write(MMUOR, MMUOR_CNL);
	} else if (CPU_IS_040_OR_060) {
		set_fc(SUPER_DATA);
		__asm__ __volatile__(".chip 68040\n\t"
				     "pflush (%0)\n\t"
				     ".chip 68k"
				     : : "a" (addr));
		set_fc(USER_DATA);
	} else if (CPU_IS_020_OR_030)
		__asm__ __volatile__("pflush #4,#4,(%0)" : : "a" (addr));
}

/*
 * flush all user-space atc entries.
 */
static inline void __flush_tlb(void)
{
	if (CPU_IS_COLDFIRE) {
		mmu_write(MMUOR, MMUOR_CNL);
	} else if (CPU_IS_040_OR_060) {
		__asm__ __volatile__(".chip 68040\n\t"
				     "pflushan\n\t"
				     ".chip 68k");
	} else if (CPU_IS_020_OR_030) {
		__asm__ __volatile__("pflush #0,#4");
	}
}

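/* Flush a single ATC entry on the 68040/68060. */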
static inline void __flush_tlb040_one(unsigned long addr)
{
	__asm__ __volatile__(".chip 68040\n\t"
			     "pflush (%0)\n\t"
			     ".chip 68k"
			     : : "a" (addr));
}

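/*
 * Flush the ATC entry for a single page; on ColdFire this again falls
 * back to clearing the non-locked TLB entries.
 */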
static inline void __flush_tlb_one(unsigned long addr)
{
	if (CPU_IS_COLDFIRE)
		mmu_write(MMUOR, MMUOR_CNL);
	else if (CPU_IS_040_OR_060)
		__flush_tlb040_one(addr);
	else if (CPU_IS_020_OR_030)
		__asm__ __volatile__("pflush #0,#4,(%0)" : : "a" (addr));
}

#define flush_tlb() __flush_tlb()

/*
 * flush all atc entries (both kernel and user-space entries).
 */
static inline void flush_tlb_all(void)
{
	if (CPU_IS_COLDFIRE) {
		mmu_write(MMUOR, MMUOR_CNL);
	} else if (CPU_IS_040_OR_060) {
		__asm__ __volatile__(".chip 68040\n\t"
				     "pflusha\n\t"
				     ".chip 68k");
	} else if (CPU_IS_020_OR_030) {
		__asm__ __volatile__("pflusha");
	}
}

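/*
 * Flush all user-space entries, but only when mm is the currently
 * active address space.
 */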
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb();
}

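/* Flush a single page of the active address space. */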
static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_one(addr);
}

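/* A range flush simply drops all user-space entries of the active mm. */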
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb();
}

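/* Kernel-range flushes simply fall back to flush_tlb_all(). */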
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_all();
}

#else /* CONFIG_SUN3 */

/* Reserved PMEGs and per-PMEG allocation bookkeeping. */
extern char sun3_reserved_pmeg[SUN3_PMEGS_NUM];
extern unsigned long pmeg_vaddr[SUN3_PMEGS_NUM];
extern unsigned char pmeg_alloc[SUN3_PMEGS_NUM];
extern unsigned char pmeg_ctx[SUN3_PMEGS_NUM];

/* Flush all userspace mappings one by one...  (why no flush command,
   sun?) */
static inline void flush_tlb_all(void)
{
	unsigned long addr;
	unsigned char ctx, oldctx;

	oldctx = sun3_get_context();
	for (addr = 0x00000000; addr < TASK_SIZE; addr += SUN3_PMEG_SIZE) {
		for (ctx = 0; ctx < 8; ctx++) {
			sun3_put_context(ctx);
			sun3_put_segmap(addr, SUN3_INVALID_PMEG);
		}
	}

	sun3_put_context(oldctx);
	/* erase all of the userspace pmeg maps, we've clobbered them
	   all anyway */
	for (addr = 0; addr < SUN3_INVALID_PMEG; addr++) {
		if (pmeg_alloc[addr] == 1) {
			pmeg_alloc[addr] = 0;
			pmeg_ctx[addr] = 0;
			pmeg_vaddr[addr] = 0;
		}
	}
}

/* Clear user TLB entries within the context named in mm */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned char oldctx;
	unsigned char seg;
	unsigned long i;

	oldctx = sun3_get_context();
	sun3_put_context(mm->context);

	for (i = 0; i < TASK_SIZE; i += SUN3_PMEG_SIZE) {
		seg = sun3_get_segmap(i);
		if (seg == SUN3_INVALID_PMEG)
			continue;

		sun3_put_segmap(i, SUN3_INVALID_PMEG);
		pmeg_alloc[seg] = 0;
		pmeg_ctx[seg] = 0;
		pmeg_vaddr[seg] = 0;
	}

	sun3_put_context(oldctx);
}

/* Flush a single TLB page. In this case, we're limited to flushing a
   single PMEG */
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	unsigned char oldctx;
	unsigned char i;

	oldctx = sun3_get_context();
	sun3_put_context(vma->vm_mm->context);
	addr &= ~SUN3_PMEG_MASK;
	if ((i = sun3_get_segmap(addr)) != SUN3_INVALID_PMEG) {
		pmeg_alloc[i] = 0;
		pmeg_ctx[i] = 0;
		pmeg_vaddr[i] = 0;
		sun3_put_segmap(addr, SUN3_INVALID_PMEG);
	}
	sun3_put_context(oldctx);
}

/* Flush a range of pages from TLB. */
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned char seg, oldctx;

	start &= ~SUN3_PMEG_MASK;

	oldctx = sun3_get_context();
	sun3_put_context(mm->context);

	while (start < end) {
		if ((seg = sun3_get_segmap(start)) == SUN3_INVALID_PMEG)
			goto next;
		if (pmeg_ctx[seg] == mm->context) {
			pmeg_alloc[seg] = 0;
			pmeg_ctx[seg] = 0;
			pmeg_vaddr[seg] = 0;
		}
		sun3_put_segmap(start, SUN3_INVALID_PMEG);
next:
		start += SUN3_PMEG_SIZE;
	}
}

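/* Flushing a kernel range also just falls back to flush_tlb_all(). */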
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_all();
}

/* Flush kernel page from TLB. */
static inline void flush_tlb_kernel_page(unsigned long addr)
{
	sun3_put_segmap(addr & ~(SUN3_PMEG_SIZE - 1), SUN3_INVALID_PMEG);
}

#endif /* CONFIG_SUN3 */

#else /* !CONFIG_MMU */

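/*
 * Without an MMU there is no ATC/TLB to flush; these stubs should
 * never be called.
 */
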
/*
 * flush all user-space atc entries.
 */
static inline void __flush_tlb(void)
{
	BUG();
}

static inline void __flush_tlb_one(unsigned long addr)
{
	BUG();
}

#define flush_tlb() __flush_tlb()

/*
 * flush all atc entries (both kernel and user-space entries).
 */
static inline void flush_tlb_all(void)
{
	BUG();
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	BUG();
}

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	BUG();
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	BUG();
}

static inline void flush_tlb_kernel_page(unsigned long addr)
{
	BUG();
}

#endif /* CONFIG_MMU */

#endif /* _M68K_TLBFLUSH_H */