/*
 * arch/xtensa/mm/cache.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001-2006 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor
 * Marc Gauthier
 *
 */

#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/bootmem.h>
#include <linux/swap.h>
#include <linux/pagemap.h>

#include <asm/bootparam.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/* #define printd(x...) printk(x) */
#define printd(x...) do { } while (0)

/*
 * Note:
 * The kernel provides one architecture bit, PG_arch_1, in the page flags
 * that can be used for cache coherency.
 *
 * I$-D$ coherency.
 *
 * The Xtensa architecture doesn't keep the instruction cache coherent with
 * the data cache. We use the architecture bit to indicate if the caches
 * are coherent. The kernel clears this bit whenever a page is added to the
 * page cache. At that time, the caches might not be in sync. We, therefore,
 * define this flag as 'clean' if set.
 *
 * D-cache aliasing.
 *
 * With cache aliasing, we always have to flush the cache when pages are
 * unmapped (see tlb_start_vma()). So, we use this flag to indicate a dirty
 * page.
 */

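/*
 * For reference, DCACHE_ALIAS_MASK and DCACHE_ALIAS_EQ() used below come
 * from <asm/page.h>. A minimal sketch of their intent (the authoritative
 * definitions live in that header):
 *
 *	#define DCACHE_ALIAS_MASK	(PAGE_MASK & (DCACHE_WAY_SIZE - 1))
 *	#define DCACHE_ALIAS_EQ(a, b)	((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0)
 *
 * Two mappings of the same physical page can only hit the same cache
 * lines if their page-color bits (position within a cache way) agree.
 * For example, with a 16k way and 4k pages, addresses 0x1000 and 0x5000
 * have the same color, while 0x1000 and 0x2000 do not.
 */
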
#if (DCACHE_WAY_SIZE > PAGE_SIZE)

static inline void kmap_invalidate_coherent(struct page *page,
					    unsigned long vaddr)
{
	if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
		unsigned long kvaddr;

		if (!PageHighMem(page)) {
			kvaddr = (unsigned long)page_to_virt(page);

			__invalidate_dcache_page(kvaddr);
		} else {
			kvaddr = TLBTEMP_BASE_1 +
				(page_to_phys(page) & DCACHE_ALIAS_MASK);

			__invalidate_dcache_page_alias(kvaddr,
						       page_to_phys(page));
		}
	}
}

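/*
 * Pick a kernel virtual address through which 'page' can be accessed
 * coherently with a user mapping at 'vaddr': the page's direct mapping
 * if the cache colors already match, otherwise a window at 'base' with
 * the same color as 'vaddr'. In the window case *paddr is set to the
 * physical address, presumably so that clear_page_alias() and
 * copy_page_alias() can set up the temporary translation; *paddr == 0
 * signals that the returned address is already mapped.
 */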
static inline void *coherent_kvaddr(struct page *page, unsigned long base,
				    unsigned long vaddr, unsigned long *paddr)
{
	if (PageHighMem(page) || !DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
		*paddr = page_to_phys(page);
		return (void *)(base + (vaddr & DCACHE_ALIAS_MASK));
	} else {
		*paddr = 0;
		return page_to_virt(page);
	}
}

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	unsigned long paddr;
	void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);

	preempt_disable();
	kmap_invalidate_coherent(page, vaddr);
	set_bit(PG_arch_1, &page->flags);
	clear_page_alias(kvaddr, paddr);
	preempt_enable();
}
EXPORT_SYMBOL(clear_user_highpage);

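/*
 * Like clear_user_highpage(), but with two windows: the destination is
 * accessed through TLBTEMP_BASE_1 and the source through TLBTEMP_BASE_2,
 * so the two temporary mappings cannot collide even when both need the
 * same cache color as 'vaddr'.
 */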
void copy_user_highpage(struct page *dst, struct page *src,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	unsigned long dst_paddr, src_paddr;
	void *dst_vaddr = coherent_kvaddr(dst, TLBTEMP_BASE_1, vaddr,
					  &dst_paddr);
	void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr,
					  &src_paddr);

	preempt_disable();
	kmap_invalidate_coherent(dst, vaddr);
	set_bit(PG_arch_1, &dst->flags);
	copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
	preempt_enable();
}
EXPORT_SYMBOL(copy_user_highpage);

/*
 * This routine is called any time the kernel writes to a page-cache page,
 * or is about to read from one.
 */

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	/*
	 * If we have a mapping but the page is not mapped to user-space
	 * yet, we simply mark this page dirty and defer flushing the
	 * caches until update_mmu_cache().
	 */

	if (mapping && !mapping_mapped(mapping)) {
		if (!test_bit(PG_arch_1, &page->flags))
			set_bit(PG_arch_1, &page->flags);
		return;

	} else {

		unsigned long phys = page_to_phys(page);
		unsigned long temp = page->index << PAGE_SHIFT;
		unsigned long alias = !(DCACHE_ALIAS_EQ(temp, phys));
		unsigned long virt;

		/*
		 * Flush the page in kernel space and user space.
		 * Note that we can omit that step if aliasing is not
		 * an issue, but we do have to synchronize I$ and D$
		 * if we have a mapping.
		 */

		if (!alias && !mapping)
			return;

		virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(virt, phys);

		virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);

		if (alias)
			__flush_invalidate_dcache_page_alias(virt, phys);

		if (mapping)
			__invalidate_icache_page_alias(virt, phys);
	}

	/* There shouldn't be an entry in the cache for this page anymore. */
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * For now, flush the whole cache. FIXME??
 */

void local_flush_cache_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	__flush_invalidate_dcache_all();
	__invalidate_icache_all();
}
EXPORT_SYMBOL(local_flush_cache_range);

/*
 * Remove any entry in the cache for this page.
 *
 * Note that this function is only called for user pages, so use the
 * alias versions of the cache flush functions.
 */

void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
			    unsigned long pfn)
{
	/* Note that we have to use the 'alias' address to avoid multi-hit. */

	unsigned long phys = page_to_phys(pfn_to_page(pfn));
	unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);

	__flush_invalidate_dcache_page_alias(virt, phys);
	__invalidate_icache_page_alias(virt, phys);
}
EXPORT_SYMBOL(local_flush_cache_page);

#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */

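/*
 * Called once a new translation for 'addr' has been installed; this is
 * where the cache maintenance deferred by flush_dcache_page() happens.
 * With D-cache aliasing, a page marked PG_arch_1 (dirty) is written back
 * and invalidated through both its kernel color and the new user color,
 * and its I-cache alias is invalidated. Without aliasing, PG_arch_1 means
 * 'caches in sync', so only pages not yet marked need the D$/I$
 * synchronization, and only for executable mappings.
 */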
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);

	/* Invalidate old entry in TLBs. */

	flush_tlb_page(vma, addr);

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

	if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
		unsigned long phys = page_to_phys(page);
		unsigned long tmp;

		tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(tmp, phys);
		tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(tmp, phys);
		__invalidate_icache_page_alias(tmp, phys);

		clear_bit(PG_arch_1, &page->flags);
	}
#else
	if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
	    && (vma->vm_flags & VM_EXEC) != 0) {
		/* kmap_atomic() returns a kernel virtual address. */
		unsigned long vaddr = (unsigned long)kmap_atomic(page);
		__flush_dcache_page(vaddr);
		__invalidate_icache_page(vaddr);
		set_bit(PG_arch_1, &page->flags);
		kunmap_atomic((void *)vaddr);
	}
#endif
}

/*
 * access_process_vm() has called get_user_pages(), which has done a
 * flush_dcache_page() on the page.
 */

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

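/*
 * Write 'len' bytes at kernel address 'dst' into a page that is also
 * mapped at user address 'vaddr' (e.g. on behalf of ptrace), keeping the
 * user's D-cache view, and the I-cache for executable mappings, coherent
 * with the new contents.
 */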
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	unsigned long phys = page_to_phys(page);
	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

	/* Flush and invalidate user page if aliased. */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(t, phys);
	}

	/* Copy data. */

	memcpy(dst, src, len);

	/*
	 * Flush and invalidate kernel page if aliased and synchronize
	 * data and instruction caches for executable pages.
	 */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

		__flush_invalidate_dcache_range((unsigned long)dst, len);
		if ((vma->vm_flags & VM_EXEC) != 0)
			__invalidate_icache_page_alias(t, phys);

	} else if ((vma->vm_flags & VM_EXEC) != 0) {
		__flush_dcache_range((unsigned long)dst, len);
		__invalidate_icache_range((unsigned long)dst, len);
	}
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	unsigned long phys = page_to_phys(page);
	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

	/*
	 * Flush user page if aliased.
	 * (Note: a simple flush would be sufficient.)
	 */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(t, phys);
	}

	memcpy(dst, src, len);
}

#endif