/*
 * arch/xtensa/mm/cache.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001-2006 Tensilica Inc.
 *
 * Chris Zankel	<chris@zankel.net>
 * Joe Taylor
 * Marc Gauthier
 *
 */

#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/memblock.h>
#include <linux/swap.h>
#include <linux/pagemap.h>

#include <asm/bootparam.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Note:
 * The kernel provides one architecture bit PG_arch_1 in the page flags that
 * can be used for cache coherency.
 *
 * I$-D$ coherency.
 *
 * The Xtensa architecture doesn't keep the instruction cache coherent with
 * the data cache. We use the architecture bit to indicate if the caches
 * are coherent. The kernel clears this bit whenever a page is added to the
 * page cache. At that time, the caches might not be in sync. We, therefore,
 * define this flag as 'clean' if set.
 *
 * D-cache aliasing.
 *
 * With cache aliasing, we always have to flush the cache when pages are
 * unmapped (see tlb_start_vma()). So, we use this flag to indicate a dirty
 * page.
 */
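
/*
 * Illustration (example geometry only; the real one depends on the cache
 * configuration): with a 16 KiB way size and 4 KiB pages,
 * DCACHE_ALIAS_MASK covers the two virtual address bits just above the
 * page offset (the page "color"). Two mappings of the same physical page
 * hit the same cache lines only if their colors match, e.g.:
 *
 *	DCACHE_ALIAS_EQ(0x2000, 0x6000)	-> true  (both color 2)
 *	DCACHE_ALIAS_EQ(0x2000, 0x3000)	-> false (colors 2 and 3)
 */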

#if (DCACHE_WAY_SIZE > PAGE_SIZE)
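
/*
 * Invalidate the kernel's own copy of @page from the D-cache when the
 * kernel virtual address has a different color than the user address
 * @vaddr, so that stale lines at the kernel color cannot shadow data
 * written later through a matching-color alias.
 */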
static inline void kmap_invalidate_coherent(struct page *page,
					    unsigned long vaddr)
{
	if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
		unsigned long kvaddr;

		if (!PageHighMem(page)) {
			kvaddr = (unsigned long)page_to_virt(page);

			__invalidate_dcache_page(kvaddr);
		} else {
			kvaddr = TLBTEMP_BASE_1 +
				(page_to_phys(page) & DCACHE_ALIAS_MASK);

			preempt_disable();
			__invalidate_dcache_page_alias(kvaddr,
						       page_to_phys(page));
			preempt_enable();
		}
	}
}

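/*
 * Return a kernel virtual address for @page whose D-cache color matches
 * the user address @vaddr: the page's normal kernel address if the
 * colors already agree (with *paddr set to 0), otherwise a TLBTEMP
 * alias derived from @base (with *paddr set to the physical address so
 * the caller can install the temporary mapping).
 */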
static inline void *coherent_kvaddr(struct page *page, unsigned long base,
				    unsigned long vaddr, unsigned long *paddr)
{
	if (PageHighMem(page) || !DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
		*paddr = page_to_phys(page);
		return (void *)(base + (vaddr & DCACHE_ALIAS_MASK));
	} else {
		*paddr = 0;
		return page_to_virt(page);
	}
}

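/*
 * clear_user_highpage() and copy_user_highpage() below touch the page
 * through an address of the same color as the user mapping at @vaddr,
 * after invalidating any stale lines the kernel mapping may hold, and
 * set PG_arch_1 so that update_mmu_cache() knows cache maintenance is
 * still pending for this page.
 */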
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	unsigned long paddr;
	void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);

	preempt_disable();
	kmap_invalidate_coherent(page, vaddr);
	set_bit(PG_arch_1, &page->flags);
	clear_page_alias(kvaddr, paddr);
	preempt_enable();
}
EXPORT_SYMBOL(clear_user_highpage);

void copy_user_highpage(struct page *dst, struct page *src,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	unsigned long dst_paddr, src_paddr;
	void *dst_vaddr = coherent_kvaddr(dst, TLBTEMP_BASE_1, vaddr,
					  &dst_paddr);
	void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr,
					  &src_paddr);

	preempt_disable();
	kmap_invalidate_coherent(dst, vaddr);
	set_bit(PG_arch_1, &dst->flags);
	copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
	preempt_enable();
}
EXPORT_SYMBOL(copy_user_highpage);

/*
 * This routine is called any time the kernel writes to a page cache
 * page, or is about to read from one.
 */
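
/*
 * (Illustrative call path: a write(2) dirties a page cache page through
 * the kernel mapping; flush_dcache_page() must then make the new data
 * visible to differently-colored user mappings, or record the pending
 * work in PG_arch_1 if the page is not mapped into user space yet.)
 */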

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping_file(page);

	/*
	 * If we have a mapping but the page is not mapped to user-space
	 * yet, we simply mark this page dirty and defer flushing the
	 * caches until update_mmu_cache().
	 */

	if (mapping && !mapping_mapped(mapping)) {
		if (!test_bit(PG_arch_1, &page->flags))
			set_bit(PG_arch_1, &page->flags);
		return;

	} else {

		unsigned long phys = page_to_phys(page);
		unsigned long temp = page->index << PAGE_SHIFT;
		unsigned long alias = !(DCACHE_ALIAS_EQ(temp, phys));
		unsigned long virt;

		/*
		 * Flush the page in kernel space and user space.
		 * Note that we can omit that step if aliasing is not
		 * an issue, but we do have to synchronize I$ and D$
		 * if we have a mapping.
		 */

		if (!alias && !mapping)
			return;

		preempt_disable();
		virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(virt, phys);

		virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);

		if (alias)
			__flush_invalidate_dcache_page_alias(virt, phys);

		if (mapping)
			__invalidate_icache_page_alias(virt, phys);
		preempt_enable();
	}

	/* There shouldn't be an entry in the cache for this page anymore. */
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * For now, flush the whole cache. FIXME??
 */
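
/*
 * (A range-based implementation would presumably have to walk every
 * page in [start, end) and flush each of its alias colors; flushing the
 * whole cache is simpler, at the cost of collateral invalidation.)
 */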

void local_flush_cache_range(struct vm_area_struct *vma,
		       unsigned long start, unsigned long end)
{
	__flush_invalidate_dcache_all();
	__invalidate_icache_all();
}
EXPORT_SYMBOL(local_flush_cache_range);

/*
 * Remove any entry in the cache for this page.
 *
 * Note that this function is only called for user pages, so use the
 * alias versions of the cache flush functions.
 */

void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
		      unsigned long pfn)
{
	/* Note that we have to use the 'alias' address to avoid multi-hit */

	unsigned long phys = page_to_phys(pfn_to_page(pfn));
	unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);

	preempt_disable();
	__flush_invalidate_dcache_page_alias(virt, phys);
	__invalidate_icache_page_alias(virt, phys);
	preempt_enable();
}
EXPORT_SYMBOL(local_flush_cache_page);

#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
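/*
 * Called once a PTE for @addr has been installed: this is where cache
 * maintenance deferred via PG_arch_1 (see flush_dcache_page() above) is
 * actually performed, for both the aliasing and the I$/D$-only case.
 */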
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);

	/* Invalidate old entry in TLBs */

	flush_tlb_page(vma, addr);

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

	if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
		unsigned long phys = page_to_phys(page);
		unsigned long tmp;

		preempt_disable();
		tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(tmp, phys);
		tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(tmp, phys);
		__invalidate_icache_page_alias(tmp, phys);
		preempt_enable();

		clear_bit(PG_arch_1, &page->flags);
	}
#else
	if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
	    && (vma->vm_flags & VM_EXEC) != 0) {
		unsigned long paddr = (unsigned long)kmap_atomic(page);
		__flush_dcache_page(paddr);
		__invalidate_icache_page(paddr);
		set_bit(PG_arch_1, &page->flags);
		kunmap_atomic((void *)paddr);
	}
#endif
}

/*
 * access_process_vm() has called get_user_pages(), which has done a
 * flush_dcache_page() on the page.
 */
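
/*
 * copy_to_user_page() is what e.g. ptrace uses to patch another
 * process' text (software breakpoints), which is why it must bring the
 * I-cache in sync for executable mappings in addition to resolving
 * D-cache aliases.
 */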

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long vaddr, void *dst, const void *src,
		unsigned long len)
{
	unsigned long phys = page_to_phys(page);
	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

	/* Flush and invalidate user page if aliased. */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
		preempt_disable();
		__flush_invalidate_dcache_page_alias(t, phys);
		preempt_enable();
	}

	/* Copy data */

	memcpy(dst, src, len);

	/*
	 * Flush and invalidate kernel page if aliased and synchronize
	 * data and instruction caches for executable pages.
	 */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

		preempt_disable();
		__flush_invalidate_dcache_range((unsigned long) dst, len);
		if ((vma->vm_flags & VM_EXEC) != 0)
			__invalidate_icache_page_alias(t, phys);
		preempt_enable();

	} else if ((vma->vm_flags & VM_EXEC) != 0) {
		__flush_dcache_range((unsigned long) dst, len);
		__invalidate_icache_range((unsigned long) dst, len);
	}
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long vaddr, void *dst, const void *src,
		unsigned long len)
{
	unsigned long phys = page_to_phys(page);
	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

	/*
	 * Flush user page if aliased.
	 * (Note: a simple flush would be sufficient)
	 */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
		preempt_disable();
		__flush_invalidate_dcache_page_alias(t, phys);
		preempt_enable();
	}

	memcpy(dst, src, len);
}

#endif