#ifndef _M68K_CACHEFLUSH_H
#define _M68K_CACHEFLUSH_H

#include <linux/mm.h>
#ifdef CONFIG_COLDFIRE
#include <asm/mcfsim.h>
#endif

/* CACR bits used to clear the '020/'030 on-chip caches */
#define FLUSH_I_AND_D	(0x00000808)
#define FLUSH_I		(0x00000008)

/* Fallback values for configurations whose cache headers do not define these */
#ifndef ICACHE_MAX_ADDR
#define ICACHE_MAX_ADDR	0
#define ICACHE_SET_MASK	0
#define DCACHE_MAX_ADDR	0
#define DCACHE_SETMASK	0
#endif
#ifndef CACHE_MODE
#define	CACHE_MODE	0
#define	CACR_ICINVA	0
#define	CACR_DCINVA	0
#define	CACR_BCINVA	0
#endif

/*
 * ColdFire architecture has no way to clear individual cache lines, so we
 * are stuck invalidating all the cache entries when we want a clear operation.
 */
static inline void clear_cf_icache(unsigned long start, unsigned long end)
{
	__asm__ __volatile__ (
		"movec	%0,%%cacr\n\t"
		"nop"
		:
		: "r" (CACHE_MODE | CACR_ICINVA | CACR_BCINVA));
}

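/* Invalidate the whole ColdFire data cache via the CACR DCINVA bit. */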
static inline void clear_cf_dcache(unsigned long start, unsigned long end)
{
	__asm__ __volatile__ (
		"movec	%0,%%cacr\n\t"
		"nop"
		:
		: "r" (CACHE_MODE | CACR_DCINVA));
}

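/* Invalidate all of the ColdFire caches: instruction, branch and data. */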
static inline void clear_cf_bcache(unsigned long start, unsigned long end)
{
	__asm__ __volatile__ (
		"movec	%0,%%cacr\n\t"
		"nop"
		:
		: "r" (CACHE_MODE | CACR_ICINVA | CACR_BCINVA | CACR_DCINVA));
}

/*
 * Use the ColdFire cpushl instruction to push (and invalidate) cache lines.
 * The start and end addresses are cache line numbers not memory addresses.
 */
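/*
 * Each iteration of the loops below issues four cpushl operations,
 * stepping the address by one between them to cover the ways of one
 * set, then skips ahead to the next set: the 3 added in the asm plus
 * the (0x10 - 3) loop step give a net stride of 0x10.
 */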
static inline void flush_cf_icache(unsigned long start, unsigned long end)
{
	unsigned long set;

	for (set = start; set <= end; set += (0x10 - 3)) {
		__asm__ __volatile__ (
			"cpushl %%ic,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%ic,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%ic,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%ic,(%0)"
			: "=a" (set)
			: "a" (set));
	}
}

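/* Push and invalidate the ColdFire data cache, one set at a time. */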
static inline void flush_cf_dcache(unsigned long start, unsigned long end)
{
	unsigned long set;

	for (set = start; set <= end; set += (0x10 - 3)) {
		__asm__ __volatile__ (
			"cpushl %%dc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%dc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%dc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%dc,(%0)"
			: "=a" (set)
			: "a" (set));
	}
}

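/* Push and invalidate both ColdFire caches (%bc), one set at a time. */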
static inline void flush_cf_bcache(unsigned long start, unsigned long end)
{
	unsigned long set;

	for (set = start; set <= end; set += (0x10 - 3)) {
		__asm__ __volatile__ (
			"cpushl %%bc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%bc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%bc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%bc,(%0)"
			: "=a" (set)
			: "a" (set));
	}
}

/*
 * Cache handling functions
 */

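/*
 * Flush the entire instruction cache: ColdFire and '040/'060 push lines
 * with cpushl/cpusha, the '020/'030 set the clear-instruction-cache bit
 * in the CACR.
 */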
static inline void flush_icache(void)
{
	if (CPU_IS_COLDFIRE) {
		flush_cf_icache(0, ICACHE_MAX_ADDR);
	} else if (CPU_IS_040_OR_060) {
		asm volatile (	"nop\n"
			"	.chip	68040\n"
			"	cpusha	%bc\n"
			"	.chip	68k");
	} else {
		unsigned long tmp;
		asm volatile (	"movec	%%cacr,%0\n"
			"	or.w	%1,%0\n"
			"	movec	%0,%%cacr"
			: "=&d" (tmp)
			: "id" (FLUSH_I));
	}
}

/*
 * Invalidate the cache for the specified memory range.
 * The range starts at the given physical address and
 * runs for the given number of bytes.
 */
extern void cache_clear(unsigned long paddr, int len);
/*
 * Push any dirty cache lines in the specified memory range.
 * The range starts at the given physical address and
 * runs for the given number of bytes.
 */
extern void cache_push(unsigned long paddr, int len);

/*
 * Push and invalidate pages in the specified user virtual
 * memory range.
 */
extern void cache_push_v(unsigned long vaddr, int len);

/* This is needed whenever the virtual mapping of the current
   process changes.  */
#define __flush_cache_all()					\
({								\
	if (CPU_IS_COLDFIRE) {					\
		flush_cf_dcache(0, DCACHE_MAX_ADDR);		\
	} else if (CPU_IS_040_OR_060) {				\
		__asm__ __volatile__("nop\n\t"			\
				     ".chip 68040\n\t"		\
				     "cpusha %dc\n\t"		\
				     ".chip 68k");		\
	} else {						\
		unsigned long _tmp;				\
		__asm__ __volatile__("movec %%cacr,%0\n\t"	\
				     "orw %1,%0\n\t"		\
				     "movec %0,%%cacr"		\
				     : "=&d" (_tmp)		\
				     : "di" (FLUSH_I_AND_D));	\
	}							\
})

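/* Invalidate the on-chip caches, but only when running on a '020 or '030. */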
#define __flush_cache_030()					\
({								\
	if (CPU_IS_020_OR_030) {				\
		unsigned long _tmp;				\
		__asm__ __volatile__("movec %%cacr,%0\n\t"	\
				     "orw %1,%0\n\t"		\
				     "movec %0,%%cacr"		\
				     : "=&d" (_tmp)		\
				     : "di" (FLUSH_I_AND_D));	\
	}							\
})

#define flush_cache_all() __flush_cache_all()

#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()

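/* Nothing needs doing unless the mm being operated on is the one currently running. */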
static inline void flush_cache_mm(struct mm_struct *mm)
{
	if (mm == current->mm)
		__flush_cache_030();
}

#define flush_cache_dup_mm(mm)			flush_cache_mm(mm)

/* flush_cache_range/flush_cache_page must be macros to avoid
   a dependency on linux/mm.h, which includes this file... */
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start,
				     unsigned long end)
{
	if (vma->vm_mm == current->mm)
	        __flush_cache_030();
}

static inline void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	if (vma->vm_mm == current->mm)
	        __flush_cache_030();
}


/* Push the page at kernel virtual address and clear the icache */
/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
static inline void __flush_page_to_ram(void *vaddr)
{
	if (CPU_IS_COLDFIRE) {
		unsigned long addr, start, end;
		addr = ((unsigned long) vaddr) & ~(PAGE_SIZE - 1);
		start = addr & ICACHE_SET_MASK;
		end = (addr + PAGE_SIZE - 1) & ICACHE_SET_MASK;
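		/* The page's cache sets wrap past the top of the index space: flush in two pieces */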
		if (start > end) {
			flush_cf_bcache(0, end);
			end = ICACHE_MAX_ADDR;
		}
		flush_cf_bcache(start, end);
	} else if (CPU_IS_040_OR_060) {
		__asm__ __volatile__("nop\n\t"
				     ".chip 68040\n\t"
				     "cpushp %%bc,(%0)\n\t"
				     ".chip 68k"
				     : : "a" (__pa(vaddr)));
	} else {
		unsigned long _tmp;
		__asm__ __volatile__("movec %%cacr,%0\n\t"
				     "orw %1,%0\n\t"
				     "movec %0,%%cacr"
				     : "=&d" (_tmp)
				     : "di" (FLUSH_I));
	}
}


#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
#define flush_dcache_page(page)		__flush_page_to_ram(page_address(page))
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
#define flush_icache_page(vma, page)	__flush_page_to_ram(page_address(page))

extern void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
				    unsigned long addr, int len);
extern void flush_icache_range(unsigned long address, unsigned long endaddr);

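/*
 * Helpers used when the kernel writes into another process's pages
 * (e.g. ptrace): flush around the copy so no stale cache lines are seen.
 */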
static inline void copy_to_user_page(struct vm_area_struct *vma,
				     struct page *page, unsigned long vaddr,
				     void *dst, void *src, int len)
{
	flush_cache_page(vma, vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	flush_icache_user_range(vma, page, vaddr, len);
}
static inline void copy_from_user_page(struct vm_area_struct *vma,
				       struct page *page, unsigned long vaddr,
				       void *dst, void *src, int len)
{
	flush_cache_page(vma, vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
}

#endif /* _M68K_CACHEFLUSH_H */