/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef _ASM_R4KCACHE_H
#define _ASM_R4KCACHE_H

#include <linux/stringify.h>

#include <asm/asm.h>
#include <asm/asm-eva.h>
#include <asm/cacheops.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/mipsmtregs.h>
#include <asm/mmzone.h>
#include <asm/unroll.h>
#include <linux/uaccess.h> /* for uaccess_kernel() */

extern void (*r4k_blast_dcache)(void);
extern void (*r4k_blast_icache)(void);

/*
 * This macro returns a properly sign-extended address suitable as a base
 * address for indexed cache operations.  Two issues here:
 *
 *  - The MIPS32 and MIPS64 specs permit an implementation to directly derive
 *    the index bits from the virtual address.  This breaks with the tradition
 *    set by the R4000.  To keep unpleasant surprises from happening we pick
 *    an address in KSEG0 / CKSEG0.
 *  - We need a properly sign extended address for 64-bit code.  To get away
 *    without ifdefs we let the compiler do it by a type cast.
 */
#define INDEX_BASE	CKSEG0

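/*
 * cache_op() issues a single cache instruction with the given op on the
 * cache line containing addr; _cache_op() additionally lets the caller
 * choose between the kernel_cache and user_cache encodings.
 */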
#define _cache_op(insn, op, addr)					\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	noreorder				\n"	\
	"	.set "MIPS_ISA_ARCH_LEVEL"			\n"	\
	"	" insn("%0", "%1") "				\n"	\
	"	.set	pop					\n"	\
	:								\
	: "i" (op), "R" (*(unsigned char *)(addr)))

#define cache_op(op, addr)						\
	_cache_op(kernel_cache, op, addr)

static inline void flush_icache_line_indexed(unsigned long addr)
{
	cache_op(Index_Invalidate_I, addr);
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_D, addr);
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_SD, addr);
}

static inline void flush_icache_line(unsigned long addr)
{
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2EF:
		cache_op(Hit_Invalidate_I_Loongson2, addr);
		break;

	default:
		cache_op(Hit_Invalidate_I, addr);
		break;
	}
}

static inline void flush_dcache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_D, addr);
}

static inline void invalidate_dcache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_D, addr);
}

static inline void invalidate_scache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_SD, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_SD, addr);
}

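/*
 * Like cache_op(), but with an exception table entry so that a fault on the
 * target address is handled by the fixup code instead of being fatal; the
 * expression evaluates to 0 on success or -EFAULT on a fault.
 */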
#define protected_cache_op(op,addr)				\
({								\
	int __err = 0;						\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set "MIPS_ISA_ARCH_LEVEL"	\n"		\
	"1:	cache	%1, (%2)		\n"		\
	"2:	.insn				\n"		\
	"	.set	pop			\n"		\
	"	.section .fixup,\"ax\"		\n"		\
	"3:	li	%0, %3			\n"		\
	"	j	2b			\n"		\
	"	.previous			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR)" 1b, 3b		\n"		\
	"	.previous"					\
	: "+r" (__err)						\
	: "i" (op), "r" (addr), "i" (-EFAULT));			\
	__err;							\
})

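/*
 * EVA variant of protected_cache_op(): uses the cachee instruction so the
 * operation is applied to the user address space mapping of addr.
 */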
#define protected_cachee_op(op,addr)				\
({								\
	int __err = 0;						\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set	mips0			\n"		\
	"	.set	eva			\n"		\
	"1:	cachee	%1, (%2)		\n"		\
	"2:	.insn				\n"		\
	"	.set	pop			\n"		\
	"	.section .fixup,\"ax\"		\n"		\
	"3:	li	%0, %3			\n"		\
	"	j	2b			\n"		\
	"	.previous			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR)" 1b, 3b		\n"		\
	"	.previous"					\
	: "+r" (__err)						\
	: "i" (op), "r" (addr), "i" (-EFAULT));			\
	__err;							\
})

/*
 * The next two are for badland addresses like signal trampolines.
 */
static inline int protected_flush_icache_line(unsigned long addr)
{
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2EF:
		return protected_cache_op(Hit_Invalidate_I_Loongson2, addr);

	default:
#ifdef CONFIG_EVA
		return protected_cachee_op(Hit_Invalidate_I, addr);
#else
		return protected_cache_op(Hit_Invalidate_I, addr);
#endif
	}
}

/*
 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
 * cacheop so we use Hit_Writeback_Inv_D which is supported by all R4000-style
 * caches.  We're talking about one cacheline unnecessarily getting invalidated
 * here so the penalty isn't overly hard.
 */
static inline int protected_writeback_dcache_line(unsigned long addr)
{
#ifdef CONFIG_EVA
	return protected_cachee_op(Hit_Writeback_Inv_D, addr);
#else
	return protected_cache_op(Hit_Writeback_Inv_D, addr);
#endif
}

static inline int protected_writeback_scache_line(unsigned long addr)
{
#ifdef CONFIG_EVA
	return protected_cachee_op(Hit_Writeback_Inv_SD, addr);
#else
	return protected_cache_op(Hit_Writeback_Inv_SD, addr);
#endif
}

/*
 * This one is RM7000-specific
 */
static inline void invalidate_tcache_page(unsigned long addr)
{
	cache_op(Page_Invalidate_T, addr);
}

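/*
 * Issue 'times' cache instructions on consecutive cache lines starting at
 * addr; the loop is fully unrolled at compile time via unroll(), so 'times'
 * must be a compile-time constant.
 */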
#define cache_unroll(times, insn, op, addr, lsize) do {			\
	int i = 0;							\
	unroll(times, _cache_op, insn, op, (addr) + (i++ * (lsize)));	\
} while (0)

/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra)	\
static inline void extra##blast_##pfx##cache##lsize(void)		\
{									\
	unsigned long start = INDEX_BASE;				\
	unsigned long end = start + current_cpu_data.desc.waysize;	\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache_unroll(32, kernel_cache, indexop,		\
				     addr | ws, lsize);			\
}									\
									\
static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
{									\
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	do {								\
		cache_unroll(32, kernel_cache, hitop, start, lsize);	\
		start += lsize * 32;					\
	} while (start < end);						\
}									\
									\
static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
{									\
	unsigned long indexmask = current_cpu_data.desc.waysize - 1;	\
	unsigned long start = INDEX_BASE + (page & indexmask);		\
	unsigned long end = start + PAGE_SIZE;				\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache_unroll(32, kernel_cache, indexop,		\
				     addr | ws, lsize);			\
}

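/*
 * Instantiate the blast helpers for each supported line size, e.g.
 * blast_dcache32(), blast_icache64_page() and blast_scache128_page_indexed().
 */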
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )

__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )

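/*
 * build blast_xxx_user_page: like the _page helpers above, but using
 * user_cache so that EVA kernels operate on the user address space
 * mapping of the page.
 */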
#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
{									\
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	do {								\
		cache_unroll(32, user_cache, hitop, start, lsize);	\
		start += lsize * 32;					\
	} while (start < end);						\
}

__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 16)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 32)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 64)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)

/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra)	\
static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
						    unsigned long end)	\
{									\
	unsigned long lsize = cpu_##desc##_line_size();			\
	unsigned long addr = start & ~(lsize - 1);			\
	unsigned long aend = (end - 1) & ~(lsize - 1);			\
									\
	while (1) {							\
		prot##cache_op(hitop, addr);				\
		if (addr == aend)					\
			break;						\
		addr += lsize;						\
	}								\
}

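/*
 * Under EVA the protected range flushes must choose between cache and
 * cachee at run time, depending on whether the range is a kernel or a
 * user address, hence the separate definitions below.
 */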
#ifndef CONFIG_EVA

__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )

#else

#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop)		\
static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
							unsigned long end) \
{									\
	unsigned long lsize = cpu_##desc##_line_size();			\
	unsigned long addr = start & ~(lsize - 1);			\
	unsigned long aend = (end - 1) & ~(lsize - 1);			\
									\
	if (!uaccess_kernel()) {					\
		while (1) {						\
			protected_cachee_op(hitop, addr);		\
			if (addr == aend)				\
				break;					\
			addr += lsize;					\
		}							\
	} else {							\
		while (1) {						\
			protected_cache_op(hitop, addr);		\
			if (addr == aend)				\
				break;					\
			addr += lsize;					\
		}							\
	}								\
}

__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)

#endif
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
	protected_, loongson2_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
/* blast_inv_dcache_range */
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )

/* Currently, this is very specific to Loongson-3 */
#define __BUILD_BLAST_CACHE_NODE(pfx, desc, indexop, hitop, lsize)	\
static inline void blast_##pfx##cache##lsize##_node(long node)		\
{									\
	unsigned long start = CAC_BASE | nid_to_addrbase(node);		\
	unsigned long end = start + current_cpu_data.desc.waysize;	\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache_unroll(32, kernel_cache, indexop,		\
				     addr | ws, lsize);			\
}

__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)

#endif /* _ASM_R4KCACHE_H */