/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef _ASM_R4KCACHE_H
#define _ASM_R4KCACHE_H

#include <linux/stringify.h>

#include <asm/asm.h>
#include <asm/cacheops.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/mipsmtregs.h>
#include <asm/mmzone.h>
#include <linux/uaccess.h> /* for uaccess_kernel() */

extern void (*r4k_blast_dcache)(void);
extern void (*r4k_blast_icache)(void);

/*
 * This macro returns a properly sign-extended address suitable as a base
 * address for indexed cache operations.  Two issues here:
 *
 *  - The MIPS32 and MIPS64 specs permit an implementation to directly derive
 *    the index bits from the virtual address.  This breaks with the tradition
 *    set by the R4000.  To keep unpleasant surprises from happening we pick
 *    an address in KSEG0 / CKSEG0.
 *  - We need a properly sign-extended address for 64-bit code.  To get away
 *    without ifdefs we let the compiler do it by a type cast.
 */
#define INDEX_BASE	CKSEG0
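
/*
 * Illustrative note: the blast helpers further down walk indexed ops as
 * cache_op(indexop, INDEX_BASE + (index | way_bits)), OR-ing a way
 * selector (built from the ways/waybit fields in current_cpu_data) into
 * the offset so every way of every set in the covered range is hit.
 */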

#define cache_op(op,addr)						\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	noreorder				\n"	\
	"	.set "MIPS_ISA_ARCH_LEVEL"			\n"	\
	"	cache	%0, %1					\n"	\
	"	.set	pop					\n"	\
	:								\
	: "i" (op), "R" (*(unsigned char *)(addr)))
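
/*
 * Usage sketch: cache_op(Hit_Writeback_Inv_D, addr) emits one CACHE
 * instruction for the line containing addr.  The "R" constraint hands the
 * byte at addr to the assembler as an offsettable memory operand, so %1
 * expands to a base+offset address without manual register setup.
 */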

#ifdef CONFIG_MIPS_MT

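/*
 * Note: on CONFIG_MIPS_MT these prologue/epilogue pairs open and close a
 * loop, so each flush is issued mt_n_iflushes / mt_n_dflushes times
 * (repeat counts supplied by the MT support code); the non-MT variants
 * below collapse to a plain braced block.
 */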
#define __iflush_prologue						\
	unsigned long redundance;					\
	extern int mt_n_iflushes;					\
	for (redundance = 0; redundance < mt_n_iflushes; redundance++) {

#define __iflush_epilogue						\
	}

#define __dflush_prologue						\
	unsigned long redundance;					\
	extern int mt_n_dflushes;					\
	for (redundance = 0; redundance < mt_n_dflushes; redundance++) {

#define __dflush_epilogue \
	}

#define __inv_dflush_prologue __dflush_prologue
#define __inv_dflush_epilogue __dflush_epilogue
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue __sflush_prologue
#define __inv_sflush_epilogue __sflush_epilogue

#else /* CONFIG_MIPS_MT */

#define __iflush_prologue {
#define __iflush_epilogue }
#define __dflush_prologue {
#define __dflush_epilogue }
#define __inv_dflush_prologue {
#define __inv_dflush_epilogue }
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue {
#define __inv_sflush_epilogue }

#endif /* CONFIG_MIPS_MT */

static inline void flush_icache_line_indexed(unsigned long addr)
{
	__iflush_prologue
	cache_op(Index_Invalidate_I, addr);
	__iflush_epilogue
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
	__dflush_prologue
	cache_op(Index_Writeback_Inv_D, addr);
	__dflush_epilogue
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_SD, addr);
}

static inline void flush_icache_line(unsigned long addr)
{
	__iflush_prologue
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2:
		cache_op(Hit_Invalidate_I_Loongson2, addr);
		break;

	default:
		cache_op(Hit_Invalidate_I, addr);
		break;
	}
	__iflush_epilogue
}

static inline void flush_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Writeback_Inv_D, addr);
	__dflush_epilogue
}

static inline void invalidate_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Invalidate_D, addr);
	__dflush_epilogue
}

static inline void invalidate_scache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_SD, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_SD, addr);
}

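/*
 * protected_cache_op() evaluates to 0 on success or -EFAULT if the CACHE
 * instruction takes an address error or TLB fault: the __ex_table entry
 * maps a fault at label 1 to the fixup at label 3, which loads -EFAULT
 * into the result and jumps back to the continuation at label 2.
 */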
#define protected_cache_op(op,addr)				\
({								\
	int __err = 0;						\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set "MIPS_ISA_ARCH_LEVEL"	\n"		\
	"1:	cache	%1, (%2)		\n"		\
	"2:	.insn				\n"		\
	"	.set	pop			\n"		\
	"	.section .fixup,\"ax\"		\n"		\
	"3:	li	%0, %3			\n"		\
	"	j	2b			\n"		\
	"	.previous			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR)" 1b, 3b		\n"		\
	"	.previous"					\
	: "+r" (__err)						\
	: "i" (op), "r" (addr), "i" (-EFAULT));			\
	__err;							\
})


#define protected_cachee_op(op,addr)				\
({								\
	int __err = 0;						\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set	mips0			\n"		\
	"	.set	eva			\n"		\
	"1:	cachee	%1, (%2)		\n"		\
	"2:	.insn				\n"		\
	"	.set	pop			\n"		\
	"	.section .fixup,\"ax\"		\n"		\
	"3:	li	%0, %3			\n"		\
	"	j	2b			\n"		\
	"	.previous			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR)" 1b, 3b		\n"		\
	"	.previous"					\
	: "+r" (__err)						\
	: "i" (op), "r" (addr), "i" (-EFAULT));			\
	__err;							\
})

/*
 * The next two are for badland addresses like signal trampolines.
 */
static inline int protected_flush_icache_line(unsigned long addr)
{
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2:
		return protected_cache_op(Hit_Invalidate_I_Loongson2, addr);

	default:
#ifdef CONFIG_EVA
		return protected_cachee_op(Hit_Invalidate_I, addr);
#else
		return protected_cache_op(Hit_Invalidate_I, addr);
#endif
	}
}

/*
 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
 * cacheop so we use Hit_Writeback_Inv_D which is supported by all R4000-style
 * caches.  Only a single cache line gets needlessly invalidated, so the
 * penalty is small.
 */
static inline int protected_writeback_dcache_line(unsigned long addr)
{
#ifdef CONFIG_EVA
	return protected_cachee_op(Hit_Writeback_Inv_D, addr);
#else
	return protected_cache_op(Hit_Writeback_Inv_D, addr);
#endif
}

static inline int protected_writeback_scache_line(unsigned long addr)
{
#ifdef CONFIG_EVA
	return protected_cachee_op(Hit_Writeback_Inv_SD, addr);
#else
	return protected_cache_op(Hit_Writeback_Inv_SD, addr);
#endif
}

/*
 * This one is RM7000-specific
 */
static inline void invalidate_tcache_page(unsigned long addr)
{
	cache_op(Page_Invalidate_T, addr);
}

#ifndef CONFIG_CPU_MIPSR6
#define cache16_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x010(%0)	\n"	\
	"	cache %1, 0x020(%0); cache %1, 0x030(%0)	\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x050(%0)	\n"	\
	"	cache %1, 0x060(%0); cache %1, 0x070(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x090(%0)	\n"	\
	"	cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)	\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)	\n"	\
	"	cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x110(%0)	\n"	\
	"	cache %1, 0x120(%0); cache %1, 0x130(%0)	\n"	\
	"	cache %1, 0x140(%0); cache %1, 0x150(%0)	\n"	\
	"	cache %1, 0x160(%0); cache %1, 0x170(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x190(%0)	\n"	\
	"	cache %1, 0x1a0(%0); cache %1, 0x1b0(%0)	\n"	\
	"	cache %1, 0x1c0(%0); cache %1, 0x1d0(%0)	\n"	\
	"	cache %1, 0x1e0(%0); cache %1, 0x1f0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache32_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x020(%0)	\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x060(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0a0(%0)	\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x120(%0)	\n"	\
	"	cache %1, 0x140(%0); cache %1, 0x160(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x1a0(%0)	\n"	\
	"	cache %1, 0x1c0(%0); cache %1, 0x1e0(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x220(%0)	\n"	\
	"	cache %1, 0x240(%0); cache %1, 0x260(%0)	\n"	\
	"	cache %1, 0x280(%0); cache %1, 0x2a0(%0)	\n"	\
	"	cache %1, 0x2c0(%0); cache %1, 0x2e0(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x320(%0)	\n"	\
	"	cache %1, 0x340(%0); cache %1, 0x360(%0)	\n"	\
	"	cache %1, 0x380(%0); cache %1, 0x3a0(%0)	\n"	\
	"	cache %1, 0x3c0(%0); cache %1, 0x3e0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache64_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x040(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0c0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x140(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x1c0(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x240(%0)	\n"	\
	"	cache %1, 0x280(%0); cache %1, 0x2c0(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x340(%0)	\n"	\
	"	cache %1, 0x380(%0); cache %1, 0x3c0(%0)	\n"	\
	"	cache %1, 0x400(%0); cache %1, 0x440(%0)	\n"	\
	"	cache %1, 0x480(%0); cache %1, 0x4c0(%0)	\n"	\
	"	cache %1, 0x500(%0); cache %1, 0x540(%0)	\n"	\
	"	cache %1, 0x580(%0); cache %1, 0x5c0(%0)	\n"	\
	"	cache %1, 0x600(%0); cache %1, 0x640(%0)	\n"	\
	"	cache %1, 0x680(%0); cache %1, 0x6c0(%0)	\n"	\
	"	cache %1, 0x700(%0); cache %1, 0x740(%0)	\n"	\
	"	cache %1, 0x780(%0); cache %1, 0x7c0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache128_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x080(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x180(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x280(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x380(%0)	\n"	\
	"	cache %1, 0x400(%0); cache %1, 0x480(%0)	\n"	\
	"	cache %1, 0x500(%0); cache %1, 0x580(%0)	\n"	\
	"	cache %1, 0x600(%0); cache %1, 0x680(%0)	\n"	\
	"	cache %1, 0x700(%0); cache %1, 0x780(%0)	\n"	\
	"	cache %1, 0x800(%0); cache %1, 0x880(%0)	\n"	\
	"	cache %1, 0x900(%0); cache %1, 0x980(%0)	\n"	\
	"	cache %1, 0xa00(%0); cache %1, 0xa80(%0)	\n"	\
	"	cache %1, 0xb00(%0); cache %1, 0xb80(%0)	\n"	\
	"	cache %1, 0xc00(%0); cache %1, 0xc80(%0)	\n"	\
	"	cache %1, 0xd00(%0); cache %1, 0xd80(%0)	\n"	\
	"	cache %1, 0xe00(%0); cache %1, 0xe80(%0)	\n"	\
	"	cache %1, 0xf00(%0); cache %1, 0xf80(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#else
/*
 * MIPS R6 changed the cache opcode and moved to an 8-bit offset field.
 * This means we now need to increment the base register before we flush
 * more cache lines.
 */
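/*
 * Example: cache16_unroll32() touches 32 * 16 = 0x200 bytes, but the
 * offset field only reaches 0x0ff, so after the first 16 ops the base is
 * advanced by 0x100 into $1 (hence ".set noat") and the remaining 16 ops
 * are issued relative to $1.
 */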
#define cache16_unroll32(base,op)				\
	__asm__ __volatile__(					\
	"	.set push\n"					\
	"	.set noreorder\n"				\
	"	.set mips64r6\n"				\
	"	.set noat\n"					\
	"	cache %1, 0x000(%0); cache %1, 0x010(%0)\n"	\
	"	cache %1, 0x020(%0); cache %1, 0x030(%0)\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x050(%0)\n"	\
	"	cache %1, 0x060(%0); cache %1, 0x070(%0)\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x090(%0)\n"	\
	"	cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)\n"	\
	"	cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, %0, 0x100	\n"	\
	"	cache %1, 0x000($1); cache %1, 0x010($1)\n"	\
	"	cache %1, 0x020($1); cache %1, 0x030($1)\n"	\
	"	cache %1, 0x040($1); cache %1, 0x050($1)\n"	\
	"	cache %1, 0x060($1); cache %1, 0x070($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x090($1)\n"	\
	"	cache %1, 0x0a0($1); cache %1, 0x0b0($1)\n"	\
	"	cache %1, 0x0c0($1); cache %1, 0x0d0($1)\n"	\
	"	cache %1, 0x0e0($1); cache %1, 0x0f0($1)\n"	\
	"	.set pop\n"					\
		:						\
		: "r" (base),					\
		  "i" (op));

#define cache32_unroll32(base,op)				\
	__asm__ __volatile__(					\
	"	.set push\n"					\
	"	.set noreorder\n"				\
	"	.set mips64r6\n"				\
	"	.set noat\n"					\
	"	cache %1, 0x000(%0); cache %1, 0x020(%0)\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x060(%0)\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0a0(%0)\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, %0, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x020($1)\n"	\
	"	cache %1, 0x040($1); cache %1, 0x060($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0a0($1)\n"	\
	"	cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x020($1)\n"	\
	"	cache %1, 0x040($1); cache %1, 0x060($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0a0($1)\n"	\
	"	cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100\n"	\
	"	cache %1, 0x000($1); cache %1, 0x020($1)\n"	\
	"	cache %1, 0x040($1); cache %1, 0x060($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0a0($1)\n"	\
	"	cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"	\
	"	.set pop\n"					\
		:						\
		: "r" (base),					\
		  "i" (op));

#define cache64_unroll32(base,op)				\
	__asm__ __volatile__(					\
	"	.set push\n"					\
	"	.set noreorder\n"				\
	"	.set mips64r6\n"				\
	"	.set noat\n"					\
	"	cache %1, 0x000(%0); cache %1, 0x040(%0)\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0c0(%0)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, %0, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	.set pop\n"					\
		:						\
		: "r" (base),					\
		  "i" (op));

#define cache128_unroll32(base,op)				\
	__asm__ __volatile__(					\
	"	.set push\n"					\
	"	.set noreorder\n"				\
	"	.set mips64r6\n"				\
	"	.set noat\n"					\
	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, %0, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	.set pop\n"					\
		:						\
		: "r" (base),					\
		  "i" (op));
#endif /* CONFIG_CPU_MIPSR6 */

/*
 * Perform the cache operation specified by op using a user mode virtual
 * address while in kernel mode.
 */
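/*
 * The cachee variants below are the EVA (Enhanced Virtual Addressing)
 * forms of the CACHE instruction: they resolve the given address through
 * the user address segments, which is what lets kernel code operate on
 * user virtual addresses here.
 */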
#define cache16_unroll32_user(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips0					\n"	\
	"	.set eva					\n"	\
	"	cachee %1, 0x000(%0); cachee %1, 0x010(%0)	\n"	\
	"	cachee %1, 0x020(%0); cachee %1, 0x030(%0)	\n"	\
	"	cachee %1, 0x040(%0); cachee %1, 0x050(%0)	\n"	\
	"	cachee %1, 0x060(%0); cachee %1, 0x070(%0)	\n"	\
	"	cachee %1, 0x080(%0); cachee %1, 0x090(%0)	\n"	\
	"	cachee %1, 0x0a0(%0); cachee %1, 0x0b0(%0)	\n"	\
	"	cachee %1, 0x0c0(%0); cachee %1, 0x0d0(%0)	\n"	\
	"	cachee %1, 0x0e0(%0); cachee %1, 0x0f0(%0)	\n"	\
	"	cachee %1, 0x100(%0); cachee %1, 0x110(%0)	\n"	\
	"	cachee %1, 0x120(%0); cachee %1, 0x130(%0)	\n"	\
	"	cachee %1, 0x140(%0); cachee %1, 0x150(%0)	\n"	\
	"	cachee %1, 0x160(%0); cachee %1, 0x170(%0)	\n"	\
	"	cachee %1, 0x180(%0); cachee %1, 0x190(%0)	\n"	\
	"	cachee %1, 0x1a0(%0); cachee %1, 0x1b0(%0)	\n"	\
	"	cachee %1, 0x1c0(%0); cachee %1, 0x1d0(%0)	\n"	\
	"	cachee %1, 0x1e0(%0); cachee %1, 0x1f0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache32_unroll32_user(base, op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips0					\n"	\
	"	.set eva					\n"	\
	"	cachee %1, 0x000(%0); cachee %1, 0x020(%0)	\n"	\
	"	cachee %1, 0x040(%0); cachee %1, 0x060(%0)	\n"	\
	"	cachee %1, 0x080(%0); cachee %1, 0x0a0(%0)	\n"	\
	"	cachee %1, 0x0c0(%0); cachee %1, 0x0e0(%0)	\n"	\
	"	cachee %1, 0x100(%0); cachee %1, 0x120(%0)	\n"	\
	"	cachee %1, 0x140(%0); cachee %1, 0x160(%0)	\n"	\
	"	cachee %1, 0x180(%0); cachee %1, 0x1a0(%0)	\n"	\
	"	cachee %1, 0x1c0(%0); cachee %1, 0x1e0(%0)	\n"	\
	"	cachee %1, 0x200(%0); cachee %1, 0x220(%0)	\n"	\
	"	cachee %1, 0x240(%0); cachee %1, 0x260(%0)	\n"	\
	"	cachee %1, 0x280(%0); cachee %1, 0x2a0(%0)	\n"	\
	"	cachee %1, 0x2c0(%0); cachee %1, 0x2e0(%0)	\n"	\
	"	cachee %1, 0x300(%0); cachee %1, 0x320(%0)	\n"	\
	"	cachee %1, 0x340(%0); cachee %1, 0x360(%0)	\n"	\
	"	cachee %1, 0x380(%0); cachee %1, 0x3a0(%0)	\n"	\
	"	cachee %1, 0x3c0(%0); cachee %1, 0x3e0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache64_unroll32_user(base, op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips0					\n"	\
	"	.set eva					\n"	\
	"	cachee %1, 0x000(%0); cachee %1, 0x040(%0)	\n"	\
	"	cachee %1, 0x080(%0); cachee %1, 0x0c0(%0)	\n"	\
	"	cachee %1, 0x100(%0); cachee %1, 0x140(%0)	\n"	\
	"	cachee %1, 0x180(%0); cachee %1, 0x1c0(%0)	\n"	\
	"	cachee %1, 0x200(%0); cachee %1, 0x240(%0)	\n"	\
	"	cachee %1, 0x280(%0); cachee %1, 0x2c0(%0)	\n"	\
	"	cachee %1, 0x300(%0); cachee %1, 0x340(%0)	\n"	\
	"	cachee %1, 0x380(%0); cachee %1, 0x3c0(%0)	\n"	\
	"	cachee %1, 0x400(%0); cachee %1, 0x440(%0)	\n"	\
	"	cachee %1, 0x480(%0); cachee %1, 0x4c0(%0)	\n"	\
	"	cachee %1, 0x500(%0); cachee %1, 0x540(%0)	\n"	\
	"	cachee %1, 0x580(%0); cachee %1, 0x5c0(%0)	\n"	\
	"	cachee %1, 0x600(%0); cachee %1, 0x640(%0)	\n"	\
	"	cachee %1, 0x680(%0); cachee %1, 0x6c0(%0)	\n"	\
	"	cachee %1, 0x700(%0); cachee %1, 0x740(%0)	\n"	\
	"	cachee %1, 0x780(%0); cachee %1, 0x7c0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra)	\
static inline void extra##blast_##pfx##cache##lsize(void)		\
{									\
	unsigned long start = INDEX_BASE;				\
	unsigned long end = start + current_cpu_data.desc.waysize;	\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	__##pfx##flush_prologue						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache##lsize##_unroll32(addr|ws, indexop);	\
									\
	__##pfx##flush_epilogue						\
}									\
									\
static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
{									\
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	__##pfx##flush_prologue						\
									\
	do {								\
		cache##lsize##_unroll32(start, hitop);			\
		start += lsize * 32;					\
	} while (start < end);						\
									\
	__##pfx##flush_epilogue						\
}									\
									\
static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
{									\
	unsigned long indexmask = current_cpu_data.desc.waysize - 1;	\
	unsigned long start = INDEX_BASE + (page & indexmask);		\
	unsigned long end = start + PAGE_SIZE;				\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	__##pfx##flush_prologue						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache##lsize##_unroll32(addr|ws, indexop);	\
									\
	__##pfx##flush_epilogue						\
}

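/*
 * For example, the first instantiation below expands to blast_dcache16(),
 * blast_dcache16_page() and blast_dcache16_page_indexed(), all operating
 * on the 16-byte-line D-cache described by current_cpu_data.dcache.
 */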
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )

__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )

#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
{									\
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	__##pfx##flush_prologue						\
									\
	do {								\
		cache##lsize##_unroll32_user(start, hitop);		\
		start += lsize * 32;					\
	} while (start < end);						\
									\
	__##pfx##flush_epilogue						\
}

__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 16)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 32)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 64)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)

/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra)	\
static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
						    unsigned long end)	\
{									\
	unsigned long lsize = cpu_##desc##_line_size();			\
	unsigned long addr = start & ~(lsize - 1);			\
	unsigned long aend = (end - 1) & ~(lsize - 1);			\
									\
	__##pfx##flush_prologue						\
									\
	while (1) {							\
		prot##cache_op(hitop, addr);				\
		if (addr == aend)					\
			break;						\
		addr += lsize;						\
	}								\
									\
	__##pfx##flush_epilogue						\
}

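/*
 * The instantiations that follow thus provide e.g.
 * protected_blast_dcache_range(start, end) and blast_icache_range(start,
 * end); each aligns start down to a line boundary and walks one cache
 * line at a time up to the line containing end - 1.
 */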
#ifndef CONFIG_EVA

__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )

#else

#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop)		\
static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
							unsigned long end) \
{									\
	unsigned long lsize = cpu_##desc##_line_size();			\
	unsigned long addr = start & ~(lsize - 1);			\
	unsigned long aend = (end - 1) & ~(lsize - 1);			\
									\
	__##pfx##flush_prologue						\
									\
	if (!uaccess_kernel()) {					\
		while (1) {						\
			protected_cachee_op(hitop, addr);		\
			if (addr == aend)				\
				break;					\
			addr += lsize;					\
		}							\
	} else {							\
		while (1) {						\
			protected_cache_op(hitop, addr);		\
			if (addr == aend)				\
				break;					\
			addr += lsize;					\
		}							\
									\
	}								\
	__##pfx##flush_epilogue						\
}

__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)

#endif
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
	protected_, loongson2_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
/* blast_inv_dcache_range */
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )

/* Currently, this is very specific to Loongson-3 */
#define __BUILD_BLAST_CACHE_NODE(pfx, desc, indexop, hitop, lsize)	\
static inline void blast_##pfx##cache##lsize##_node(long node)		\
{									\
	unsigned long start = CAC_BASE | nid_to_addrbase(node);		\
	unsigned long end = start + current_cpu_data.desc.waysize;	\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache##lsize##_unroll32(addr|ws, indexop);	\
}

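/*
 * e.g. blast_scache64_node(nid) does an indexed writeback-invalidate of
 * the given node's entire S-cache; the node-local base address comes
 * from nid_to_addrbase(), so the indexed ops are applied to that node's
 * cache arrays rather than node 0's.
 */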
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)

#endif /* _ASM_R4KCACHE_H */