/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef _ASM_R4KCACHE_H
#define _ASM_R4KCACHE_H

#include <asm/asm.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/mipsmtregs.h>
#include <asm/uaccess.h>

/*
 * This macro returns a properly sign-extended address suitable as a base
 * address for indexed cache operations. Two issues here:
 *
 *  - The MIPS32 and MIPS64 specs permit an implementation to directly derive
 *    the index bits from the virtual address. This breaks with tradition
 *    set by the R4000. To keep unpleasant surprises from happening we pick
 *    an address in KSEG0 / CKSEG0.
 *  - We need a properly sign-extended address for 64-bit code. To get away
 *    without ifdefs we let the compiler do it by a type cast.
 */
#ifndef INDEX_BASE
#define INDEX_BASE	CKSEG0
#endif

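/*
 * cache_op() emits a single CACHE instruction.  The operation is encoded
 * as an immediate and the address is passed via an "R" memory constraint,
 * so the compiler supplies a base/offset form the instruction can use
 * directly; the .set mips3 / mips64r6 override makes the assembler accept
 * CACHE regardless of the configured default ISA.
 */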
#ifdef CONFIG_CPU_MIPSR6
#define cache_op(op,addr) \
	__asm__ __volatile__( \
	" .set push \n" \
	" .set noreorder \n" \
	" .set mips64r6 \n" \
	" cache %0, %1 \n" \
	" .set pop \n" \
	: \
	: "i" (op), "R" (*(unsigned char *)(addr)))
#else
#define cache_op(op,addr) \
	__asm__ __volatile__( \
	" .set push \n" \
	" .set noreorder \n" \
	" .set mips3 \n" \
	" cache %0, %1 \n" \
	" .set pop \n" \
	: \
	: "i" (op), "R" (*(unsigned char *)(addr)))
#endif

#ifdef CONFIG_MIPS_MT
/*
 * Temporary hacks for SMTC debug. Optionally force single-threaded
 * execution during I-cache flushes.
 */

#ifdef PROTECT_CACHE_FLUSHES

extern int mt_protiflush;
extern int mt_protdflush;
extern void mt_cflush_lockdown(void);
extern void mt_cflush_release(void);

#define BEGIN_MT_IPROT \
	unsigned long flags = 0; \
	unsigned long mtflags = 0; \
	if (mt_protiflush) { \
		local_irq_save(flags); \
		ehb(); \
		mtflags = dvpe(); \
		mt_cflush_lockdown(); \
	}

#define END_MT_IPROT \
	if (mt_protiflush) { \
		mt_cflush_release(); \
		evpe(mtflags); \
		local_irq_restore(flags); \
	}

#define BEGIN_MT_DPROT \
	unsigned long flags = 0; \
	unsigned long mtflags = 0; \
	if (mt_protdflush) { \
		local_irq_save(flags); \
		ehb(); \
		mtflags = dvpe(); \
		mt_cflush_lockdown(); \
	}

#define END_MT_DPROT \
	if (mt_protdflush) { \
		mt_cflush_release(); \
		evpe(mtflags); \
		local_irq_restore(flags); \
	}

#else

#define BEGIN_MT_IPROT
#define BEGIN_MT_DPROT
#define END_MT_IPROT
#define END_MT_DPROT

#endif /* PROTECT_CACHE_FLUSHES */

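/*
 * The flush prologue/epilogue pairs below wrap each flush in the optional
 * single-threading protection above and, as a further SMTC debug aid,
 * repeat the flush body mt_n_iflushes / mt_n_dflushes times.
 */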
#define __iflush_prologue \
	unsigned long redundance; \
	extern int mt_n_iflushes; \
	BEGIN_MT_IPROT \
	for (redundance = 0; redundance < mt_n_iflushes; redundance++) {

#define __iflush_epilogue \
	END_MT_IPROT \
	}

#define __dflush_prologue \
	unsigned long redundance; \
	extern int mt_n_dflushes; \
	BEGIN_MT_DPROT \
	for (redundance = 0; redundance < mt_n_dflushes; redundance++) {

#define __dflush_epilogue \
	END_MT_DPROT \
	}

#define __inv_dflush_prologue __dflush_prologue
#define __inv_dflush_epilogue __dflush_epilogue
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue __sflush_prologue
#define __inv_sflush_epilogue __sflush_epilogue

#else /* CONFIG_MIPS_MT */

#define __iflush_prologue {
#define __iflush_epilogue }
#define __dflush_prologue {
#define __dflush_epilogue }
#define __inv_dflush_prologue {
#define __inv_dflush_epilogue }
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue {
#define __inv_sflush_epilogue }

#endif /* CONFIG_MIPS_MT */

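/*
 * Single-line cache maintenance helpers.  The *_indexed variants issue
 * index-type cache ops (the address selects a cache line by index), the
 * others hit-type ops on a virtual address; each wraps one cache_op() in
 * the matching MT prologue/epilogue.
 */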
static inline void flush_icache_line_indexed(unsigned long addr)
{
	__iflush_prologue
	cache_op(Index_Invalidate_I, addr);
	__iflush_epilogue
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
	__dflush_prologue
	cache_op(Index_Writeback_Inv_D, addr);
	__dflush_epilogue
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_SD, addr);
}

static inline void flush_icache_line(unsigned long addr)
{
	__iflush_prologue
	cache_op(Hit_Invalidate_I, addr);
	__iflush_epilogue
}

static inline void flush_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Writeback_Inv_D, addr);
	__dflush_epilogue
}

static inline void invalidate_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Invalidate_D, addr);
	__dflush_epilogue
}

static inline void invalidate_scache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_SD, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_SD, addr);
}

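/*
 * protected_cache_op() is cache_op() plus an __ex_table entry: if the CACHE
 * instruction at label 1 faults, the exception handler resumes execution at
 * label 2 instead of killing the caller.  This is what makes the
 * protected_*() helpers below safe on user addresses that may not be mapped.
 */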
#ifdef CONFIG_CPU_MIPSR6
#define protected_cache_op(op,addr) \
	__asm__ __volatile__( \
	" .set push \n" \
	" .set noreorder \n" \
	" .set mips64r6 \n" \
	"1: cache %0, (%1) \n" \
	"2: .set pop \n" \
	" .section __ex_table,\"a\" \n" \
	" "STR(PTR)" 1b, 2b \n" \
	" .previous" \
	: \
	: "i" (op), "r" (addr))
#else
#define protected_cache_op(op,addr) \
	__asm__ __volatile__( \
	" .set push \n" \
	" .set noreorder \n" \
	" .set mips3 \n" \
	"1: cache %0, (%1) \n" \
	"2: .set pop \n" \
	" .section __ex_table,\"a\" \n" \
	" "STR(PTR)" 1b, 2b \n" \
	" .previous" \
	: \
	: "i" (op), "r" (addr))
#endif

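/*
 * Under CONFIG_EVA the same pattern is used with the CACHEE instruction,
 * which performs the operation on the user (EVA) address segment rather
 * than the kernel mapping.
 */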
#ifdef CONFIG_EVA
#define protected_cachee_op(op,addr) \
	__asm__ __volatile__( \
	" .set push \n" \
	" .set noreorder \n" \
	" .set eva \n" \
	"1: cachee %0, (%1) \n" \
	"2: .set pop \n" \
	" .section __ex_table,\"a\" \n" \
	" "STR(PTR)" 1b, 2b \n" \
	" .previous" \
	: \
	: "i" (op), "r" (addr))
#endif

/*
 * The next routines are for badland addresses like signal trampolines.
 */
static inline void protected_flush_icache_line(unsigned long addr)
{
#ifndef CONFIG_EVA
	protected_cache_op(Hit_Invalidate_I, addr);
#else
	protected_cachee_op(Hit_Invalidate_I, addr);
#endif
}

/*
 * R10000 / R12000 hazard - these processors don't support the
 * Hit_Writeback_D cacheop so we use Hit_Writeback_Inv_D which is supported
 * by all R4000-style caches.  We're talking about one cacheline
 * unnecessarily getting invalidated here, so the penalty isn't severe.
 */
static inline void protected_writeback_dcache_line(unsigned long addr)
{
#ifndef CONFIG_EVA
	protected_cache_op(Hit_Writeback_Inv_D, addr);
#else
	protected_cachee_op(Hit_Writeback_Inv_D, addr);
#endif
}

static inline void protected_writeback_scache_line(unsigned long addr)
{
	protected_cache_op(Hit_Writeback_Inv_SD, addr);
}

/*
 * This one is RM7000-specific
 */
static inline void invalidate_tcache_page(unsigned long addr)
{
	cache_op(Page_Invalidate_T, addr);
}

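/*
 * Each cacheNN_unroll32(base, op) macro below issues 32 cache ops spaced
 * NN bytes apart, i.e. it covers 32 cache lines starting at base.  The
 * MIPSR6 variants keep every offset below 0x100 and instead step a scratch
 * register ($1/AT, hence .set noat) with PTR_ADDIU, because R6 reduced the
 * CACHE instruction's offset field to a 9-bit signed value.
 */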
#ifdef CONFIG_CPU_MIPSR6

#define cache16_unroll32(base,op) \
	__asm__ __volatile__( \
	" .set push \n" \
	" .set noreorder \n" \
	" .set mips64r6 \n" \
	" .set noat \n" \
	" cache %1, 0x000(%0); cache %1, 0x010(%0) \n" \
	" cache %1, 0x020(%0); cache %1, 0x030(%0) \n" \
	" cache %1, 0x040(%0); cache %1, 0x050(%0) \n" \
	" cache %1, 0x060(%0); cache %1, 0x070(%0) \n" \
	" cache %1, 0x080(%0); cache %1, 0x090(%0) \n" \
	" cache %1, 0x0a0(%0); cache %1, 0x0b0(%0) \n" \
	" cache %1, 0x0c0(%0); cache %1, 0x0d0(%0) \n" \
	" cache %1, 0x0e0(%0); cache %1, 0x0f0(%0) \n" \
	STR(PTR_ADDIU) " $1, %0, 0x100 \n" \
	" cache %1, 0x000($1); cache %1, 0x010($1) \n" \
	" cache %1, 0x020($1); cache %1, 0x030($1) \n" \
	" cache %1, 0x040($1); cache %1, 0x050($1) \n" \
	" cache %1, 0x060($1); cache %1, 0x070($1) \n" \
	" cache %1, 0x080($1); cache %1, 0x090($1) \n" \
	" cache %1, 0x0a0($1); cache %1, 0x0b0($1) \n" \
	" cache %1, 0x0c0($1); cache %1, 0x0d0($1) \n" \
	" cache %1, 0x0e0($1); cache %1, 0x0f0($1) \n" \
	" .set pop \n" \
	: \
	: "r" (base), \
	  "i" (op));

#define cache32_unroll32(base,op) \
	__asm__ __volatile__( \
	" .set push \n" \
	" .set noreorder \n" \
	" .set mips64r6 \n" \
	" .set noat \n" \
	" cache %1, 0x000(%0); cache %1, 0x020(%0) \n" \
	" cache %1, 0x040(%0); cache %1, 0x060(%0) \n" \
	" cache %1, 0x080(%0); cache %1, 0x0a0(%0) \n" \
	" cache %1, 0x0c0(%0); cache %1, 0x0e0(%0) \n" \
	STR(PTR_ADDIU) " $1, %0, 0x100 \n" \
	" cache %1, 0x000($1); cache %1, 0x020($1) \n" \
	" cache %1, 0x040($1); cache %1, 0x060($1) \n" \
	" cache %1, 0x080($1); cache %1, 0x0a0($1) \n" \
	" cache %1, 0x0c0($1); cache %1, 0x0e0($1) \n" \
	STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
	" cache %1, 0x000($1); cache %1, 0x020($1) \n" \
	" cache %1, 0x040($1); cache %1, 0x060($1) \n" \
	" cache %1, 0x080($1); cache %1, 0x0a0($1) \n" \
	" cache %1, 0x0c0($1); cache %1, 0x0e0($1) \n" \
	STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
	" cache %1, 0x000($1); cache %1, 0x020($1) \n" \
	" cache %1, 0x040($1); cache %1, 0x060($1) \n" \
	" cache %1, 0x080($1); cache %1, 0x0a0($1) \n" \
	" cache %1, 0x0c0($1); cache %1, 0x0e0($1) \n" \
	" .set pop \n" \
	: \
	: "r" (base), \
	  "i" (op));

#define cache64_unroll32(base,op) \
	__asm__ __volatile__( \
	" .set push \n" \
	" .set noreorder \n" \
	" .set mips64r6 \n" \
	" .set noat \n" \
	" cache %1, 0x000(%0); cache %1, 0x040(%0) \n" \
	" cache %1, 0x080(%0); cache %1, 0x0c0(%0) \n" \
	STR(PTR_ADDIU) " $1, %0, 0x100 \n" \
	" cache %1, 0x000($1); cache %1, 0x040($1) \n" \
	" cache %1, 0x080($1); cache %1, 0x0c0($1) \n" \
	STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
	" cache %1, 0x000($1); cache %1, 0x040($1) \n" \
	" cache %1, 0x080($1); cache %1, 0x0c0($1) \n" \
	STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
	" cache %1, 0x000($1); cache %1, 0x040($1) \n" \
	" cache %1, 0x080($1); cache %1, 0x0c0($1) \n" \
	STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
	" cache %1, 0x000($1); cache %1, 0x040($1) \n" \
	" cache %1, 0x080($1); cache %1, 0x0c0($1) \n" \
	STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
	" cache %1, 0x000($1); cache %1, 0x040($1) \n" \
	" cache %1, 0x080($1); cache %1, 0x0c0($1) \n" \
	STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
	" cache %1, 0x000($1); cache %1, 0x040($1) \n" \
	" cache %1, 0x080($1); cache %1, 0x0c0($1) \n" \
	STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
	" cache %1, 0x000($1); cache %1, 0x040($1) \n" \
	" cache %1, 0x080($1); cache %1, 0x0c0($1) \n" \
	" .set pop \n" \
	: \
	: "r" (base), \
	  "i" (op));

#define cache128_unroll32(base,op) \
	__asm__ __volatile__( \
	" .set push \n" \
	" .set noreorder \n" \
	" .set mips64r6 \n" \
	" .set noat \n" \
	" cache %1, 0x000(%0); cache %1, 0x080(%0) \n" \
	STR(PTR_ADDIU) " $1, %0, 0x100 \n" \
	" cache %1, 0x000($1); cache %1, 0x080($1) \n" \
	STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
	" cache %1, 0x000($1); cache %1, 0x080($1) \n" \
	STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
	" cache %1, 0x000($1); cache %1, 0x080($1) \n" \
	STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
	" cache %1, 0x000($1); cache %1, 0x080($1) \n" \
	STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
	" cache %1, 0x000($1); cache %1, 0x080($1) \n" \
	STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
	" cache %1, 0x000($1); cache %1, 0x080($1) \n" \
	STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
	" cache %1, 0x000($1); cache %1, 0x080($1) \n" \
	STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
	" cache %1, 0x000($1); cache %1, 0x080($1) \n" \
	STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
	" cache %1, 0x000($1); cache %1, 0x080($1) \n" \
	STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
	" cache %1, 0x000($1); cache %1, 0x080($1) \n" \
	STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
	" cache %1, 0x000($1); cache %1, 0x080($1) \n" \
	STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
	" cache %1, 0x000($1); cache %1, 0x080($1) \n" \
	STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
	" cache %1, 0x000($1); cache %1, 0x080($1) \n" \
	STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
	" cache %1, 0x000($1); cache %1, 0x080($1) \n" \
	STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
	" cache %1, 0x000($1); cache %1, 0x080($1) \n" \
	" .set pop \n" \
	: \
	: "r" (base), \
	  "i" (op));

#else /* !CONFIG_CPU_MIPSR6 */

#define cache16_unroll32(base,op) \
	__asm__ __volatile__( \
	" .set push \n" \
	" .set noreorder \n" \
	" .set mips3 \n" \
	" cache %1, 0x000(%0); cache %1, 0x010(%0) \n" \
	" cache %1, 0x020(%0); cache %1, 0x030(%0) \n" \
	" cache %1, 0x040(%0); cache %1, 0x050(%0) \n" \
	" cache %1, 0x060(%0); cache %1, 0x070(%0) \n" \
	" cache %1, 0x080(%0); cache %1, 0x090(%0) \n" \
	" cache %1, 0x0a0(%0); cache %1, 0x0b0(%0) \n" \
	" cache %1, 0x0c0(%0); cache %1, 0x0d0(%0) \n" \
	" cache %1, 0x0e0(%0); cache %1, 0x0f0(%0) \n" \
	" cache %1, 0x100(%0); cache %1, 0x110(%0) \n" \
	" cache %1, 0x120(%0); cache %1, 0x130(%0) \n" \
	" cache %1, 0x140(%0); cache %1, 0x150(%0) \n" \
	" cache %1, 0x160(%0); cache %1, 0x170(%0) \n" \
	" cache %1, 0x180(%0); cache %1, 0x190(%0) \n" \
	" cache %1, 0x1a0(%0); cache %1, 0x1b0(%0) \n" \
	" cache %1, 0x1c0(%0); cache %1, 0x1d0(%0) \n" \
	" cache %1, 0x1e0(%0); cache %1, 0x1f0(%0) \n" \
	" .set pop \n" \
	: \
	: "r" (base), \
	  "i" (op));

#define cache32_unroll32(base,op) \
	__asm__ __volatile__( \
	" .set push \n" \
	" .set noreorder \n" \
	" .set mips3 \n" \
	" cache %1, 0x000(%0); cache %1, 0x020(%0) \n" \
	" cache %1, 0x040(%0); cache %1, 0x060(%0) \n" \
	" cache %1, 0x080(%0); cache %1, 0x0a0(%0) \n" \
	" cache %1, 0x0c0(%0); cache %1, 0x0e0(%0) \n" \
	" cache %1, 0x100(%0); cache %1, 0x120(%0) \n" \
	" cache %1, 0x140(%0); cache %1, 0x160(%0) \n" \
	" cache %1, 0x180(%0); cache %1, 0x1a0(%0) \n" \
	" cache %1, 0x1c0(%0); cache %1, 0x1e0(%0) \n" \
	" cache %1, 0x200(%0); cache %1, 0x220(%0) \n" \
	" cache %1, 0x240(%0); cache %1, 0x260(%0) \n" \
	" cache %1, 0x280(%0); cache %1, 0x2a0(%0) \n" \
	" cache %1, 0x2c0(%0); cache %1, 0x2e0(%0) \n" \
	" cache %1, 0x300(%0); cache %1, 0x320(%0) \n" \
	" cache %1, 0x340(%0); cache %1, 0x360(%0) \n" \
	" cache %1, 0x380(%0); cache %1, 0x3a0(%0) \n" \
	" cache %1, 0x3c0(%0); cache %1, 0x3e0(%0) \n" \
	" .set pop \n" \
	: \
	: "r" (base), \
	  "i" (op));

#define cache64_unroll32(base,op) \
	__asm__ __volatile__( \
	" .set push \n" \
	" .set noreorder \n" \
	" .set mips3 \n" \
	" cache %1, 0x000(%0); cache %1, 0x040(%0) \n" \
	" cache %1, 0x080(%0); cache %1, 0x0c0(%0) \n" \
	" cache %1, 0x100(%0); cache %1, 0x140(%0) \n" \
	" cache %1, 0x180(%0); cache %1, 0x1c0(%0) \n" \
	" cache %1, 0x200(%0); cache %1, 0x240(%0) \n" \
	" cache %1, 0x280(%0); cache %1, 0x2c0(%0) \n" \
	" cache %1, 0x300(%0); cache %1, 0x340(%0) \n" \
	" cache %1, 0x380(%0); cache %1, 0x3c0(%0) \n" \
	" cache %1, 0x400(%0); cache %1, 0x440(%0) \n" \
	" cache %1, 0x480(%0); cache %1, 0x4c0(%0) \n" \
	" cache %1, 0x500(%0); cache %1, 0x540(%0) \n" \
	" cache %1, 0x580(%0); cache %1, 0x5c0(%0) \n" \
	" cache %1, 0x600(%0); cache %1, 0x640(%0) \n" \
	" cache %1, 0x680(%0); cache %1, 0x6c0(%0) \n" \
	" cache %1, 0x700(%0); cache %1, 0x740(%0) \n" \
	" cache %1, 0x780(%0); cache %1, 0x7c0(%0) \n" \
	" .set pop \n" \
	: \
	: "r" (base), \
	  "i" (op));

#define cache128_unroll32(base,op) \
	__asm__ __volatile__( \
	" .set push \n" \
	" .set noreorder \n" \
	" .set mips3 \n" \
	" cache %1, 0x000(%0); cache %1, 0x080(%0) \n" \
	" cache %1, 0x100(%0); cache %1, 0x180(%0) \n" \
	" cache %1, 0x200(%0); cache %1, 0x280(%0) \n" \
	" cache %1, 0x300(%0); cache %1, 0x380(%0) \n" \
	" cache %1, 0x400(%0); cache %1, 0x480(%0) \n" \
	" cache %1, 0x500(%0); cache %1, 0x580(%0) \n" \
	" cache %1, 0x600(%0); cache %1, 0x680(%0) \n" \
	" cache %1, 0x700(%0); cache %1, 0x780(%0) \n" \
	" cache %1, 0x800(%0); cache %1, 0x880(%0) \n" \
	" cache %1, 0x900(%0); cache %1, 0x980(%0) \n" \
	" cache %1, 0xa00(%0); cache %1, 0xa80(%0) \n" \
	" cache %1, 0xb00(%0); cache %1, 0xb80(%0) \n" \
	" cache %1, 0xc00(%0); cache %1, 0xc80(%0) \n" \
	" cache %1, 0xd00(%0); cache %1, 0xd80(%0) \n" \
	" cache %1, 0xe00(%0); cache %1, 0xe80(%0) \n" \
	" cache %1, 0xf00(%0); cache %1, 0xf80(%0) \n" \
	" .set pop \n" \
	: \
	: "r" (base), \
	  "i" (op));
#endif /* CONFIG_CPU_MIPSR6 */

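/*
 * EVA kernels also get *_user variants of the unroll macros.  They are
 * identical except for using CACHEE, so the operations apply to the user
 * address space rather than the kernel mapping of the base address.
 */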
#ifdef CONFIG_EVA
#define cache16_unroll32_user(base,op) \
	__asm__ __volatile__( \
	" .set push \n" \
	" .set noreorder \n" \
	" .set eva \n" \
	" cachee %1, 0x000(%0); cachee %1, 0x010(%0) \n" \
	" cachee %1, 0x020(%0); cachee %1, 0x030(%0) \n" \
	" cachee %1, 0x040(%0); cachee %1, 0x050(%0) \n" \
	" cachee %1, 0x060(%0); cachee %1, 0x070(%0) \n" \
	" cachee %1, 0x080(%0); cachee %1, 0x090(%0) \n" \
	" cachee %1, 0x0a0(%0); cachee %1, 0x0b0(%0) \n" \
	" cachee %1, 0x0c0(%0); cachee %1, 0x0d0(%0) \n" \
	" cachee %1, 0x0e0(%0); cachee %1, 0x0f0(%0) \n" \
	" cachee %1, 0x100(%0); cachee %1, 0x110(%0) \n" \
	" cachee %1, 0x120(%0); cachee %1, 0x130(%0) \n" \
	" cachee %1, 0x140(%0); cachee %1, 0x150(%0) \n" \
	" cachee %1, 0x160(%0); cachee %1, 0x170(%0) \n" \
	" cachee %1, 0x180(%0); cachee %1, 0x190(%0) \n" \
	" cachee %1, 0x1a0(%0); cachee %1, 0x1b0(%0) \n" \
	" cachee %1, 0x1c0(%0); cachee %1, 0x1d0(%0) \n" \
	" cachee %1, 0x1e0(%0); cachee %1, 0x1f0(%0) \n" \
	" .set pop \n" \
	: \
	: "r" (base), \
	  "i" (op));

#define cache32_unroll32_user(base,op) \
	__asm__ __volatile__( \
	" .set push \n" \
	" .set noreorder \n" \
	" .set eva \n" \
	" cachee %1, 0x000(%0); cachee %1, 0x020(%0) \n" \
	" cachee %1, 0x040(%0); cachee %1, 0x060(%0) \n" \
	" cachee %1, 0x080(%0); cachee %1, 0x0a0(%0) \n" \
	" cachee %1, 0x0c0(%0); cachee %1, 0x0e0(%0) \n" \
	" cachee %1, 0x100(%0); cachee %1, 0x120(%0) \n" \
	" cachee %1, 0x140(%0); cachee %1, 0x160(%0) \n" \
	" cachee %1, 0x180(%0); cachee %1, 0x1a0(%0) \n" \
	" cachee %1, 0x1c0(%0); cachee %1, 0x1e0(%0) \n" \
	" cachee %1, 0x200(%0); cachee %1, 0x220(%0) \n" \
	" cachee %1, 0x240(%0); cachee %1, 0x260(%0) \n" \
	" cachee %1, 0x280(%0); cachee %1, 0x2a0(%0) \n" \
	" cachee %1, 0x2c0(%0); cachee %1, 0x2e0(%0) \n" \
	" cachee %1, 0x300(%0); cachee %1, 0x320(%0) \n" \
	" cachee %1, 0x340(%0); cachee %1, 0x360(%0) \n" \
	" cachee %1, 0x380(%0); cachee %1, 0x3a0(%0) \n" \
	" cachee %1, 0x3c0(%0); cachee %1, 0x3e0(%0) \n" \
	" .set pop \n" \
	: \
	: "r" (base), \
	  "i" (op));

#define cache64_unroll32_user(base,op) \
	__asm__ __volatile__( \
	" .set push \n" \
	" .set noreorder \n" \
	" .set eva \n" \
	" cachee %1, 0x000(%0); cachee %1, 0x040(%0) \n" \
	" cachee %1, 0x080(%0); cachee %1, 0x0c0(%0) \n" \
	" cachee %1, 0x100(%0); cachee %1, 0x140(%0) \n" \
	" cachee %1, 0x180(%0); cachee %1, 0x1c0(%0) \n" \
	" cachee %1, 0x200(%0); cachee %1, 0x240(%0) \n" \
	" cachee %1, 0x280(%0); cachee %1, 0x2c0(%0) \n" \
	" cachee %1, 0x300(%0); cachee %1, 0x340(%0) \n" \
	" cachee %1, 0x380(%0); cachee %1, 0x3c0(%0) \n" \
	" cachee %1, 0x400(%0); cachee %1, 0x440(%0) \n" \
	" cachee %1, 0x480(%0); cachee %1, 0x4c0(%0) \n" \
	" cachee %1, 0x500(%0); cachee %1, 0x540(%0) \n" \
	" cachee %1, 0x580(%0); cachee %1, 0x5c0(%0) \n" \
	" cachee %1, 0x600(%0); cachee %1, 0x640(%0) \n" \
	" cachee %1, 0x680(%0); cachee %1, 0x6c0(%0) \n" \
	" cachee %1, 0x700(%0); cachee %1, 0x740(%0) \n" \
	" cachee %1, 0x780(%0); cachee %1, 0x7c0(%0) \n" \
	" .set pop \n" \
	: \
	: "r" (base), \
	  "i" (op));

#define cache128_unroll32_user(base,op) \
	__asm__ __volatile__( \
	" .set push \n" \
	" .set noreorder \n" \
	" .set eva \n" \
	" cachee %1, 0x000(%0); cachee %1, 0x080(%0) \n" \
	" cachee %1, 0x100(%0); cachee %1, 0x180(%0) \n" \
	" cachee %1, 0x200(%0); cachee %1, 0x280(%0) \n" \
	" cachee %1, 0x300(%0); cachee %1, 0x380(%0) \n" \
	" cachee %1, 0x400(%0); cachee %1, 0x480(%0) \n" \
	" cachee %1, 0x500(%0); cachee %1, 0x580(%0) \n" \
	" cachee %1, 0x600(%0); cachee %1, 0x680(%0) \n" \
	" cachee %1, 0x700(%0); cachee %1, 0x780(%0) \n" \
	" cachee %1, 0x800(%0); cachee %1, 0x880(%0) \n" \
	" cachee %1, 0x900(%0); cachee %1, 0x980(%0) \n" \
	" cachee %1, 0xa00(%0); cachee %1, 0xa80(%0) \n" \
	" cachee %1, 0xb00(%0); cachee %1, 0xb80(%0) \n" \
	" cachee %1, 0xc00(%0); cachee %1, 0xc80(%0) \n" \
	" cachee %1, 0xd00(%0); cachee %1, 0xd80(%0) \n" \
	" cachee %1, 0xe00(%0); cachee %1, 0xe80(%0) \n" \
	" cachee %1, 0xf00(%0); cachee %1, 0xf80(%0) \n" \
	" .set pop \n" \
	: \
	: "r" (base), \
	  "i" (op));
#endif

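/*
 * Each __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize) expansion
 * generates three helpers for a cache with lsize-byte lines:
 * blast_<pfx>cache<lsize>() index-walks every way and set of the whole
 * cache, blast_<pfx>cache<lsize>_page() hit-flushes one page by virtual
 * address, and blast_<pfx>cache<lsize>_page_indexed() index-flushes, in
 * every way, the range of indices that a page maps to.  The loops advance
 * by lsize * 32 because each cacheNN_unroll32 invocation covers 32 lines.
 */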
/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize) \
static inline void blast_##pfx##cache##lsize(void) \
{ \
	unsigned long start = INDEX_BASE; \
	unsigned long end = start + current_cpu_data.desc.waysize; \
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
	unsigned long ws_end = current_cpu_data.desc.ways << \
			       current_cpu_data.desc.waybit; \
	unsigned long ws, addr; \
\
	__##pfx##flush_prologue \
\
	for (ws = 0; ws < ws_end; ws += ws_inc) \
		for (addr = start; addr < end; addr += lsize * 32) \
			cache##lsize##_unroll32(addr|ws, indexop); \
\
	__##pfx##flush_epilogue \
} \
\
static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \
{ \
	unsigned long start = page; \
	unsigned long end = page + PAGE_SIZE; \
\
	__##pfx##flush_prologue \
\
	do { \
		cache##lsize##_unroll32(start, hitop); \
		start += lsize * 32; \
	} while (start < end); \
\
	__##pfx##flush_epilogue \
} \
\
static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
{ \
	unsigned long indexmask = current_cpu_data.desc.waysize - 1; \
	unsigned long start = INDEX_BASE + (page & indexmask); \
	unsigned long end = start + PAGE_SIZE; \
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
	unsigned long ws_end = current_cpu_data.desc.ways << \
			       current_cpu_data.desc.waybit; \
	unsigned long ws, addr; \
\
	__##pfx##flush_prologue \
\
	for (ws = 0; ws < ws_end; ws += ws_inc) \
		for (addr = start; addr < end; addr += lsize * 32) \
			cache##lsize##_unroll32(addr|ws, indexop); \
\
	__##pfx##flush_epilogue \
}

__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16)
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32)
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64)
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)

__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16)
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32)
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16)
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32)
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64)
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128)

#ifdef CONFIG_EVA

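/*
 * For EVA, __BUILD_BLAST_USER_CACHE generates
 * blast_<pfx>cache<lsize>_user_page(), the CACHEE-based counterpart of
 * blast_<pfx>cache<lsize>_page() for hit-flushing one page of user memory.
 */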
#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
{ \
	unsigned long start = page; \
	unsigned long end = page + PAGE_SIZE; \
\
	__##pfx##flush_prologue \
\
	do { \
		cache##lsize##_unroll32_user(start, hitop); \
		start += lsize * 32; \
	} while (start < end); \
\
	__##pfx##flush_epilogue \
}

__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)

#endif

/* build blast_xxx_range, protected_blast_xxx_range */
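/*
 * The range helpers touch every cache line from start (rounded down to a
 * line boundary) through the line containing end - 1.  The protected_
 * variants go through protected_cache_op(), so a fault on an unmapped user
 * address within the range is fixed up instead of being fatal.
 */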
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot) \
static inline void prot##blast_##pfx##cache##_range(unsigned long start, \
						    unsigned long end) \
{ \
	unsigned long lsize = cpu_##desc##_line_size(); \
	unsigned long addr = start & ~(lsize - 1); \
	unsigned long aend = (end - 1) & ~(lsize - 1); \
\
	__##pfx##flush_prologue \
\
	while (1) { \
		prot##cache_op(hitop, addr); \
		if (addr == aend) \
			break; \
		addr += lsize; \
	} \
\
	__##pfx##flush_epilogue \
}

#ifndef CONFIG_EVA

__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_)
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_)

#else

#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop) \
static inline void protected_blast_##pfx##cache##_range(unsigned long start, \
							unsigned long end) \
{ \
	unsigned long lsize = cpu_##desc##_line_size(); \
	unsigned long addr = start & ~(lsize - 1); \
	unsigned long aend = (end - 1) & ~(lsize - 1); \
\
	__##pfx##flush_prologue \
\
	if (segment_eq(get_fs(), USER_DS)) \
		while (1) { \
			protected_cachee_op(hitop, addr); \
			if (addr == aend) \
				break; \
			addr += lsize; \
		} \
	else \
		while (1) { \
			protected_cache_op(hitop, addr); \
			if (addr == aend) \
				break; \
			addr += lsize; \
		} \
\
	__##pfx##flush_epilogue \
}

__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)

#endif

__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, )
/* blast_inv_dcache_range */
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, )

#endif /* _ASM_R4KCACHE_H */