1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
7  * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
8  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
9  * Copyright (C) 2012, MIPS Technology, Leonid Yegoshin (yegoshin@mips.com)
10  */
11 #include <linux/hardirq.h>
12 #include <linux/init.h>
13 #include <linux/highmem.h>
14 #include <linux/kernel.h>
15 #include <linux/linkage.h>
16 #include <linux/preempt.h>
17 #include <linux/sched.h>
18 #include <linux/smp.h>
19 #include <linux/mm.h>
20 #include <linux/module.h>
21 #include <linux/bitops.h>
22 
23 #include <asm/bcache.h>
24 #include <asm/bootinfo.h>
25 #include <asm/cache.h>
26 #include <asm/cacheops.h>
27 #include <asm/cpu.h>
28 #include <asm/cpu-features.h>
29 #include <asm/io.h>
30 #include <asm/page.h>
31 #include <asm/pgtable.h>
32 #include <asm/r4kcache.h>
33 #include <asm/sections.h>
34 #include <asm/mmu_context.h>
35 #include <asm/war.h>
36 #include <asm/cacheflush.h> /* for run_uncached() */
37 #include <asm/traps.h>
38 #include <asm/dma-coherence.h>
39 #include <asm/gcmpregs.h>
40 
41 /*
42  * Special Variant of smp_call_function for use by cache functions:
43  *
44  *  o No return value
45  *  o collapses to normal function call on UP kernels
46  *  o collapses to normal function call on systems with a single shared
47  *    primary cache.
48  *  o doesn't disable interrupts on the local CPU
49  *
50  *  Note: this function is now used for address cacheops only
51  *
52  *  Note2: It is unsafe to issue address cacheops via an SMP call; another CPU
53  *         may not have this process' address map (ASID) loaded into EntryHi,
54  *         and handling that usually requires tricks which are absent from this
55  *         file.  Cross-CPU address cacheops are much easier and safer.
56  */
57 static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
58 {
59 	preempt_disable();
60 
61 #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
62 	smp_call_function(func, info, 1);
63 #endif
64 	func(info);
65 	preempt_enable();
66 }
67 
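/* Index cacheops only act on the local core's primary caches.  On CMP SMP
 * systems every core has its own L1, so a local index op cannot stand in for
 * a system-wide flush; treat index cacheops as "unsafe" there and fan the
 * flush out to the other CPUs (see r4k_indexop_on_each_cpu() below). */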
68 #if defined(CONFIG_MIPS_CMP) && defined(CONFIG_SMP)
69 #define cpu_has_safe_index_cacheops 0
70 #else
71 #define cpu_has_safe_index_cacheops 1
72 #endif
73 
74 /*
75  * This variant of smp_call_function is used for index cacheops only.
76  */
77 static inline void r4k_indexop_on_each_cpu(void (*func) (void *info), void *info)
78 {
79 	preempt_disable();
80 
81 #ifdef CONFIG_SMP
82 	if (!cpu_has_safe_index_cacheops) {
83 
84 		if (smp_num_siblings > 1) {
85 			cpumask_t tmp_mask = INIT_CPUMASK;
86 			int cpu, this_cpu, n = 0;
87 
88 			/* If the processor doesn't have safe index cacheops
89 			   (likely), run the cache flush on the other CPUs too.
90 			   Siblings are assumed to share a common L1 cache, so
91 			   run the cache flush only once per sibling group. LY22 */
92 
93 			this_cpu = smp_processor_id();
94 			for_each_online_cpu(cpu) {
95 
96 				if (cpumask_test_cpu(cpu, (&per_cpu(cpu_sibling_map, this_cpu))))
97 					continue;
98 
99 				if (cpumask_intersects(&tmp_mask, (&per_cpu(cpu_sibling_map, cpu))))
100 					continue;
101 				cpu_set(cpu, tmp_mask);
102 				n++;
103 			}
104 			if (n)
105 				smp_call_function_many(&tmp_mask, func, info, 1);
106 		} else
107 			smp_call_function(func, info, 1);
108 	}
109 #endif
110 	func(info);
111 	preempt_enable();
112 }
113 
114 /*  Define a rough size below which address cacheops are still cheaper than
115  *  index cacheops over the whole cache (in D/I-cache size terms).
116  *  The value "2" reflects the expense of smp_call_function() on top of a
117  *  whole-cache flush via index cacheops.
118  */
119 #ifndef CACHE_CPU_LATENCY
120 #ifdef CONFIG_SMP
121 #define CACHE_CPU_LATENCY   (2)
122 #else
123 #define CACHE_CPU_LATENCY   (1)
124 #endif
125 #endif
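/* Example: with CACHE_CPU_LATENCY == 2, a range smaller than twice the cache
 * size is still flushed by address on every CPU, while anything larger falls
 * back to indexed whole-cache flushes (see r4k_flush_icache_range()). */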
126 
127 
128 /*
129  * Must die.
130  */
131 static unsigned long icache_size __read_mostly;
132 static unsigned long dcache_size __read_mostly;
133 static unsigned long scache_size __read_mostly;
134 
135 /*
136  * Dummy cache handling routines for machines without board caches
137  */
138 static void cache_noop(void) {}
139 
140 static struct bcache_ops no_sc_ops = {
141 	.bc_enable = (void *)cache_noop,
142 	.bc_disable = (void *)cache_noop,
143 	.bc_wback_inv = (void *)cache_noop,
144 	.bc_inv = (void *)cache_noop
145 };
146 
147 struct bcache_ops *bcops = &no_sc_ops;
148 
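/* PRID checks for R4600 rev 1.x and 2.x parts, which need the hit-cacheop
 * workarounds implemented below. */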
149 #define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
150 #define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)
151 
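/* R4600 hit-cacheop workaround: 2.x parts get a dummy uncached (CKSEG1) load
 * before the hit op, 1.x parts get a few nops to step over the pipeline
 * hazard.  Expands to nothing when the workarounds are configured out. */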
152 #define R4600_HIT_CACHEOP_WAR_IMPL					\
153 do {									\
154 	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
155 		*(volatile unsigned long *)CKSEG1;			\
156 	if (R4600_V1_HIT_CACHEOP_WAR)					\
157 		__asm__ __volatile__("nop;nop;nop;nop");		\
158 } while (0)
159 
160 static void (*r4k_blast_dcache_page)(unsigned long addr);
161 
162 static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
163 {
164 	R4600_HIT_CACHEOP_WAR_IMPL;
165 	blast_dcache32_page(addr);
166 }
167 
168 static inline void r4k_blast_dcache_page_dc64(unsigned long addr)
169 {
170 	R4600_HIT_CACHEOP_WAR_IMPL;
171 	blast_dcache64_page(addr);
172 }
173 
174 static void __cpuinit r4k_blast_dcache_page_setup(void)
175 {
176 	unsigned long  dc_lsize = cpu_dcache_line_size();
177 
178 	if (dc_lsize == 0)
179 		r4k_blast_dcache_page = (void *)cache_noop;
180 	else if (dc_lsize == 16)
181 		r4k_blast_dcache_page = blast_dcache16_page;
182 	else if (dc_lsize == 32)
183 		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
184 	else if (dc_lsize == 64)
185 		r4k_blast_dcache_page = r4k_blast_dcache_page_dc64;
186 }
187 
188 #ifndef CONFIG_EVA
189 #define r4k_blast_dcache_user_page  r4k_blast_dcache_page
190 #else
191 
192 static void (*r4k_blast_dcache_user_page)(unsigned long addr);
193 
194 static void __cpuinit r4k_blast_dcache_user_page_setup(void)
195 {
196 	unsigned long  dc_lsize = cpu_dcache_line_size();
197 
198 	if (dc_lsize == 0)
199 		r4k_blast_dcache_user_page = (void *)cache_noop;
200 	else if (dc_lsize == 16)
201 		r4k_blast_dcache_user_page = blast_dcache16_user_page;
202 	else if (dc_lsize == 32)
203 		r4k_blast_dcache_user_page = blast_dcache32_user_page;
204 	else if (dc_lsize == 64)
205 		r4k_blast_dcache_user_page = blast_dcache64_user_page;
206 }
207 
208 #endif
209 
210 static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);
211 
212 static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
213 {
214 	unsigned long dc_lsize = cpu_dcache_line_size();
215 
216 	if (dc_lsize == 0)
217 		r4k_blast_dcache_page_indexed = (void *)cache_noop;
218 	else if (dc_lsize == 16)
219 		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
220 	else if (dc_lsize == 32)
221 		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
222 	else if (dc_lsize == 64)
223 		r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
224 }
225 
226 void (* r4k_blast_dcache)(void);
227 EXPORT_SYMBOL(r4k_blast_dcache);
228 
229 static void __cpuinit r4k_blast_dcache_setup(void)
230 {
231 	unsigned long dc_lsize = cpu_dcache_line_size();
232 
233 	if (dc_lsize == 0)
234 		r4k_blast_dcache = (void *)cache_noop;
235 	else if (dc_lsize == 16)
236 		r4k_blast_dcache = blast_dcache16;
237 	else if (dc_lsize == 32)
238 		r4k_blast_dcache = blast_dcache32;
239 	else if (dc_lsize == 64)
240 		r4k_blast_dcache = blast_dcache64;
241 }
242 
243 /* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
244 #define JUMP_TO_ALIGN(order) \
245 	__asm__ __volatile__( \
246 		"b\t1f\n\t" \
247 		".align\t" #order "\n\t" \
248 		"1:\n\t" \
249 		)
250 #define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
251 #define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11)
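/* Used by the TX49 workarounds below: the I-cache is invalidated in 1 KB
 * chunks (32 lines x 32 bytes, unrolled).  Aligning the loops to 2 KB and
 * then 1 KB keeps the invalidation code out of the chunks it is currently
 * blasting - first the odd chunks are invalidated from an even chunk, then
 * the even chunks from an odd one. */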
252 
253 static inline void blast_r4600_v1_icache32(void)
254 {
255 	unsigned long flags;
256 
257 	local_irq_save(flags);
258 	blast_icache32();
259 	local_irq_restore(flags);
260 }
261 
262 static inline void tx49_blast_icache32(void)
263 {
264 	unsigned long start = INDEX_BASE;
265 	unsigned long end = start + current_cpu_data.icache.waysize;
266 	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
267 	unsigned long ws_end = current_cpu_data.icache.ways <<
268 			       current_cpu_data.icache.waybit;
269 	unsigned long ws, addr;
270 
271 	CACHE32_UNROLL32_ALIGN2;
272 	/* I'm in even chunk.  blast odd chunks */
273 	for (ws = 0; ws < ws_end; ws += ws_inc)
274 		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
275 			cache32_unroll32(addr|ws, Index_Invalidate_I);
276 	CACHE32_UNROLL32_ALIGN;
277 	/* I'm in odd chunk.  blast even chunks */
278 	for (ws = 0; ws < ws_end; ws += ws_inc)
279 		for (addr = start; addr < end; addr += 0x400 * 2)
280 			cache32_unroll32(addr|ws, Index_Invalidate_I);
281 }
282 
283 static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
284 {
285 	unsigned long flags;
286 
287 	local_irq_save(flags);
288 	blast_icache32_page_indexed(page);
289 	local_irq_restore(flags);
290 }
291 
292 static inline void tx49_blast_icache32_page_indexed(unsigned long page)
293 {
294 	unsigned long indexmask = current_cpu_data.icache.waysize - 1;
295 	unsigned long start = INDEX_BASE + (page & indexmask);
296 	unsigned long end = start + PAGE_SIZE;
297 	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
298 	unsigned long ws_end = current_cpu_data.icache.ways <<
299 			       current_cpu_data.icache.waybit;
300 	unsigned long ws, addr;
301 
302 	CACHE32_UNROLL32_ALIGN2;
303 	/* I'm in even chunk.  blast odd chunks */
304 	for (ws = 0; ws < ws_end; ws += ws_inc)
305 		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
306 			cache32_unroll32(addr|ws, Index_Invalidate_I);
307 	CACHE32_UNROLL32_ALIGN;
308 	/* I'm in odd chunk.  blast even chunks */
309 	for (ws = 0; ws < ws_end; ws += ws_inc)
310 		for (addr = start; addr < end; addr += 0x400 * 2)
311 			cache32_unroll32(addr|ws, Index_Invalidate_I);
312 }
313 
314 static void (* r4k_blast_icache_page)(unsigned long addr);
315 
316 static void __cpuinit r4k_blast_icache_page_setup(void)
317 {
318 	unsigned long ic_lsize = cpu_icache_line_size();
319 
320 	if (ic_lsize == 0)
321 		r4k_blast_icache_page = (void *)cache_noop;
322 	else if (ic_lsize == 16)
323 		r4k_blast_icache_page = blast_icache16_page;
324 	else if (ic_lsize == 32)
325 		r4k_blast_icache_page = blast_icache32_page;
326 	else if (ic_lsize == 64)
327 		r4k_blast_icache_page = blast_icache64_page;
328 }
329 
330 #ifndef CONFIG_EVA
331 #define r4k_blast_icache_user_page  r4k_blast_icache_page
332 #else
333 
334 static void (* r4k_blast_icache_user_page)(unsigned long addr);
335 
336 static void __cpuinit r4k_blast_icache_user_page_setup(void)
337 {
338 	unsigned long ic_lsize = cpu_icache_line_size();
339 
340 	if (ic_lsize == 0)
341 		r4k_blast_icache_user_page = (void *)cache_noop;
342 	else if (ic_lsize == 16)
343 		r4k_blast_icache_user_page = blast_icache16_user_page;
344 	else if (ic_lsize == 32)
345 		r4k_blast_icache_user_page = blast_icache32_user_page;
346 	else if (ic_lsize == 64)
347 		r4k_blast_icache_user_page = blast_icache64_user_page;
348 }
349 
350 #endif
351 
352 static void (* r4k_blast_icache_page_indexed)(unsigned long addr);
353 
354 static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
355 {
356 	unsigned long ic_lsize = cpu_icache_line_size();
357 
358 	if (ic_lsize == 0)
359 		r4k_blast_icache_page_indexed = (void *)cache_noop;
360 	else if (ic_lsize == 16)
361 		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
362 	else if (ic_lsize == 32) {
363 		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
364 			r4k_blast_icache_page_indexed =
365 				blast_icache32_r4600_v1_page_indexed;
366 		else if (TX49XX_ICACHE_INDEX_INV_WAR)
367 			r4k_blast_icache_page_indexed =
368 				tx49_blast_icache32_page_indexed;
369 		else
370 			r4k_blast_icache_page_indexed =
371 				blast_icache32_page_indexed;
372 	} else if (ic_lsize == 64)
373 		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
374 }
375 
376 void (* r4k_blast_icache)(void);
377 EXPORT_SYMBOL(r4k_blast_icache);
378 
379 static void __cpuinit r4k_blast_icache_setup(void)
380 {
381 	unsigned long ic_lsize = cpu_icache_line_size();
382 
383 	if (ic_lsize == 0)
384 		r4k_blast_icache = (void *)cache_noop;
385 	else if (ic_lsize == 16)
386 		r4k_blast_icache = blast_icache16;
387 	else if (ic_lsize == 32) {
388 		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
389 			r4k_blast_icache = blast_r4600_v1_icache32;
390 		else if (TX49XX_ICACHE_INDEX_INV_WAR)
391 			r4k_blast_icache = tx49_blast_icache32;
392 		else
393 			r4k_blast_icache = blast_icache32;
394 	} else if (ic_lsize == 64)
395 		r4k_blast_icache = blast_icache64;
396 }
397 
398 static void (* r4k_blast_scache_page)(unsigned long addr);
399 
400 static void __cpuinit r4k_blast_scache_page_setup(void)
401 {
402 	unsigned long sc_lsize = cpu_scache_line_size();
403 
404 	if (scache_size == 0)
405 		r4k_blast_scache_page = (void *)cache_noop;
406 	else if (sc_lsize == 16)
407 		r4k_blast_scache_page = blast_scache16_page;
408 	else if (sc_lsize == 32)
409 		r4k_blast_scache_page = blast_scache32_page;
410 	else if (sc_lsize == 64)
411 		r4k_blast_scache_page = blast_scache64_page;
412 	else if (sc_lsize == 128)
413 		r4k_blast_scache_page = blast_scache128_page;
414 }
415 
416 static void (* r4k_blast_scache_page_indexed)(unsigned long addr);
417 
418 static void __cpuinit r4k_blast_scache_page_indexed_setup(void)
419 {
420 	unsigned long sc_lsize = cpu_scache_line_size();
421 
422 	if (scache_size == 0)
423 		r4k_blast_scache_page_indexed = (void *)cache_noop;
424 	else if (sc_lsize == 16)
425 		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
426 	else if (sc_lsize == 32)
427 		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
428 	else if (sc_lsize == 64)
429 		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
430 	else if (sc_lsize == 128)
431 		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
432 }
433 
434 static void (* r4k_blast_scache)(void);
435 
436 static void __cpuinit r4k_blast_scache_setup(void)
437 {
438 	unsigned long sc_lsize = cpu_scache_line_size();
439 
440 	if (scache_size == 0)
441 		r4k_blast_scache = (void *)cache_noop;
442 	else if (sc_lsize == 16)
443 		r4k_blast_scache = blast_scache16;
444 	else if (sc_lsize == 32)
445 		r4k_blast_scache = blast_scache32;
446 	else if (sc_lsize == 64)
447 		r4k_blast_scache = blast_scache64;
448 	else if (sc_lsize == 128)
449 		r4k_blast_scache = blast_scache128;
450 }
451 
452 static inline void local_r4k___flush_cache_all(void * args)
453 {
454 #if defined(CONFIG_CPU_LOONGSON2)
455 	r4k_blast_scache();
456 	return;
457 #endif
458 	r4k_blast_dcache();
459 	if (!cpu_has_ic_fills_f_dc)
460 		mb();
461 	r4k_blast_icache();
462 
463 	switch (current_cpu_type()) {
464 	case CPU_R4000SC:
465 	case CPU_R4000MC:
466 	case CPU_R4400SC:
467 	case CPU_R4400MC:
468 	case CPU_R10000:
469 	case CPU_R12000:
470 	case CPU_R14000:
471 		r4k_blast_scache();
472 	}
473 }
474 
475 static void r4k___flush_cache_all(void)
476 {
477 	r4k_indexop_on_each_cpu(local_r4k___flush_cache_all, NULL);
478 }
479 
480 static inline int has_valid_asid(const struct mm_struct *mm)
481 {
482 #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
483 	int i;
484 
485 	for_each_online_cpu(i)
486 		if (cpu_context(i, mm))
487 			return 1;
488 
489 	return 0;
490 #else
491 	return cpu_context(smp_processor_id(), mm);
492 #endif
493 }
494 
495 
496 static inline void local_r4__flush_dcache(void *args)
497 {
498 	r4k_blast_dcache();
499 }
500 
501 struct vmap_args {
502 	unsigned long start;
503 	unsigned long end;
504 };
505 
506 static inline void local_r4__flush_cache_vmap(void *args)
507 {
508 	blast_dcache_range(((struct vmap_args *)args)->start,((struct vmap_args *)args)->end);
509 }
510 
511 static void r4k__flush_cache_vmap(unsigned long start, unsigned long end)
512 {
513 	unsigned long size = end - start;
514 
515 	if (cpu_has_cm3_inclusive_pcaches)
516 		return;
517 
518 	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
519 		r4k_blast_dcache();
520 	} else {
521 /* Commented out until the bug in free_unmap_vmap_area() is fixed - it is called
522    with an unmapped page and the address cache op takes a TLB refill exception
523 		if (size >= (dcache_size * CACHE_CPU_LATENCY))
524  */
525 			r4k_indexop_on_each_cpu(local_r4__flush_dcache, NULL);
526 /* Commented out until the bug in free_unmap_vmap_area() is fixed - it is called
527    with an unmapped page and the address cache op takes a TLB refill exception
528 		else {
529 			struct vmap_args args;
530 
531 			args.start = start;
532 			args.end = end;
533 			r4k_on_each_cpu(local_r4__flush_cache_vmap, (void *)&args);
534 		}
535  */
536 	}
537 }
538 
539 static void r4k__flush_cache_vunmap(unsigned long start, unsigned long end)
540 {
541 	unsigned long size = end - start;
542 
543 	if (cpu_has_cm3_inclusive_pcaches)
544 		return;
545 
546 	if (cpu_has_safe_index_cacheops && size >= dcache_size)
547 		r4k_blast_dcache();
548 	else {
549 /* Commented out until the bug in free_unmap_vmap_area() is fixed - it is called
550    with an unmapped page and the address cache op takes a TLB refill exception
551 		if (size >= (dcache_size * CACHE_CPU_LATENCY))
552  */
553 			r4k_indexop_on_each_cpu(local_r4__flush_dcache, NULL);
554 /* Commented out until the bug in free_unmap_vmap_area() is fixed - it is called
555    with an unmapped page and the address cache op takes a TLB refill exception
556 		else {
557 			struct vmap_args args;
558 
559 			args.start = start;
560 			args.end = end;
561 			r4k_on_each_cpu(local_r4__flush_cache_vmap, (void *)&args);
562 		}
563  */
564 	}
565 }
566 
567 
568 static inline void local_r4k_flush_cache_range(void * args)
569 {
570 	struct vm_area_struct *vma = args;
571 	int exec = vma->vm_flags & VM_EXEC;
572 
573 	if (!(has_valid_asid(vma->vm_mm)))
574 		return;
575 
576 	if (!cpu_has_cm3_inclusive_pcaches)
577 		r4k_blast_dcache();
578 	if (exec) {
579 		if ((!cpu_has_cm3_inclusive_pcaches) && !cpu_has_ic_fills_f_dc)
580 			mb();
581 		r4k_blast_icache();
582 	}
583 }
584 
585 static void r4k_flush_cache_range(struct vm_area_struct *vma,
586 	unsigned long start, unsigned long end)
587 {
588 	int exec = vma->vm_flags & VM_EXEC;
589 
590 	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
591 		r4k_indexop_on_each_cpu(local_r4k_flush_cache_range, vma);
592 }
593 
594 static inline void local_r4k_flush_cache_mm(void * args)
595 {
596 	struct mm_struct *mm = args;
597 
598 	if (!has_valid_asid(mm))
599 		return;
600 
601 	/*
602 	 * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if we
603 	 * only flush the primary caches but R10000 and R12000 behave sane ...
604 	 * R4000SC and R4400SC indexed S-cache ops also invalidate primary
605 	 * caches, so we can bail out early.
606 	 */
607 	if (current_cpu_type() == CPU_R4000SC ||
608 	    current_cpu_type() == CPU_R4000MC ||
609 	    current_cpu_type() == CPU_R4400SC ||
610 	    current_cpu_type() == CPU_R4400MC) {
611 		r4k_blast_scache();
612 		return;
613 	}
614 
615 	r4k_blast_dcache();
616 }
617 
618 static void r4k_flush_cache_mm(struct mm_struct *mm)
619 {
620 	if (!cpu_has_dc_aliases)
621 		return;
622 
623 	r4k_indexop_on_each_cpu(local_r4k_flush_cache_mm, mm);
624 }
625 
626 struct flush_cache_page_args {
627 	struct vm_area_struct *vma;
628 	unsigned long addr;
629 	unsigned long pfn;
630 };
631 
632 static inline void local_r4k_flush_cache_page(void *args)
633 {
634 	struct flush_cache_page_args *fcp_args = args;
635 	struct vm_area_struct *vma = fcp_args->vma;
636 	unsigned long addr = fcp_args->addr;
637 	struct page *page = pfn_to_page(fcp_args->pfn);
638 	int exec = vma->vm_flags & VM_EXEC;
639 	struct mm_struct *mm = vma->vm_mm;
640 	int map_coherent = 0;
641 	pgd_t *pgdp;
642 	pud_t *pudp;
643 	pmd_t *pmdp;
644 	pte_t *ptep;
645 	void *vaddr;
646 	int dontflash = 0;
647 
648 	/*
649 	 * If the mm owns no valid ASID yet, it cannot possibly have gotten
650 	 * this page into the cache.
651 	 */
652 	if (!has_valid_asid(mm))
653 		return;
654 
655 	addr &= PAGE_MASK;
656 	pgdp = pgd_offset(mm, addr);
657 	pudp = pud_offset(pgdp, addr);
658 	pmdp = pmd_offset(pudp, addr);
659 	ptep = pte_offset(pmdp, addr);
660 
661 	/*
662 	 * If the page isn't marked valid, the page cannot possibly be
663 	 * in the cache.
664 	 */
665 	if (!(pte_present(*ptep)))
666 		return;
667 
668 	/*  accelerate it! See below, just skipping kmap_*()/kunmap_*() */
669 	if ((!exec) && !cpu_has_dc_aliases)
670 		return;
671 
672 	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
673 		if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
674 			if (!cpu_has_cm3_inclusive_pcaches) {
675 				r4k_blast_dcache_user_page(addr);
676 				if (exec && (!cpu_has_cm2) && !cpu_has_ic_fills_f_dc)
677 					mb();
678 			}
679 			if (exec && !cpu_icache_snoops_remote_store)
680 				r4k_blast_scache_page(addr);
681 		}
682 		if (exec) {
683 			r4k_blast_icache_user_page(addr);
684 			if (gcmp_present)
685 				mb();
686 		}
687 	} else {
688 		/*
689 		 * Use kmap_coherent or kmap_atomic to do flushes for
690 		 * another ASID than the current one.
691 		 */
692 		map_coherent = (cpu_has_dc_aliases &&
693 				page_mapped(page) && !Page_dcache_dirty(page));
694 		if (map_coherent)
695 			vaddr = kmap_coherent(page, addr);
696 		else
697 			vaddr = kmap_atomic(page);
698 		addr = (unsigned long)vaddr;
699 
700 		if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
701 			if (!cpu_has_cm3_inclusive_pcaches) {
702 				r4k_blast_dcache_page(addr);
703 				if (exec && (!cpu_has_cm2) && !cpu_has_ic_fills_f_dc)
704 					mb();
705 			}
706 			if (exec && !cpu_icache_snoops_remote_store)
707 				r4k_blast_scache_page(addr);
708 		}
709 		if (exec) {
710 			if (cpu_has_vtag_icache && mm == current->active_mm) {
711 				int cpu = smp_processor_id();
712 
713 				if (cpu_context(cpu, mm) != 0)
714 					drop_mmu_context(mm, cpu);
715 				dontflash = 1;
716 			} else
717 				if (map_coherent || !cpu_has_ic_aliases) {
718 					r4k_blast_icache_page(addr);
719 					if (gcmp_present)
720 						mb();
721 				}
722 		}
723 
724 		if (map_coherent)
725 			kunmap_coherent();
726 		else
727 			kunmap_atomic(vaddr);
728 
729 		/*  in case of I-cache aliasing - blast it via coherent page */
730 		if (exec && cpu_has_ic_aliases && (!dontflash) && !map_coherent) {
731 			vaddr = kmap_coherent(page, addr);
732 			r4k_blast_icache_page((unsigned long)vaddr);
733 			if (gcmp_present)
734 				mb();
735 			kunmap_coherent();
736 		}
737 	}
738 }
739 
740 static void r4k_flush_cache_page(struct vm_area_struct *vma,
741 	unsigned long addr, unsigned long pfn)
742 {
743 	struct flush_cache_page_args args;
744 
745 	args.vma = vma;
746 	args.addr = addr;
747 	args.pfn = pfn;
748 
749 	r4k_on_each_cpu(local_r4k_flush_cache_page, &args);
750 	if (cpu_has_dc_aliases)
751 		ClearPageDcacheDirty(pfn_to_page(pfn));
752 }
753 
754 static inline void local_r4k_flush_data_cache_page(void * addr)
755 {
756 	r4k_blast_dcache_page((unsigned long) addr);
757 }
758 
759 static void r4k_flush_data_cache_page(unsigned long addr)
760 {
761 	if (cpu_has_cm3_inclusive_pcaches)
762 		return;
763 
764 	if (in_atomic())
765 		local_r4k_flush_data_cache_page((void *)addr);
766 	else
767 		r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr);
768 }
769 
770 
771 struct mips_flush_data_cache_range_args {
772 	struct vm_area_struct *vma;
773 	unsigned long vaddr;
774 	unsigned long start;
775 	unsigned long len;
776 };
777 
778 static inline void local_r4k_mips_flush_data_cache_range(void *args)
779 {
780 	struct mips_flush_data_cache_range_args *f_args = args;
781 	unsigned long vaddr = f_args->vaddr;
782 	unsigned long start = f_args->start;
783 	unsigned long len = f_args->len;
784 	struct vm_area_struct * vma = f_args->vma;
785 
786 	if (!cpu_has_cm3_inclusive_pcaches)
787 		blast_dcache_range(start, start + len);
788 
789 	if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc) {
790 		if ((!cpu_has_cm3_inclusive_pcaches) && !cpu_has_cm2)
791 			mb();
792 
793 		/* The vma is given for the exec check only; the mm is the current
794 		   one, so there is no non-current vma page flush - just user or kernel. */
795 		protected_blast_icache_range(vaddr, vaddr + len);
796 		if (gcmp_present)
797 			mb();
798 	}
799 }
800 
801 /* Flush dirty kernel data and the corresponding user instructions (if needed).
802    Used in copy_to_user_page(). */
803 static void r4k_mips_flush_data_cache_range(struct vm_area_struct *vma,
804 	unsigned long vaddr, struct page *page, unsigned long start,
805 	unsigned long len)
806 {
807 	struct mips_flush_data_cache_range_args args;
808 
809 	if (cpu_has_cm3_inclusive_pcaches && (cpu_has_ic_fills_f_dc ||
810 	    !(vma->vm_flags & VM_EXEC)))
811 		return;
812 
813 	args.vma = vma;
814 	args.vaddr = vaddr;
815 	args.start = start;
816 	args.len = len;
817 
818 	r4k_on_each_cpu(local_r4k_mips_flush_data_cache_range, (void *)&args);
819 }
820 
821 
822 struct flush_icache_range_args {
823 	unsigned long start;
824 	unsigned long end;
825 };
826 
827 static inline void local_r4k_flush_icache(void *args)
828 {
829 	if ((!cpu_has_ic_fills_f_dc) && !cpu_has_cm3_inclusive_pcaches) {
830 		r4k_blast_dcache();
831 		mb();
832 	}
833 
834 	r4k_blast_icache();
835 	if (gcmp_present)
836 		mb();
837 }
838 
839 static inline void local_r4k_flush_icache_range_ipi(void *args)
840 {
841 	struct flush_icache_range_args *fir_args = args;
842 	unsigned long start = fir_args->start;
843 	unsigned long end = fir_args->end;
844 
845 	if ((!cpu_has_ic_fills_f_dc) && !cpu_has_cm3_inclusive_pcaches) {
846 		R4600_HIT_CACHEOP_WAR_IMPL;
847 		protected_blast_dcache_range(start, end);
848 
849 		if (!cpu_has_cm2)
850 			mb();
851 	}
852 
853 	protected_blast_icache_range(start, end);
854 	if (gcmp_present)
855 		mb();
856 }
857 
858 /* This function is used on the local CPU only, e.g. during boot. */
859 static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end)
860 {
861 	if ((!cpu_has_ic_fills_f_dc) && !cpu_has_cm3_inclusive_pcaches) {
862 		if (end - start >= dcache_size) {
863 			r4k_blast_dcache();
864 		} else {
865 			R4600_HIT_CACHEOP_WAR_IMPL;
866 			blast_dcache_range(start, end);
867 		}
868 		mb();
869 	}
870 
871 	if (end - start > icache_size)
872 		r4k_blast_icache();
873 	else
874 		blast_icache_range(start, end);
875 #ifdef CONFIG_EVA
876 	/* This is here to smooth out the effect of any kind of address aliasing.
877 	   It is used only during boot, so it has no impact on
878 	   performance. LY22 */
879 	bc_wback_inv(start, (end - start));
880 #endif
881 	__sync();
882 }
883 
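/*
 * Flush strategy: if the range covers the whole I-cache or D-cache and index
 * ops are safe, flush locally by index; if the range is small (below the
 * CACHE_CPU_LATENCY threshold), do protected address ops on every CPU via
 * IPI; otherwise fall back to indexed whole-cache flushes via
 * r4k_indexop_on_each_cpu().
 */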
884 /* This function can be called for kernel OR user addresses:
885  * kernel for modules and *gdb*, user for binfmt_a.out/flat.
886  * So take care - check get_fs(). */
887 static void r4k_flush_icache_range(unsigned long start, unsigned long end)
888 {
889 	struct flush_icache_range_args args;
890 	unsigned long size = end - start;
891 
892 	args.start = start;
893 	args.end = end;
894 
895 	if (cpu_has_safe_index_cacheops &&
896 	    (((size >= icache_size) && !cpu_has_ic_fills_f_dc) ||
897 	     (size >= dcache_size)))
898 		local_r4k_flush_icache((void *)&args);
899 	else if (((size < (icache_size * CACHE_CPU_LATENCY)) && !cpu_has_ic_fills_f_dc) ||
900 		 (size < (dcache_size * CACHE_CPU_LATENCY))) {
901 		struct flush_icache_range_args args;
902 
903 		args.start = start;
904 		args.end = end;
905 		r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, (void *)&args);
906 	} else
907 		r4k_indexop_on_each_cpu(local_r4k_flush_icache, NULL);
908 	instruction_hazard();
909 }
910 
911 
912 #ifdef CONFIG_DMA_NONCOHERENT
913 
914 static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
915 {
916 	/* Catch bad driver code */
917 	BUG_ON(size == 0);
918 
919 	preempt_disable();
920 	if (cpu_has_inclusive_pcaches) {
921 		if (size >= scache_size)
922 			r4k_blast_scache();
923 		else
924 			blast_scache_range(addr, addr + size);
925 		preempt_enable();
926 		__sync();
927 		return;
928 	}
929 
930 	/*
931 	 * Either no secondary cache or the available caches don't have the
932 	 * subset property so we have to flush the primary caches
933 	 * explicitly
934 	 */
935 	if (!cpu_has_cm3_inclusive_pcaches) {
936 		if (cpu_has_safe_index_cacheops && size >= dcache_size) {
937 			r4k_blast_dcache();
938 		} else {
939 			R4600_HIT_CACHEOP_WAR_IMPL;
940 			blast_dcache_range(addr, addr + size);
941 		}
942 	}
943 	preempt_enable();
944 
945 	bc_wback_inv(addr, size);
946 	if (!cpu_has_cm2_l2sync)
947 		__sync();
948 }
949 
950 static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
951 {
952 	/* Catch bad driver code */
953 	BUG_ON(size == 0);
954 
955 	preempt_disable();
956 	if (cpu_has_inclusive_pcaches) {
957 		if (size >= scache_size)
958 			r4k_blast_scache();
959 		else {
960 			/*
961 			 * There is no clearly documented alignment requirement
962 			 * for the cache instruction on MIPS processors and
963 			 * some processors, among them the RM5200 and RM7000
964 			 * QED processors will throw an address error for cache
965 			 * hit ops with insufficient alignment.	 Solved by
966 			 * aligning the address to cache line size.
967 			 */
968 			blast_inv_scache_range(addr, addr + size);
969 		}
970 		preempt_enable();
971 		__sync();
972 		return;
973 	}
974 
975 	if (!cpu_has_cm3_inclusive_pcaches) {
976 		if (cpu_has_safe_index_cacheops && size >= dcache_size) {
977 			r4k_blast_dcache();
978 		} else {
979 			R4600_HIT_CACHEOP_WAR_IMPL;
980 			blast_inv_dcache_range(addr, addr + size);
981 		}
982 	}
983 	preempt_enable();
984 
985 	bc_inv(addr, size);
986 	__sync();
987 }
988 #endif /* CONFIG_DMA_NONCOHERENT */
989 
990 /*
991  * While we're protected against bad userland addresses we don't care
992  * very much about what happens in that case.  Usually a segmentation
993  * fault will dump the process later on anyway ...
994  */
995 #ifdef CONFIG_CPU_MIPSR6
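/* MIPS R6: SYNCI writes back the D-cache line and invalidates the matching
 * I-cache line for the given address, so no per-cache line-size handling is
 * needed; the following SYNC orders the operation. */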
996 static void local_r4k_flush_cache_sigtramp(void * arg)
997 {
998 	register unsigned long addr = (unsigned long) arg;
999 
1000 	__asm__ __volatile__(
1001 		"synci  0(%0)       \n"
1002 		"sync   0x10        \n" /* SYNC MB */
1003 		::"r"(addr):"memory");
1004 }
1005 #else
1006 static void local_r4k_flush_cache_sigtramp(void * arg)
1007 {
1008 	unsigned long ic_lsize = cpu_icache_line_size();
1009 	unsigned long dc_lsize = cpu_dcache_line_size();
1010 	unsigned long sc_lsize = cpu_scache_line_size();
1011 	unsigned long addr = (unsigned long) arg;
1012 
1013 	R4600_HIT_CACHEOP_WAR_IMPL;
1014 	if (dc_lsize)
1015 		protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
1016 	if (!cpu_icache_snoops_remote_store && scache_size)
1017 		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
1018 	if (ic_lsize)
1019 		protected_flush_icache_line(addr & ~(ic_lsize - 1));
1020 	if (MIPS4K_ICACHE_REFILL_WAR) {
1021 		__asm__ __volatile__ (
1022 			".set push\n\t"
1023 			".set noat\n\t"
1024 			".set mips3\n\t"
1025 #ifdef CONFIG_32BIT
1026 			"la	$at,1f\n\t"
1027 #endif
1028 #ifdef CONFIG_64BIT
1029 			"dla	$at,1f\n\t"
1030 #endif
1031 			"cache	%0,($at)\n\t"
1032 			"nop; nop; nop\n"
1033 			"1:\n\t"
1034 			".set pop"
1035 			:
1036 			: "i" (Hit_Invalidate_I));
1037 	}
1038 	if (MIPS_CACHE_SYNC_WAR)
1039 		__asm__ __volatile__ ("sync");
1040 }
1041 #endif
1042 
1043 static void r4k_flush_cache_sigtramp(unsigned long addr)
1044 {
1045 	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr);
1046 }
1047 
1048 static void r4k_flush_icache_all(void)
1049 {
1050 	if (cpu_has_vtag_icache)
1051 		r4k_blast_icache();
1052 }
1053 
1054 struct flush_kernel_vmap_range_args {
1055 	unsigned long	vaddr;
1056 	int		size;
1057 };
1058 
1059 static inline void local_r4k_flush_kernel_vmap_range(void *args)
1060 {
1061 	struct flush_kernel_vmap_range_args *vmra = args;
1062 	unsigned long vaddr = vmra->vaddr;
1063 	int size = vmra->size;
1064 
1065 	/*
1066 	 * Aliases only affect the primary caches so don't bother with
1067 	 * S-caches or T-caches.
1068 	 */
1069 	if (cpu_has_safe_index_cacheops && size >= dcache_size)
1070 		r4k_blast_dcache();
1071 	else {
1072 		R4600_HIT_CACHEOP_WAR_IMPL;
1073 		blast_dcache_range(vaddr, vaddr + size);
1074 	}
1075 }
1076 
1077 static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
1078 {
1079 	struct flush_kernel_vmap_range_args args;
1080 
1081 	if (cpu_has_cm3_inclusive_pcaches)
1082 		return;
1083 
1084 	args.vaddr = (unsigned long) vaddr;
1085 	args.size = size;
1086 
1087 	if (cpu_has_safe_index_cacheops && size >= dcache_size)
1088 		r4k_indexop_on_each_cpu(local_r4k_flush_kernel_vmap_range, &args);
1089 	else
1090 		r4k_on_each_cpu(local_r4k_flush_kernel_vmap_range, &args);
1091 }
1092 
1093 static inline void rm7k_erratum31(void)
1094 {
1095 	const unsigned long ic_lsize = 32;
1096 	unsigned long addr;
1097 
1098 	/* RM7000 erratum #31. The icache is screwed at startup. */
1099 	write_c0_taglo(0);
1100 	write_c0_taghi(0);
1101 
1102 	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
1103 		__asm__ __volatile__ (
1104 			".set push\n\t"
1105 			".set noreorder\n\t"
1106 			".set mips3\n\t"
1107 			"cache\t%1, 0(%0)\n\t"
1108 			"cache\t%1, 0x1000(%0)\n\t"
1109 			"cache\t%1, 0x2000(%0)\n\t"
1110 			"cache\t%1, 0x3000(%0)\n\t"
1111 			"cache\t%2, 0(%0)\n\t"
1112 			"cache\t%2, 0x1000(%0)\n\t"
1113 			"cache\t%2, 0x2000(%0)\n\t"
1114 			"cache\t%2, 0x3000(%0)\n\t"
1115 			"cache\t%1, 0(%0)\n\t"
1116 			"cache\t%1, 0x1000(%0)\n\t"
1117 			"cache\t%1, 0x2000(%0)\n\t"
1118 			"cache\t%1, 0x3000(%0)\n\t"
1119 			".set pop\n"
1120 			:
1121 			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
1122 	}
1123 }
1124 
1125 static inline void alias_74k_erratum(struct cpuinfo_mips *c)
1126 {
1127 	unsigned int imp = c->processor_id & 0xff00;
1128 	unsigned int rev = c->processor_id & PRID_REV_MASK;
1129 
1130 	/*
1131 	 * Early versions of the 74K do not update the cache tags on a
1132 	 * vtag miss/ptag hit which can occur in the case of KSEG0/KUSEG
1133 	 * aliases. In this case it is better to treat the cache as always
1134 	 * having aliases.
1135 	 */
1136 	switch (imp) {
1137 	case PRID_IMP_74K:
1138 		if (rev <= PRID_REV_ENCODE_332(2, 4, 0))
1139 			c->dcache.flags |= MIPS_CACHE_VTAG;
1140 		if (rev == PRID_REV_ENCODE_332(2, 4, 0))
1141 			write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
1142 		break;
1143 	case PRID_IMP_1074K:
1144 		if (rev <= PRID_REV_ENCODE_332(1, 1, 0)) {
1145 			c->dcache.flags |= MIPS_CACHE_VTAG;
1146 			write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
1147 		}
1148 		break;
1149 	default:
1150 		BUG();
1151 	}
1152 }
1153 
1154 static char *way_string[] __cpuinitdata = { NULL, "direct mapped", "2-way",
1155 	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way",
1156 	"9-way", "10-way", "11-way", "12-way", "13-way", "14-way", "15-way",
1157 	"16-way", "17-way", "18-way", "19-way", "20-way", "21-way", "22-way",
1158 	"23-way", "24-way", "25-way", "26-way", "27-way", "28-way", "29-way",
1159 	"30-way", "31-way", "32-way"
1160 };
1161 
1162 static void __cpuinit probe_pcache(void)
1163 {
1164 	struct cpuinfo_mips *c = &current_cpu_data;
1165 	unsigned int config = read_c0_config();
1166 	unsigned int prid = read_c0_prid();
1167 	unsigned long config1;
1168 	unsigned int lsize;
1169 
1170 	switch (c->cputype) {
1171 	case CPU_R4600:			/* QED style two way caches? */
1172 	case CPU_R4700:
1173 	case CPU_R5000:
1174 	case CPU_NEVADA:
1175 		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
1176 		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
1177 		c->icache.ways = 2;
1178 		c->icache.waybit = __ffs(icache_size/2);
1179 
1180 		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
1181 		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
1182 		c->dcache.ways = 2;
1183 		c->dcache.waybit= __ffs(dcache_size/2);
1184 
1185 		c->options |= MIPS_CPU_CACHE_CDEX_P;
1186 		break;
1187 
1188 	case CPU_R5432:
1189 	case CPU_R5500:
1190 		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
1191 		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
1192 		c->icache.ways = 2;
1193 		c->icache.waybit= 0;
1194 
1195 		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
1196 		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
1197 		c->dcache.ways = 2;
1198 		c->dcache.waybit = 0;
1199 
1200 		c->options |= MIPS_CPU_CACHE_CDEX_P | MIPS_CPU_PREFETCH;
1201 		break;
1202 
1203 	case CPU_TX49XX:
1204 		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
1205 		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
1206 		c->icache.ways = 4;
1207 		c->icache.waybit= 0;
1208 
1209 		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
1210 		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
1211 		c->dcache.ways = 4;
1212 		c->dcache.waybit = 0;
1213 
1214 		c->options |= MIPS_CPU_CACHE_CDEX_P;
1215 		c->options |= MIPS_CPU_PREFETCH;
1216 		break;
1217 
1218 	case CPU_R4000PC:
1219 	case CPU_R4000SC:
1220 	case CPU_R4000MC:
1221 	case CPU_R4400PC:
1222 	case CPU_R4400SC:
1223 	case CPU_R4400MC:
1224 	case CPU_R4300:
1225 		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
1226 		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
1227 		c->icache.ways = 1;
1228 		c->icache.waybit = 0;	/* doesn't matter */
1229 
1230 		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
1231 		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
1232 		c->dcache.ways = 1;
1233 		c->dcache.waybit = 0;	/* does not matter */
1234 
1235 		c->options |= MIPS_CPU_CACHE_CDEX_P;
1236 		break;
1237 
1238 	case CPU_R10000:
1239 	case CPU_R12000:
1240 	case CPU_R14000:
1241 		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
1242 		c->icache.linesz = 64;
1243 		c->icache.ways = 2;
1244 		c->icache.waybit = 0;
1245 
1246 		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
1247 		c->dcache.linesz = 32;
1248 		c->dcache.ways = 2;
1249 		c->dcache.waybit = 0;
1250 
1251 		c->options |= MIPS_CPU_PREFETCH;
1252 		break;
1253 
1254 	case CPU_VR4133:
1255 		write_c0_config(config & ~VR41_CONF_P4K);
1256 	case CPU_VR4131:
1257 		/* Workaround for cache instruction bug of VR4131 */
1258 		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
1259 		    c->processor_id == 0x0c82U) {
1260 			config |= 0x00400000U;
1261 			if (c->processor_id == 0x0c80U)
1262 				config |= VR41_CONF_BP;
1263 			write_c0_config(config);
1264 		} else
1265 			c->options |= MIPS_CPU_CACHE_CDEX_P;
1266 
1267 		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
1268 		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
1269 		c->icache.ways = 2;
1270 		c->icache.waybit = __ffs(icache_size/2);
1271 
1272 		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
1273 		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
1274 		c->dcache.ways = 2;
1275 		c->dcache.waybit = __ffs(dcache_size/2);
1276 		break;
1277 
1278 	case CPU_VR41XX:
1279 	case CPU_VR4111:
1280 	case CPU_VR4121:
1281 	case CPU_VR4122:
1282 	case CPU_VR4181:
1283 	case CPU_VR4181A:
1284 		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
1285 		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
1286 		c->icache.ways = 1;
1287 		c->icache.waybit = 0;	/* doesn't matter */
1288 
1289 		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
1290 		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
1291 		c->dcache.ways = 1;
1292 		c->dcache.waybit = 0;	/* does not matter */
1293 
1294 		c->options |= MIPS_CPU_CACHE_CDEX_P;
1295 		break;
1296 
1297 	case CPU_RM7000:
1298 		rm7k_erratum31();
1299 
1300 		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
1301 		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
1302 		c->icache.ways = 4;
1303 		c->icache.waybit = __ffs(icache_size / c->icache.ways);
1304 
1305 		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
1306 		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
1307 		c->dcache.ways = 4;
1308 		c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);
1309 
1310 		c->options |= MIPS_CPU_CACHE_CDEX_P;
1311 		c->options |= MIPS_CPU_PREFETCH;
1312 		break;
1313 
1314 	case CPU_LOONGSON2:
1315 		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
1316 		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
1317 		if (prid & 0x3)
1318 			c->icache.ways = 4;
1319 		else
1320 			c->icache.ways = 2;
1321 		c->icache.waybit = 0;
1322 
1323 		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
1324 		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
1325 		if (prid & 0x3)
1326 			c->dcache.ways = 4;
1327 		else
1328 			c->dcache.ways = 2;
1329 		c->dcache.waybit = 0;
1330 		break;
1331 
1332 	default:
1333 		if (!(config & MIPS_CONF_M))
1334 			panic("Don't know how to probe P-caches on this cpu.");
1335 
1336 		/*
1337 		 * We seem to be a MIPS32 or MIPS64 CPU,
1338 		 * so let's probe the I-cache ...
1339 		 */
1340 		config1 = read_c0_config1();
1341 
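		/* Config1 encodings: IL/DL give the line size as 2 << n
		   (0 means no cache), IS/DS give sets per way as 64 << n
		   (with 7 wrapping to 32), IA/DA give associativity as n + 1. */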
1342 		if ((lsize = ((config1 >> 19) & 7)))
1343 			c->icache.linesz = 2 << lsize;
1344 		else
1345 			c->icache.linesz = lsize;
1346 		c->icache.sets = 32 << (((config1 >> 22) + 1) & 7);
1347 		c->icache.ways = 1 + ((config1 >> 16) & 7);
1348 
1349 		icache_size = c->icache.sets *
1350 			      c->icache.ways *
1351 			      c->icache.linesz;
1352 		c->icache.waybit = __ffs(icache_size/c->icache.ways);
1353 
1354 		if (config & 0x8)		/* VI bit */
1355 			c->icache.flags |= MIPS_CACHE_VTAG;
1356 
1357 		/*
1358 		 * Now probe the MIPS32 / MIPS64 data cache.
1359 		 */
1360 		c->dcache.flags = 0;
1361 
1362 		if ((lsize = ((config1 >> 10) & 7)))
1363 			c->dcache.linesz = 2 << lsize;
1364 		else
1365 			c->dcache.linesz= lsize;
1366 		c->dcache.sets = 32 << (((config1 >> 13) + 1) & 7);
1367 		c->dcache.ways = 1 + ((config1 >> 7) & 7);
1368 
1369 		dcache_size = c->dcache.sets *
1370 			      c->dcache.ways *
1371 			      c->dcache.linesz;
1372 		c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);
1373 
1374 		c->options |= MIPS_CPU_PREFETCH;
1375 		break;
1376 	}
1377 
1378 	/*
1379 	 * Processor configuration sanity check for the R4000SC erratum
1380 	 * #5.	With page sizes larger than 32kB there is no possibility
1381 	 * to get a VCE exception anymore so we don't care about this
1382 	 * misconfiguration.  The case is rather theoretical anyway;
1383 	 * presumably no vendor is shipping his hardware in the "bad"
1384 	 * configuration.
1385 	 */
1386 	if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
1387 	    !(config & CONF_SC) && c->icache.linesz != 16 &&
1388 	    PAGE_SIZE <= 0x8000)
1389 		panic("Improper R4000SC processor configuration detected");
1390 
1391 	/* compute a couple of other cache variables */
1392 	c->icache.waysize = icache_size / c->icache.ways;
1393 	c->dcache.waysize = dcache_size / c->dcache.ways;
1394 
1395 	c->icache.sets = c->icache.linesz ?
1396 		icache_size / (c->icache.linesz * c->icache.ways) : 0;
1397 	c->dcache.sets = c->dcache.linesz ?
1398 		dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;
1399 
1400 	/*
1401 	 * R10000 and R12000 P-caches are odd in a positive way.  They're 32kB
1402 	 * 2-way virtually indexed, so normally they would suffer from aliases,
1403 	 * but magic in the hardware deals with that for us so we don't need
1404 	 * to take care ourselves.
1405 	 */
1406 	switch (c->cputype) {
1407 	case CPU_20KC:
1408 	case CPU_25KF:
1409 	case CPU_SB1:
1410 	case CPU_SB1A:
1411 	case CPU_XLR:
1412 		c->dcache.flags |= MIPS_CACHE_PINDEX;
1413 		break;
1414 
1415 	case CPU_R10000:
1416 	case CPU_R12000:
1417 	case CPU_R14000:
1418 		break;
1419 
1420 	case CPU_M14KC:
1421 	case CPU_M14KEC:
1422 	case CPU_24K:
1423 	case CPU_34K:
1424 	case CPU_74K:
1425 	case CPU_1004K:
1426 	case CPU_PROAPTIV:
1427 	case CPU_INTERAPTIV:
1428 	case CPU_VIRTUOSO:
1429 	case CPU_P5600:
1430 	case CPU_SAMURAI:
1431 		if (c->cputype == CPU_74K)
1432 			alias_74k_erratum(c);
1433 		if (!(read_c0_config7() & MIPS_CONF7_IAR)) {
1434 			if (c->icache.waysize > PAGE_SIZE)
1435 				c->icache.flags |= MIPS_CACHE_ALIASES;
1436 		}
1437 		if (read_c0_config7() & MIPS_CONF7_AR) {
1438 			/* effectively physically indexed dcache,
1439 			   thus no virtual aliases. */
1440 			c->dcache.flags |= MIPS_CACHE_PINDEX;
1441 			break;
1442 		}
1443 	default:
1444 		if (c->dcache.waysize > PAGE_SIZE)
1445 			c->dcache.flags |= MIPS_CACHE_ALIASES;
1446 	}
1447 
1448 #ifdef  CONFIG_HIGHMEM
1449 	if (((c->dcache.flags & MIPS_CACHE_ALIASES) &&
1450 	     ((c->dcache.waysize / PAGE_SIZE) > FIX_N_COLOURS)) ||
1451 	     ((c->icache.flags & MIPS_CACHE_ALIASES) &&
1452 	     ((c->icache.waysize / PAGE_SIZE) > FIX_N_COLOURS)))
1453 		panic("PAGE_SIZE*WAYS too small for L1 size, too many colors");
1454 #endif
1455 
1456 	switch (c->cputype) {
1457 	case CPU_20KC:
1458 		/*
1459 		 * Some older 20Kc chips don't have the 'VI' bit in
1460 		 * the config register.
1461 		 */
1462 		c->icache.flags |= MIPS_CACHE_VTAG;
1463 		break;
1464 
1465 	case CPU_ALCHEMY:
1466 		c->icache.flags |= MIPS_CACHE_IC_F_DC;
1467 		break;
1468 	}
1469 
1470 #ifdef	CONFIG_CPU_LOONGSON2
1471 	/*
1472 	 * LOONGSON2 has 4 way icache, but when using indexed cache op,
1473 	 * one op will act on all 4 ways
1474 	 */
1475 	c->icache.ways = 1;
1476 #endif
1477 
1478 	printk("Primary instruction cache %ldkB, %s, %s, %slinesize %d bytes.\n",
1479 	       icache_size >> 10, way_string[c->icache.ways],
1480 	       c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT",
1481 	       (c->icache.flags & MIPS_CACHE_ALIASES) ?
1482 			"I-cache aliases, " : "",
1483 	       c->icache.linesz);
1484 
1485 	printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
1486 	       dcache_size >> 10, way_string[c->dcache.ways],
1487 	       (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
1488 	       (c->dcache.flags & MIPS_CACHE_ALIASES) ?
1489 			"cache aliases" : "no aliases",
1490 	       c->dcache.linesz);
1491 }
1492 
1493 /*
1494  * If you even _breathe_ on this function, look at the gcc output and make sure
1495  * it does not pop things on and off the stack for the cache sizing loop that
1496  * executes in KSEG1 space or else you will crash and burn badly.  You have
1497  * been warned.
1498  */
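/*
 * Sizing idea: prime lines at power-of-two offsets from _stext with valid
 * tags, store a zero (invalid) tag at the base index, then read the tags
 * back at each offset; the first offset whose tag reads back as zero has
 * wrapped around onto the base line, so that offset is the S-cache size.
 */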
1499 static int __cpuinit probe_scache(void)
1500 {
1501 	unsigned long flags, addr, begin, end, pow2;
1502 	unsigned int config = read_c0_config();
1503 	struct cpuinfo_mips *c = &current_cpu_data;
1504 
1505 	if (config & CONF_SC)
1506 		return 0;
1507 
1508 	begin = (unsigned long) &_stext;
1509 	begin &= ~((4 * 1024 * 1024) - 1);
1510 	end = begin + (4 * 1024 * 1024);
1511 
1512 	/*
1513 	 * This is such a bitch, you'd think they would make it easy to do
1514 	 * this.  Away you daemons of stupidity!
1515 	 */
1516 	local_irq_save(flags);
1517 
1518 	/* Fill each size-multiple cache line with a valid tag. */
1519 	pow2 = (64 * 1024);
1520 	for (addr = begin; addr < end; addr = (begin + pow2)) {
1521 		unsigned long *p = (unsigned long *) addr;
1522 		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
1523 		pow2 <<= 1;
1524 	}
1525 
1526 	/* Load first line with zero (therefore invalid) tag. */
1527 	write_c0_taglo(0);
1528 	write_c0_taghi(0);
1529 	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
1530 	cache_op(Index_Store_Tag_I, begin);
1531 	cache_op(Index_Store_Tag_D, begin);
1532 	cache_op(Index_Store_Tag_SD, begin);
1533 
1534 	/* Now search for the wrap around point. */
1535 	pow2 = (128 * 1024);
1536 	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
1537 		cache_op(Index_Load_Tag_SD, addr);
1538 		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
1539 		if (!read_c0_taglo())
1540 			break;
1541 		pow2 <<= 1;
1542 	}
1543 	local_irq_restore(flags);
1544 	addr -= begin;
1545 
1546 	scache_size = addr;
1547 	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
1548 	c->scache.ways = 1;
1549 	c->dcache.waybit = 0;		/* does not matter */
1550 
1551 	return 1;
1552 }
1553 
1554 #if defined(CONFIG_CPU_LOONGSON2)
1555 static void __init loongson2_sc_init(void)
1556 {
1557 	struct cpuinfo_mips *c = &current_cpu_data;
1558 
1559 	scache_size = 512*1024;
1560 	c->scache.linesz = 32;
1561 	c->scache.ways = 4;
1562 	c->scache.waybit = 0;
1563 	c->scache.waysize = scache_size / (c->scache.ways);
1564 	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
1565 	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
1566 	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
1567 
1568 	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
1569 }
1570 #endif
1571 
1572 extern int r5k_sc_init(void);
1573 extern int rm7k_sc_init(void);
1574 extern int mips_sc_init(void);
1575 
1576 static void __cpuinit setup_scache(void)
1577 {
1578 	struct cpuinfo_mips *c = &current_cpu_data;
1579 	unsigned int config = read_c0_config();
1580 	int sc_present = 0;
1581 
1582 	/*
1583 	 * Do the probing thing on R4000SC and R4400SC processors.  Other
1584 	 * processors don't have an S-cache that would be relevant to the
1585 	 * Linux memory management.
1586 	 */
1587 	switch (c->cputype) {
1588 	case CPU_R4000SC:
1589 	case CPU_R4000MC:
1590 	case CPU_R4400SC:
1591 	case CPU_R4400MC:
1592 		sc_present = run_uncached(probe_scache);
1593 		if (sc_present)
1594 			c->options |= MIPS_CPU_CACHE_CDEX_S;
1595 		break;
1596 
1597 	case CPU_R10000:
1598 	case CPU_R12000:
1599 	case CPU_R14000:
1600 		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
1601 		c->scache.linesz = 64 << ((config >> 13) & 1);
1602 		c->scache.ways = 2;
1603 		c->scache.waybit= 0;
1604 		sc_present = 1;
1605 		break;
1606 
1607 	case CPU_R5000:
1608 	case CPU_NEVADA:
1609 #ifdef CONFIG_R5000_CPU_SCACHE
1610 		r5k_sc_init();
1611 #endif
1612 		return;
1613 
1614 	case CPU_RM7000:
1615 #ifdef CONFIG_RM7000_CPU_SCACHE
1616 		rm7k_sc_init();
1617 #endif
1618 		return;
1619 
1620 #if defined(CONFIG_CPU_LOONGSON2)
1621 	case CPU_LOONGSON2:
1622 		loongson2_sc_init();
1623 		return;
1624 #endif
1625 	case CPU_XLP:
1626 		/* don't need to worry about L2, fully coherent */
1627 		return;
1628 
1629 	default:
1630 		if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
1631 				    MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2 |
1632 				    MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) {
1633 #ifdef CONFIG_MIPS_CPU_SCACHE
1634 			if (mips_sc_init ()) {
1635 				scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
1636 				printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
1637 				       scache_size >> 10,
1638 				       way_string[c->scache.ways], c->scache.linesz);
1639 			}
1640 #else
1641 			if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
1642 				panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
1643 #endif
1644 			return;
1645 		}
1646 		sc_present = 0;
1647 	}
1648 
1649 	if (!sc_present)
1650 		return;
1651 
1652 	/* compute a couple of other cache variables */
1653 	c->scache.waysize = scache_size / c->scache.ways;
1654 
1655 	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
1656 
1657 	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
1658 	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
1659 
1660 	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
1661 }
1662 
1663 void au1x00_fixup_config_od(void)
1664 {
1665 	/*
1666 	 * c0_config.od (bit 19) was write only (and read as 0)
1667 	 * on the early revisions of Alchemy SOCs.  It disables the bus
1668 	 * transaction overlapping and needs to be set to fix various errata.
1669 	 */
1670 	switch (read_c0_prid()) {
1671 	case 0x00030100: /* Au1000 DA */
1672 	case 0x00030201: /* Au1000 HA */
1673 	case 0x00030202: /* Au1000 HB */
1674 	case 0x01030200: /* Au1500 AB */
1675 	/*
1676 	 * The Au1100 errata are actually silent about this bit, so we set it
1677 	 * just in case for those revisions that require it to be set according
1678 	 * to the (now gone) cpu table.
1679 	 */
1680 	case 0x02030200: /* Au1100 AB */
1681 	case 0x02030201: /* Au1100 BA */
1682 	case 0x02030202: /* Au1100 BC */
1683 		set_c0_config(1 << 19);
1684 		break;
1685 	}
1686 }
1687 
1688 /* CP0 hazard avoidance. */
1689 #define NXP_BARRIER()							\
1690 	 __asm__ __volatile__(						\
1691 	".set noreorder\n\t"						\
1692 	"nop; nop; nop; nop; nop; nop;\n\t"				\
1693 	".set reorder\n\t")
1694 
1695 static void nxp_pr4450_fixup_config(void)
1696 {
1697 	unsigned long config0;
1698 
1699 	config0 = read_c0_config();
1700 
1701 	/* clear all three cache coherency fields */
1702 	config0 &= ~(0x7 | (7 << 25) | (7 << 28));
1703 	config0 |= (((_page_cachable_default >> _CACHE_SHIFT) <<  0) |
1704 		    ((_page_cachable_default >> _CACHE_SHIFT) << 25) |
1705 		    ((_page_cachable_default >> _CACHE_SHIFT) << 28));
1706 	write_c0_config(config0);
1707 	NXP_BARRIER();
1708 }
1709 
1710 unsigned int mips_cca = INT_MIN | K_CALG_NONCOHERENT;
1711 
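/* "cca=" early kernel parameter: overrides the cache coherency attribute
 * (0-7) used for _page_cachable_default; out-of-range values make
 * coherency_setup() fall back to whatever c0_config already selects. */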
1712 static int __init cca_setup(char *str)
1713 {
1714 	get_option(&str, &mips_cca);
1715 
1716 	return 0;
1717 }
1718 
1719 early_param("cca", cca_setup);
1720 
1721 static void __cpuinit coherency_setup(void)
1722 {
1723 	if (mips_cca < 0 || mips_cca > 7)
1724 		mips_cca = read_c0_config() & CONF_CM_CMASK;
1725 	_page_cachable_default = mips_cca << _CACHE_SHIFT;
1726 
1727 	pr_debug("Using cache attribute %d\n", mips_cca);
1728 	change_c0_config(CONF_CM_CMASK, mips_cca);
1729 
1730 	/*
1731 	 * c0_status.cu=0 specifies that updates by the sc instruction use
1732 	 * the coherency mode specified by the TLB; 1 means cachable
1733 	 * coherent update on write will be used.  Not all processors have
1734 	 * this bit; some wire it to zero, others like Toshiba had the
1735 	 * silly idea of putting something else there ...
1736 	 */
1737 	switch (current_cpu_type()) {
1738 	case CPU_R4000PC:
1739 	case CPU_R4000SC:
1740 	case CPU_R4000MC:
1741 	case CPU_R4400PC:
1742 	case CPU_R4400SC:
1743 	case CPU_R4400MC:
1744 		clear_c0_config(CONF_CU);
1745 		break;
1746 	/*
1747 	 * We need to catch the early Alchemy SOCs with
1748 	 * the write-only c0_config.od bit and set it back to one on:
1749 	 * Au1000 rev DA, HA, HB;  Au1100 AB, BA, BC, Au1500 AB
1750 	 */
1751 	case CPU_ALCHEMY:
1752 		au1x00_fixup_config_od();
1753 		break;
1754 
1755 	case PRID_IMP_PR4450:
1756 		nxp_pr4450_fixup_config();
1757 		break;
1758 	}
1759 }
1760 
1761 static void __cpuinit r4k_cache_error_setup(void)
1762 {
1763 	extern char __weak except_vec2_generic;
1764 	extern char __weak except_vec2_sb1;
1765 	struct cpuinfo_mips *c = &current_cpu_data;
1766 
1767 	switch (c->cputype) {
1768 	case CPU_SB1:
1769 	case CPU_SB1A:
1770 		set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
1771 		break;
1772 
1773 	default:
1774 		set_uncached_handler(0x100, &except_vec2_generic, 0x80);
1775 		break;
1776 	}
1777 }
1778 
1779 void __cpuinit r4k_cache_init(void)
1780 {
1781 	extern void build_clear_page(void);
1782 	extern void build_copy_page(void);
1783 	struct cpuinfo_mips *c = &current_cpu_data;
1784 
1785 	probe_pcache();
1786 	setup_scache();
1787 
1788 	r4k_blast_dcache_page_setup();
1789 	r4k_blast_dcache_page_indexed_setup();
1790 	r4k_blast_dcache_setup();
1791 	r4k_blast_icache_page_setup();
1792 	r4k_blast_icache_page_indexed_setup();
1793 	r4k_blast_icache_setup();
1794 	r4k_blast_scache_page_setup();
1795 	r4k_blast_scache_page_indexed_setup();
1796 	r4k_blast_scache_setup();
1797 #ifdef CONFIG_EVA
1798 	r4k_blast_dcache_user_page_setup();
1799 	r4k_blast_icache_user_page_setup();
1800 #endif
1801 
1802 	/*
1803 	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
1804 	 * This code supports virtually indexed processors and will be
1805 	 * unnecessarily inefficient on physically indexed processors.
1806 	 */
1807 	if (c->dcache.linesz && cpu_has_dc_aliases)
1808 		shm_align_mask = max_t( unsigned long,
1809 					c->dcache.sets * c->dcache.linesz - 1,
1810 					PAGE_SIZE - 1);
1811 	else
1812 		shm_align_mask = PAGE_SIZE-1;
1813 
1814 	__flush_cache_vmap	= r4k__flush_cache_vmap;
1815 	__flush_cache_vunmap	= r4k__flush_cache_vunmap;
1816 
1817 	flush_cache_all		= cache_noop;
1818 	__flush_cache_all	= r4k___flush_cache_all;
1819 	flush_cache_mm		= r4k_flush_cache_mm;
1820 	flush_cache_page	= r4k_flush_cache_page;
1821 	flush_cache_range	= r4k_flush_cache_range;
1822 
1823 	__flush_kernel_vmap_range = r4k_flush_kernel_vmap_range;
1824 
1825 	flush_cache_sigtramp	= r4k_flush_cache_sigtramp;
1826 	flush_icache_all	= r4k_flush_icache_all;
1827 	local_flush_data_cache_page	= local_r4k_flush_data_cache_page;
1828 	flush_data_cache_page	= r4k_flush_data_cache_page;
1829 	mips_flush_data_cache_range = r4k_mips_flush_data_cache_range;
1830 	flush_icache_range	= r4k_flush_icache_range;
1831 	local_flush_icache_range	= local_r4k_flush_icache_range;
1832 
1833 #if defined(CONFIG_DMA_NONCOHERENT)
1834 	if (coherentio > 0) {
1835 		_dma_cache_wback_inv	= (void *)cache_noop;
1836 		_dma_cache_wback	= (void *)cache_noop;
1837 		_dma_cache_inv		= (void *)cache_noop;
1838 	} else {
1839 		_dma_cache_wback_inv	= r4k_dma_cache_wback_inv;
1840 		_dma_cache_wback	= r4k_dma_cache_wback_inv;
1841 		_dma_cache_inv		= r4k_dma_cache_inv;
1842 	}
1843 #endif
1844 
1845 	build_clear_page();
1846 	build_copy_page();
1847 
1848 	/*
1849 	 * We want to run CMP kernels on cores with and without coherent
1850 	 * caches. Therefore, do not use CONFIG_MIPS_CMP to decide whether
1851 	 * or not to flush caches.
1852 	 */
1853 	local_r4k___flush_cache_all(NULL);
1854 #ifdef CONFIG_EVA
1855 	/* This is done just in case some address aliasing exists on the
1856 	   board, like the old Malta memory map. Doesn't hurt anyway. LY22 */
1857 	smp_wmb();
1858 	r4k_blast_scache();
1859 	smp_wmb();
1860 #endif
1861 
1862 	coherency_setup();
1863 	board_cache_error_setup = r4k_cache_error_setup;
1864 }
1865