/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/cpu_pm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */
#include <asm/traps.h>
#include <asm/dma-coherence.h>
#include <asm/mips-cm.h>

/*
 * Special variant of smp_call_function for use by cache functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 *  o doesn't disable interrupts on the local CPU
 */
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
{
	preempt_disable();

	/*
	 * The Coherent Manager propagates address-based cache ops to other
	 * cores but not index-based ops. However, r4k_on_each_cpu is used
	 * in both cases so there is no easy way to tell what kind of op is
	 * executed to the other cores. The best we can probably do is
	 * to restrict that call when a CM is not present because both
	 * CM-based SMP protocols (CMP & CPS) restrict index-based cache ops.
	 */
	if (!mips_cm_present())
		smp_call_function_many(&cpu_foreign_map, func, info, 1);
	func(info);
	preempt_enable();
}

#if defined(CONFIG_MIPS_CMP) || defined(CONFIG_MIPS_CPS)
#define cpu_has_safe_index_cacheops 0
#else
#define cpu_has_safe_index_cacheops 1
#endif
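
/*
 * Index-based cache ops select a line by cache index rather than by
 * address, so the Coherence Manager cannot propagate them to the other
 * cores; hence they are only treated as "safe" for local use when no
 * CM-based SMP protocol (CMP/CPS) is configured.
 */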

/*
 * Must die.
 */
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;

/*
 * Dummy cache handling routines for machines without board caches
 */
static void cache_noop(void) {}

static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)cache_noop,
	.bc_disable = (void *)cache_noop,
	.bc_wback_inv = (void *)cache_noop,
	.bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;

#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)

#define R4600_HIT_CACHEOP_WAR_IMPL					\
do {									\
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
		*(volatile unsigned long *)CKSEG1;			\
	if (R4600_V1_HIT_CACHEOP_WAR)					\
		__asm__ __volatile__("nop;nop;nop;nop");		\
} while (0)
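
/*
 * How the workaround above operates: on R4600 v2.x a dummy uncached
 * load (any read through CKSEG1) is slipped in before a Hit cache op,
 * and on v1.x a few nops are used instead; presumably this keeps the
 * affected pipelines from mishandling the following cache op.  The WAR
 * conditions evaluate to 0 on unaffected platforms, so the compiler
 * drops the whole macro there.
 */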

static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache32_page(addr);
}

static inline void r4k_blast_dcache_page_dc64(unsigned long addr)
{
	blast_dcache64_page(addr);
}

static inline void r4k_blast_dcache_page_dc128(unsigned long addr)
{
	blast_dcache128_page(addr);
}

static void r4k_blast_dcache_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	switch (dc_lsize) {
	case 0:
		r4k_blast_dcache_page = (void *)cache_noop;
		break;
	case 16:
		r4k_blast_dcache_page = blast_dcache16_page;
		break;
	case 32:
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
		break;
	case 64:
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc64;
		break;
	case 128:
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc128;
		break;
	default:
		break;
	}
}
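
/*
 * This and the other *_setup() routines below follow the same pattern:
 * the cache geometry is probed once at boot and the matching
 * line-size-specific "blast" helper is bound to a function pointer, so
 * the hot paths never have to branch on the line size again.
 */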

#ifndef CONFIG_EVA
#define r4k_blast_dcache_user_page  r4k_blast_dcache_page
#else

static void (*r4k_blast_dcache_user_page)(unsigned long addr);

static void r4k_blast_dcache_user_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_user_page = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_user_page = blast_dcache16_user_page;
	else if (dc_lsize == 32)
		r4k_blast_dcache_user_page = blast_dcache32_user_page;
	else if (dc_lsize == 64)
		r4k_blast_dcache_user_page = blast_dcache64_user_page;
}

#endif

static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);

static void r4k_blast_dcache_page_indexed_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page_indexed = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
	else if (dc_lsize == 64)
		r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
	else if (dc_lsize == 128)
		r4k_blast_dcache_page_indexed = blast_dcache128_page_indexed;
}

void (* r4k_blast_dcache)(void);
EXPORT_SYMBOL(r4k_blast_dcache);

static void r4k_blast_dcache_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache = blast_dcache16;
	else if (dc_lsize == 32)
		r4k_blast_dcache = blast_dcache32;
	else if (dc_lsize == 64)
		r4k_blast_dcache = blast_dcache64;
	else if (dc_lsize == 128)
		r4k_blast_dcache = blast_dcache128;
}

/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order) \
	__asm__ __volatile__( \
		"b\t1f\n\t" \
		".align\t" #order "\n\t" \
		"1:\n\t" \
		)
#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11)

static inline void blast_r4600_v1_icache32(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32();
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}
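
/*
 * The even/odd chunk dance above appears to work around the TX49
 * index-invalidate trouble of the flushing code invalidating the very
 * icache lines it is executing from: cache32_unroll32() covers 1kB
 * (32 lines * 32 bytes) per call, so by aligning the loop code and
 * blasting only the opposite 1kB-aligned halves in each pass, the loop
 * never invalidates the chunk it is currently running in.
 */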

static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32_page_indexed(page);
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
	unsigned long indexmask = current_cpu_data.icache.waysize - 1;
	unsigned long start = INDEX_BASE + (page & indexmask);
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static void (* r4k_blast_icache_page)(unsigned long addr);

static void r4k_blast_icache_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page = blast_icache16_page;
	else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2)
		r4k_blast_icache_page = loongson2_blast_icache32_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_page = blast_icache32_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_page = blast_icache64_page;
	else if (ic_lsize == 128)
		r4k_blast_icache_page = blast_icache128_page;
}

#ifndef CONFIG_EVA
#define r4k_blast_icache_user_page  r4k_blast_icache_page
#else

static void (*r4k_blast_icache_user_page)(unsigned long addr);

static void r4k_blast_icache_user_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_user_page = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_user_page = blast_icache16_user_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_user_page = blast_icache32_user_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_user_page = blast_icache64_user_page;
}

#endif

static void (* r4k_blast_icache_page_indexed)(unsigned long addr);

static void r4k_blast_icache_page_indexed_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page_indexed = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache_page_indexed =
				blast_icache32_r4600_v1_page_indexed;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache_page_indexed =
				tx49_blast_icache32_page_indexed;
		else if (current_cpu_type() == CPU_LOONGSON2)
			r4k_blast_icache_page_indexed =
				loongson2_blast_icache32_page_indexed;
		else
			r4k_blast_icache_page_indexed =
				blast_icache32_page_indexed;
	} else if (ic_lsize == 64)
		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}

void (* r4k_blast_icache)(void);
EXPORT_SYMBOL(r4k_blast_icache);

static void r4k_blast_icache_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache = blast_icache16;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache = blast_r4600_v1_icache32;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache = tx49_blast_icache32;
		else if (current_cpu_type() == CPU_LOONGSON2)
			r4k_blast_icache = loongson2_blast_icache32;
		else
			r4k_blast_icache = blast_icache32;
	} else if (ic_lsize == 64)
		r4k_blast_icache = blast_icache64;
	else if (ic_lsize == 128)
		r4k_blast_icache = blast_icache128;
}

static void (* r4k_blast_scache_page)(unsigned long addr);

static void r4k_blast_scache_page_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page = blast_scache16_page;
	else if (sc_lsize == 32)
		r4k_blast_scache_page = blast_scache32_page;
	else if (sc_lsize == 64)
		r4k_blast_scache_page = blast_scache64_page;
	else if (sc_lsize == 128)
		r4k_blast_scache_page = blast_scache128_page;
}

static void (* r4k_blast_scache_page_indexed)(unsigned long addr);

static void r4k_blast_scache_page_indexed_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page_indexed = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
	else if (sc_lsize == 32)
		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
	else if (sc_lsize == 64)
		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
	else if (sc_lsize == 128)
		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}

static void (* r4k_blast_scache)(void);

static void r4k_blast_scache_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache = blast_scache16;
	else if (sc_lsize == 32)
		r4k_blast_scache = blast_scache32;
	else if (sc_lsize == 64)
		r4k_blast_scache = blast_scache64;
	else if (sc_lsize == 128)
		r4k_blast_scache = blast_scache128;
}

static inline void local_r4k___flush_cache_all(void * args)
{
	switch (current_cpu_type()) {
	case CPU_LOONGSON2:
	case CPU_LOONGSON3:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		/*
		 * These caches are inclusive caches, that is, if something
		 * is not cached in the S-cache, we know it also won't be
		 * in one of the primary caches.
		 */
		r4k_blast_scache();
		break;

	default:
		r4k_blast_dcache();
		r4k_blast_icache();
		break;
	}
}

static void r4k___flush_cache_all(void)
{
	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL);
}

static inline int has_valid_asid(const struct mm_struct *mm)
{
#ifdef CONFIG_MIPS_MT_SMP
	int i;

	for_each_online_cpu(i)
		if (cpu_context(i, mm))
			return 1;

	return 0;
#else
	return cpu_context(smp_processor_id(), mm);
#endif
}
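
/*
 * has_valid_asid() above: a non-zero cpu_context(cpu, mm) means the mm
 * has been given an ASID on that CPU, i.e. its pages may be sitting in
 * that CPU's caches.  Under CONFIG_MIPS_MT_SMP every online CPU is
 * checked, presumably because the virtual processors share primary
 * caches, so any VPE's context makes a flush necessary.
 */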

static void r4k__flush_cache_vmap(void)
{
	r4k_blast_dcache();
}

static void r4k__flush_cache_vunmap(void)
{
	r4k_blast_dcache();
}

static inline void local_r4k_flush_cache_range(void * args)
{
	struct vm_area_struct *vma = args;
	int exec = vma->vm_flags & VM_EXEC;

	if (!(has_valid_asid(vma->vm_mm)))
		return;

	r4k_blast_dcache();
	if (exec)
		r4k_blast_icache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	int exec = vma->vm_flags & VM_EXEC;

	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
		r4k_on_each_cpu(local_r4k_flush_cache_range, vma);
}

static inline void local_r4k_flush_cache_mm(void * args)
{
	struct mm_struct *mm = args;

	if (!has_valid_asid(mm))
		return;

	/*
	 * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if
	 * we only flush the primary caches, but R10000 and R12000 behave
	 * sanely.  R4000SC and R4400SC indexed S-cache ops also invalidate
	 * the primary caches, so we can bail out early.
	 */
	if (current_cpu_type() == CPU_R4000SC ||
	    current_cpu_type() == CPU_R4000MC ||
	    current_cpu_type() == CPU_R4400SC ||
	    current_cpu_type() == CPU_R4400MC) {
		r4k_blast_scache();
		return;
	}

	r4k_blast_dcache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm);
}
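
/*
 * r4k_flush_cache_mm() only has work to do when the D-cache suffers
 * from virtual aliases; on alias-free CPUs the data stays reachable
 * through any mapping, so the whole flush can be skipped.
 */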

struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long pfn;
};

static inline void local_r4k_flush_cache_page(void *args)
{
	struct flush_cache_page_args *fcp_args = args;
	struct vm_area_struct *vma = fcp_args->vma;
	unsigned long addr = fcp_args->addr;
	struct page *page = pfn_to_page(fcp_args->pfn);
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	int map_coherent = 0;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	void *vaddr;

	/*
	 * If the mm owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (!has_valid_asid(mm))
		return;

	addr &= PAGE_MASK;
	pgdp = pgd_offset(mm, addr);
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	ptep = pte_offset(pmdp, addr);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_present(*ptep)))
		return;

	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
		vaddr = NULL;
	else {
		/*
		 * Use kmap_coherent or kmap_atomic to do flushes for
		 * another ASID than the current one.
		 */
		map_coherent = (cpu_has_dc_aliases &&
				page_mapped(page) && !Page_dcache_dirty(page));
		if (map_coherent)
			vaddr = kmap_coherent(page, addr);
		else
			vaddr = kmap_atomic(page);
		addr = (unsigned long)vaddr;
	}

	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
		vaddr ? r4k_blast_dcache_page(addr) :
			r4k_blast_dcache_user_page(addr);
		if (exec && !cpu_icache_snoops_remote_store)
			r4k_blast_scache_page(addr);
	}
	if (exec) {
		if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
			int cpu = smp_processor_id();

			if (cpu_context(cpu, mm) != 0)
				drop_mmu_context(mm, cpu);
		} else
			vaddr ? r4k_blast_icache_page(addr) :
				r4k_blast_icache_user_page(addr);
	}

	if (vaddr) {
		if (map_coherent)
			kunmap_coherent();
		else
			kunmap_atomic(vaddr);
	}
}
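
/*
 * Two mapping strategies show up above for pages of a foreign address
 * space: kmap_coherent() maps the page at a kernel virtual address of
 * the same cache color as the user mapping, so hit-type ops reach
 * exactly the lines the user touched, while kmap_atomic() serves as the
 * fallback when such a mapping isn't usable (no mapped user page, or
 * the page is already dirty in the cache).
 */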

static void r4k_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	struct flush_cache_page_args args;

	args.vma = vma;
	args.addr = addr;
	args.pfn = pfn;

	r4k_on_each_cpu(local_r4k_flush_cache_page, &args);
}

static inline void local_r4k_flush_data_cache_page(void * addr)
{
	r4k_blast_dcache_page((unsigned long) addr);
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
	if (in_atomic())
		local_r4k_flush_data_cache_page((void *)addr);
	else
		r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr);
}

struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};

static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	if (!cpu_has_ic_fills_f_dc) {
		if (end - start >= dcache_size) {
			r4k_blast_dcache();
		} else {
			R4600_HIT_CACHEOP_WAR_IMPL;
			protected_blast_dcache_range(start, end);
		}
	}

	if (end - start > icache_size)
		r4k_blast_icache();
	else {
		switch (boot_cpu_type()) {
		case CPU_LOONGSON2:
			protected_loongson2_blast_icache_range(start, end);
			break;

		default:
			protected_blast_icache_range(start, end);
			break;
		}
	}
#ifdef CONFIG_EVA
	/*
	 * Due to all possible segment mappings, there might be cache aliases
	 * caused by the bootloader being in non-EVA mode and the CPU switching
	 * to EVA during early kernel init.  It's best to flush the scache
	 * to avoid having secondary cores fetch stale data and crash the
	 * kernel.
	 */
	bc_wback_inv(start, (end - start));
	__sync();
#endif
}
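
/*
 * Ordering in local_r4k_flush_icache_range() matters: unless the
 * I-cache refills straight from the D-cache, the D-cache range is
 * written back first so that the following I-cache invalidate makes
 * the CPU refetch the new instructions from memory.  The protected_*
 * range ops tolerate faulting user addresses, and a full index-based
 * blast is used once the range is at least as large as the cache
 * itself.
 */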

static inline void local_r4k_flush_icache_range_ipi(void *args)
{
	struct flush_icache_range_args *fir_args = args;
	unsigned long start = fir_args->start;
	unsigned long end = fir_args->end;

	local_r4k_flush_icache_range(start, end);
}

static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;

	r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args);
	instruction_hazard();
}

#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)

static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	preempt_disable();
	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		preempt_enable();
		__sync();
		return;
	}

	/*
	 * Either no secondary cache or the available caches don't have the
	 * subset property so we have to flush the primary caches
	 * explicitly
	 */
	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}
	preempt_enable();

	bc_wback_inv(addr, size);
	__sync();
}
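
/*
 * The DMA writeback strategy in a nutshell: where the primary caches
 * are a subset of the S-cache (cpu_has_inclusive_pcaches), flushing the
 * S-cache range alone suffices; otherwise the D-cache is flushed
 * directly, using address-range hit ops whenever index ops are unsafe
 * or the buffer is smaller than the cache.  bc_wback_inv() then handles
 * any board-level cache and __sync() orders the sequence before the
 * caller starts the DMA.
 */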

static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	preempt_disable();
	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else {
			/*
			 * There is no clearly documented alignment requirement
			 * for the cache instruction on MIPS processors, and
			 * some processors, among them the RM5200 and RM7000
			 * QED processors, will throw an address error for
			 * cache hit ops with insufficient alignment.  This is
			 * solved by aligning the address to the cache line
			 * size.
			 */
			blast_inv_scache_range(addr, addr + size);
		}
		preempt_enable();
		__sync();
		return;
	}

	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_inv_dcache_range(addr, addr + size);
	}
	preempt_enable();

	bc_inv(addr, size);
	__sync();
}
#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */

/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void * arg)
{
	unsigned long ic_lsize = cpu_icache_line_size();
	unsigned long dc_lsize = cpu_dcache_line_size();
	unsigned long sc_lsize = cpu_scache_line_size();
	unsigned long addr = (unsigned long) arg;

	R4600_HIT_CACHEOP_WAR_IMPL;
	if (dc_lsize)
		protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	if (!cpu_icache_snoops_remote_store && scache_size)
		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
	if (ic_lsize)
		protected_flush_icache_line(addr & ~(ic_lsize - 1));
	if (MIPS4K_ICACHE_REFILL_WAR) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noat\n\t"
			".set "MIPS_ISA_LEVEL"\n\t"
#ifdef CONFIG_32BIT
			"la	$at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
			"dla	$at,1f\n\t"
#endif
			"cache	%0,($at)\n\t"
			"nop; nop; nop\n"
			"1:\n\t"
			".set pop"
			:
			: "i" (Hit_Invalidate_I));
	}
	if (MIPS_CACHE_SYNC_WAR)
		__asm__ __volatile__ ("sync");
}

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr);
}

static void r4k_flush_icache_all(void)
{
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}
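
/*
 * r4k_flush_icache_all() only acts on virtually tagged I-caches: their
 * lines are tagged by virtual address, so they can go stale when the
 * virtual-to-physical mapping changes, whereas a physically tagged
 * I-cache stays valid and needs no blanket flush here.
 */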

struct flush_kernel_vmap_range_args {
	unsigned long	vaddr;
	int		size;
};

static inline void local_r4k_flush_kernel_vmap_range(void *args)
{
	struct flush_kernel_vmap_range_args *vmra = args;
	unsigned long vaddr = vmra->vaddr;
	int size = vmra->size;

	/*
	 * Aliases only affect the primary caches so don't bother with
	 * S-caches or T-caches.
	 */
	if (cpu_has_safe_index_cacheops && size >= dcache_size)
		r4k_blast_dcache();
	else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(vaddr, vaddr + size);
	}
}

static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
	struct flush_kernel_vmap_range_args args;

	args.vaddr = (unsigned long) vaddr;
	args.size = size;

	r4k_on_each_cpu(local_r4k_flush_kernel_vmap_range, &args);
}

static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
	unsigned long addr;

	/* RM7000 erratum #31. The icache is screwed at startup. */
	write_c0_taglo(0);
	write_c0_taghi(0);

	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			"cache\t%2, 0(%0)\n\t"
			"cache\t%2, 0x1000(%0)\n\t"
			"cache\t%2, 0x2000(%0)\n\t"
			"cache\t%2, 0x3000(%0)\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			".set pop\n"
			:
			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
	}
}

static inline int alias_74k_erratum(struct cpuinfo_mips *c)
{
	unsigned int imp = c->processor_id & PRID_IMP_MASK;
	unsigned int rev = c->processor_id & PRID_REV_MASK;
	int present = 0;

	/*
	 * Early versions of the 74K do not update the cache tags on a
	 * vtag miss/ptag hit which can occur in the case of KSEG0/KUSEG
	 * aliases.  In this case it is better to treat the cache as always
	 * having aliases.  Also disable the synonym tag update feature
	 * where available.  In this case no opportunistic tag update will
	 * happen where a load causes a virtual address miss but a physical
	 * address hit during a D-cache look-up.
	 */
	switch (imp) {
	case PRID_IMP_74K:
		if (rev <= PRID_REV_ENCODE_332(2, 4, 0))
			present = 1;
		if (rev == PRID_REV_ENCODE_332(2, 4, 0))
			write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
		break;
	case PRID_IMP_1074K:
		if (rev <= PRID_REV_ENCODE_332(1, 1, 0)) {
			present = 1;
			write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
		}
		break;
	default:
		BUG();
	}

	return present;
}

static char *way_string[] = { NULL, "direct mapped", "2-way",
	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way",
	"9-way", "10-way", "11-way", "12-way",
	"13-way", "14-way", "15-way", "16-way",
};

static void probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	int has_74k_erratum = 0;
	unsigned long config1;
	unsigned int lsize;

	switch (current_cpu_type()) {
	case CPU_R4600:			/* QED style two way caches? */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R5432:
	case CPU_R5500:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P | MIPS_CPU_PREFETCH;
		break;

	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R4300:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
		c->icache.linesz = 64;
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
		c->dcache.linesz = 32;
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_VR4133:
		write_c0_config(config & ~VR41_CONF_P4K);
	case CPU_VR4131:
		/* Workaround for cache instruction bug of VR4131 */
		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
		    c->processor_id == 0x0c82U) {
			config |= 0x00400000U;
			if (c->processor_id == 0x0c80U)
				config |= VR41_CONF_BP;
			write_c0_config(config);
		} else
			c->options |= MIPS_CPU_CACHE_CDEX_P;

		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);
		break;

	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_RM7000:
		rm7k_erratum31();

		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = __ffs(icache_size / c->icache.ways);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_LOONGSON2:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		if (prid & 0x3)
			c->icache.ways = 4;
		else
			c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		if (prid & 0x3)
			c->dcache.ways = 4;
		else
			c->dcache.ways = 2;
		c->dcache.waybit = 0;
		break;

	case CPU_LOONGSON3:
		config1 = read_c0_config1();
		lsize = (config1 >> 19) & 7;
		if (lsize)
			c->icache.linesz = 2 << lsize;
		else
			c->icache.linesz = 0;
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);
		icache_size = c->icache.sets *
			      c->icache.ways *
			      c->icache.linesz;
		c->icache.waybit = 0;

		lsize = (config1 >> 10) & 7;
		if (lsize)
			c->dcache.linesz = 2 << lsize;
		else
			c->dcache.linesz = 0;
		c->dcache.sets = 64 << ((config1 >> 13) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);
		dcache_size = c->dcache.sets *
			      c->dcache.ways *
			      c->dcache.linesz;
		c->dcache.waybit = 0;
		break;

	case CPU_CAVIUM_OCTEON3:
		/* For now lie about the number of ways. */
		c->icache.linesz = 128;
		c->icache.sets = 16;
		c->icache.ways = 8;
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

		c->dcache.linesz = 128;
		c->dcache.ways = 8;
		c->dcache.sets = 8;
		dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	default:
		if (!(config & MIPS_CONF_M))
			panic("Don't know how to probe P-caches on this cpu.");

		/*
		 * So we seem to be a MIPS32 or MIPS64 CPU; let's probe
		 * the I-cache ...
		 */
		config1 = read_c0_config1();

		lsize = (config1 >> 19) & 7;

		/* IL == 7 is reserved */
		if (lsize == 7)
			panic("Invalid icache line size");

		c->icache.linesz = lsize ? 2 << lsize : 0;

		c->icache.sets = 32 << (((config1 >> 22) + 1) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);

		icache_size = c->icache.sets *
			      c->icache.ways *
			      c->icache.linesz;
		c->icache.waybit = __ffs(icache_size/c->icache.ways);

		if (config & 0x8)		/* VI bit */
			c->icache.flags |= MIPS_CACHE_VTAG;

		/*
		 * Now probe the MIPS32 / MIPS64 data cache.
		 */
		c->dcache.flags = 0;

		lsize = (config1 >> 10) & 7;

		/* DL == 7 is reserved */
		if (lsize == 7)
			panic("Invalid dcache line size");

		c->dcache.linesz = lsize ? 2 << lsize : 0;

		c->dcache.sets = 32 << (((config1 >> 13) + 1) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);

		dcache_size = c->dcache.sets *
			      c->dcache.ways *
			      c->dcache.linesz;
		c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

		c->options |= MIPS_CPU_PREFETCH;
		break;
	}
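
	/*
	 * A worked example of the generic Config1 decoding above: an IL/DL
	 * field of 4 gives a line size of 2 << 4 = 32 bytes, a sets code
	 * of 2 gives 32 << ((2 + 1) & 7) = 256 sets, and a ways field of 3
	 * gives 1 + 3 = 4 ways, i.e. a 32kB cache.  The "+ 1 modulo 8"
	 * trick maps the sets code 7 onto 32 sets.
	 */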

	/*
	 * Processor configuration sanity check for the R4000SC erratum
	 * #5.  With page sizes larger than 32kB there is no possibility
	 * to get a VCE exception anymore so we don't care about this
	 * misconfiguration.  The case is rather theoretical anyway;
	 * presumably no vendor is shipping his hardware in the "bad"
	 * configuration.
	 */
	if ((prid & PRID_IMP_MASK) == PRID_IMP_R4000 &&
	    (prid & PRID_REV_MASK) < PRID_REV_R4400 &&
	    !(config & CONF_SC) && c->icache.linesz != 16 &&
	    PAGE_SIZE <= 0x8000)
		panic("Improper R4000SC processor configuration detected");

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = c->icache.linesz ?
		icache_size / (c->icache.linesz * c->icache.ways) : 0;
	c->dcache.sets = c->dcache.linesz ?
		dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;

	/*
	 * R10000 and R12000 P-caches are odd in a positive way.  They're
	 * 32kB 2-way virtually indexed, so normally they'd suffer from
	 * aliases, but magic in the hardware deals with that for us so we
	 * don't need to take care ourselves.
	 */
	switch (current_cpu_type()) {
	case CPU_20KC:
	case CPU_25KF:
	case CPU_SB1:
	case CPU_SB1A:
	case CPU_XLR:
		c->dcache.flags |= MIPS_CACHE_PINDEX;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		break;

	case CPU_74K:
	case CPU_1074K:
		has_74k_erratum = alias_74k_erratum(c);
		/* Fall through. */
	case CPU_M14KC:
	case CPU_M14KEC:
	case CPU_24K:
	case CPU_34K:
	case CPU_1004K:
	case CPU_INTERAPTIV:
	case CPU_P5600:
	case CPU_PROAPTIV:
	case CPU_M5150:
	case CPU_QEMU_GENERIC:
		if (!(read_c0_config7() & MIPS_CONF7_IAR) &&
		    (c->icache.waysize > PAGE_SIZE))
			c->icache.flags |= MIPS_CACHE_ALIASES;
		if (!has_74k_erratum && (read_c0_config7() & MIPS_CONF7_AR)) {
			/*
			 * Effectively physically indexed dcache,
			 * thus no virtual aliases.
			 */
			c->dcache.flags |= MIPS_CACHE_PINDEX;
			break;
		}
	default:
		if (has_74k_erratum || c->dcache.waysize > PAGE_SIZE)
			c->dcache.flags |= MIPS_CACHE_ALIASES;
	}

	switch (current_cpu_type()) {
	case CPU_20KC:
		/*
		 * Some older 20Kc chips don't have the 'VI' bit in
		 * the config register.
		 */
		c->icache.flags |= MIPS_CACHE_VTAG;
		break;

	case CPU_ALCHEMY:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		break;

	case CPU_LOONGSON2:
		/*
		 * LOONGSON2 has a 4-way icache, but when using indexed
		 * cache ops one op acts on all 4 ways.
		 */
		c->icache.ways = 1;
	}

	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
	       icache_size >> 10,
	       c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT",
	       way_string[c->icache.ways], c->icache.linesz);

	printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
	       dcache_size >> 10, way_string[c->dcache.ways],
	       (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
	       (c->dcache.flags & MIPS_CACHE_ALIASES) ?
			"cache aliases" : "no aliases",
	       c->dcache.linesz);
}

/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
static int probe_scache(void)
{
	unsigned long flags, addr, begin, end, pow2;
	unsigned int config = read_c0_config();
	struct cpuinfo_mips *c = &current_cpu_data;

	if (config & CONF_SC)
		return 0;

	begin = (unsigned long) &_stext;
	begin &= ~((4 * 1024 * 1024) - 1);
	end = begin + (4 * 1024 * 1024);

	/*
	 * This is such a bitch, you'd think they would make it easy to do
	 * this.  Away you daemons of stupidity!
	 */
	local_irq_save(flags);

	/* Fill each size-multiple cache line with a valid tag. */
	pow2 = (64 * 1024);
	for (addr = begin; addr < end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
		pow2 <<= 1;
	}

	/* Load first line with zero (therefore invalid) tag. */
	write_c0_taglo(0);
	write_c0_taghi(0);
	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
	cache_op(Index_Store_Tag_I, begin);
	cache_op(Index_Store_Tag_D, begin);
	cache_op(Index_Store_Tag_SD, begin);

	/* Now search for the wrap around point. */
	pow2 = (128 * 1024);
	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_SD, addr);
		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}
	local_irq_restore(flags);
	addr -= begin;

	scache_size = addr;
	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
	c->scache.ways = 1;
	c->scache.waybit = 0;		/* does not matter */

	return 1;
}
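
/*
 * The sizing trick in probe_scache() relies on index wrap-around in a
 * direct-mapped S-cache: after a zero (invalid) tag has been stored at
 * index 0, Index_Load_Tag is issued at doubling offsets; the first
 * offset that wraps back onto index 0 reads the zero tag, and that
 * offset equals the cache size.  With a 512kB S-cache, for example, the
 * probe at begin + 512kB is the first one to hit index 0 again.
 */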

static void __init loongson2_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;

	scache_size = 512*1024;
	c->scache.linesz = 32;
	c->scache.ways = 4;
	c->scache.waybit = 0;
	c->scache.waysize = scache_size / (c->scache.ways);
	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}

static void __init loongson3_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config2, lsize;

	config2 = read_c0_config2();
	lsize = (config2 >> 4) & 15;
	if (lsize)
		c->scache.linesz = 2 << lsize;
	else
		c->scache.linesz = 0;
	c->scache.sets = 64 << ((config2 >> 8) & 15);
	c->scache.ways = 1 + (config2 & 15);

	scache_size = c->scache.sets *
		      c->scache.ways *
		      c->scache.linesz;
	/* Loongson-3 has 4 cores with 1MB of scache each, and the scaches are shared */
	scache_size *= 4;
	c->scache.waybit = 0;
	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
	if (scache_size)
		c->options |= MIPS_CPU_INCLUSIVE_CACHES;
	return;
}

extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);

static void setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	int sc_present = 0;

	/*
	 * Do the probing thing on R4000SC and R4400SC processors.  Other
	 * processors don't have an S-cache that would be relevant to the
	 * Linux memory management.
	 */
	switch (current_cpu_type()) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		sc_present = run_uncached(probe_scache);
		if (sc_present)
			c->options |= MIPS_CPU_CACHE_CDEX_S;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
		c->scache.linesz = 64 << ((config >> 13) & 1);
		c->scache.ways = 2;
		c->scache.waybit = 0;
		sc_present = 1;
		break;

	case CPU_R5000:
	case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
		r5k_sc_init();
#endif
		return;

	case CPU_RM7000:
#ifdef CONFIG_RM7000_CPU_SCACHE
		rm7k_sc_init();
#endif
		return;

	case CPU_LOONGSON2:
		loongson2_sc_init();
		return;

	case CPU_LOONGSON3:
		loongson3_sc_init();
		return;

	case CPU_CAVIUM_OCTEON3:
	case CPU_XLP:
		/* don't need to worry about L2, fully coherent */
		return;

	default:
		if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
				    MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 |
				    MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6)) {
#ifdef CONFIG_MIPS_CPU_SCACHE
			if (mips_sc_init()) {
				scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
				printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
				       scache_size >> 10,
				       way_string[c->scache.ways], c->scache.linesz);
			}
#else
			if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
				panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
#endif
			return;
		}
		sc_present = 0;
	}

	if (!sc_present)
		return;

	/* compute a couple of other cache variables */
	c->scache.waysize = scache_size / c->scache.ways;

	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}

void au1x00_fixup_config_od(void)
{
	/*
	 * c0_config.od (bit 19) was write only (and read as 0)
	 * on the early revisions of Alchemy SOCs.  It disables the bus
	 * transaction overlapping and needs to be set to fix various errata.
	 */
	switch (read_c0_prid()) {
	case 0x00030100: /* Au1000 DA */
	case 0x00030201: /* Au1000 HA */
	case 0x00030202: /* Au1000 HB */
	case 0x01030200: /* Au1500 AB */
	/*
	 * The Au1100 errata are silent about this bit, so we set it just
	 * in case for those revisions that required it to be set according
	 * to the (now gone) cpu table.
	 */
	case 0x02030200: /* Au1100 AB */
	case 0x02030201: /* Au1100 BA */
	case 0x02030202: /* Au1100 BC */
		set_c0_config(1 << 19);
		break;
	}
}

/* CP0 hazard avoidance. */
#define NXP_BARRIER()							\
	 __asm__ __volatile__(						\
	".set noreorder\n\t"						\
	"nop; nop; nop; nop; nop; nop;\n\t"				\
	".set reorder\n\t")

static void nxp_pr4450_fixup_config(void)
{
	unsigned long config0;

	config0 = read_c0_config();

	/* clear all three cache coherency fields */
	config0 &= ~(0x7 | (7 << 25) | (7 << 28));
	config0 |= (((_page_cachable_default >> _CACHE_SHIFT) <<  0) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 25) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 28));
	write_c0_config(config0);
	NXP_BARRIER();
}
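
/*
 * The three fields cleared above should be, per the MIPS32 Config
 * register layout, K0 (bits 2:0, kseg0 cacheability), KU (bits 27:25,
 * kuseg) and K23 (bits 30:28, kseg2/kseg3); each is then rewritten with
 * the kernel's default cache coherency attribute.
 */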

static int cca = -1;

static int __init cca_setup(char *str)
{
	get_option(&str, &cca);

	return 0;
}

early_param("cca", cca_setup);

static void coherency_setup(void)
{
	if (cca < 0 || cca > 7)
		cca = read_c0_config() & CONF_CM_CMASK;
	_page_cachable_default = cca << _CACHE_SHIFT;

	pr_debug("Using cache attribute %d\n", cca);
	change_c0_config(CONF_CM_CMASK, cca);

	/*
	 * c0_status.cu=0 specifies that updates by the sc instruction use
	 * the coherency mode specified by the TLB; 1 means cachable
	 * coherent update on write will be used.  Not all processors have
	 * this bit; some wire it to zero, others like Toshiba had the
	 * silly idea of putting something else there ...
	 */
	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		clear_c0_config(CONF_CU);
		break;
	/*
	 * We need to catch the early Alchemy SOCs with
	 * the write-only c0_config.od bit and set it back to one on:
	 * Au1000 rev DA, HA, HB;  Au1100 AB, BA, BC, Au1500 AB
	 */
	case CPU_ALCHEMY:
		au1x00_fixup_config_od();
		break;

	case PRID_IMP_PR4450:
		nxp_pr4450_fixup_config();
		break;
	}
}

static void r4k_cache_error_setup(void)
{
	extern char __weak except_vec2_generic;
	extern char __weak except_vec2_sb1;

	switch (current_cpu_type()) {
	case CPU_SB1:
	case CPU_SB1A:
		set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
		break;

	default:
		set_uncached_handler(0x100, &except_vec2_generic, 0x80);
		break;
	}
}

void r4k_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	struct cpuinfo_mips *c = &current_cpu_data;

	probe_pcache();
	setup_scache();

	r4k_blast_dcache_page_setup();
	r4k_blast_dcache_page_indexed_setup();
	r4k_blast_dcache_setup();
	r4k_blast_icache_page_setup();
	r4k_blast_icache_page_indexed_setup();
	r4k_blast_icache_setup();
	r4k_blast_scache_page_setup();
	r4k_blast_scache_page_indexed_setup();
	r4k_blast_scache_setup();
#ifdef CONFIG_EVA
	r4k_blast_dcache_user_page_setup();
	r4k_blast_icache_user_page_setup();
#endif

	/*
	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
	 * This code supports virtually indexed processors and will be
	 * unnecessarily inefficient on physically indexed processors.
	 */
	if (c->dcache.linesz && cpu_has_dc_aliases)
		shm_align_mask = max_t(unsigned long,
					c->dcache.sets * c->dcache.linesz - 1,
					PAGE_SIZE - 1);
	else
		shm_align_mask = PAGE_SIZE - 1;

	__flush_cache_vmap	= r4k__flush_cache_vmap;
	__flush_cache_vunmap	= r4k__flush_cache_vunmap;

	flush_cache_all		= cache_noop;
	__flush_cache_all	= r4k___flush_cache_all;
	flush_cache_mm		= r4k_flush_cache_mm;
	flush_cache_page	= r4k_flush_cache_page;
	flush_cache_range	= r4k_flush_cache_range;

	__flush_kernel_vmap_range = r4k_flush_kernel_vmap_range;

	flush_cache_sigtramp	= r4k_flush_cache_sigtramp;
	flush_icache_all	= r4k_flush_icache_all;
	local_flush_data_cache_page	= local_r4k_flush_data_cache_page;
	flush_data_cache_page	= r4k_flush_data_cache_page;
	flush_icache_range	= r4k_flush_icache_range;
	local_flush_icache_range	= local_r4k_flush_icache_range;

#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
	if (coherentio) {
		_dma_cache_wback_inv	= (void *)cache_noop;
		_dma_cache_wback	= (void *)cache_noop;
		_dma_cache_inv		= (void *)cache_noop;
	} else {
		_dma_cache_wback_inv	= r4k_dma_cache_wback_inv;
		_dma_cache_wback	= r4k_dma_cache_wback_inv;
		_dma_cache_inv		= r4k_dma_cache_inv;
	}
#endif

	build_clear_page();
	build_copy_page();

	/*
	 * We want to run CMP kernels on cores with and without coherent
	 * caches.  Therefore, do not use CONFIG_MIPS_CMP to decide whether
	 * or not to flush caches.
	 */
	local_r4k___flush_cache_all(NULL);

	coherency_setup();
	board_cache_error_setup = r4k_cache_error_setup;
}

static int r4k_cache_pm_notifier(struct notifier_block *self, unsigned long cmd,
			       void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		coherency_setup();
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block r4k_cache_pm_notifier_block = {
	.notifier_call = r4k_cache_pm_notifier,
};

int __init r4k_cache_init_pm(void)
{
	return cpu_pm_register_notifier(&r4k_cache_pm_notifier_block);
}
arch_initcall(r4k_cache_init_pm);