1 /*
2 * Copyright (C) 2006 Chris Dearman (chris@mips.com),
3 */
4 #include <linux/init.h>
5 #include <linux/kernel.h>
6 #include <linux/sched.h>
7 #include <linux/mm.h>
8
9 #include <asm/mipsregs.h>
10 #include <asm/gcmpregs.h>
11 #include <asm/gic.h>
12 #include <asm/bcache.h>
13 #include <asm/cacheops.h>
14 #include <asm/page.h>
15 #include <asm/pgtable.h>
16 #include <asm/mmu_context.h>
17 #include <asm/r4kcache.h>
18
19 /*
20 * MIPS32/MIPS64 L2 cache handling
21 */
22
23 extern int cm3_l2_init(unsigned long lsize, unsigned long indexbase,
24 unsigned long dcache_size, unsigned long gcmpbase);
25
26 /*
27 * Writeback and invalidate the secondary cache before DMA.
28 */
mips_sc_wback_inv(unsigned long addr,unsigned long size)29 static void mips_sc_wback_inv(unsigned long addr, unsigned long size)
30 {
31 if (!cpu_has_cm2)
32 __sync();
33 blast_scache_range(addr, addr + size);
34 if (cpu_has_cm2_l2sync)
35 *(unsigned long *)(_gcmp_base + GCMP_L2SYNC_OFFSET) = 0;
36 }
37
38 /*
39 * Invalidate the secondary cache before DMA.
40 */
mips_sc_inv(unsigned long addr,unsigned long size)41 static void mips_sc_inv(unsigned long addr, unsigned long size)
42 {
43 unsigned long lsize = cpu_scache_line_size();
44 unsigned long almask = ~(lsize - 1);
45
46 cache_op(Hit_Writeback_Inv_SD, addr & almask);
47 cache_op(Hit_Writeback_Inv_SD, (addr + size - 1) & almask);
48 blast_inv_scache_range(addr, addr + size);
49 }
50
/* No-op: the L2 on these parts cannot be switched off by software. */
static void mips_sc_enable(void)
{
}
55
/* No-op: the L2 is permanently enabled and cannot be disabled. */
static void mips_sc_disable(void)
{
}
60
61 static struct bcache_ops mips_sc_ops = {
62 .bc_enable = mips_sc_enable,
63 .bc_disable = mips_sc_disable,
64 .bc_wback_inv = mips_sc_wback_inv,
65 .bc_inv = mips_sc_inv
66 };
67
68 /*
69 * Check if the L2 cache controller is activated on a particular platform.
70 * MTI's L2 controller and the L2 cache controller of Broadcom's BMIPS
71 * cores both use c0_config2's bit 12 as "L2 Bypass" bit, that is the
72 * cache being disabled. However there is no guarantee for this to be
73 * true on all platforms. In an act of stupidity the spec defined bits
74 * 12..15 as implementation defined so below function will eventually have
75 * to be replaced by a platform specific probe.
76 */
mips_sc_is_activated(struct cpuinfo_mips * c)77 static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
78 {
79 unsigned int config2 = read_c0_config2();
80 unsigned int tmp;
81
82 /* Check the bypass bit (L2B) */
83 switch (c->cputype) {
84 case CPU_34K:
85 case CPU_1004K:
86 case CPU_74K:
87 case CPU_PROAPTIV: /* proAptiv havn't L2B capability but ... */
88 case CPU_INTERAPTIV:
89 case CPU_P5600:
90 case CPU_BMIPS5000:
91 if (config2 & (1 << 12))
92 return 0;
93 }
94
95 tmp = (config2 >> 4) & 0x0f;
96 if (0 < tmp && tmp <= 7)
97 c->scache.linesz = 2 << tmp;
98 else
99 return 0;
100 return 1;
101 }
102
103 #ifdef CONFIG_MIPS_CMP
cm3_l2_setup(void)104 static inline int cm3_l2_setup(void)
105 {
106 struct cpuinfo_mips *c = ¤t_cpu_data;
107 unsigned int tmp;
108 unsigned int l2config = 0;
109 unsigned int l2p;
110
111 if (gcmp3_present)
112 l2config = GCMPGCB(L2CONFIG);
113 if (!(l2config & MIPS_CONF_M))
114 return 0;
115
116 tmp = (l2config & GCMP_GCB_L2CONFIG_LSIZE_MASK) >>
117 GCMP_GCB_L2CONFIG_LSIZE_SHF;
118 if (!tmp)
119 return 0;
120
121 if (l2config & GCMP_GCB_L2CONFIG_BYPASS_MASK) {
122 if (!cm3_l2_init(c->dcache.linesz, INDEX_BASE,
123 c->dcache.sets * c->dcache.ways * c->dcache.linesz,
124 _gcmp_base))
125 return 0;
126 printk("GCR_L2_CONFIG now 0x%08x\n",GCMPGCB(L2CONFIG));
127 printk("CM3 L2 initialized\n");
128 }
129
130 c->scache.linesz = 2 << tmp;
131 tmp = (l2config & GCMP_GCB_L2CONFIG_ASSOC_MASK) >>
132 GCMP_GCB_L2CONFIG_ASSOC_SHF;
133 c->scache.ways = tmp + 1;
134 tmp = (l2config & GCMP_GCB_L2CONFIG_SSIZE_MASK) >>
135 GCMP_GCB_L2CONFIG_SSIZE_SHF;
136 c->scache.sets = 64 << tmp;
137
138 /* setup L2 prefetch */
139 l2p = GCMPGCB(GCML2P);
140 if (l2p & GCMP_GCB_GCML2P_NPFT) {
141 GCMPGCB(GCML2P) = (l2p & ~GCMP_GCB_GCML2P_PAGE_MASK) |
142 PAGE_MASK | GCMP_GCB_GCML2P_PFTEN;
143 GCMPGCB(GCML2PB) |= GCMP_GCB_GCML2PB_CODE_PFTEN;
144 }
145
146 return 1;
147 }
148 #endif
149
mips_sc_probe(void)150 static inline int __init mips_sc_probe(void)
151 {
152 struct cpuinfo_mips *c = ¤t_cpu_data;
153 unsigned int config1, config2;
154 unsigned int tmp;
155
156 /* Mark as not present until probe completed */
157 c->scache.flags |= MIPS_CACHE_NOT_PRESENT;
158
159 /* Ignore anything but MIPSxx processors */
160 if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
161 MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2 |
162 MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)))
163 return 0;
164
165 /* Does this MIPS32/MIPS64 CPU have a config2 register? */
166 config1 = read_c0_config1();
167 if (!(config1 & MIPS_CONF_M))
168 return 0;
169
170 config2 = read_c0_config2();
171
172 if (cpu_has_l2c || !(config2 & ~(MIPS_CONF_M|MIPS_CONF2_SU))) {
173 #ifdef CONFIG_MIPS_CMP
174 if (!cm3_l2_setup())
175 #endif
176 return 0;
177 } else {
178 if (!mips_sc_is_activated(c))
179 return 0;
180
181 tmp = (config2 >> 8) & 0x0f;
182 if (0 <= tmp && tmp <= 7)
183 c->scache.sets = 64 << tmp;
184 else
185 return 0;
186
187 tmp = (config2 >> 0) & 0x0f;
188 if (0 <= tmp && tmp <= 7)
189 c->scache.ways = tmp + 1;
190 else
191 return 0;
192 }
193
194 c->scache.waysize = c->scache.sets * c->scache.linesz;
195 c->scache.waybit = __ffs(c->scache.waysize);
196
197 c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
198
199 return 1;
200 }
201
mips_sc_init(void)202 int __cpuinit mips_sc_init(void)
203 {
204 int found = mips_sc_probe();
205 if (found) {
206 mips_sc_enable();
207 bcops = &mips_sc_ops;
208 } else
209 cpu_data[0].options &= ~MIPS_CPU_CM2_L2SYNC;
210 return found;
211 }
212