/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2005-2007 Cavium Networks
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/cpu.h>
#include <linux/io.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/mmu_context.h>
#include <asm/war.h>

#include <asm/octeon/octeon.h>

unsigned long long cache_err_dcache[NR_CPUS];

/**
 * Octeon automatically flushes the dcache on tlb changes, so
 * from Linux's viewpoint it acts much like a physically
 * tagged cache. No flushing is needed
 *
 */
static void octeon_flush_data_cache_page(unsigned long addr)
{
        /* Nothing to do */
}

static inline void octeon_local_flush_icache(void)
{
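        /*
         * Note: a single SYNCI is understood to invalidate the entire local
         * icache on Octeon, so no per-line loop over an address range is
         * needed anywhere in this file.
         */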
        asm volatile ("synci 0($0)");
}

/*
 * Flush local I-cache for the specified range.
 */
static void local_octeon_flush_icache_range(unsigned long start,
                                            unsigned long end)
{
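        /*
         * start/end are ignored: octeon_local_flush_icache() invalidates the
         * whole local icache, which necessarily covers the requested range.
         */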
        octeon_local_flush_icache();
}

/**
 * Flush caches as necessary for all cores affected by a
 * vma. If no vma is supplied, all cores are flushed.
 *
 * @vma: VMA to flush or NULL to flush all icaches.
 */
static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
{
        extern void octeon_send_ipi_single(int cpu, unsigned int action);
#ifdef CONFIG_SMP
        int cpu;
        cpumask_t mask;
#endif
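
        /*
         * Order any prior stores (e.g. newly written instructions) before
         * the local flush and the IPIs sent below.
         */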
        mb();
        octeon_local_flush_icache();
#ifdef CONFIG_SMP
        preempt_disable();
        cpu = smp_processor_id();

        /*
         * If we have a vma structure, we only need to worry about
         * cores it has been used on
         */
        if (vma)
                mask = *mm_cpumask(vma->vm_mm);
        else
                mask = *cpu_online_mask;
        cpumask_clear_cpu(cpu, &mask);
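        /* The local core was flushed above; IPI every remaining core. */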
        for_each_cpu(cpu, &mask)
                octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);

        preempt_enable();
#endif
}


/**
 * Called to flush the icache on all cores
 */
static void octeon_flush_icache_all(void)
{
        octeon_flush_icache_all_cores(NULL);
}


/**
 * Called to flush all memory associated with a memory
 * context.
 *
 * @mm: Memory context to flush
 */
static void octeon_flush_cache_mm(struct mm_struct *mm)
{
        /*
         * According to the R4K version of this file, CPUs without
         * dcache aliases don't need to do anything here
         */
}


/**
 * Flush a range of kernel addresses out of the icache
 *
 */
static void octeon_flush_icache_range(unsigned long start, unsigned long end)
{
        octeon_flush_icache_all_cores(NULL);
}


/**
 * Flush the icache for a trampoline. These are used for interrupt
 * and exception hooking.
 *
 * @addr: Address to flush
 */
static void octeon_flush_cache_sigtramp(unsigned long addr)
{
        struct vm_area_struct *vma;

        vma = find_vma(current->mm, addr);
        octeon_flush_icache_all_cores(vma);
}


/**
 * Flush a range out of a vma
 *
 * @vma: VMA to flush
 * @start: start of the virtual address range to flush
 * @end: end of the virtual address range to flush
 */
static void octeon_flush_cache_range(struct vm_area_struct *vma,
                                     unsigned long start, unsigned long end)
{
        if (vma->vm_flags & VM_EXEC)
                octeon_flush_icache_all_cores(vma);
}


/**
 * Flush a specific page of a vma
 *
 * @vma: VMA to flush page for
 * @page: Page to flush
 * @pfn: page frame number of the page being flushed
 */
static void octeon_flush_cache_page(struct vm_area_struct *vma,
                                    unsigned long page, unsigned long pfn)
{
        if (vma->vm_flags & VM_EXEC)
                octeon_flush_icache_all_cores(vma);
}

static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
        BUG();
}

/**
 * Probe Octeon's caches
 *
 */
static void __cpuinit probe_octeon(void)
{
        unsigned long icache_size;
        unsigned long dcache_size;
        unsigned int config1;
        struct cpuinfo_mips *c = &current_cpu_data;

        config1 = read_c0_config1();
        switch (c->cputype) {
        case CPU_CAVIUM_OCTEON:
        case CPU_CAVIUM_OCTEON_PLUS:
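                /*
                 * Icache geometry is decoded from the standard MIPS Config1
                 * register: IL (bits 21:19) gives the line size as 2 << IL,
                 * IS (bits 24:22) gives sets per way as 64 << IS, and IA
                 * (bits 18:16) gives associativity as IA + 1.
                 */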
                c->icache.linesz = 2 << ((config1 >> 19) & 7);
                c->icache.sets = 64 << ((config1 >> 22) & 7);
                c->icache.ways = 1 + ((config1 >> 16) & 7);
                c->icache.flags |= MIPS_CACHE_VTAG;
                icache_size =
                        c->icache.sets * c->icache.ways * c->icache.linesz;
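                /*
                 * waybit is log2 of the way size; ffs() returns the 1-based
                 * index of the lowest set bit, hence the "- 1".
                 */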
                c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;
                c->dcache.linesz = 128;
                if (c->cputype == CPU_CAVIUM_OCTEON_PLUS)
                        c->dcache.sets = 2; /* CN5XXX has two Dcache sets */
                else
                        c->dcache.sets = 1; /* CN3XXX has one Dcache set */
                c->dcache.ways = 64;
                dcache_size =
                        c->dcache.sets * c->dcache.ways * c->dcache.linesz;
                c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1;
                c->options |= MIPS_CPU_PREFETCH;
                break;

        case CPU_CAVIUM_OCTEON2:
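                /*
                 * Except for the icache line size, the OCTEON II cache
                 * geometry is hard-coded here rather than decoded from
                 * Config1.
                 */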
                c->icache.linesz = 2 << ((config1 >> 19) & 7);
                c->icache.sets = 8;
                c->icache.ways = 37;
                c->icache.flags |= MIPS_CACHE_VTAG;
                icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

                c->dcache.linesz = 128;
                c->dcache.ways = 32;
                c->dcache.sets = 8;
                dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
                c->options |= MIPS_CPU_PREFETCH;
                break;

        default:
                panic("Unsupported Cavium Networks CPU type");
                break;
        }

        /* compute a couple of other cache variables */
        c->icache.waysize = icache_size / c->icache.ways;
        c->dcache.waysize = dcache_size / c->dcache.ways;

        c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
        c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);

        if (smp_processor_id() == 0) {
                pr_notice("Primary instruction cache %ldkB, %s, %d way, "
                          "%d sets, linesize %d bytes.\n",
                          icache_size >> 10,
                          cpu_has_vtag_icache ?
                          "virtually tagged" : "physically tagged",
                          c->icache.ways, c->icache.sets, c->icache.linesz);

                pr_notice("Primary data cache %ldkB, %d-way, %d sets, "
                          "linesize %d bytes.\n",
                          dcache_size >> 10, c->dcache.ways,
                          c->dcache.sets, c->dcache.linesz);
        }
}


/**
 * Setup the Octeon cache flush routines
 *
 */
void __cpuinit octeon_cache_init(void)
{
        extern unsigned long ebase;
        extern char except_vec2_octeon;
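
        /*
         * Install the Octeon-specific cache error handler at the cache error
         * exception vector (EBASE + 0x100), then flush the icache on all
         * cores so the new handler is fetched.
         */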
        memcpy((void *)(ebase + 0x100), &except_vec2_octeon, 0x80);
        octeon_flush_cache_sigtramp(ebase + 0x100);

        probe_octeon();
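
        /*
         * Caches behave as physically tagged on Octeon, so shared mappings
         * only need page alignment; no extra cache-colour alignment is
         * required.
         */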
        shm_align_mask = PAGE_SIZE - 1;

        flush_cache_all = octeon_flush_icache_all;
        __flush_cache_all = octeon_flush_icache_all;
        flush_cache_mm = octeon_flush_cache_mm;
        flush_cache_page = octeon_flush_cache_page;
        flush_cache_range = octeon_flush_cache_range;
        flush_cache_sigtramp = octeon_flush_cache_sigtramp;
        flush_icache_all = octeon_flush_icache_all;
        flush_data_cache_page = octeon_flush_data_cache_page;
        flush_icache_range = octeon_flush_icache_range;
        local_flush_icache_range = local_octeon_flush_icache_range;

        __flush_kernel_vmap_range = octeon_flush_kernel_vmap_range;

        build_clear_page();
        build_copy_page();
}

/**
 * Handle a cache error exception
 */
static void cache_parity_error_octeon(int non_recoverable)
{
        unsigned long coreid = cvmx_get_core_num();
        uint64_t icache_err = read_octeon_c0_icacheerr();

        pr_err("Cache error exception:\n");
        pr_err("cp0_errorepc == %lx\n", read_c0_errorepc());
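        /*
         * Bit 0 of the error values appears to flag that an error was
         * logged; report and clear whichever caches recorded one.
         */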
        if (icache_err & 1) {
                pr_err("CacheErr (Icache) == %llx\n",
                       (unsigned long long)icache_err);
                write_octeon_c0_icacheerr(0);
        }
        if (cache_err_dcache[coreid] & 1) {
                pr_err("CacheErr (Dcache) == %llx\n",
                       (unsigned long long)cache_err_dcache[coreid]);
                cache_err_dcache[coreid] = 0;
        }

        if (non_recoverable)
                panic("Can't handle cache error: nested exception");
}

/**
 * Called when the exception is recoverable
 */
asmlinkage void cache_parity_error_octeon_recoverable(void)
{
        cache_parity_error_octeon(0);
}

/**
 * Called when the exception is not recoverable
 */
asmlinkage void cache_parity_error_octeon_non_recoverable(void)
{
        cache_parity_error_octeon(1);
}