/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2005-2007 Cavium Networks
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/cpu.h>
#include <linux/io.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/page.h>
#include <asm/r4kcache.h>
#include <asm/traps.h>
#include <asm/mmu_context.h>
#include <asm/war.h>

#include <asm/octeon/octeon.h>

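/*
 * Per-core saved dcache error status. The low-level cache error
 * handler (except_vec2_octeon) is expected to record the dcache
 * error word here; co_cache_error_call_notifiers() below consumes
 * and clears it.
 */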
unsigned long long cache_err_dcache[NR_CPUS];
EXPORT_SYMBOL_GPL(cache_err_dcache);

/*
 * Octeon automatically flushes the dcache on tlb changes, so
 * from Linux's viewpoint it acts much like a physically
 * tagged cache. No flushing is needed.
 */
static void octeon_flush_data_cache_page(unsigned long addr)
{
	/* Nothing to do */
}

static inline void octeon_local_flush_icache(void)
{
	asm volatile ("synci 0($0)");
}

/*
 * Flush local I-cache for the specified range. On Octeon a single
 * synci invalidates the entire local icache, so the range arguments
 * are unused.
 */
static void local_octeon_flush_icache_range(unsigned long start,
					    unsigned long end)
{
	octeon_local_flush_icache();
}

/**
 * octeon_flush_icache_all_cores - Flush caches as necessary for all cores
 * affected by a vma. If no vma is supplied, all cores are flushed.
 *
 * @vma:    VMA to flush or NULL to flush all icaches.
 */
static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
{
	extern void octeon_send_ipi_single(int cpu, unsigned int action);
#ifdef CONFIG_SMP
	int cpu;
	cpumask_t mask;
#endif

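	/* Make prior stores visible before invalidating any icache */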
	mb();
	octeon_local_flush_icache();
#ifdef CONFIG_SMP
	preempt_disable();
	cpu = smp_processor_id();

	/*
	 * If we have a vma structure, we only need to worry about
	 * cores it has been used on
	 */
	if (vma)
		mask = *mm_cpumask(vma->vm_mm);
	else
		mask = *cpu_online_mask;
	cpumask_clear_cpu(cpu, &mask);
	for_each_cpu(cpu, &mask)
		octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);

	preempt_enable();
#endif
}

/*
 * Called to flush the icache on all cores
 */
static void octeon_flush_icache_all(void)
{
	octeon_flush_icache_all_cores(NULL);
}

/**
 * octeon_flush_cache_mm - flush all memory associated with a memory context.
 *
 * @mm:	    Memory context to flush
 */
static void octeon_flush_cache_mm(struct mm_struct *mm)
{
	/*
	 * According to the R4K version of this file, CPUs without
	 * dcache aliases don't need to do anything here
	 */
}

/*
 * Flush a range of kernel addresses out of the icache
 */
static void octeon_flush_icache_range(unsigned long start, unsigned long end)
{
	octeon_flush_icache_all_cores(NULL);
}

/**
 * octeon_flush_cache_range - Flush a range out of a vma
 *
 * @vma:    VMA to flush
 * @start:  beginning address for flush
 * @end:    ending address for flush
 */
static void octeon_flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end)
{
	if (vma->vm_flags & VM_EXEC)
		octeon_flush_icache_all_cores(vma);
}

/**
 * octeon_flush_cache_page - Flush a specific page of a vma
 *
 * @vma:    VMA to flush page for
 * @page:   Page to flush
 * @pfn:    Page frame number
 */
static void octeon_flush_cache_page(struct vm_area_struct *vma,
				    unsigned long page, unsigned long pfn)
{
	if (vma->vm_flags & VM_EXEC)
		octeon_flush_icache_all_cores(vma);
}

static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
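	/* Octeon's dcache needs no flushing, so this should never be called */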
	BUG();
}

/*
 * Probe Octeon's caches
 */
static void probe_octeon(void)
{
	unsigned long icache_size;
	unsigned long dcache_size;
	unsigned int config1;
	struct cpuinfo_mips *c = &current_cpu_data;
	int cputype = current_cpu_type();

	config1 = read_c0_config1();
	switch (cputype) {
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
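		/* Decode icache geometry from the MIPS Config1 IL/IS/IA fields */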
		c->icache.linesz = 2 << ((config1 >> 19) & 7);
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size =
			c->icache.sets * c->icache.ways * c->icache.linesz;
		c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;
		c->dcache.linesz = 128;
		if (cputype == CPU_CAVIUM_OCTEON_PLUS)
			c->dcache.sets = 2; /* CN5XXX has two Dcache sets */
		else
			c->dcache.sets = 1; /* CN3XXX has one Dcache set */
		c->dcache.ways = 64;
		dcache_size =
			c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_CAVIUM_OCTEON2:
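		/* Fixed cache geometry; only the line size comes from Config1 */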
		c->icache.linesz = 2 << ((config1 >> 19) & 7);
		c->icache.sets = 8;
		c->icache.ways = 37;
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

		c->dcache.linesz = 128;
		c->dcache.ways = 32;
		c->dcache.sets = 8;
		dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_CAVIUM_OCTEON3:
		c->icache.linesz = 128;
		c->icache.sets = 16;
		c->icache.ways = 39;
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

		c->dcache.linesz = 128;
		c->dcache.ways = 32;
		c->dcache.sets = 8;
		dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	default:
		panic("Unsupported Cavium Networks CPU type");
		break;
	}

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
	c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);

	if (smp_processor_id() == 0) {
		pr_info("Primary instruction cache %ldkB, %s, %d way, "
			"%d sets, linesize %d bytes.\n",
			icache_size >> 10,
			cpu_has_vtag_icache ?
				"virtually tagged" : "physically tagged",
			c->icache.ways, c->icache.sets, c->icache.linesz);

		pr_info("Primary data cache %ldkB, %d-way, %d sets, "
			"linesize %d bytes.\n",
			dcache_size >> 10, c->dcache.ways,
			c->dcache.sets, c->dcache.linesz);
	}
}

static void octeon_cache_error_setup(void)
{
	extern char except_vec2_octeon;
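	/* Install the handler at the cache error vector (offset 0x100) */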
	set_handler(0x100, &except_vec2_octeon, 0x80);
}

/*
 * Setup the Octeon cache flush routines
 */
void octeon_cache_init(void)
{
	probe_octeon();

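	/* No dcache aliases, so no extra alignment for shared mappings */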
	shm_align_mask = PAGE_SIZE - 1;

	flush_cache_all			= octeon_flush_icache_all;
	__flush_cache_all		= octeon_flush_icache_all;
	flush_cache_mm			= octeon_flush_cache_mm;
	flush_cache_page		= octeon_flush_cache_page;
	flush_cache_range		= octeon_flush_cache_range;
	flush_icache_all		= octeon_flush_icache_all;
	flush_data_cache_page		= octeon_flush_data_cache_page;
	flush_icache_range		= octeon_flush_icache_range;
	local_flush_icache_range	= local_octeon_flush_icache_range;
	__flush_icache_user_range	= octeon_flush_icache_range;
	__local_flush_icache_user_range	= local_octeon_flush_icache_range;

	__flush_kernel_vmap_range	= octeon_flush_kernel_vmap_range;

	build_clear_page();
	build_copy_page();

	board_cache_error_setup = octeon_cache_error_setup;
}

/*
 * Handle a cache error exception
 */
static RAW_NOTIFIER_HEAD(co_cache_error_chain);

int register_co_cache_error_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&co_cache_error_chain, nb);
}
EXPORT_SYMBOL_GPL(register_co_cache_error_notifier);

int unregister_co_cache_error_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_unregister(&co_cache_error_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_co_cache_error_notifier);

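/*
 * Invoke the cache error notifier chain; @val is nonzero for a
 * non-recoverable error, zero for a recoverable one. If no notifier
 * claims the error, dump the icache/dcache error registers.
 */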
static void co_cache_error_call_notifiers(unsigned long val)
{
	int rv = raw_notifier_call_chain(&co_cache_error_chain, val, NULL);

	if ((rv & ~NOTIFY_STOP_MASK) != NOTIFY_OK) {
		u64 dcache_err;
		unsigned long coreid = cvmx_get_core_num();
		u64 icache_err = read_octeon_c0_icacheerr();

		if (val) {
			dcache_err = cache_err_dcache[coreid];
			cache_err_dcache[coreid] = 0;
		} else {
			dcache_err = read_octeon_c0_dcacheerr();
		}

		pr_err("Core%lu: Cache error exception:\n", coreid);
		pr_err("cp0_errorepc == %lx\n", read_c0_errorepc());
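		/* Bit 0 of each CacheErr register flags a valid error report */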
		if (icache_err & 1) {
			pr_err("CacheErr (Icache) == %llx\n",
			       (unsigned long long)icache_err);
			write_octeon_c0_icacheerr(0);
		}
		if (dcache_err & 1) {
			pr_err("CacheErr (Dcache) == %llx\n",
			       (unsigned long long)dcache_err);
		}
	}
}

/*
 * Called when the exception is recoverable
 */
asmlinkage void cache_parity_error_octeon_recoverable(void)
{
	co_cache_error_call_notifiers(0);
}

/*
 * Called when the exception is not recoverable
 */
asmlinkage void cache_parity_error_octeon_non_recoverable(void)
{
	co_cache_error_call_notifiers(1);
	panic("Can't handle cache error: nested exception");
}
352