• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2003, Axis Communications AB.
3  */
4 
5 #include <asm/irq.h>
6 #include <linux/irq.h>
7 #include <linux/interrupt.h>
8 #include <linux/smp.h>
9 #include <linux/kernel.h>
10 #include <linux/errno.h>
11 #include <linux/init.h>
12 #include <linux/profile.h>
13 #include <linux/of.h>
14 #include <linux/of_irq.h>
15 #include <linux/proc_fs.h>
16 #include <linux/seq_file.h>
17 #include <linux/threads.h>
18 #include <linux/spinlock.h>
19 #include <linux/kernel_stat.h>
20 #include <hwregs/reg_map.h>
21 #include <hwregs/reg_rdwr.h>
22 #include <hwregs/intr_vect.h>
23 #include <hwregs/intr_vect_defs.h>
24 
/* Marks an IRQ as pinned to the CPU that took it (see irq_cpu()). */
#define CPU_FIXED -1

/* IRQ masks (refer to comment for crisv32_do_multiple) */
#if TIMER0_INTR_VECT - FIRST_IRQ < 32
#define TIMER_MASK (1 << (TIMER0_INTR_VECT - FIRST_IRQ))
#undef TIMER_VECT1
#else
/* Timer vector lies in the second 32-bit mask register. */
#define TIMER_MASK (1 << (TIMER0_INTR_VECT - FIRST_IRQ - 32))
#define TIMER_VECT1
#endif
#ifdef CONFIG_ETRAX_KGDB
/* Bit of the serial port claimed by the kernel debugger; that IRQ is
 * left for the KGDB stub instead of being dispatched by
 * crisv32_do_multiple(). */
#if defined(CONFIG_ETRAX_KGDB_PORT0)
#define IGNOREMASK (1 << (SER0_INTR_VECT - FIRST_IRQ))
#elif defined(CONFIG_ETRAX_KGDB_PORT1)
#define IGNOREMASK (1 << (SER1_INTR_VECT - FIRST_IRQ))
#elif defined(CONFIG_ETRAX_KGDB_PORT2)
#define IGNOREMASK (1 << (SER2_INTR_VECT - FIRST_IRQ))
#elif defined(CONFIG_ETRAX_KGDB_PORT3)
#define IGNOREMASK (1 << (SER3_INTR_VECT - FIRST_IRQ))
#endif
#endif
46 
/* Protects the interrupt mask registers and irq_allocations[]. */
DEFINE_SPINLOCK(irq_lock);

struct cris_irq_allocation
{
  int cpu; /* The CPU to which the IRQ is currently allocated. */
  cpumask_t mask; /* The CPUs to which the IRQ may be allocated. */
};

/* Per-IRQ allocation state: initially every IRQ sits on CPU 0 and may
 * be moved to any CPU. */
struct cris_irq_allocation irq_allocations[NR_REAL_IRQS] =
  { [0 ... NR_REAL_IRQS - 1] = {0, CPU_MASK_ALL} };

/* Base address of each CPU's interrupt controller register bank.
 * Only CPU 0's entry is filled in statically here. */
static unsigned long irq_regs[NR_CPUS] =
{
  regi_irq,
};

/* Number of 32-bit mask/vector registers needed to cover all IRQs. */
#if NR_REAL_IRQS > 32
#define NBR_REGS 2
#else
#define NBR_REGS 1
#endif

/* Interrupt statistics counters. */
unsigned long cpu_irq_counters[NR_CPUS];
unsigned long irq_counters[NR_REAL_IRQS];
71 
/* From irq.c. */
extern void weird_irq(void);

/* From entry.S: low-level trap/interrupt entry points installed into
 * the exception vector table by init_IRQ() below. */
extern void system_call(void);
extern void nmi_interrupt(void);
extern void multiple_interrupt(void);
extern void gdb_handle_exception(void);
extern void i_mmu_refill(void);
extern void i_mmu_invalid(void);
extern void i_mmu_access(void);
extern void i_mmu_execute(void);
extern void d_mmu_refill(void);
extern void d_mmu_invalid(void);
extern void d_mmu_access(void);
extern void d_mmu_write(void);

/* From kgdb.c. */
extern void kgdb_init(void);
extern void breakpoint(void);

/* From traps.c.  */
extern void breakh_BUG(void);
95 
/*
 * Build the IRQ handler stubs using macros from irq.h.
 *
 * Vectors 0x31..0x50 cover the first 32 machine IRQs; 0x51..0x70 are
 * generated only when the machine has more than 32 IRQs.  The timer
 * IRQ stub is built with BUILD_TIMER_IRQ because that IRQ must never
 * be blocked (watchdog starvation, see crisv32_do_multiple); which
 * vector the timer uses differs per machine.
 */
#ifdef CONFIG_CRIS_MACH_ARTPEC3
BUILD_TIMER_IRQ(0x31, 0)
#else
BUILD_IRQ(0x31)
#endif
BUILD_IRQ(0x32)
BUILD_IRQ(0x33)
BUILD_IRQ(0x34)
BUILD_IRQ(0x35)
BUILD_IRQ(0x36)
BUILD_IRQ(0x37)
BUILD_IRQ(0x38)
BUILD_IRQ(0x39)
BUILD_IRQ(0x3a)
BUILD_IRQ(0x3b)
BUILD_IRQ(0x3c)
BUILD_IRQ(0x3d)
BUILD_IRQ(0x3e)
BUILD_IRQ(0x3f)
BUILD_IRQ(0x40)
BUILD_IRQ(0x41)
BUILD_IRQ(0x42)
BUILD_IRQ(0x43)
BUILD_IRQ(0x44)
BUILD_IRQ(0x45)
BUILD_IRQ(0x46)
BUILD_IRQ(0x47)
BUILD_IRQ(0x48)
BUILD_IRQ(0x49)
BUILD_IRQ(0x4a)
#ifdef CONFIG_ETRAXFS
BUILD_TIMER_IRQ(0x4b, 0)
#else
BUILD_IRQ(0x4b)
#endif
BUILD_IRQ(0x4c)
BUILD_IRQ(0x4d)
BUILD_IRQ(0x4e)
BUILD_IRQ(0x4f)
BUILD_IRQ(0x50)
#if MACH_IRQS > 32
BUILD_IRQ(0x51)
BUILD_IRQ(0x52)
BUILD_IRQ(0x53)
BUILD_IRQ(0x54)
BUILD_IRQ(0x55)
BUILD_IRQ(0x56)
BUILD_IRQ(0x57)
BUILD_IRQ(0x58)
BUILD_IRQ(0x59)
BUILD_IRQ(0x5a)
BUILD_IRQ(0x5b)
BUILD_IRQ(0x5c)
BUILD_IRQ(0x5d)
BUILD_IRQ(0x5e)
BUILD_IRQ(0x5f)
BUILD_IRQ(0x60)
BUILD_IRQ(0x61)
BUILD_IRQ(0x62)
BUILD_IRQ(0x63)
BUILD_IRQ(0x64)
BUILD_IRQ(0x65)
BUILD_IRQ(0x66)
BUILD_IRQ(0x67)
BUILD_IRQ(0x68)
BUILD_IRQ(0x69)
BUILD_IRQ(0x6a)
BUILD_IRQ(0x6b)
BUILD_IRQ(0x6c)
BUILD_IRQ(0x6d)
BUILD_IRQ(0x6e)
BUILD_IRQ(0x6f)
BUILD_IRQ(0x70)
#endif
173 
/* Pointers to the low-level handlers generated above.  The order must
 * match the vector numbering exactly: entry j handles vector
 * FIRST_IRQ + j (see the install loop in init_IRQ). */
static void (*interrupt[MACH_IRQS])(void) = {
	IRQ0x31_interrupt, IRQ0x32_interrupt, IRQ0x33_interrupt,
	IRQ0x34_interrupt, IRQ0x35_interrupt, IRQ0x36_interrupt,
	IRQ0x37_interrupt, IRQ0x38_interrupt, IRQ0x39_interrupt,
	IRQ0x3a_interrupt, IRQ0x3b_interrupt, IRQ0x3c_interrupt,
	IRQ0x3d_interrupt, IRQ0x3e_interrupt, IRQ0x3f_interrupt,
	IRQ0x40_interrupt, IRQ0x41_interrupt, IRQ0x42_interrupt,
	IRQ0x43_interrupt, IRQ0x44_interrupt, IRQ0x45_interrupt,
	IRQ0x46_interrupt, IRQ0x47_interrupt, IRQ0x48_interrupt,
	IRQ0x49_interrupt, IRQ0x4a_interrupt, IRQ0x4b_interrupt,
	IRQ0x4c_interrupt, IRQ0x4d_interrupt, IRQ0x4e_interrupt,
	IRQ0x4f_interrupt, IRQ0x50_interrupt,
#if MACH_IRQS > 32
	IRQ0x51_interrupt, IRQ0x52_interrupt, IRQ0x53_interrupt,
	IRQ0x54_interrupt, IRQ0x55_interrupt, IRQ0x56_interrupt,
	IRQ0x57_interrupt, IRQ0x58_interrupt, IRQ0x59_interrupt,
	IRQ0x5a_interrupt, IRQ0x5b_interrupt, IRQ0x5c_interrupt,
	IRQ0x5d_interrupt, IRQ0x5e_interrupt, IRQ0x5f_interrupt,
	IRQ0x60_interrupt, IRQ0x61_interrupt, IRQ0x62_interrupt,
	IRQ0x63_interrupt, IRQ0x64_interrupt, IRQ0x65_interrupt,
	IRQ0x66_interrupt, IRQ0x67_interrupt, IRQ0x68_interrupt,
	IRQ0x69_interrupt, IRQ0x6a_interrupt, IRQ0x6b_interrupt,
	IRQ0x6c_interrupt, IRQ0x6d_interrupt, IRQ0x6e_interrupt,
	IRQ0x6f_interrupt, IRQ0x70_interrupt,
#endif
};
201 
202 void
block_irq(int irq,int cpu)203 block_irq(int irq, int cpu)
204 {
205 	int intr_mask;
206         unsigned long flags;
207 
208 	spin_lock_irqsave(&irq_lock, flags);
209 	/* Remember, 1 let thru, 0 block. */
210 	if (irq - FIRST_IRQ < 32) {
211 		intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu],
212 			rw_mask, 0);
213 		intr_mask &= ~(1 << (irq - FIRST_IRQ));
214 		REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask,
215 			0, intr_mask);
216 	} else {
217 		intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu],
218 			rw_mask, 1);
219 		intr_mask &= ~(1 << (irq - FIRST_IRQ - 32));
220 		REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask,
221 			1, intr_mask);
222 	}
223         spin_unlock_irqrestore(&irq_lock, flags);
224 }
225 
226 void
unblock_irq(int irq,int cpu)227 unblock_irq(int irq, int cpu)
228 {
229 	int intr_mask;
230         unsigned long flags;
231 
232         spin_lock_irqsave(&irq_lock, flags);
233 	/* Remember, 1 let thru, 0 block. */
234 	if (irq - FIRST_IRQ < 32) {
235 		intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu],
236 			rw_mask, 0);
237 		intr_mask |= (1 << (irq - FIRST_IRQ));
238 		REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask,
239 			0, intr_mask);
240 	} else {
241 		intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu],
242 			rw_mask, 1);
243 		intr_mask |= (1 << (irq - FIRST_IRQ - 32));
244 		REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask,
245 			1, intr_mask);
246 	}
247         spin_unlock_irqrestore(&irq_lock, flags);
248 }
249 
250 /* Find out which CPU the irq should be allocated to. */
irq_cpu(int irq)251 static int irq_cpu(int irq)
252 {
253 	int cpu;
254         unsigned long flags;
255 
256         spin_lock_irqsave(&irq_lock, flags);
257         cpu = irq_allocations[irq - FIRST_IRQ].cpu;
258 
259 	/* Fixed interrupts stay on the local CPU. */
260 	if (cpu == CPU_FIXED)
261         {
262 		spin_unlock_irqrestore(&irq_lock, flags);
263 		return smp_processor_id();
264         }
265 
266 
267 	/* Let the interrupt stay if possible */
268 	if (cpumask_test_cpu(cpu, &irq_allocations[irq - FIRST_IRQ].mask))
269 		goto out;
270 
271 	/* IRQ must be moved to another CPU. */
272 	cpu = cpumask_first(&irq_allocations[irq - FIRST_IRQ].mask);
273 	irq_allocations[irq - FIRST_IRQ].cpu = cpu;
274 out:
275 	spin_unlock_irqrestore(&irq_lock, flags);
276 	return cpu;
277 }
278 
crisv32_mask_irq(int irq)279 void crisv32_mask_irq(int irq)
280 {
281 	int cpu;
282 
283 	for (cpu = 0; cpu < NR_CPUS; cpu++)
284 		block_irq(irq, cpu);
285 }
286 
/* Unblock 'irq' on the CPU it is (now) allocated to. */
void crisv32_unmask_irq(int irq)
{
	int cpu = irq_cpu(irq);

	unblock_irq(irq, cpu);
}
291 
292 
/* irq_chip .irq_enable callback: open the IRQ on its allocated CPU. */
static void enable_crisv32_irq(struct irq_data *data)
{
	crisv32_unmask_irq(data->irq);
}
297 
/* irq_chip .irq_disable/.irq_shutdown callback: block the IRQ on all
 * CPUs. */
static void disable_crisv32_irq(struct irq_data *data)
{
	crisv32_mask_irq(data->irq);
}
302 
/* irq_chip .irq_set_affinity callback.  Only records the new allowed
 * CPU set; the IRQ is actually moved lazily by irq_cpu() the next
 * time it is unblocked. */
static int set_affinity_crisv32_irq(struct irq_data *data,
				    const struct cpumask *dest, bool force)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	irq_allocations[data->irq - FIRST_IRQ].mask = *dest;
	spin_unlock_irqrestore(&irq_lock, flags);
	return 0;
}
313 
/* The irq_chip describing the CRISv32 interrupt controller to the
 * generic IRQ layer. */
static struct irq_chip crisv32_irq_type = {
	.name			= "CRISv32",
	.irq_shutdown		= disable_crisv32_irq,
	.irq_enable		= enable_crisv32_irq,
	.irq_disable		= disable_crisv32_irq,
	.irq_set_affinity	= set_affinity_crisv32_irq,
};
321 
322 void
set_exception_vector(int n,irqvectptr addr)323 set_exception_vector(int n, irqvectptr addr)
324 {
325 	etrax_irv->v[n] = (irqvectptr) addr;
326 }
327 
328 extern void do_IRQ(int irq, struct pt_regs * regs);
329 
/* Dispatch one IRQ, optionally blocking it on this CPU for the
 * duration of the handler.
 *
 * Interrupts that may not be moved to another CPU may skip blocking.
 * This is currently only valid for the timer IRQ and the IPI and is
 * used for the timer interrupt to avoid watchdog starvation.
 */
void
crisv32_do_IRQ(int irq, int block, struct pt_regs *regs)
{
	if (block)
		block_irq(irq, smp_processor_id());

	do_IRQ(irq, regs);

	if (block)
		unblock_irq(irq, irq_cpu(irq));
}
348 
349 /* If multiple interrupts occur simultaneously we get a multiple
350  * interrupt from the CPU and software has to sort out which
351  * interrupts that happened. There are two special cases here:
352  *
353  * 1. Timer interrupts may never be blocked because of the
354  *    watchdog (refer to comment in include/asr/arch/irq.h)
355  * 2. GDB serial port IRQs are unhandled here and will be handled
356  *    as a single IRQ when it strikes again because the GDB
357  *    stubb wants to save the registers in its own fashion.
358  */
359 void
crisv32_do_multiple(struct pt_regs * regs)360 crisv32_do_multiple(struct pt_regs* regs)
361 {
362 	int cpu;
363 	int mask;
364 	int masked[NBR_REGS];
365 	int bit;
366 	int i;
367 
368 	cpu = smp_processor_id();
369 
370 	/* An extra irq_enter here to prevent softIRQs to run after
371          * each do_IRQ. This will decrease the interrupt latency.
372 	 */
373 	irq_enter();
374 
375 	for (i = 0; i < NBR_REGS; i++) {
376 		/* Get which IRQs that happened. */
377 		masked[i] = REG_RD_INT_VECT(intr_vect, irq_regs[cpu],
378 			r_masked_vect, i);
379 
380 		/* Calculate new IRQ mask with these IRQs disabled. */
381 		mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, i);
382 		mask &= ~masked[i];
383 
384 	/* Timer IRQ is never masked */
385 #ifdef TIMER_VECT1
386 		if ((i == 1) && (masked[0] & TIMER_MASK))
387 			mask |= TIMER_MASK;
388 #else
389 		if ((i == 0) && (masked[0] & TIMER_MASK))
390 			mask |= TIMER_MASK;
391 #endif
392 		/* Block all the IRQs */
393 		REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, i, mask);
394 
395 	/* Check for timer IRQ and handle it special. */
396 #ifdef TIMER_VECT1
397 		if ((i == 1) && (masked[i] & TIMER_MASK)) {
398 			masked[i] &= ~TIMER_MASK;
399 			do_IRQ(TIMER0_INTR_VECT, regs);
400 		}
401 #else
402 		if ((i == 0) && (masked[i] & TIMER_MASK)) {
403 			 masked[i] &= ~TIMER_MASK;
404 			 do_IRQ(TIMER0_INTR_VECT, regs);
405 		}
406 #endif
407 	}
408 
409 #ifdef IGNORE_MASK
410 	/* Remove IRQs that can't be handled as multiple. */
411 	masked[0] &= ~IGNORE_MASK;
412 #endif
413 
414 	/* Handle the rest of the IRQs. */
415 	for (i = 0; i < NBR_REGS; i++) {
416 		for (bit = 0; bit < 32; bit++) {
417 			if (masked[i] & (1 << bit))
418 				do_IRQ(bit + FIRST_IRQ + i*32, regs);
419 		}
420 	}
421 
422 	/* Unblock all the IRQs. */
423 	for (i = 0; i < NBR_REGS; i++) {
424 		mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, i);
425 		mask |= masked[i];
426 		REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, i, mask);
427 	}
428 
429 	/* This irq_exit() will trigger the soft IRQs. */
430 	irq_exit();
431 }
432 
/* irq_domain .map callback: associate each virtual IRQ with the
 * CRISv32 irq_chip and a simple-IRQ flow handler. */
static int crisv32_irq_map(struct irq_domain *h, unsigned int virq,
			   irq_hw_number_t hw_irq_num)
{
	irq_set_chip_and_handler(virq, &crisv32_irq_type, handle_simple_irq);

	return 0;
}
440 
441 static struct irq_domain_ops crisv32_irq_ops = {
442 	.map	= crisv32_irq_map,
443 	.xlate	= irq_domain_xlate_onecell,
444 };
445 
/*
 * This is called by start_kernel. It fixes the IRQ masks and setup the
 * interrupt vector table to point to bad_interrupt pointers.
 */
void __init
init_IRQ(void)
{
	int i;
	int j;
	reg_intr_vect_rw_mask vect_mask = {0};
	struct device_node *np;
	struct irq_domain *domain;

	/* Clear all interrupts masks. */
	for (i = 0; i < NBR_REGS; i++)
		REG_WR_VECT(intr_vect, regi_irq, rw_mask, i, vect_mask);

	/* Point every exception vector at a catch-all handler first. */
	for (i = 0; i < 256; i++)
		etrax_irv->v[i] = weird_irq;

	/* Register the interrupt controller as a legacy irq_domain with
	 * a 1:1 hardware-to-virtual IRQ mapping starting at FIRST_IRQ. */
	np = of_find_compatible_node(NULL, NULL, "axis,crisv32-intc");
	domain = irq_domain_add_legacy(np, NBR_INTR_VECT - FIRST_IRQ,
				       FIRST_IRQ, FIRST_IRQ,
				       &crisv32_irq_ops, NULL);
	BUG_ON(!domain);
	irq_set_default_host(domain);
	of_node_put(np);

	/* Install the generated low-level stubs (see interrupt[] above). */
	for (i = FIRST_IRQ, j = 0; j < NBR_INTR_VECT; i++, j++) {
		set_exception_vector(i, interrupt[j]);
	}

	/* Mark Timer and IPI IRQs as CPU local */
	irq_allocations[TIMER0_INTR_VECT - FIRST_IRQ].cpu = CPU_FIXED;
	irq_set_status_flags(TIMER0_INTR_VECT, IRQ_PER_CPU);
	irq_allocations[IPI_INTR_VECT - FIRST_IRQ].cpu = CPU_FIXED;
	irq_set_status_flags(IPI_INTR_VECT, IRQ_PER_CPU);

	/* NMI and the multiple-interrupt entry point. */
	set_exception_vector(0x00, nmi_interrupt);
	set_exception_vector(0x30, multiple_interrupt);

	/* Set up handler for various MMU bus faults. */
	set_exception_vector(0x04, i_mmu_refill);
	set_exception_vector(0x05, i_mmu_invalid);
	set_exception_vector(0x06, i_mmu_access);
	set_exception_vector(0x07, i_mmu_execute);
	set_exception_vector(0x08, d_mmu_refill);
	set_exception_vector(0x09, d_mmu_invalid);
	set_exception_vector(0x0a, d_mmu_access);
	set_exception_vector(0x0b, d_mmu_write);

#ifdef CONFIG_BUG
	/* Break 14 handler, used to implement cheap BUG().  */
	set_exception_vector(0x1e, breakh_BUG);
#endif

	/* The system-call trap is reached by "break 13". */
	set_exception_vector(0x1d, system_call);

	/* Exception handlers for debugging, both user-mode and kernel-mode. */

	/* Break 8. */
	set_exception_vector(0x18, gdb_handle_exception);
	/* Hardware single step. */
	set_exception_vector(0x3, gdb_handle_exception);
	/* Hardware breakpoint. */
	set_exception_vector(0xc, gdb_handle_exception);

#ifdef CONFIG_ETRAX_KGDB
	kgdb_init();
	/* Everything is set up; now trap the kernel. */
	breakpoint();
#endif
}
520 
521