/*
 * arch/sparc/kernel/sun4d_irq.c:
 * SS1000/SC2000 interrupt handling.
 *
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Heavily based on arch/sparc/kernel/irq.c.
 */

#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/seq_file.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/psr.h>
#include <asm/smp.h>
#include <asm/vaddrs.h>
#include <asm/timer.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/traps.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sbi.h>
#include <asm/cacheflush.h>
#include <asm/irq_regs.h>

#include "kernel.h"
#include "irq.h"

/* If you trust the current SCSI layer to handle different SCSI IRQs, enable this. I don't trust it... -jj */
/* #define DISTRIBUTE_IRQS */

struct sun4d_timer_regs {
	u32	l10_timer_limit;
	u32	l10_cur_countx;
	u32	l10_limit_noclear;
	u32	ctrl;
	u32	l10_cur_count;
};

static struct sun4d_timer_regs __iomem *sun4d_timers;

#define TIMER_IRQ	10

#define MAX_STATIC_ALLOC	4
extern int static_irq_count;
static unsigned char sbus_tid[32];

static struct irqaction *irq_action[NR_IRQS];
extern spinlock_t irq_action_lock;

static struct sbus_action {
	struct irqaction *action;
	/* For SMP this needs to be extended */
} *sbus_actions;

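/*
 * Translation between CPU interrupt levels (PILs) and SBUS interrupt
 * levels: pil_to_sbus[pil] gives the SBUS level (1-7) serviced at that
 * PIL, or 0 if the PIL is not used for SBUS interrupts; sbus_to_pil[]
 * is the inverse table, indexed by SBUS level.
 */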
static int pil_to_sbus[] = {
	0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0,
};

static int sbus_to_pil[] = {
	0, 2, 3, 5, 7, 9, 11, 13,
};

static int nsbi;

/* Exported for sun4d_smp.c */
DEFINE_SPINLOCK(sun4d_imsk_lock);

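/*
 * seq_file helper behind /proc/interrupts: print the per-CPU counts
 * and handler names for one interrupt line, walking every SBI slot
 * action when the line is an SBUS level.
 */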
int show_sun4d_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j = 0, k = 0, sbusl;
	struct irqaction *action;
	unsigned long flags;
#ifdef CONFIG_SMP
	int x;
#endif

	spin_lock_irqsave(&irq_action_lock, flags);
	if (i < NR_IRQS) {
		sbusl = pil_to_sbus[i];
		if (!sbusl) {
			action = *(i + irq_action);
			if (!action)
				goto out_unlock;
		} else {
			for (j = 0; j < nsbi; j++) {
				for (k = 0; k < 4; k++)
					if ((action = sbus_actions[(j << 5) + (sbusl << 2) + k].action))
						goto found_it;
			}
			goto out_unlock;
		}
found_it:	seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(x)
			seq_printf(p, "%10u ",
				   kstat_cpu(cpu_logical_map(x)).irqs[i]);
#endif
		seq_printf(p, "%c %s",
			   (action->flags & IRQF_DISABLED) ? '+' : ' ',
			   action->name);
		action = action->next;
		for (;;) {
			for (; action; action = action->next) {
				seq_printf(p, ",%s %s",
					   (action->flags & IRQF_DISABLED) ? " +" : "",
					   action->name);
			}
			if (!sbusl)
				break;
			k++;
			if (k < 4)
				action = sbus_actions[(j << 5) + (sbusl << 2) + k].action;
			else {
				j++;
				if (j == nsbi)
					break;
				k = 0;
				action = sbus_actions[(j << 5) + (sbusl << 2)].action;
			}
		}
		seq_putc(p, '\n');
	}
out_unlock:
	spin_unlock_irqrestore(&irq_action_lock, flags);
	return 0;
}

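/*
 * Unlink the irqaction bound to dev_id from irq and free it.  Shared
 * interrupts must pass the matching dev_id; statically allocated
 * actions are never released.  The IRQ is disabled once the last
 * handler is gone.
 */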
void sun4d_free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction *action, **actionp;
	struct irqaction *tmp = NULL;
	unsigned long flags;

	spin_lock_irqsave(&irq_action_lock, flags);
	if (irq < 15)
		actionp = irq + irq_action;
	else
		actionp = &(sbus_actions[irq - (1 << 5)].action);
	action = *actionp;
	if (!action) {
		printk("Trying to free free IRQ%d\n", irq);
		goto out_unlock;
	}
	if (dev_id) {
		for (; action; action = action->next) {
			if (action->dev_id == dev_id)
				break;
			tmp = action;
		}
		if (!action) {
			printk("Trying to free free shared IRQ%d\n", irq);
			goto out_unlock;
		}
	} else if (action->flags & IRQF_SHARED) {
		printk("Trying to free shared IRQ%d with NULL device ID\n", irq);
		goto out_unlock;
	}
	if (action->flags & SA_STATIC_ALLOC) {
		/* This interrupt is marked as specially allocated
		 * so it is a bad idea to free it.
		 */
		printk("Attempt to free statically allocated IRQ%d (%s)\n",
		       irq, action->name);
		goto out_unlock;
	}

	if (action && tmp)
		tmp->next = action->next;
	else
		*actionp = action->next;

	spin_unlock_irqrestore(&irq_action_lock, flags);

	synchronize_irq(irq);

	spin_lock_irqsave(&irq_action_lock, flags);

	kfree(action);

	if (!(*actionp))
		__disable_irq(irq);

out_unlock:
	spin_unlock_irqrestore(&irq_action_lock, flags);
}

extern void unexpected_irq(int, void *, struct pt_regs *);

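/*
 * First-level interrupt dispatch for sun4d.  The IRQ is acknowledged
 * in the cache controller, then either the plain PIL action chain is
 * run or, for SBUS levels, every SBI with a pending request is polled
 * and the action for each asserted slot is run and released.
 */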
void sun4d_handler_irq(int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	struct irqaction *action;
	int cpu = smp_processor_id();
	/* SBUS IRQ level (1 - 7) */
	int sbusl = pil_to_sbus[irq];

	/* FIXME: Is this necessary?? */
	cc_get_ipen();

	cc_set_iclr(1 << irq);

	old_regs = set_irq_regs(regs);
	irq_enter();
	kstat_cpu(cpu).irqs[irq]++;
	if (!sbusl) {
		action = *(irq + irq_action);
		if (!action)
			unexpected_irq(irq, NULL, regs);
		do {
			action->handler(irq, action->dev_id);
			action = action->next;
		} while (action);
	} else {
		int bus_mask = bw_get_intr_mask(sbusl) & 0x3ffff;
		int sbino;
		struct sbus_action *actionp;
		unsigned mask, slot;
		int sbil = (sbusl << 2);

		bw_clear_intr_mask(sbusl, bus_mask);

		/* Loop for each pending SBI */
		for (sbino = 0; bus_mask; sbino++, bus_mask >>= 1)
			if (bus_mask & 1) {
				mask = acquire_sbi(SBI2DEVID(sbino), 0xf << sbil);
				mask &= (0xf << sbil);
				actionp = sbus_actions + (sbino << 5) + (sbil);
				/* Loop for each pending SBI slot */
				for (slot = (1 << sbil); mask; slot <<= 1, actionp++)
					if (mask & slot) {
						mask &= ~slot;
						action = actionp->action;

						if (!action)
							unexpected_irq(irq, NULL, regs);
						do {
							action->handler(irq, action->dev_id);
							action = action->next;
						} while (action);
						release_sbi(SBI2DEVID(sbino), slot);
					}
			}
	}
	irq_exit();
	set_irq_regs(old_regs);
}

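/*
 * sun4d flavour of request_irq().  IRQs 0-14 are plain CPU interrupt
 * levels kept in irq_action[]; values from 15 up to 31 are rejected,
 * and values of 32 and above index sbus_actions[] (the SBI board and
 * slot are encoded in the number).
 */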
int sun4d_request_irq(unsigned int irq,
		      irq_handler_t handler,
		      unsigned long irqflags, const char *devname, void *dev_id)
{
	struct irqaction *action, *tmp = NULL, **actionp;
	unsigned long flags;
	int ret;

	if (irq > 14 && irq < (1 << 5)) {
		ret = -EINVAL;
		goto out;
	}

	if (!handler) {
		ret = -EINVAL;
		goto out;
	}

	spin_lock_irqsave(&irq_action_lock, flags);

	if (irq >= (1 << 5))
		actionp = &(sbus_actions[irq - (1 << 5)].action);
	else
		actionp = irq + irq_action;
	action = *actionp;

	if (action) {
		if ((action->flags & IRQF_SHARED) && (irqflags & IRQF_SHARED)) {
			for (tmp = action; tmp->next; tmp = tmp->next)
				;
		} else {
			ret = -EBUSY;
			goto out_unlock;
		}
		if ((action->flags & IRQF_DISABLED) ^ (irqflags & IRQF_DISABLED)) {
			printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n",
			       irq);
			ret = -EBUSY;
			goto out_unlock;
		}
		action = NULL;		/* Or else! */
	}

	/* If this is flagged as statically allocated then we use our
	 * private struct which is never freed.
	 */
	if (irqflags & SA_STATIC_ALLOC) {
		if (static_irq_count < MAX_STATIC_ALLOC)
			action = &static_irqaction[static_irq_count++];
		else
			printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",
			       irq, devname);
	}

	if (action == NULL)
		action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);

	if (!action) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	action->handler = handler;
	action->flags = irqflags;
	cpus_clear(action->mask);
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	if (tmp)
		tmp->next = action;
	else
		*actionp = action;

	__enable_irq(irq);

	ret = 0;
out_unlock:
	spin_unlock_irqrestore(&irq_action_lock, flags);
out:
	return ret;
}

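/*
 * Mask or unmask an SBUS interrupt by flipping the corresponding PIL
 * bit in the interrupt mask of the CPU that serves the SBI board
 * (sbus_tid[]).  On-CPU interrupt levels (irq < NR_IRQS) are left
 * untouched here.
 */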
static void sun4d_disable_irq(unsigned int irq)
{
	int tid = sbus_tid[(irq >> 5) - 1];
	unsigned long flags;

	if (irq < NR_IRQS)
		return;

	spin_lock_irqsave(&sun4d_imsk_lock, flags);
	cc_set_imsk_other(tid, cc_get_imsk_other(tid) | (1 << sbus_to_pil[(irq >> 2) & 7]));
	spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
}

static void sun4d_enable_irq(unsigned int irq)
{
	int tid = sbus_tid[(irq >> 5) - 1];
	unsigned long flags;

	if (irq < NR_IRQS)
		return;

	spin_lock_irqsave(&sun4d_imsk_lock, flags);
	cc_set_imsk_other(tid, cc_get_imsk_other(tid) & ~(1 << sbus_to_pil[(irq >> 2) & 7]));
	spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
}

#ifdef CONFIG_SMP
static void sun4d_set_cpu_int(int cpu, int level)
{
	sun4d_send_ipi(cpu, level);
}

static void sun4d_clear_ipi(int cpu, int level)
{
}

static void sun4d_set_udt(int cpu)
{
}

/* Setup IRQ distribution scheme. */
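/*
 * With DISTRIBUTE_IRQS defined, each SBI board is bound to a CPU
 * sitting on that board when possible, spreading the interrupt load;
 * otherwise (the default) every SBI is pointed at a single CPU: the
 * second logical CPU if it exists, else logical CPU 0.
 */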
void __init sun4d_distribute_irqs(void)
{
	struct device_node *dp;

#ifdef DISTRIBUTE_IRQS
	cpumask_t sbus_serving_map;

	sbus_serving_map = cpu_present_map;
	for_each_node_by_name(dp, "sbi") {
		int board = of_getintprop_default(dp, "board#", 0);

		if ((board * 2) == boot_cpu_id && cpu_isset(board * 2 + 1, cpu_present_map))
			sbus_tid[board] = (board * 2 + 1);
		else if (cpu_isset(board * 2, cpu_present_map))
			sbus_tid[board] = (board * 2);
		else if (cpu_isset(board * 2 + 1, cpu_present_map))
			sbus_tid[board] = (board * 2 + 1);
		else
			sbus_tid[board] = 0xff;
		if (sbus_tid[board] != 0xff)
			cpu_clear(sbus_tid[board], sbus_serving_map);
	}
	for_each_node_by_name(dp, "sbi") {
		int board = of_getintprop_default(dp, "board#", 0);

		if (sbus_tid[board] == 0xff) {
			int i = 31;

			if (cpus_empty(sbus_serving_map))
				sbus_serving_map = cpu_present_map;
			while (!cpu_isset(i, sbus_serving_map))
				i--;
			sbus_tid[board] = i;
			cpu_clear(i, sbus_serving_map);
		}
	}
	for_each_node_by_name(dp, "sbi") {
		int devid = of_getintprop_default(dp, "device-id", 0);
		int board = of_getintprop_default(dp, "board#", 0);

		printk("sbus%d IRQs directed to CPU%d\n", board, sbus_tid[board]);
		set_sbi_tid(devid, sbus_tid[board] << 3);
	}
#else
	int cpuid = cpu_logical_map(1);

	if (cpuid == -1)
		cpuid = cpu_logical_map(0);
	for_each_node_by_name(dp, "sbi") {
		int devid = of_getintprop_default(dp, "device-id", 0);
		int board = of_getintprop_default(dp, "board#", 0);

		sbus_tid[board] = cpuid;
		set_sbi_tid(devid, cpuid << 3);
	}
	printk("All sbus IRQs directed to CPU%d\n", cpuid);
#endif
}
#endif

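/*
 * The read of the level-10 limit register is what clears the pending
 * timer interrupt; the returned value is discarded.
 */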
static void sun4d_clear_clock_irq(void)
{
	sbus_readl(&sun4d_timers->l10_timer_limit);
}

static void sun4d_load_profile_irq(int cpu, unsigned int limit)
{
	bw_set_prof_limit(cpu, limit);
}

static void __init sun4d_load_profile_irqs(void)
{
	int cpu = 0, mid;

	while (!cpu_find_by_instance(cpu, NULL, &mid)) {
		sun4d_load_profile_irq(mid >> 3, 0);
		cpu++;
	}
}

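/*
 * On SMP the level 14 interrupt must run the kernel's smp4d_ticker
 * entry, but early boot copied the firmware's level 14 vector into the
 * boot CPU's trap table; put the saved kernel instructions back and
 * flush the caches.
 */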
static void __init sun4d_fixup_trap_table(void)
{
#ifdef CONFIG_SMP
	unsigned long flags;
	extern unsigned long lvl14_save[4];
	struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (14 - 1)];
	extern unsigned int real_irq_entry[], smp4d_ticker[];
	extern unsigned int patchme_maybe_smp_msg[];

	/* Adjust so that we jump directly to smp4d_ticker */
	lvl14_save[2] += smp4d_ticker - real_irq_entry;

	/* For SMP we use the level 14 ticker, however the bootup code
	 * has copied the firmware's level 14 vector into the boot cpu's
	 * trap table, we must fix this now or we get squashed.
	 */
	local_irq_save(flags);
	patchme_maybe_smp_msg[0] = 0x01000000;	/* NOP out the branch */
	trap_table->inst_one = lvl14_save[0];
	trap_table->inst_two = lvl14_save[1];
	trap_table->inst_three = lvl14_save[2];
	trap_table->inst_four = lvl14_save[3];
	local_flush_cache_all();
	local_irq_restore(flags);
#endif
}

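/*
 * Map the boot-bus timer registers through a cpu-unit node (any CPU's
 * mapping will do), program the level-10 limit for HZ ticks, install
 * counter_fn on TIMER_IRQ, and finish with the profile IRQ setup and
 * the SMP trap-table fixup.
 */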
static void __init sun4d_init_timers(irq_handler_t counter_fn)
{
	struct device_node *dp;
	struct resource res;
	const u32 *reg;
	int err;

	dp = of_find_node_by_name(NULL, "cpu-unit");
	if (!dp) {
		prom_printf("sun4d_init_timers: Unable to find cpu-unit\n");
		prom_halt();
	}

	/* Which cpu-unit we use is arbitrary, we can view the bootbus timer
	 * registers via any cpu's mapping.  The first 'reg' property is the
	 * bootbus.
	 */
	reg = of_get_property(dp, "reg", NULL);
	of_node_put(dp);
	if (!reg) {
		prom_printf("sun4d_init_timers: No reg property\n");
		prom_halt();
	}

	res.start = reg[1];
	res.end = reg[2] - 1;
	res.flags = reg[0] & 0xff;
	sun4d_timers = of_ioremap(&res, BW_TIMER_LIMIT,
				  sizeof(struct sun4d_timer_regs), "user timer");
	if (!sun4d_timers) {
		prom_printf("sun4d_init_timers: Can't map timer regs\n");
		prom_halt();
	}

	sbus_writel((((1000000/HZ) + 1) << 10), &sun4d_timers->l10_timer_limit);

	master_l10_counter = &sun4d_timers->l10_cur_count;

	err = request_irq(TIMER_IRQ, counter_fn,
			  (IRQF_DISABLED | SA_STATIC_ALLOC),
			  "timer", NULL);
	if (err) {
		prom_printf("sun4d_init_timers: request_irq() failed with %d\n",
			    err);
		prom_halt();
	}
	sun4d_load_profile_irqs();
	sun4d_fixup_trap_table();
}

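/*
 * Count the SBI nodes, allocate one sbus_action slot for each of the
 * 8 SBUS levels x 4 slots per SBI, point every SBI at the boot CPU
 * (CPU 0 on UP), and drain any interrupts the PROM left pending.
 */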
void __init sun4d_init_sbi_irq(void)
{
	struct device_node *dp;
	int target_cpu = 0;

#ifdef CONFIG_SMP
	target_cpu = boot_cpu_id;
#endif

	nsbi = 0;
	for_each_node_by_name(dp, "sbi")
		nsbi++;
	sbus_actions = kzalloc(nsbi * 8 * 4 * sizeof(struct sbus_action), GFP_ATOMIC);
	if (!sbus_actions) {
		prom_printf("SUN4D: Cannot allocate sbus_actions, halting.\n");
		prom_halt();
	}
	for_each_node_by_name(dp, "sbi") {
		int devid = of_getintprop_default(dp, "device-id", 0);
		int board = of_getintprop_default(dp, "board#", 0);
		unsigned int mask;

		set_sbi_tid(devid, target_cpu << 3);
		sbus_tid[board] = target_cpu;

		/* Get rid of pending irqs from PROM */
		mask = acquire_sbi(devid, 0xffffffff);
		if (mask) {
			printk("Clearing pending IRQs %08x on SBI %d\n", mask, board);
			release_sbi(devid, mask);
		}
	}
}

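/*
 * Install the sun4d interrupt entry points via the sparc32 BTFIXUP
 * run-time patching mechanism.  Interrupts stay disabled here; they
 * cannot be enabled until the OBP ticker has been turned off.
 */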
void __init sun4d_init_IRQ(void)
{
	local_irq_disable();

	BTFIXUPSET_CALL(enable_irq, sun4d_enable_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(disable_irq, sun4d_disable_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(clear_clock_irq, sun4d_clear_clock_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(load_profile_irq, sun4d_load_profile_irq, BTFIXUPCALL_NORM);
	sparc_init_timers = sun4d_init_timers;
#ifdef CONFIG_SMP
	BTFIXUPSET_CALL(set_cpu_int, sun4d_set_cpu_int, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(clear_cpu_int, sun4d_clear_ipi, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(set_irq_udt, sun4d_set_udt, BTFIXUPCALL_NOP);
#endif
	/* Cannot enable interrupts until OBP ticker is disabled. */
}