// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Code to handle x86 style IRQs plus some generic interrupt stuff.
 *
 * Copyright (C) 1992 Linus Torvalds
 * Copyright (C) 1994, 1995, 1996, 1997, 1998 Ralf Baechle
 * Copyright (C) 1999 SuSE GmbH (Philipp Rumpf, prumpf@tux.org)
 * Copyright (C) 1999-2000 Grant Grundler
 * Copyright (c) 2005 Matthew Wilcox
 */
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/types.h>
#include <asm/io.h>

#include <asm/smp.h>
#include <asm/ldcw.h>

#undef PARISC_IRQ_CR16_COUNTS

extern irqreturn_t timer_interrupt(int, void *);
extern irqreturn_t ipi_interrupt(int, void *);

#define EIEM_MASK(irq)	(1UL<<(CPU_IRQ_MAX - irq))
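/* e.g. on a 64-bit kernel EIEM_MASK(CPU_IRQ_MAX) is 1UL (the LSB), and
 * EIEM_MASK(TIMER_IRQ) is the MSB, i.e. EIRR/EIEM bit 0 -- see the
 * big-endian numbering note below. */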

/* Bits in EIEM correlate with cpu_irq_action[].
** Numbered *Big Endian*! (ie bit 0 is MSB)
*/
static volatile unsigned long cpu_eiem = 0;

/*
** local ACK bitmap ... habitually set to 1, but reset to zero
** between ->ack() and ->end() of the interrupt to prevent
** re-interruption of a processing interrupt.
*/
static DEFINE_PER_CPU(unsigned long, local_ack_eiem) = ~0UL;

static void cpu_mask_irq(struct irq_data *d)
{
	unsigned long eirr_bit = EIEM_MASK(d->irq);

	cpu_eiem &= ~eirr_bit;
	/* Do nothing on the other CPUs.  If they get this interrupt,
	 * the & cpu_eiem in do_cpu_irq_mask() ensures they won't
	 * handle it, and the set_eiem() at the bottom will ensure it
	 * then gets disabled */
}

static void __cpu_unmask_irq(unsigned int irq)
{
	unsigned long eirr_bit = EIEM_MASK(irq);

	cpu_eiem |= eirr_bit;

	/* This is just a simple NOP IPI.  But what it does is cause
	 * all the other CPUs to do a set_eiem(cpu_eiem) at the end
	 * of the interrupt handler */
	smp_send_all_nop();
}

static void cpu_unmask_irq(struct irq_data *d)
{
	__cpu_unmask_irq(d->irq);
}

void cpu_ack_irq(struct irq_data *d)
{
	unsigned long mask = EIEM_MASK(d->irq);
	int cpu = smp_processor_id();

	/* Clear in EIEM so we can no longer process */
	per_cpu(local_ack_eiem, cpu) &= ~mask;

	/* disable the interrupt */
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));

	/* and now ack it: writing the mask back to the EIRR (CR 23)
	 * clears the pending bit */
	mtctl(mask, 23);
}

void cpu_eoi_irq(struct irq_data *d)
{
	unsigned long mask = EIEM_MASK(d->irq);
	int cpu = smp_processor_id();

	/* set it in the eiems---it's no longer in process */
	per_cpu(local_ack_eiem, cpu) |= mask;

	/* enable the interrupt */
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
}
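
/* Rough life cycle of one interrupt under the handle_percpu_irq flow
 * wired up in claim_cpu_irqs()/cpu_claim_irq() below:
 *
 *	do_cpu_irq_mask() -> cpu_ack_irq()	bit leaves local_ack_eiem
 *			  -> handler runs	re-triggers stay pending in EIRR
 *			  -> cpu_eoi_irq()	bit restored, EIEM rewritten
 */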

#ifdef CONFIG_SMP
int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest)
{
	int cpu_dest;

	/* timer and ipi have to always be received on all CPUs */
	if (irqd_is_per_cpu(d))
		return -EINVAL;

	/* whatever mask they set, we just allow one CPU */
	cpu_dest = cpumask_next_and(d->irq & (num_online_cpus()-1),
					dest, cpu_online_mask);
	if (cpu_dest >= nr_cpu_ids)
		cpu_dest = cpumask_first_and(dest, cpu_online_mask);

	return cpu_dest;
}

static int cpu_set_affinity_irq(struct irq_data *d, const struct cpumask *dest,
				bool force)
{
	int cpu_dest;

	cpu_dest = cpu_check_affinity(d, dest);
	if (cpu_dest < 0)
		return -1;

	cpumask_copy(irq_data_get_affinity_mask(d), dest);

	return 0;
}
#endif

static struct irq_chip cpu_interrupt_type = {
	.name			= "CPU",
	.irq_mask		= cpu_mask_irq,
	.irq_unmask		= cpu_unmask_irq,
	.irq_ack		= cpu_ack_irq,
	.irq_eoi		= cpu_eoi_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity	= cpu_set_affinity_irq,
#endif
	/* XXX: Needs to be written.  We managed without it so far, but
	 * we really ought to write it.
	 */
	.irq_retrigger		= NULL,
};

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
#define irq_stats(x)	(&per_cpu(irq_stat, x))

/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	seq_printf(p, "%*s: ", prec, "STK");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage);
	seq_puts(p, "  Kernel stack usage\n");
# ifdef CONFIG_IRQSTACKS
	seq_printf(p, "%*s: ", prec, "IST");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_stack_usage);
	seq_puts(p, "  Interrupt stack usage\n");
# endif
#endif
#ifdef CONFIG_SMP
	if (num_online_cpus() > 1) {
		seq_printf(p, "%*s: ", prec, "RES");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
		seq_puts(p, "  Rescheduling interrupts\n");
		seq_printf(p, "%*s: ", prec, "CAL");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
		seq_puts(p, "  Function call interrupts\n");
	}
#endif
	seq_printf(p, "%*s: ", prec, "UAH");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_unaligned_count);
	seq_puts(p, "  Unaligned access handler traps\n");
	seq_printf(p, "%*s: ", prec, "FPA");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_fpassist_count);
	seq_puts(p, "  Floating point assist traps\n");
	seq_printf(p, "%*s: ", prec, "TLB");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
	seq_puts(p, "  TLB shootdowns\n");
	return 0;
}

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	unsigned long flags;

	if (i == 0) {
		seq_puts(p, "    ");
		for_each_online_cpu(j)
			seq_printf(p, "       CPU%d", j);

#ifdef PARISC_IRQ_CR16_COUNTS
		seq_printf(p, " [min/avg/max] (CPU cycle counts)");
#endif
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		struct irq_desc *desc = irq_to_desc(i);
		struct irqaction *action;

		raw_spin_lock_irqsave(&desc->lock, flags);
		action = desc->action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#else
		seq_printf(p, "%10u ", kstat_irqs(i));
#endif

		seq_printf(p, " %14s", irq_desc_get_chip(desc)->name);
#ifndef PARISC_IRQ_CR16_COUNTS
		seq_printf(p, "  %s", action->name);

		while ((action = action->next))
			seq_printf(p, ", %s", action->name);
#else
		for ( ; action; action = action->next) {
			unsigned int k, avg, min, max;

			min = max = action->cr16_hist[0];

			for (avg = k = 0; k < PARISC_CR16_HIST_SIZE; k++) {
				int hist = action->cr16_hist[k];

				if (hist) {
					avg += hist;
				} else
					break;

				if (hist > max) max = hist;
				if (hist < min) min = hist;
			}

			/* avoid a divide-by-zero if the first sample is empty */
			if (k)
				avg /= k;
			seq_printf(p, " %s[%d/%d/%d]", action->name,
					min, avg, max);
		}
#endif

		seq_putc(p, '\n');
 skip:
		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	if (i == NR_IRQS)
		arch_show_interrupts(p, 3);

	return 0;
}



/*
** The following form a "set": Virtual IRQ, Transaction Address, Trans Data.
** Respectively, these map to IRQ region+EIRR, Processor HPA, EIRR bit.
**
** To use txn_XXX() interfaces, get a Virtual IRQ first.
** Then use that to get the Transaction address and data.
*/
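
/*
** A typical consumer (the iosapic driver, say) does something along
** these lines -- the chip and cookie names are illustrative only:
**
**	int irq = txn_alloc_irq(8);			// virtual IRQ
**	unsigned long addr = txn_alloc_addr(irq);	// where to write
**	unsigned int data = txn_alloc_data(irq);	// what to write
**	cpu_claim_irq(irq, &my_irq_chip, my_cookie);	// hook up handler
**
** The device then raises the interrupt by storing 'data' to 'addr'.
*/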

int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data)
{
	if (irq_has_action(irq))
		return -EBUSY;
	if (irq_get_chip(irq) != &cpu_interrupt_type)
		return -EBUSY;

	/* for iosapic interrupts */
	if (type) {
		irq_set_chip_and_handler(irq, type, handle_percpu_irq);
		irq_set_chip_data(irq, data);
		__cpu_unmask_irq(irq);
	}
	return 0;
}

int txn_claim_irq(int irq)
{
	return cpu_claim_irq(irq, NULL, NULL) ? -1 : irq;
}

/*
 * The bits_wide parameter accommodates the limitations of the HW/SW which
 * use these bits:
 * Legacy PA I/O (GSC/NIO):  5 bits (architected EIM register)
 * V-class (EPIC):           6 bits
 * N/L/A-class (iosapic):    8 bits
 * PCI 2.2 MSI:             16 bits
 * Some PCI devices:        32 bits (Symbios SCSI/ATM/HyperFabric)
 *
 * On the service provider side:
 * o PA 1.1 (and PA2.0 narrow mode)  5-bits (width of EIR register)
 * o PA 2.0 wide mode                6-bits (per processor)
 * o IA64                            8-bits (0-256 total)
 *
 * So a Legacy PA I/O device on a PA 2.0 box can't use all the bits supported
 * by the processor...and the N/L-class I/O subsystem supports more bits than
 * PA2.0 has.  The first case is the problem.
 */
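/* e.g. a 5-bit GSC device can only be handed IRQs CPU_IRQ_BASE+1 up to
 * CPU_IRQ_BASE+31 by the allocator below */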
int txn_alloc_irq(unsigned int bits_wide)
{
	int irq;

	/* never return irq 0 cause that's the interval timer */
	for (irq = CPU_IRQ_BASE + 1; irq <= CPU_IRQ_MAX; irq++) {
		if (cpu_claim_irq(irq, NULL, NULL) < 0)
			continue;
		if ((irq - CPU_IRQ_BASE) >= (1 << bits_wide))
			continue;
		return irq;
	}

	/* unlikely, but be prepared */
	return -1;
}


unsigned long txn_affinity_addr(unsigned int irq, int cpu)
{
#ifdef CONFIG_SMP
	struct irq_data *d = irq_get_irq_data(irq);
	cpumask_copy(irq_data_get_affinity_mask(d), cpumask_of(cpu));
#endif

	return per_cpu(cpu_data, cpu).txn_addr;
}


unsigned long txn_alloc_addr(unsigned int virt_irq)
{
	static int next_cpu = -1;

	next_cpu++; /* assign to "next" CPU we want this bugger on */

	/* validate entry */
	while ((next_cpu < nr_cpu_ids) &&
		(!per_cpu(cpu_data, next_cpu).txn_addr ||
		 !cpu_online(next_cpu)))
		next_cpu++;

	if (next_cpu >= nr_cpu_ids)
		next_cpu = 0;	/* nothing else, assign monarch */

	return txn_affinity_addr(virt_irq, next_cpu);
}


unsigned int txn_alloc_data(unsigned int virt_irq)
{
	return virt_irq - CPU_IRQ_BASE;
}

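/* Map the most significant set bit of an EIRR value back to its virtual
 * IRQ: EIRR bit 0 (the MSB) is TIMER_IRQ, bit 1 is IPI_IRQ on SMP, and
 * so on.  This is the inverse of the EIEM_MASK() mapping above. */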
static inline int eirr_to_irq(unsigned long eirr)
{
	int bit = fls_long(eirr);
	return (BITS_PER_LONG - bit) + TIMER_IRQ;
}

#ifdef CONFIG_IRQSTACKS
/*
 * IRQ STACK - used for irq handler
 */
#ifdef CONFIG_64BIT
#define IRQ_STACK_SIZE      (4096 << 4) /* 64k irq stack size */
#else
#define IRQ_STACK_SIZE      (4096 << 3) /* 32k irq stack size */
#endif

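/* The stack and its "in use" flag share one per-cpu chunk: the first
 * words of the stack double as an LDCW lock, and slock[4] guarantees a
 * 16-byte-aligned word for __ldcw_align() (the pre-PA2.0 LDCW rule). */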
union irq_stack_union {
	unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
	volatile unsigned int slock[4];
	volatile unsigned int lock[1];
};

DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
	.slock = { 1, 1, 1, 1 },
};
#endif


int sysctl_panic_on_stackoverflow = 1;

static inline void stack_overflow_check(struct pt_regs *regs)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
#define STACK_MARGIN	(256*6)

	/* Our stack starts directly behind the thread_info struct. */
	unsigned long stack_start = (unsigned long) current_thread_info();
	unsigned long sp = regs->gr[30];
	unsigned long stack_usage;
	unsigned int *last_usage;
	int cpu = smp_processor_id();

	/* if sr7 != 0, we interrupted a userspace process which we do not want
	 * to check for stack overflow.  We will only check the kernel stack. */
	if (regs->sr[7])
		return;

	/* exit if already in panic */
	if (sysctl_panic_on_stackoverflow < 0)
		return;

	/* calculate kernel stack usage */
	stack_usage = sp - stack_start;
#ifdef CONFIG_IRQSTACKS
	if (likely(stack_usage <= THREAD_SIZE))
		goto check_kernel_stack; /* found kernel stack */

	/* check irq stack usage */
	stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack;
	stack_usage = sp - stack_start;

	last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu);
	if (unlikely(stack_usage > *last_usage))
		*last_usage = stack_usage;

	if (likely(stack_usage < (IRQ_STACK_SIZE - STACK_MARGIN)))
		return;

	pr_emerg("stackcheck: %s will most likely overflow irq stack "
		 "(sp:%lx, stk bottom-top:%lx-%lx)\n",
		 current->comm, sp, stack_start,
		 stack_start + IRQ_STACK_SIZE);
	goto panic_check;

check_kernel_stack:
#endif

	/* check kernel stack usage */
	last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu);

	if (unlikely(stack_usage > *last_usage))
		*last_usage = stack_usage;

	if (likely(stack_usage < (THREAD_SIZE - STACK_MARGIN)))
		return;

	pr_emerg("stackcheck: %s will most likely overflow kernel stack "
		 "(sp:%lx, stk bottom-top:%lx-%lx)\n",
		 current->comm, sp, stack_start, stack_start + THREAD_SIZE);

#ifdef CONFIG_IRQSTACKS
panic_check:
#endif
	if (sysctl_panic_on_stackoverflow) {
		sysctl_panic_on_stackoverflow = -1; /* disable further checks */
		panic("low stack detected by irq handler - check messages\n");
	}
#endif
}

#ifdef CONFIG_IRQSTACKS
/* in entry.S: */
void call_on_stack(unsigned long p1, void *func, unsigned long new_stack);
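/* (contract, as used below: switch sp onto new_stack, call func(p1),
 * then restore the original stack pointer) */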

static void execute_on_irq_stack(void *func, unsigned long param1)
{
	union irq_stack_union *union_ptr;
	unsigned long irq_stack;
	volatile unsigned int *irq_stack_in_use;

	union_ptr = &per_cpu(irq_stack_union, smp_processor_id());
	irq_stack = (unsigned long) &union_ptr->stack;
	irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.slock),
			 64); /* align for stack frame usage */

	/* We may be called recursively.  If we are already using the irq
	 * stack, just continue to use it.  Use spinlocks to serialize
	 * the irq stack usage.
	 */
	irq_stack_in_use = (volatile unsigned int *)__ldcw_align(union_ptr);
	if (!__ldcw(irq_stack_in_use)) {
		void (*direct_call)(unsigned long p1) = func;

		/* We are using the IRQ stack already.
		 * Do direct call on current stack. */
		direct_call(param1);
		return;
	}

	/* This is where we switch to the IRQ stack. */
	call_on_stack(param1, func, irq_stack);

	/* free up irq stack usage. */
	*irq_stack_in_use = 1;
}

void do_softirq_own_stack(void)
{
	execute_on_irq_stack(__do_softirq, 0);
}
#endif /* CONFIG_IRQSTACKS */

/* ONLY called from entry.S:intr_extint() */
void do_cpu_irq_mask(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	unsigned long eirr_val;
	int irq, cpu = smp_processor_id();
	struct irq_data *irq_data;
#ifdef CONFIG_SMP
	cpumask_t dest;
#endif

	old_regs = set_irq_regs(regs);
	local_irq_disable();
	irq_enter();

	eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu);
	if (!eirr_val)
		goto set_out;
	irq = eirr_to_irq(eirr_val);

	irq_data = irq_get_irq_data(irq);

	/* Filter out spurious interrupts, mostly from serial port at bootup */
	if (unlikely(!irq_desc_has_action(irq_data_to_desc(irq_data))))
		goto set_out;

#ifdef CONFIG_SMP
	cpumask_copy(&dest, irq_data_get_affinity_mask(irq_data));
	if (irqd_is_per_cpu(irq_data) &&
	    !cpumask_test_cpu(smp_processor_id(), &dest)) {
		int cpu = cpumask_first(&dest);

		printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
		       irq, smp_processor_id(), cpu);
		gsc_writel(irq + CPU_IRQ_BASE,
			   per_cpu(cpu_data, cpu).hpa);
		goto set_out;
	}
#endif
	stack_overflow_check(regs);

#ifdef CONFIG_IRQSTACKS
	execute_on_irq_stack(&generic_handle_irq, irq);
#else
	generic_handle_irq(irq);
#endif /* CONFIG_IRQSTACKS */

 out:
	irq_exit();
	set_irq_regs(old_regs);
	return;

 set_out:
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
	goto out;
}

static void claim_cpu_irqs(void)
{
	unsigned long flags = IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL;
	int i;

	for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) {
		irq_set_chip_and_handler(i, &cpu_interrupt_type,
					 handle_percpu_irq);
	}

	irq_set_handler(TIMER_IRQ, handle_percpu_irq);
	if (request_irq(TIMER_IRQ, timer_interrupt, flags, "timer", NULL))
		pr_err("Failed to register timer interrupt\n");
#ifdef CONFIG_SMP
	irq_set_handler(IPI_IRQ, handle_percpu_irq);
	if (request_irq(IPI_IRQ, ipi_interrupt, IRQF_PERCPU, "IPI", NULL))
		pr_err("Failed to register IPI interrupt\n");
#endif
}

void __init init_IRQ(void)
{
	local_irq_disable();	/* PARANOID - should already be disabled */
	mtctl(~0UL, 23);	/* EIRR : clear all pending external intr */
#ifdef CONFIG_SMP
	if (!cpu_eiem) {
		claim_cpu_irqs();
		cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ);
	}
#else
	claim_cpu_irqs();
	cpu_eiem = EIEM_MASK(TIMER_IRQ);
#endif
	set_eiem(cpu_eiem);	/* EIEM : enable all external intr */
}