1 /*
2 * Cell Internal Interrupt Controller
3 *
4 * Copyright (C) 2006 Benjamin Herrenschmidt (benh@kernel.crashing.org)
5 * IBM, Corp.
6 *
7 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
8 *
9 * Author: Arnd Bergmann <arndb@de.ibm.com>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 * TODO:
26 * - Fix various assumptions related to HW CPU numbers vs. linux CPU numbers
27 * vs node numbers in the setup code
28 * - Implement proper handling of maxcpus=1/2 (that is, routing of irqs from
29 * a non-active node to the active node)
30 */
31
32 #include <linux/interrupt.h>
33 #include <linux/irq.h>
34 #include <linux/module.h>
35 #include <linux/percpu.h>
36 #include <linux/types.h>
37 #include <linux/ioport.h>
38 #include <linux/kernel_stat.h>
39
40 #include <asm/io.h>
41 #include <asm/pgtable.h>
42 #include <asm/prom.h>
43 #include <asm/ptrace.h>
44 #include <asm/machdep.h>
45 #include <asm/cell-regs.h>
46
47 #include "interrupt.h"
48
/* Per-hardware-thread state for one Cell Internal Interrupt Controller */
struct iic {
	struct cbe_iic_thread_regs __iomem *regs;	/* mapped thread register block */
	u8 target_id;		/* HW destination id for routing irqs to this thread */
	u8 eoi_stack[16];	/* priorities saved in iic_get_irq(), restored at EOI */
	int eoi_ptr;		/* top-of-stack index into eoi_stack */
	struct device_node *node;	/* OF node of the owning controller */
};

static DEFINE_PER_CPU(struct iic, iic);
#define IIC_NODE_COUNT	2
static struct irq_host *iic_host;
60
61 /* Convert between "pending" bits and hw irq number */
iic_pending_to_hwnum(struct cbe_iic_pending_bits bits)62 static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits)
63 {
64 unsigned char unit = bits.source & 0xf;
65 unsigned char node = bits.source >> 4;
66 unsigned char class = bits.class & 3;
67
68 /* Decode IPIs */
69 if (bits.flags & CBE_IIC_IRQ_IPI)
70 return IIC_IRQ_TYPE_IPI | (bits.prio >> 4);
71 else
72 return (node << IIC_IRQ_NODE_SHIFT) | (class << 4) | unit;
73 }
74
/* No-op: the IIC provides no per-source mask here; gating is done
 * via the priority register instead (see iic_eoi / iic_setup_cpu). */
static void iic_mask(unsigned int irq)
{
}
78
/* No-op counterpart of iic_mask(). */
static void iic_unmask(unsigned int irq)
{
}
82
/* End-of-interrupt: pop the priority that iic_get_irq() pushed when
 * this interrupt was fetched and write it back, re-enabling delivery
 * of equal/lower-priority interrupts on this thread. */
static void iic_eoi(unsigned int irq)
{
	struct iic *iic = &__get_cpu_var(iic);
	out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]);
	/* underflow would mean an EOI without a matching get_irq */
	BUG_ON(iic->eoi_ptr < 0);
}
89
/* irq_chip for interrupts handled directly by the per-thread IIC */
static struct irq_chip iic_chip = {
	.typename = " CELL-IIC ",
	.mask = iic_mask,
	.unmask = iic_unmask,
	.eoi = iic_eoi,
};
96
97
/* No-op EOI for IO exception sources: acking is performed directly on
 * the node IIC status register inside iic_ioexc_cascade(). */
static void iic_ioexc_eoi(unsigned int irq)
{
}
101
/* Chained handler for the IO exception cascade: reads the node IIC
 * status register and dispatches every set bit to its mapped linux
 * irq, acking edge sources before and level sources after handling. */
static void iic_ioexc_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct cbe_iic_regs __iomem *node_iic = (void __iomem *)desc->handler_data;
	/* NOTE(review): this masks the *virtual* irq number to build the
	 * hw irq base, which is only correct if virq and hwirq node bits
	 * coincide — verify against the irq_host mapping. */
	unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC;
	unsigned long bits, ack;
	int cascade;

	/* Loop until the status register reads back empty, so sources
	 * raised while we were dispatching are not lost. */
	for (;;) {
		bits = in_be64(&node_iic->iic_is);
		if (bits == 0)
			break;
		/* pre-ack edge interrupts */
		ack = bits & IIC_ISR_EDGE_MASK;
		if (ack)
			out_be64(&node_iic->iic_is, ack);
		/* handle them, bit 0 of "bits" being the MSB */
		for (cascade = 63; cascade >= 0; cascade--)
			if (bits & (0x8000000000000000UL >> cascade)) {
				unsigned int cirq =
					irq_linear_revmap(iic_host,
							  base | cascade);
				if (cirq != NO_IRQ)
					generic_handle_irq(cirq);
			}
		/* post-ack level interrupts */
		ack = bits & ~IIC_ISR_EDGE_MASK;
		if (ack)
			out_be64(&node_iic->iic_is, ack);
	}
	/* EOI the cascade irq itself on the parent chip */
	desc->chip->eoi(irq);
}
133
134
/* irq_chip for IO exception sources cascaded through the node IIC */
static struct irq_chip iic_ioexc_chip = {
	.typename = " CELL-IOEX",
	.mask = iic_mask,
	.unmask = iic_unmask,
	.eoi = iic_ioexc_eoi,
};
141
/* Get an IRQ number from the pending state register of the IIC */
static unsigned int iic_get_irq(void)
{
	struct cbe_iic_pending_bits pending;
	struct iic *iic;
	unsigned int virq;

	iic = &__get_cpu_var(iic);
	/* Read the whole 64-bit pending/destructive register in one
	 * access so all fields are mutually consistent, then view the
	 * raw value through the bitfield struct.  (Type-pun is safe in
	 * the kernel, which builds with -fno-strict-aliasing.) */
	*(unsigned long *) &pending =
		in_be64((u64 __iomem *) &iic->regs->pending_destr);
	if (!(pending.flags & CBE_IIC_IRQ_VALID))
		return NO_IRQ;
	virq = irq_linear_revmap(iic_host, iic_pending_to_hwnum(pending));
	if (virq == NO_IRQ)
		return NO_IRQ;
	/* Push the interrupted priority; iic_eoi() pops and restores it */
	iic->eoi_stack[++iic->eoi_ptr] = pending.prio;
	BUG_ON(iic->eoi_ptr > 15);
	return virq;
}
161
iic_setup_cpu(void)162 void iic_setup_cpu(void)
163 {
164 out_be64(&__get_cpu_var(iic).regs->prio, 0xff);
165 }
166
iic_get_target_id(int cpu)167 u8 iic_get_target_id(int cpu)
168 {
169 return per_cpu(iic, cpu).target_id;
170 }
171
172 EXPORT_SYMBOL_GPL(iic_get_target_id);
173
174 #ifdef CONFIG_SMP
175
176 /* Use the highest interrupt priorities for IPI */
iic_ipi_to_irq(int ipi)177 static inline int iic_ipi_to_irq(int ipi)
178 {
179 return IIC_IRQ_TYPE_IPI + 0xf - ipi;
180 }
181
iic_cause_IPI(int cpu,int mesg)182 void iic_cause_IPI(int cpu, int mesg)
183 {
184 out_be64(&per_cpu(iic, cpu).regs->generate, (0xf - mesg) << 4);
185 }
186
/* All nodes share the single linear irq_host; @node is ignored. */
struct irq_host *iic_get_irq_host(int node)
{
	return iic_host;
}
EXPORT_SYMBOL_GPL(iic_get_irq_host);
192
iic_ipi_action(int irq,void * dev_id)193 static irqreturn_t iic_ipi_action(int irq, void *dev_id)
194 {
195 int ipi = (int)(long)dev_id;
196
197 smp_message_recv(ipi);
198
199 return IRQ_HANDLED;
200 }
iic_request_ipi(int ipi,const char * name)201 static void iic_request_ipi(int ipi, const char *name)
202 {
203 int virq;
204
205 virq = irq_create_mapping(iic_host, iic_ipi_to_irq(ipi));
206 if (virq == NO_IRQ) {
207 printk(KERN_ERR
208 "iic: failed to map IPI %s\n", name);
209 return;
210 }
211 if (request_irq(virq, iic_ipi_action, IRQF_DISABLED, name,
212 (void *)(long)ipi))
213 printk(KERN_ERR
214 "iic: failed to request IPI %s\n", name);
215 }
216
/* Register handlers for every SMP IPI message type we use. */
void iic_request_IPIs(void)
{
	iic_request_ipi(PPC_MSG_CALL_FUNCTION, "IPI-call");
	iic_request_ipi(PPC_MSG_RESCHEDULE, "IPI-resched");
	iic_request_ipi(PPC_MSG_CALL_FUNC_SINGLE, "IPI-call-single");
#ifdef CONFIG_DEBUGGER
	iic_request_ipi(PPC_MSG_DEBUGGER_BREAK, "IPI-debug");
#endif /* CONFIG_DEBUGGER */
}
226
227 #endif /* CONFIG_SMP */
228
229
/* irq_host match: claim only nodes compatible with the CBEA IIC. */
static int iic_host_match(struct irq_host *h, struct device_node *node)
{
	const char *compat = "IBM,CBEA-Internal-Interrupt-Controller";

	return of_device_is_compatible(node, compat);
}
235
236 extern int noirqdebug;
237
/* Flow handler for IIC interrupts.  The IIC cannot mask individual
 * sources, so this borrows the edge-handler trick: a re-arrival while
 * the action runs is recorded as IRQ_PENDING and replayed by the loop
 * below, and the EOI is issued once at the end. */
static void handle_iic_irq(unsigned int irq, struct irq_desc *desc)
{
	const unsigned int cpu = smp_processor_id();

	spin_lock(&desc->lock);

	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);

	/*
	 * If we're currently running this IRQ, or its disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) ||
		    !desc->action)) {
		desc->status |= IRQ_PENDING;
		goto out_eoi;
	}

	kstat_cpu(cpu).irqs[irq]++;

	/* Mark the IRQ currently in progress.*/
	desc->status |= IRQ_INPROGRESS;

	do {
		struct irqaction *action = desc->action;
		irqreturn_t action_ret;

		/* action may have been removed while the lock was dropped */
		if (unlikely(!action))
			goto out_eoi;

		desc->status &= ~IRQ_PENDING;
		/* drop desc->lock while running handlers; a concurrent
		 * arrival sets IRQ_PENDING and is replayed by this loop */
		spin_unlock(&desc->lock);
		action_ret = handle_IRQ_event(irq, action);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);
		spin_lock(&desc->lock);

	} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);

	desc->status &= ~IRQ_INPROGRESS;
out_eoi:
	desc->chip->eoi(irq);
	spin_unlock(&desc->lock);
}
283
/* irq_host map: pick chip and flow handler from the hw irq type.
 * IPIs are per-cpu; IO exceptions use the cascade's no-op-EOI chip;
 * everything else is a normal IIC interrupt. */
static int iic_host_map(struct irq_host *h, unsigned int virq,
			irq_hw_number_t hw)
{
	unsigned int type = hw & IIC_IRQ_TYPE_MASK;

	if (type == IIC_IRQ_TYPE_IPI)
		set_irq_chip_and_handler(virq, &iic_chip, handle_percpu_irq);
	else if (type == IIC_IRQ_TYPE_IOEXC)
		set_irq_chip_and_handler(virq, &iic_ioexc_chip,
					 handle_iic_irq);
	else
		set_irq_chip_and_handler(virq, &iic_chip, handle_iic_irq);

	return 0;
}
300
/* irq_host xlate: decode a one-cell OF interrupt specifier laid out as
 * [node:8 | ext:8 | class:8 | unit:8] into our hw irq number.
 * Returns -ENODEV for foreign controllers or malformed specifiers,
 * -EINVAL for an out-of-range node. */
static int iic_host_xlate(struct irq_host *h, struct device_node *ct,
			   u32 *intspec, unsigned int intsize,
			   irq_hw_number_t *out_hwirq, unsigned int *out_flags)

{
	unsigned int node, ext, unit, class;
	const u32 *val;

	if (!of_device_is_compatible(ct,
				     "IBM,CBEA-Internal-Interrupt-Controller"))
		return -ENODEV;
	if (intsize != 1)
		return -ENODEV;
	val = of_get_property(ct, "#interrupt-cells", NULL);
	if (val == NULL || *val != 1)
		return -ENODEV;

	node = intspec[0] >> 24;
	ext = (intspec[0] >> 16) & 0xff;
	class = (intspec[0] >> 8) & 0xff;
	unit = intspec[0] & 0xff;

	/* Check if node is in supported range */
	if (node > 1)
		return -EINVAL;

	/* Build up interrupt number, special case for IO exceptions */
	*out_hwirq = (node << IIC_IRQ_NODE_SHIFT);
	if (unit == IIC_UNIT_IIC && class == 1)
		*out_hwirq |= IIC_IRQ_TYPE_IOEXC | ext;
	else
		*out_hwirq |= IIC_IRQ_TYPE_NORMAL |
			(class << IIC_IRQ_CLASS_SHIFT) | unit;

	/* Dummy flags, ignored by iic code */
	*out_flags = IRQ_TYPE_EDGE_RISING;

	return 0;
}
340
/* irq_host callbacks for the IIC linear mapping */
static struct irq_host_ops iic_host_ops = {
	.match = iic_host_match,
	.map = iic_host_map,
	.xlate = iic_host_xlate,
};
346
/* Initialize the per-cpu IIC state for one hardware thread: map its
 * register block, derive its routing target id, and lock out all
 * interrupts (priority 0) until iic_setup_cpu() opens the gate. */
static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr,
				struct device_node *node)
{
	/* XXX FIXME: should locate the linux CPU number from the HW cpu
	 * number properly. We are lucky for now
	 */
	struct iic *iic = &per_cpu(iic, hw_cpu);

	iic->regs = ioremap(addr, sizeof(struct cbe_iic_thread_regs));
	BUG_ON(iic->regs == NULL);

	/* target id: node bits from hw_cpu bit 1, unit 0xf/0xe by thread */
	iic->target_id = ((hw_cpu & 2) << 3) | ((hw_cpu & 1) ? 0xf : 0xe);
	iic->eoi_stack[0] = 0xff;	/* bottom of stack: fully open priority */
	iic->node = of_node_get(node);
	out_be64(&iic->regs->prio, 0);

	printk(KERN_INFO "IIC for CPU %d target id 0x%x : %s\n",
	       hw_cpu, iic->target_id, node->full_name);
}
366
/* Scan the device tree for CBEA IICs, initialize the two hardware
 * threads of each node and wire up the IO exception cascade.
 * Returns 0 if at least one controller was found, -ENODEV otherwise. */
static int __init setup_iic(void)
{
	struct device_node *dn;
	struct resource r0, r1;
	unsigned int node, cascade, found = 0;
	struct cbe_iic_regs __iomem *node_iic;
	const u32 *np;

	for (dn = NULL;
	     (dn = of_find_node_by_name(dn,"interrupt-controller")) != NULL;) {
		if (!of_device_is_compatible(dn,
				     "IBM,CBEA-Internal-Interrupt-Controller"))
			continue;
		/* one register range + one server id per hw thread */
		np = of_get_property(dn, "ibm,interrupt-server-ranges", NULL);
		if (np == NULL) {
			printk(KERN_WARNING "IIC: CPU association not found\n");
			of_node_put(dn);
			return -ENODEV;
		}
		if (of_address_to_resource(dn, 0, &r0) ||
		    of_address_to_resource(dn, 1, &r1)) {
			printk(KERN_WARNING "IIC: Can't resolve addresses\n");
			of_node_put(dn);
			return -ENODEV;
		}
		found++;
		init_one_iic(np[0], r0.start, dn);
		init_one_iic(np[1], r1.start, dn);

		/* Setup cascade for IO exceptions. XXX cleanup tricks to get
		 * node vs CPU etc...
		 * Note that we configure the IIC_IRR here with a hard coded
		 * priority of 1. We might want to improve that later.
		 */
		node = np[0] >> 1;
		node_iic = cbe_get_cpu_iic_regs(np[0]);
		/* hw irq of the cascade: node | class 1 | IIC unit */
		cascade = node << IIC_IRQ_NODE_SHIFT;
		cascade |= 1 << IIC_IRQ_CLASS_SHIFT;
		cascade |= IIC_UNIT_IIC;
		cascade = irq_create_mapping(iic_host, cascade);
		if (cascade == NO_IRQ)
			continue;
		/*
		 * irq_data is a generic pointer that gets passed back
		 * to us later, so the forced cast is fine.
		 */
		set_irq_data(cascade, (void __force *)node_iic);
		set_irq_chained_handler(cascade , iic_ioexc_cascade);
		out_be64(&node_iic->iic_ir,
			 (1 << 12)		/* priority */ |
			 (node << 4)		/* dest node */ |
			 IIC_UNIT_THREAD_0	/* route them to thread 0 */);
		/* Flush pending (make sure it triggers if there is
		 * anything pending
		 */
		out_be64(&node_iic->iic_is, 0xfffffffffffffffful);
	}

	if (found)
		return 0;
	else
		return -ENODEV;
}
430
/* Platform entry point: create the irq_host, probe the controllers,
 * install the get_irq hook and enable delivery on the boot cpu.
 * Panics on failure since the machine cannot take interrupts without
 * its only interrupt controller. */
void __init iic_init_IRQ(void)
{
	/* Setup an irq host data structure */
	iic_host = irq_alloc_host(NULL, IRQ_HOST_MAP_LINEAR, IIC_SOURCE_COUNT,
				  &iic_host_ops, IIC_IRQ_INVALID);
	BUG_ON(iic_host == NULL);
	irq_set_default_host(iic_host);

	/* Discover and initialize iics */
	if (setup_iic() < 0)
		panic("IIC: Failed to initialize !\n");

	/* Set master interrupt handling function */
	ppc_md.get_irq = iic_get_irq;

	/* Enable on current CPU */
	iic_setup_cpu();
}
449
iic_set_interrupt_routing(int cpu,int thread,int priority)450 void iic_set_interrupt_routing(int cpu, int thread, int priority)
451 {
452 struct cbe_iic_regs __iomem *iic_regs = cbe_get_cpu_iic_regs(cpu);
453 u64 iic_ir = 0;
454 int node = cpu >> 1;
455
456 /* Set which node and thread will handle the next interrupt */
457 iic_ir |= CBE_IIC_IR_PRIO(priority) |
458 CBE_IIC_IR_DEST_NODE(node);
459 if (thread == 0)
460 iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_0);
461 else
462 iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_1);
463 out_be64(&iic_regs->iic_ir, iic_ir);
464 }
465