1 /*
2  * File:         arch/blackfin/mach-common/ints-priority.c
3  *
4  * Description:  Set up the interrupt priorities
5  *
6  * Modified:
7  *               1996 Roman Zippel
8  *               1999 D. Jeff Dionne <jeff@uclinux.org>
9  *               2000-2001 Lineo, Inc. D. Jefff Dionne <jeff@lineo.ca>
10  *               2002 Arcturus Networks Inc. MaTed <mated@sympatico.ca>
11  *               2003 Metrowerks/Motorola
12  *               2003 Bas Vermeulen <bas@buyways.nl>
13  *               Copyright 2004-2008 Analog Devices Inc.
14  *
15  * Bugs:         Enter bugs at http://blackfin.uclinux.org/
16  *
17  * This program is free software; you can redistribute it and/or modify
18  * it under the terms of the GNU General Public License as published by
19  * the Free Software Foundation; either version 2 of the License, or
20  * (at your option) any later version.
21  *
22  * This program is distributed in the hope that it will be useful,
23  * but WITHOUT ANY WARRANTY; without even the implied warranty of
24  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
25  * GNU General Public License for more details.
26  *
27  * You should have received a copy of the GNU General Public License
28  * along with this program; if not, see the file COPYING, or write
29  * to the Free Software Foundation, Inc.,
30  * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
31  */
32 
33 #include <linux/module.h>
34 #include <linux/kernel_stat.h>
35 #include <linux/seq_file.h>
36 #include <linux/irq.h>
37 #ifdef CONFIG_IPIPE
38 #include <linux/ipipe.h>
39 #endif
40 #ifdef CONFIG_KGDB
41 #include <linux/kgdb.h>
42 #endif
43 #include <asm/traps.h>
44 #include <asm/blackfin.h>
45 #include <asm/gpio.h>
46 #include <asm/irq_handler.h>
47 
48 #define SIC_SYSIRQ(irq)	(irq - (IRQ_CORETMR + 1))
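/*
 * SIC_SYSIRQ() translates a Linux IRQ number into that peripheral's index
 * within the System Interrupt Controller.  The core events occupy the IRQ
 * numbers up to and including IRQ_CORETMR, so the first peripheral interrupt
 * (IRQ_CORETMR + 1) maps to SIC index 0, the next one to index 1, and so on.
 */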
49 
50 #ifdef BF537_FAMILY
51 # define BF537_GENERIC_ERROR_INT_DEMUX
52 #else
53 # undef BF537_GENERIC_ERROR_INT_DEMUX
54 #endif
55 
56 /*
57  * NOTES:
58  * - we have separated the physical hardware interrupts from the
59  * levels that the Linux kernel sees (see the description in irq.h)
60  *
61  */
62 
63 #ifndef CONFIG_SMP
64 /* Initialize this to an actual value to force it into the .data
65  * section so that we know it is properly initialized at entry into
66  * the kernel but before bss is initialized to zero (which is where
67  * it would live otherwise).  The 0x1f magic represents the IRQs we
68  * cannot actually mask out in hardware.
69  */
70 unsigned long bfin_irq_flags = 0x1f;
71 EXPORT_SYMBOL(bfin_irq_flags);
72 #endif
73 
74 /* The number of spurious interrupts */
75 atomic_t num_spurious;
76 
77 #ifdef CONFIG_PM
78 unsigned long bfin_sic_iwr[3];	/* Up to 3 SIC_IWRx registers */
79 unsigned vr_wakeup;
80 #endif
81 
82 struct ivgx {
83 	/* irq number for request_irq, available in mach-bf5xx/irq.h */
84 	unsigned int irqno;
85 	/* corresponding bit in the SIC_ISR register */
86 	unsigned int isrflag;
87 } ivg_table[NR_PERI_INTS];
88 
89 struct ivg_slice {
90 	/* position of first irq in ivg_table for given ivg */
91 	struct ivgx *ifirst;
92 	struct ivgx *istop;
93 } ivg7_13[IVG13 - IVG7 + 1];
94 
95 
96 /*
97  * Search SIC_IAR and fill tables with the irqvalues
98  * and their positions in the SIC_ISR register.
99  */
100 static void __init search_IAR(void)
101 {
102 	unsigned ivg, irq_pos = 0;
103 	for (ivg = 0; ivg <= IVG13 - IVG7; ivg++) {
104 		int irqn;
105 
106 		ivg7_13[ivg].istop = ivg7_13[ivg].ifirst = &ivg_table[irq_pos];
107 
108 		for (irqn = 0; irqn < NR_PERI_INTS; irqn++) {
109 			int iar_shift = (irqn & 7) * 4;
110 				if (ivg == (0xf &
111 #if defined(CONFIG_BF52x) || defined(CONFIG_BF538) \
112 	|| defined(CONFIG_BF539) || defined(CONFIG_BF51x)
113 			     bfin_read32((unsigned long *)SIC_IAR0 +
114 					 ((irqn % 32) >> 3) + ((irqn / 32) *
115 					 ((SIC_IAR4 - SIC_IAR0) / 4))) >> iar_shift)) {
116 #else
117 			     bfin_read32((unsigned long *)SIC_IAR0 +
118 					 (irqn >> 3)) >> iar_shift)) {
119 #endif
120 				ivg_table[irq_pos].irqno = IVG7 + irqn;
121 				ivg_table[irq_pos].isrflag = 1 << (irqn % 32);
122 				ivg7_13[ivg].istop++;
123 				irq_pos++;
124 			}
125 		}
126 	}
127 }
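/*
 * After search_IAR() runs, ivg_table[] is grouped by core priority level:
 * for a given level N in IVG7..IVG13, the slice ivg7_13[N - IVG7] lists
 * every peripheral interrupt that the SIC_IAR registers route to that level,
 * together with its bit in the SIC_ISR status register.  The dispatch code
 * in do_irq() below only has to scan that one slice to find out which
 * peripheral actually asserted the level.
 */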
128 
129 /*
130  * This is for core internal IRQs
131  */
132 
133 static void bfin_ack_noop(unsigned int irq)
134 {
135 	/* Dummy function.  */
136 }
137 
138 static void bfin_core_mask_irq(unsigned int irq)
139 {
140 	bfin_irq_flags &= ~(1 << irq);
141 	if (!irqs_disabled_hw())
142 		local_irq_enable_hw();
143 }
144 
145 static void bfin_core_unmask_irq(unsigned int irq)
146 {
147 	bfin_irq_flags |= 1 << irq;
148 	/*
149 	 * If interrupts are enabled, IMASK must contain the same value
150 	 * as bfin_irq_flags.  Make sure that invariant holds.  If interrupts
151 	 * are currently disabled we need not do anything; one of the
152 	 * callers will take care of setting IMASK to the proper value
153 	 * when reenabling interrupts.
154 	 * local_irq_enable just does "STI bfin_irq_flags", so it's exactly
155 	 * what we need.
156 	 */
157 	if (!irqs_disabled_hw())
158 		local_irq_enable_hw();
159 	return;
160 }
161 
162 static void bfin_internal_mask_irq(unsigned int irq)
163 {
164 	unsigned long flags;
165 
166 #ifdef CONFIG_BF53x
167 	local_irq_save_hw(flags);
168 	bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() &
169 			     ~(1 << SIC_SYSIRQ(irq)));
170 #else
171 	unsigned mask_bank, mask_bit;
172 	local_irq_save_hw(flags);
173 	mask_bank = SIC_SYSIRQ(irq) / 32;
174 	mask_bit = SIC_SYSIRQ(irq) % 32;
175 	bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) &
176 			     ~(1 << mask_bit));
177 #ifdef CONFIG_SMP
178 	bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) &
179 			     ~(1 << mask_bit));
180 #endif
181 #endif
182 	local_irq_restore_hw(flags);
183 }
184 
185 static void bfin_internal_unmask_irq(unsigned int irq)
186 {
187 	unsigned long flags;
188 
189 #ifdef CONFIG_BF53x
190 	local_irq_save_hw(flags);
191 	bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() |
192 			     (1 << SIC_SYSIRQ(irq)));
193 #else
194 	unsigned mask_bank, mask_bit;
195 	local_irq_save_hw(flags);
196 	mask_bank = SIC_SYSIRQ(irq) / 32;
197 	mask_bit = SIC_SYSIRQ(irq) % 32;
198 	bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) |
199 			     (1 << mask_bit));
200 #ifdef CONFIG_SMP
201 	bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) |
202 			     (1 << mask_bit));
203 #endif
204 #endif
205 	local_irq_restore_hw(flags);
206 }
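/*
 * On parts with more than 32 peripheral interrupts the mask is spread over
 * several SIC_IMASKx registers, so SIC_SYSIRQ(irq) / 32 selects the register
 * bank and SIC_SYSIRQ(irq) % 32 the bit within it (a SIC index of 40, for
 * example, lands in bank 1, bit 8).  On BF53x there is a single SIC_IMASK
 * and the index is used as the bit position directly.
 */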
207 
208 #ifdef CONFIG_PM
209 int bfin_internal_set_wake(unsigned int irq, unsigned int state)
210 {
211 	u32 bank, bit, wakeup = 0;
212 	unsigned long flags;
213 	bank = SIC_SYSIRQ(irq) / 32;
214 	bit = SIC_SYSIRQ(irq) % 32;
215 
216 	switch (irq) {
217 #ifdef IRQ_RTC
218 	case IRQ_RTC:
219 	wakeup |= WAKE;
220 	break;
221 #endif
222 #ifdef IRQ_CAN0_RX
223 	case IRQ_CAN0_RX:
224 	wakeup |= CANWE;
225 	break;
226 #endif
227 #ifdef IRQ_CAN1_RX
228 	case IRQ_CAN1_RX:
229 	wakeup |= CANWE;
230 	break;
231 #endif
232 #ifdef IRQ_USB_INT0
233 	case IRQ_USB_INT0:
234 	wakeup |= USBWE;
235 	break;
236 #endif
237 #ifdef IRQ_KEY
238 	case IRQ_KEY:
239 	wakeup |= KPADWE;
240 	break;
241 #endif
242 #ifdef CONFIG_BF54x
243 	case IRQ_CNT:
244 	wakeup |= ROTWE;
245 	break;
246 #endif
247 	default:
248 	break;
249 	}
250 
251 	local_irq_save_hw(flags);
252 
253 	if (state) {
254 		bfin_sic_iwr[bank] |= (1 << bit);
255 		vr_wakeup  |= wakeup;
256 
257 	} else {
258 		bfin_sic_iwr[bank] &= ~(1 << bit);
259 		vr_wakeup  &= ~wakeup;
260 	}
261 
262 	local_irq_restore_hw(flags);
263 
264 	return 0;
265 }
266 #endif
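/*
 * bfin_internal_set_wake() keeps two things in sync: the SIC_IWRx bit that
 * allows this peripheral to wake the core from IDLE, and, for the few
 * sources listed in the switch above (RTC, CAN, USB, keypad, rotary
 * counter), the matching wakeup-enable bit accumulated in vr_wakeup, which
 * the power-management code later applies when programming VR_CTL.
 */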
267 
268 static struct irq_chip bfin_core_irqchip = {
269 	.name = "CORE",
270 	.ack = bfin_ack_noop,
271 	.mask = bfin_core_mask_irq,
272 	.unmask = bfin_core_unmask_irq,
273 };
274 
275 static struct irq_chip bfin_internal_irqchip = {
276 	.name = "INTN",
277 	.ack = bfin_ack_noop,
278 	.mask = bfin_internal_mask_irq,
279 	.unmask = bfin_internal_unmask_irq,
280 	.mask_ack = bfin_internal_mask_irq,
281 	.disable = bfin_internal_mask_irq,
282 	.enable = bfin_internal_unmask_irq,
283 #ifdef CONFIG_PM
284 	.set_wake = bfin_internal_set_wake,
285 #endif
286 };
287 
288 static void bfin_handle_irq(unsigned irq)
289 {
290 #ifdef CONFIG_IPIPE
291 	struct pt_regs regs;    /* Contents not used. */
292 	ipipe_trace_irq_entry(irq);
293 	__ipipe_handle_irq(irq, &regs);
294 	ipipe_trace_irq_exit(irq);
295 #else /* !CONFIG_IPIPE */
296 	struct irq_desc *desc = irq_desc + irq;
297 	desc->handle_irq(irq, desc);
298 #endif  /* !CONFIG_IPIPE */
299 }
300 
301 #ifdef BF537_GENERIC_ERROR_INT_DEMUX
302 static int error_int_mask;
303 
304 static void bfin_generic_error_mask_irq(unsigned int irq)
305 {
306 	error_int_mask &= ~(1L << (irq - IRQ_PPI_ERROR));
307 
308 	if (!error_int_mask)
309 		bfin_internal_mask_irq(IRQ_GENERIC_ERROR);
310 }
311 
312 static void bfin_generic_error_unmask_irq(unsigned int irq)
313 {
314 	bfin_internal_unmask_irq(IRQ_GENERIC_ERROR);
315 	error_int_mask |= 1L << (irq - IRQ_PPI_ERROR);
316 }
317 
318 static struct irq_chip bfin_generic_error_irqchip = {
319 	.name = "ERROR",
320 	.ack = bfin_ack_noop,
321 	.mask_ack = bfin_generic_error_mask_irq,
322 	.mask = bfin_generic_error_mask_irq,
323 	.unmask = bfin_generic_error_unmask_irq,
324 };
325 
326 static void bfin_demux_error_irq(unsigned int int_err_irq,
327 				 struct irq_desc *inta_desc)
328 {
329 	int irq = 0;
330 
331 #if (defined(CONFIG_BF537) || defined(CONFIG_BF536))
332 	if (bfin_read_EMAC_SYSTAT() & EMAC_ERR_MASK)
333 		irq = IRQ_MAC_ERROR;
334 	else
335 #endif
336 	if (bfin_read_SPORT0_STAT() & SPORT_ERR_MASK)
337 		irq = IRQ_SPORT0_ERROR;
338 	else if (bfin_read_SPORT1_STAT() & SPORT_ERR_MASK)
339 		irq = IRQ_SPORT1_ERROR;
340 	else if (bfin_read_PPI_STATUS() & PPI_ERR_MASK)
341 		irq = IRQ_PPI_ERROR;
342 	else if (bfin_read_CAN_GIF() & CAN_ERR_MASK)
343 		irq = IRQ_CAN_ERROR;
344 	else if (bfin_read_SPI_STAT() & SPI_ERR_MASK)
345 		irq = IRQ_SPI_ERROR;
346 	else if ((bfin_read_UART0_IIR() & UART_ERR_MASK_STAT1) &&
347 		 (bfin_read_UART0_IIR() & UART_ERR_MASK_STAT0))
348 		irq = IRQ_UART0_ERROR;
349 	else if ((bfin_read_UART1_IIR() & UART_ERR_MASK_STAT1) &&
350 		 (bfin_read_UART1_IIR() & UART_ERR_MASK_STAT0))
351 		irq = IRQ_UART1_ERROR;
352 
353 	if (irq) {
354 		if (error_int_mask & (1L << (irq - IRQ_PPI_ERROR)))
355 			bfin_handle_irq(irq);
356 		else {
357 
358 			switch (irq) {
359 			case IRQ_PPI_ERROR:
360 				bfin_write_PPI_STATUS(PPI_ERR_MASK);
361 				break;
362 #if (defined(CONFIG_BF537) || defined(CONFIG_BF536))
363 			case IRQ_MAC_ERROR:
364 				bfin_write_EMAC_SYSTAT(EMAC_ERR_MASK);
365 				break;
366 #endif
367 			case IRQ_SPORT0_ERROR:
368 				bfin_write_SPORT0_STAT(SPORT_ERR_MASK);
369 				break;
370 
371 			case IRQ_SPORT1_ERROR:
372 				bfin_write_SPORT1_STAT(SPORT_ERR_MASK);
373 				break;
374 
375 			case IRQ_CAN_ERROR:
376 				bfin_write_CAN_GIS(CAN_ERR_MASK);
377 				break;
378 
379 			case IRQ_SPI_ERROR:
380 				bfin_write_SPI_STAT(SPI_ERR_MASK);
381 				break;
382 
383 			default:
384 				break;
385 			}
386 
387 			pr_debug("IRQ %d:"
388 				 " MASKED PERIPHERAL ERROR INTERRUPT ASSERTED\n",
389 				 irq);
390 		}
391 	} else
392 		printk(KERN_ERR
393 		       "%s : %s : LINE %d :\nIRQ ?: PERIPHERAL ERROR"
394 		       " INTERRUPT ASSERTED BUT NO SOURCE FOUND\n",
395 		       __func__, __FILE__, __LINE__);
396 
397 }
398 #endif				/* BF537_GENERIC_ERROR_INT_DEMUX */
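/*
 * On BF536/BF537 several peripheral error conditions share a single SIC
 * line (IRQ_GENERIC_ERROR).  The demux handler above polls each candidate
 * peripheral's status register to find the real source; if that source is
 * currently masked at this layer, its status bits are simply cleared and a
 * debug message is emitted instead of dispatching the interrupt.
 */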
399 
400 static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
401 {
402 #ifdef CONFIG_IPIPE
403 	_set_irq_handler(irq, handle_level_irq);
404 #else
405 	struct irq_desc *desc = irq_desc + irq;
406 	/* May not call generic set_irq_handler() due to spinlock
407 	   recursion. */
408 	desc->handle_irq = handle;
409 #endif
410 }
411 
412 static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS);
413 extern void bfin_gpio_irq_prepare(unsigned gpio);
414 
415 #if !defined(CONFIG_BF54x)
416 
417 static void bfin_gpio_ack_irq(unsigned int irq)
418 {
419 	/* As far as we know, when mask_ack is provided, ack_irq
420 	 * only gets called for edge-sensitive irqs
421 	 */
422 	set_gpio_data(irq_to_gpio(irq), 0);
423 }
424 
425 static void bfin_gpio_mask_ack_irq(unsigned int irq)
426 {
427 	struct irq_desc *desc = irq_desc + irq;
428 	u32 gpionr = irq_to_gpio(irq);
429 
430 	if (desc->handle_irq == handle_edge_irq)
431 		set_gpio_data(gpionr, 0);
432 
433 	set_gpio_maska(gpionr, 0);
434 }
435 
436 static void bfin_gpio_mask_irq(unsigned int irq)
437 {
438 	set_gpio_maska(irq_to_gpio(irq), 0);
439 }
440 
441 static void bfin_gpio_unmask_irq(unsigned int irq)
442 {
443 	set_gpio_maska(irq_to_gpio(irq), 1);
444 }
445 
446 static unsigned int bfin_gpio_irq_startup(unsigned int irq)
447 {
448 	u32 gpionr = irq_to_gpio(irq);
449 
450 	if (__test_and_set_bit(gpionr, gpio_enabled))
451 		bfin_gpio_irq_prepare(gpionr);
452 
453 	bfin_gpio_unmask_irq(irq);
454 
455 	return 0;
456 }
457 
458 static void bfin_gpio_irq_shutdown(unsigned int irq)
459 {
460 	u32 gpionr = irq_to_gpio(irq);
461 
462 	bfin_gpio_mask_irq(irq);
463 	__clear_bit(gpionr, gpio_enabled);
464 	bfin_gpio_irq_free(gpionr);
465 }
466 
467 static int bfin_gpio_irq_type(unsigned int irq, unsigned int type)
468 {
469 	int ret;
470 	char buf[16];
471 	u32 gpionr = irq_to_gpio(irq);
472 
473 	if (type == IRQ_TYPE_PROBE) {
474 		/* only probe GPIO interrupt lines that are not yet enabled */
475 		if (__test_bit(gpionr, gpio_enabled))
476 			return 0;
477 		type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
478 	}
479 
480 	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
481 		    IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
482 
483 		snprintf(buf, 16, "gpio-irq%d", irq);
484 		ret = bfin_gpio_irq_request(gpionr, buf);
485 		if (ret)
486 			return ret;
487 
488 		if (__test_and_set_bit(gpionr, gpio_enabled))
489 			bfin_gpio_irq_prepare(gpionr);
490 
491 	} else {
492 		__clear_bit(gpionr, gpio_enabled);
493 		return 0;
494 	}
495 
496 	set_gpio_inen(gpionr, 0);
497 	set_gpio_dir(gpionr, 0);
498 
499 	if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
500 	    == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
501 		set_gpio_both(gpionr, 1);
502 	else
503 		set_gpio_both(gpionr, 0);
504 
505 	if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)))
506 		set_gpio_polar(gpionr, 1);	/* low or falling edge denoted by one */
507 	else
508 		set_gpio_polar(gpionr, 0);	/* high or rising edge denoted by zero */
509 
510 	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
511 		set_gpio_edge(gpionr, 1);
512 		set_gpio_inen(gpionr, 1);
513 		set_gpio_data(gpionr, 0);
514 
515 	} else {
516 		set_gpio_edge(gpionr, 0);
517 		set_gpio_inen(gpionr, 1);
518 	}
519 
520 	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
521 		bfin_set_irq_handler(irq, handle_edge_irq);
522 	else
523 		bfin_set_irq_handler(irq, handle_level_irq);
524 
525 	return 0;
526 }
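/*
 * Rough usage sketch (not part of this file): a driver wanting an interrupt
 * on the rising edge of a GPIO pin would typically do something like
 *
 *	irq = gpio_to_irq(gpio);
 *	set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
 *	request_irq(irq, my_handler, 0, "my-driver", dev);
 *
 * ("my_handler", "my-driver" and "dev" are placeholders), which reaches this
 * code through the .set_type and .startup hooks of bfin_gpio_irqchip below.
 */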
527 
528 #ifdef CONFIG_PM
529 int bfin_gpio_set_wake(unsigned int irq, unsigned int state)
530 {
531 	unsigned gpio = irq_to_gpio(irq);
532 
533 	if (state)
534 		gpio_pm_wakeup_request(gpio, PM_WAKE_IGNORE);
535 	else
536 		gpio_pm_wakeup_free(gpio);
537 
538 	return 0;
539 }
540 #endif
541 
542 static void bfin_demux_gpio_irq(unsigned int inta_irq,
543 				struct irq_desc *desc)
544 {
545 	unsigned int i, gpio, mask, irq, search = 0;
546 
547 	switch (inta_irq) {
548 #if defined(CONFIG_BF53x)
549 	case IRQ_PROG_INTA:
550 		irq = IRQ_PF0;
551 		search = 1;
552 		break;
553 # if defined(BF537_FAMILY) && !(defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE))
554 	case IRQ_MAC_RX:
555 		irq = IRQ_PH0;
556 		break;
557 # endif
558 #elif defined(CONFIG_BF538) || defined(CONFIG_BF539)
559 	case IRQ_PORTF_INTA:
560 		irq = IRQ_PF0;
561 		break;
562 #elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
563 	case IRQ_PORTF_INTA:
564 		irq = IRQ_PF0;
565 		break;
566 	case IRQ_PORTG_INTA:
567 		irq = IRQ_PG0;
568 		break;
569 	case IRQ_PORTH_INTA:
570 		irq = IRQ_PH0;
571 		break;
572 #elif defined(CONFIG_BF561)
573 	case IRQ_PROG0_INTA:
574 		irq = IRQ_PF0;
575 		break;
576 	case IRQ_PROG1_INTA:
577 		irq = IRQ_PF16;
578 		break;
579 	case IRQ_PROG2_INTA:
580 		irq = IRQ_PF32;
581 		break;
582 #endif
583 	default:
584 		BUG();
585 		return;
586 	}
587 
588 	if (search) {
589 		for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) {
590 			irq += i;
591 
592 			mask = get_gpiop_data(i) & get_gpiop_maska(i);
593 
594 			while (mask) {
595 				if (mask & 1)
596 					bfin_handle_irq(irq);
597 				irq++;
598 				mask >>= 1;
599 			}
600 		}
601 	} else {
602 			gpio = irq_to_gpio(irq);
603 			mask = get_gpiop_data(gpio) & get_gpiop_maska(gpio);
604 
605 			do {
606 				if (mask & 1)
607 					bfin_handle_irq(irq);
608 				irq++;
609 				mask >>= 1;
610 			} while (mask);
611 	}
612 
613 }
614 
615 #else				/* CONFIG_BF54x */
616 
617 #define NR_PINT_SYS_IRQS	4
618 #define NR_PINT_BITS		32
619 #define NR_PINTS		160
620 #define IRQ_NOT_AVAIL		0xFF
621 
622 #define PINT_2_BANK(x)		((x) >> 5)
623 #define PINT_2_BIT(x)		((x) & 0x1F)
624 #define PINT_BIT(x)		(1 << (PINT_2_BIT(x)))
625 
626 static unsigned char irq2pint_lut[NR_PINTS];
627 static unsigned char pint2irq_lut[NR_PINT_SYS_IRQS * NR_PINT_BITS];
628 
629 struct pin_int_t {
630 	unsigned int mask_set;
631 	unsigned int mask_clear;
632 	unsigned int request;
633 	unsigned int assign;
634 	unsigned int edge_set;
635 	unsigned int edge_clear;
636 	unsigned int invert_set;
637 	unsigned int invert_clear;
638 	unsigned int pinstate;
639 	unsigned int latch;
640 };
641 
642 static struct pin_int_t *pint[NR_PINT_SYS_IRQS] = {
643 	(struct pin_int_t *)PINT0_MASK_SET,
644 	(struct pin_int_t *)PINT1_MASK_SET,
645 	(struct pin_int_t *)PINT2_MASK_SET,
646 	(struct pin_int_t *)PINT3_MASK_SET,
647 };
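/*
 * struct pin_int_t mirrors the register layout of one PINTx block, so the
 * casts above allow the request/mask/edge/invert registers to be accessed
 * as ordinary structure members, e.g. pint[bank]->mask_set.
 */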
648 
649 inline unsigned int get_irq_base(u32 bank, u8 bmap)
650 {
651 	unsigned int irq_base;
652 
653 	if (bank < 2) {		/*PA-PB */
654 		irq_base = IRQ_PA0 + bmap * 16;
655 	} else {		/*PC-PJ */
656 		irq_base = IRQ_PC0 + bmap * 16;
657 	}
658 
659 	return irq_base;
660 }
661 
662 /* Whenever PINTx_ASSIGN is altered, init_pint_lut() must be executed! */
663 void init_pint_lut(void)
664 {
665 	u16 bank, bit, irq_base, bit_pos;
666 	u32 pint_assign;
667 	u8 bmap;
668 
669 	memset(irq2pint_lut, IRQ_NOT_AVAIL, sizeof(irq2pint_lut));
670 
671 	for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++) {
672 
673 		pint_assign = pint[bank]->assign;
674 
675 		for (bit = 0; bit < NR_PINT_BITS; bit++) {
676 
677 			bmap = (pint_assign >> ((bit / 8) * 8)) & 0xFF;
678 
679 			irq_base = get_irq_base(bank, bmap);
680 
681 			irq_base += (bit % 8) + ((bit / 8) & 1 ? 8 : 0);
682 			bit_pos = bit + bank * NR_PINT_BITS;
683 
684 			pint2irq_lut[bit_pos] = irq_base - SYS_IRQS;
685 			irq2pint_lut[irq_base - SYS_IRQS] = bit_pos;
686 		}
687 	}
688 }
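/*
 * The two look-up tables give a cheap bidirectional mapping between GPIO
 * IRQ numbers and PINT positions.  A position is encoded as
 * bank * NR_PINT_BITS + bit, which PINT_2_BANK()/PINT_2_BIT() simply decode
 * again (position 70, for example, means bank 2, bit 6).  Which GPIO port
 * byte feeds which byte of a PINT bank is decided by PINTx_ASSIGN, which is
 * why the tables must be rebuilt whenever those registers change.
 */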
689 
690 static void bfin_gpio_ack_irq(unsigned int irq)
691 {
692 	struct irq_desc *desc = irq_desc + irq;
693 	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
694 	u32 pintbit = PINT_BIT(pint_val);
695 	u32 bank = PINT_2_BANK(pint_val);
696 
697 	if ((desc->status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
698 		if (pint[bank]->invert_set & pintbit)
699 			pint[bank]->invert_clear = pintbit;
700 		else
701 			pint[bank]->invert_set = pintbit;
702 	}
703 	pint[bank]->request = pintbit;
704 
705 }
706 
707 static void bfin_gpio_mask_ack_irq(unsigned int irq)
708 {
709 	struct irq_desc *desc = irq_desc + irq;
710 	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
711 	u32 pintbit = PINT_BIT(pint_val);
712 	u32 bank = PINT_2_BANK(pint_val);
713 
714 	if ((desc->status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
715 		if (pint[bank]->invert_set & pintbit)
716 			pint[bank]->invert_clear = pintbit;
717 		else
718 			pint[bank]->invert_set = pintbit;
719 	}
720 
721 	pint[bank]->request = pintbit;
722 	pint[bank]->mask_clear = pintbit;
723 }
724 
725 static void bfin_gpio_mask_irq(unsigned int irq)
726 {
727 	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
728 
729 	pint[PINT_2_BANK(pint_val)]->mask_clear = PINT_BIT(pint_val);
730 }
731 
732 static void bfin_gpio_unmask_irq(unsigned int irq)
733 {
734 	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
735 	u32 pintbit = PINT_BIT(pint_val);
736 	u32 bank = PINT_2_BANK(pint_val);
737 
738 	pint[bank]->request = pintbit;
739 	pint[bank]->mask_set = pintbit;
740 }
741 
742 static unsigned int bfin_gpio_irq_startup(unsigned int irq)
743 {
744 	u32 gpionr = irq_to_gpio(irq);
745 	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
746 
747 	if (pint_val == IRQ_NOT_AVAIL) {
748 		printk(KERN_ERR
749 		"GPIO IRQ %d: not in PINT assign table; "
750 		"reconfigure the interrupt-to-port assignment\n", irq);
751 		return -ENODEV;
752 	}
753 
754 	if (__test_and_set_bit(gpionr, gpio_enabled))
755 		bfin_gpio_irq_prepare(gpionr);
756 
757 	bfin_gpio_unmask_irq(irq);
758 
759 	return 0;
760 }
761 
762 static void bfin_gpio_irq_shutdown(unsigned int irq)
763 {
764 	u32 gpionr = irq_to_gpio(irq);
765 
766 	bfin_gpio_mask_irq(irq);
767 	__clear_bit(gpionr, gpio_enabled);
768 	bfin_gpio_irq_free(gpionr);
769 }
770 
771 static int bfin_gpio_irq_type(unsigned int irq, unsigned int type)
772 {
773 	int ret;
774 	char buf[16];
775 	u32 gpionr = irq_to_gpio(irq);
776 	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
777 	u32 pintbit = PINT_BIT(pint_val);
778 	u32 bank = PINT_2_BANK(pint_val);
779 
780 	if (pint_val == IRQ_NOT_AVAIL)
781 		return -ENODEV;
782 
783 	if (type == IRQ_TYPE_PROBE) {
784 		/* only probe GPIO interrupt lines that are not yet enabled */
785 		if (__test_bit(gpionr, gpio_enabled))
786 			return 0;
787 		type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
788 	}
789 
790 	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
791 		    IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
792 
793 		snprintf(buf, 16, "gpio-irq%d", irq);
794 		ret = bfin_gpio_irq_request(gpionr, buf);
795 		if (ret)
796 			return ret;
797 
798 		if (__test_and_set_bit(gpionr, gpio_enabled))
799 			bfin_gpio_irq_prepare(gpionr);
800 
801 	} else {
802 		__clear_bit(gpionr, gpio_enabled);
803 		return 0;
804 	}
805 
806 	if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)))
807 		pint[bank]->invert_set = pintbit;	/* low or falling edge denoted by one */
808 	else
809 		pint[bank]->invert_clear = pintbit;	/* high or rising edge denoted by zero */
810 
811 	if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
812 	    == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
813 		if (gpio_get_value(gpionr))
814 			pint[bank]->invert_set = pintbit;
815 		else
816 			pint[bank]->invert_clear = pintbit;
817 	}
818 
819 	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
820 		pint[bank]->edge_set = pintbit;
821 		bfin_set_irq_handler(irq, handle_edge_irq);
822 	} else {
823 		pint[bank]->edge_clear = pintbit;
824 		bfin_set_irq_handler(irq, handle_level_irq);
825 	}
826 
827 	return 0;
828 }
829 
830 #ifdef CONFIG_PM
831 u32 pint_saved_masks[NR_PINT_SYS_IRQS];
832 u32 pint_wakeup_masks[NR_PINT_SYS_IRQS];
833 
834 int bfin_gpio_set_wake(unsigned int irq, unsigned int state)
835 {
836 	u32 pint_irq;
837 	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
838 	u32 bank = PINT_2_BANK(pint_val);
839 	u32 pintbit = PINT_BIT(pint_val);
840 
841 	switch (bank) {
842 	case 0:
843 		pint_irq = IRQ_PINT0;
844 		break;
845 	case 2:
846 		pint_irq = IRQ_PINT2;
847 		break;
848 	case 3:
849 		pint_irq = IRQ_PINT3;
850 		break;
851 	case 1:
852 		pint_irq = IRQ_PINT1;
853 		break;
854 	default:
855 		return -EINVAL;
856 	}
857 
858 	bfin_internal_set_wake(pint_irq, state);
859 
860 	if (state)
861 		pint_wakeup_masks[bank] |= pintbit;
862 	else
863 		pint_wakeup_masks[bank] &= ~pintbit;
864 
865 	return 0;
866 }
867 
868 u32 bfin_pm_setup(void)
869 {
870 	u32 val, i;
871 
872 	for (i = 0; i < NR_PINT_SYS_IRQS; i++) {
873 		val = pint[i]->mask_clear;
874 		pint_saved_masks[i] = val;
875 		if (val ^ pint_wakeup_masks[i]) {
876 			pint[i]->mask_clear = val;
877 			pint[i]->mask_set = pint_wakeup_masks[i];
878 		}
879 	}
880 
881 	return 0;
882 }
883 
884 void bfin_pm_restore(void)
885 {
886 	u32 i, val;
887 
888 	for (i = 0; i < NR_PINT_SYS_IRQS; i++) {
889 		val = pint_saved_masks[i];
890 		if (val ^ pint_wakeup_masks[i]) {
891 			pint[i]->mask_clear = pint[i]->mask_clear;
892 			pint[i]->mask_set = val;
893 		}
894 	}
895 }
896 #endif
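/*
 * Note on the PM helpers above: PINTx_MASK_SET and PINTx_MASK_CLEAR are
 * assumed to be write-one-to-set / write-one-to-clear views of the same
 * interrupt mask, with reads returning the currently enabled bits.  Under
 * that assumption, writing the value read from mask_clear back into
 * mask_clear wipes the current mask before the wakeup (or saved) mask is
 * re-established through mask_set.
 */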
897 
898 static void bfin_demux_gpio_irq(unsigned int inta_irq,
899 				struct irq_desc *desc)
900 {
901 	u32 bank, pint_val;
902 	u32 request, irq;
903 
904 	switch (inta_irq) {
905 	case IRQ_PINT0:
906 		bank = 0;
907 		break;
908 	case IRQ_PINT2:
909 		bank = 2;
910 		break;
911 	case IRQ_PINT3:
912 		bank = 3;
913 		break;
914 	case IRQ_PINT1:
915 		bank = 1;
916 		break;
917 	default:
918 		return;
919 	}
920 
921 	pint_val = bank * NR_PINT_BITS;
922 
923 	request = pint[bank]->request;
924 
925 	while (request) {
926 		if (request & 1) {
927 			irq = pint2irq_lut[pint_val] + SYS_IRQS;
928 			bfin_handle_irq(irq);
929 		}
930 		pint_val++;
931 		request >>= 1;
932 	}
933 
934 }
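/*
 * BF54x demux: every latched bit in the bank's PINTx_REQUEST register is
 * translated back into a GPIO IRQ number via pint2irq_lut[] and dispatched
 * individually through bfin_handle_irq().
 */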
935 #endif
936 
937 static struct irq_chip bfin_gpio_irqchip = {
938 	.name = "GPIO",
939 	.ack = bfin_gpio_ack_irq,
940 	.mask = bfin_gpio_mask_irq,
941 	.mask_ack = bfin_gpio_mask_ack_irq,
942 	.unmask = bfin_gpio_unmask_irq,
943 	.disable = bfin_gpio_mask_irq,
944 	.enable = bfin_gpio_unmask_irq,
945 	.set_type = bfin_gpio_irq_type,
946 	.startup = bfin_gpio_irq_startup,
947 	.shutdown = bfin_gpio_irq_shutdown,
948 #ifdef CONFIG_PM
949 	.set_wake = bfin_gpio_set_wake,
950 #endif
951 };
952 
953 void __cpuinit init_exception_vectors(void)
954 {
955 	/* cannot program in software:
956 	 * evt0 - emulation (jtag)
957 	 * evt1 - reset
958 	 */
959 	bfin_write_EVT2(evt_nmi);
960 	bfin_write_EVT3(trap);
961 	bfin_write_EVT5(evt_ivhw);
962 	bfin_write_EVT6(evt_timer);
963 	bfin_write_EVT7(evt_evt7);
964 	bfin_write_EVT8(evt_evt8);
965 	bfin_write_EVT9(evt_evt9);
966 	bfin_write_EVT10(evt_evt10);
967 	bfin_write_EVT11(evt_evt11);
968 	bfin_write_EVT12(evt_evt12);
969 	bfin_write_EVT13(evt_evt13);
970 	bfin_write_EVT14(evt14_softirq);
971 	bfin_write_EVT15(evt_system_call);
972 	CSYNC();
973 }
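/*
 * The writes above fill in the core Event Vector Table: EVT2 (NMI) through
 * EVT15 are pointed at the corresponding low-level entry code (EVT4 is
 * reserved and therefore skipped), while EVT0 (emulation) and EVT1 (reset)
 * are fixed in hardware and cannot be set from software.
 */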
974 
975 /*
976  * This function should be called during kernel startup to initialize
977  * the BFin IRQ handling routines.
978  */
979 
980 int __init init_arch_irq(void)
981 {
982 	int irq;
983 	unsigned long ilat = 0;
984 	/* Disable all the peripheral interrupts - page 4-29 HW Ref manual */
985 #if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) \
986 	|| defined(BF538_FAMILY) || defined(CONFIG_BF51x)
987 	bfin_write_SIC_IMASK0(SIC_UNMASK_ALL);
988 	bfin_write_SIC_IMASK1(SIC_UNMASK_ALL);
989 # ifdef CONFIG_BF54x
990 	bfin_write_SIC_IMASK2(SIC_UNMASK_ALL);
991 # endif
992 # ifdef CONFIG_SMP
993 	bfin_write_SICB_IMASK0(SIC_UNMASK_ALL);
994 	bfin_write_SICB_IMASK1(SIC_UNMASK_ALL);
995 # endif
996 #else
997 	bfin_write_SIC_IMASK(SIC_UNMASK_ALL);
998 #endif
999 
1000 	local_irq_disable();
1001 
1002 #if (defined(CONFIG_BF537) || defined(CONFIG_BF536))
1003 	/* Clear EMAC Interrupt Status bits so we can demux it later */
1004 	bfin_write_EMAC_SYSTAT(-1);
1005 #endif
1006 
1007 #ifdef CONFIG_BF54x
1008 # ifdef CONFIG_PINTx_REASSIGN
1009 	pint[0]->assign = CONFIG_PINT0_ASSIGN;
1010 	pint[1]->assign = CONFIG_PINT1_ASSIGN;
1011 	pint[2]->assign = CONFIG_PINT2_ASSIGN;
1012 	pint[3]->assign = CONFIG_PINT3_ASSIGN;
1013 # endif
1014 	/* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */
1015 	init_pint_lut();
1016 #endif
1017 
1018 	for (irq = 0; irq <= SYS_IRQS; irq++) {
1019 		if (irq <= IRQ_CORETMR)
1020 			set_irq_chip(irq, &bfin_core_irqchip);
1021 		else
1022 			set_irq_chip(irq, &bfin_internal_irqchip);
1023 
1024 		switch (irq) {
1025 #if defined(CONFIG_BF53x)
1026 		case IRQ_PROG_INTA:
1027 # if defined(BF537_FAMILY) && !(defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE))
1028 		case IRQ_MAC_RX:
1029 # endif
1030 #elif defined(CONFIG_BF54x)
1031 		case IRQ_PINT0:
1032 		case IRQ_PINT1:
1033 		case IRQ_PINT2:
1034 		case IRQ_PINT3:
1035 #elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
1036 		case IRQ_PORTF_INTA:
1037 		case IRQ_PORTG_INTA:
1038 		case IRQ_PORTH_INTA:
1039 #elif defined(CONFIG_BF561)
1040 		case IRQ_PROG0_INTA:
1041 		case IRQ_PROG1_INTA:
1042 		case IRQ_PROG2_INTA:
1043 #elif defined(CONFIG_BF538) || defined(CONFIG_BF539)
1044 		case IRQ_PORTF_INTA:
1045 #endif
1046 
1047 			set_irq_chained_handler(irq,
1048 						bfin_demux_gpio_irq);
1049 			break;
1050 #ifdef BF537_GENERIC_ERROR_INT_DEMUX
1051 		case IRQ_GENERIC_ERROR:
1052 			set_irq_chained_handler(irq, bfin_demux_error_irq);
1053 			break;
1054 #endif
1055 #if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE)
1056 		case IRQ_TIMER0:
1057 			set_irq_handler(irq, handle_percpu_irq);
1058 			break;
1059 #endif
1060 #ifdef CONFIG_SMP
1061 		case IRQ_SUPPLE_0:
1062 		case IRQ_SUPPLE_1:
1063 			set_irq_handler(irq, handle_percpu_irq);
1064 			break;
1065 #endif
1066 		default:
1067 #ifdef CONFIG_IPIPE
1068 			/*
1069 			 * We want internal interrupt sources to be
1070 			 * masked, because ISRs may trigger interrupts
1071 			 * recursively (e.g. DMA), but interrupts are
1072 			 * _not_ masked at CPU level. So let's handle
1073 			 * most of them as level interrupts, except
1074 			 * the timer interrupt which is special.
1075 			 */
1076 			if (irq == IRQ_SYSTMR || irq == IRQ_CORETMR)
1077 				set_irq_handler(irq, handle_simple_irq);
1078 			else
1079 				set_irq_handler(irq, handle_level_irq);
1080 #else /* !CONFIG_IPIPE */
1081 			set_irq_handler(irq, handle_simple_irq);
1082 #endif /* !CONFIG_IPIPE */
1083 			break;
1084 		}
1085 	}
1086 
1087 #ifdef BF537_GENERIC_ERROR_INT_DEMUX
1088 	for (irq = IRQ_PPI_ERROR; irq <= IRQ_UART1_ERROR; irq++)
1089 		set_irq_chip_and_handler(irq, &bfin_generic_error_irqchip,
1090 					 handle_level_irq);
1091 #endif
1092 
1093 	/* if configured as edge, the handler will be switched to handle_edge_irq */
1094 	for (irq = GPIO_IRQ_BASE; irq < NR_IRQS; irq++)
1095 		set_irq_chip_and_handler(irq, &bfin_gpio_irqchip,
1096 					 handle_level_irq);
1097 
1098 
1099 	bfin_write_IMASK(0);
1100 	CSYNC();
1101 	ilat = bfin_read_ILAT();
1102 	CSYNC();
1103 	bfin_write_ILAT(ilat);
1104 	CSYNC();
1105 
1106 	printk(KERN_INFO "Configuring Blackfin Priority Driven Interrupts\n");
1107 	/* IMASK=xxx is equivalent to STI xx or bfin_irq_flags=xx,
1108 	 * local_irq_enable()
1109 	 */
1110 	program_IAR();
1111 	/* Therefore it's better to set up the IARs before interrupts are enabled */
1112 	search_IAR();
1113 
1114 	/* Enable interrupts IVG7-15 */
1115 	bfin_irq_flags |= IMASK_IVG15 |
1116 	    IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
1117 	    IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
1118 
1119 #ifdef SIC_IWR0
1120 	bfin_write_SIC_IWR0(IWR_DISABLE_ALL);
1121 # ifdef SIC_IWR1
1122 	/* BF52x/BF51x system reset does not properly reset SIC_IWR1 which
1123 	 * will screw up the bootrom as it relies on MDMA0/1 waking it
1124 	 * up from IDLE instructions.  See this report for more info:
1125 	 * http://blackfin.uclinux.org/gf/tracker/4323
1126 	 */
1127 	if (ANOMALY_05000435)
1128 		bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11));
1129 	else
1130 		bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
1131 # endif
1132 # ifdef SIC_IWR2
1133 	bfin_write_SIC_IWR2(IWR_DISABLE_ALL);
1134 # endif
1135 #else
1136 	bfin_write_SIC_IWR(IWR_DISABLE_ALL);
1137 #endif
1138 
1139 #ifdef CONFIG_IPIPE
1140 	for (irq = 0; irq < NR_IRQS; irq++) {
1141 		struct irq_desc *desc = irq_to_desc(irq);
1142 		desc->ic_prio = __ipipe_get_irq_priority(irq);
1143 	}
1144 #endif /* CONFIG_IPIPE */
1145 
1146 	return 0;
1147 }
1148 
1149 #ifdef CONFIG_DO_IRQ_L1
1150 __attribute__((l1_text))
1151 #endif
1152 void do_irq(int vec, struct pt_regs *fp)
1153 {
1154 	if (vec == EVT_IVTMR_P) {
1155 		vec = IRQ_CORETMR;
1156 	} else {
1157 		struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst;
1158 		struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop;
1159 #if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) \
1160 	|| defined(BF538_FAMILY) || defined(CONFIG_BF51x)
1161 		unsigned long sic_status[3];
1162 
1163 		if (smp_processor_id()) {
1164 #ifdef CONFIG_SMP
1165 			/* This will be optimized out in UP mode. */
1166 			sic_status[0] = bfin_read_SICB_ISR0() & bfin_read_SICB_IMASK0();
1167 			sic_status[1] = bfin_read_SICB_ISR1() & bfin_read_SICB_IMASK1();
1168 #endif
1169 		} else {
1170 			sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
1171 			sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
1172 		}
1173 #ifdef CONFIG_BF54x
1174 		sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
1175 #endif
1176 		for (;; ivg++) {
1177 			if (ivg >= ivg_stop) {
1178 				atomic_inc(&num_spurious);
1179 				return;
1180 			}
1181 			if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag)
1182 				break;
1183 		}
1184 #else
1185 		unsigned long sic_status;
1186 
1187 		sic_status = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();
1188 
1189 		for (;; ivg++) {
1190 			if (ivg >= ivg_stop) {
1191 				atomic_inc(&num_spurious);
1192 				return;
1193 			} else if (sic_status & ivg->isrflag)
1194 				break;
1195 		}
1196 #endif
1197 		vec = ivg->irqno;
1198 	}
1199 	asm_do_IRQ(vec, fp);
1200 }
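/*
 * do_irq() is entered with the core IVG level of the pending event.  For the
 * peripheral levels (IVG7..IVG13) it scans the ivg7_13[] slice for that
 * level against the pending-and-unmasked SIC status bits to recover the
 * individual peripheral IRQ before handing it to asm_do_IRQ(); if nothing
 * matches, the event is only counted in num_spurious.
 */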
1201 
1202 #ifdef CONFIG_IPIPE
1203 
1204 int __ipipe_get_irq_priority(unsigned irq)
1205 {
1206 	int ient, prio;
1207 
1208 	if (irq <= IRQ_CORETMR)
1209 		return irq;
1210 
1211 	for (ient = 0; ient < NR_PERI_INTS; ient++) {
1212 		struct ivgx *ivg = ivg_table + ient;
1213 		if (ivg->irqno == irq) {
1214 			for (prio = 0; prio <= IVG13-IVG7; prio++) {
1215 				if (ivg7_13[prio].ifirst <= ivg &&
1216 				    ivg7_13[prio].istop > ivg)
1217 					return IVG7 + prio;
1218 			}
1219 		}
1220 	}
1221 
1222 	return IVG15;
1223 }
1224 
1225 /* Hw interrupts are disabled on entry (check SAVE_CONTEXT). */
1226 #ifdef CONFIG_DO_IRQ_L1
1227 __attribute__((l1_text))
1228 #endif
1229 asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
1230 {
1231 	struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
1232 	struct ipipe_domain *this_domain = ipipe_current_domain;
1233 	struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop;
1234 	struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst;
1235 	int irq, s;
1236 
1237 	if (likely(vec == EVT_IVTMR_P)) {
1238 		irq = IRQ_CORETMR;
1239 		goto core_tick;
1240 	}
1241 
1242 	SSYNC();
1243 
1244 #if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561)
1245 	{
1246 		unsigned long sic_status[3];
1247 
1248 		sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
1249 		sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
1250 #ifdef CONFIG_BF54x
1251 		sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
1252 #endif
1253 		for (;; ivg++) {
1254 			if (ivg >= ivg_stop) {
1255 				atomic_inc(&num_spurious);
1256 				return 0;
1257 			}
1258 			if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag)
1259 				break;
1260 		}
1261 	}
1262 #else
1263 	{
1264 		unsigned long sic_status;
1265 
1266 		sic_status = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();
1267 
1268 		for (;; ivg++) {
1269 			if (ivg >= ivg_stop) {
1270 				atomic_inc(&num_spurious);
1271 				return 0;
1272 			} else if (sic_status & ivg->isrflag)
1273 				break;
1274 		}
1275 	}
1276 #endif
1277 
1278 	irq = ivg->irqno;
1279 
1280 	if (irq == IRQ_SYSTMR) {
1281 #ifdef CONFIG_GENERIC_CLOCKEVENTS
1282 core_tick:
1283 #else
1284 		bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */
1285 #endif
1286 		/* This is basically what we need from the register frame. */
1287 		__raw_get_cpu_var(__ipipe_tick_regs).ipend = regs->ipend;
1288 		__raw_get_cpu_var(__ipipe_tick_regs).pc = regs->pc;
1289 		if (this_domain != ipipe_root_domain)
1290 			__raw_get_cpu_var(__ipipe_tick_regs).ipend &= ~0x10;
1291 		else
1292 			__raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10;
1293 	}
1294 
1295 #ifndef CONFIG_GENERIC_CLOCKEVENTS
1296 core_tick:
1297 #endif
1298 	if (this_domain == ipipe_root_domain) {
1299 		s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
1300 		barrier();
1301 	}
1302 
1303 	ipipe_trace_irq_entry(irq);
1304 	__ipipe_handle_irq(irq, regs);
1305 	ipipe_trace_irq_exit(irq);
1306 
1307 	if (this_domain == ipipe_root_domain) {
1308 		set_thread_flag(TIF_IRQ_SYNC);
1309 		if (!s) {
1310 			__clear_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
1311 			return !test_bit(IPIPE_STALL_FLAG, &p->status);
1312 		}
1313 	}
1314 
1315 	return 0;
1316 }
1317 
1318 #endif /* CONFIG_IPIPE */
1319