/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_INTERRUPT_H
#define _ASM_POWERPC_INTERRUPT_H

/* BookE/4xx */
#define INTERRUPT_CRITICAL_INPUT  0x100

/* BookE */
#define INTERRUPT_DEBUG           0xd00
#ifdef CONFIG_BOOKE
#define INTERRUPT_PERFMON         0x260
#define INTERRUPT_DOORBELL        0x280
#endif

/* BookS/4xx/8xx */
#define INTERRUPT_MACHINE_CHECK   0x200

/* BookS/8xx */
#define INTERRUPT_SYSTEM_RESET    0x100

/* BookS */
#define INTERRUPT_DATA_SEGMENT    0x380
#define INTERRUPT_INST_SEGMENT    0x480
#define INTERRUPT_TRACE           0xd00
#define INTERRUPT_H_DATA_STORAGE  0xe00
#define INTERRUPT_HMI             0xe60
#define INTERRUPT_H_FAC_UNAVAIL   0xf80
#ifdef CONFIG_PPC_BOOK3S
#define INTERRUPT_DOORBELL        0xa00
#define INTERRUPT_PERFMON         0xf00
#define INTERRUPT_ALTIVEC_UNAVAIL 0xf20
#endif

/* BookE/BookS/4xx/8xx */
#define INTERRUPT_DATA_STORAGE    0x300
#define INTERRUPT_INST_STORAGE    0x400
#define INTERRUPT_EXTERNAL        0x500
#define INTERRUPT_ALIGNMENT       0x600
#define INTERRUPT_PROGRAM         0x700
#define INTERRUPT_SYSCALL         0xc00
#define INTERRUPT_TRACE           0xd00

/* BookE/BookS/44x */
#define INTERRUPT_FP_UNAVAIL      0x800

/* BookE/BookS/44x/8xx */
#define INTERRUPT_DECREMENTER     0x900

#ifndef INTERRUPT_PERFMON
#define INTERRUPT_PERFMON         0x0
#endif

/* 8xx */
#define INTERRUPT_SOFT_EMU_8xx        0x1000
#define INTERRUPT_INST_TLB_MISS_8xx   0x1100
#define INTERRUPT_DATA_TLB_MISS_8xx   0x1200
#define INTERRUPT_INST_TLB_ERROR_8xx  0x1300
#define INTERRUPT_DATA_TLB_ERROR_8xx  0x1400
#define INTERRUPT_DATA_BREAKPOINT_8xx 0x1c00
#define INTERRUPT_INST_BREAKPOINT_8xx 0x1d00

/* 603 */
#define INTERRUPT_INST_TLB_MISS_603       0x1000
#define INTERRUPT_DATA_LOAD_TLB_MISS_603  0x1100
#define INTERRUPT_DATA_STORE_TLB_MISS_603 0x1200

#ifndef __ASSEMBLY__

#include <linux/context_tracking.h>
#include <linux/hardirq.h>
#include <asm/cputime.h>
#include <asm/ftrace.h>
#include <asm/kprobes.h>
#include <asm/runlatch.h>

#ifdef CONFIG_PPC_BOOK3S_64
extern char __end_soft_masked[];
bool search_kernel_soft_mask_table(unsigned long addr);
unsigned long search_kernel_restart_table(unsigned long addr);

DECLARE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);

static inline bool is_implicit_soft_masked(struct pt_regs *regs)
{
	if (regs->msr & MSR_PR)
		return false;

	if (regs->nip >= (unsigned long)__end_soft_masked)
		return false;

	return search_kernel_soft_mask_table(regs->nip);
}

static inline void srr_regs_clobbered(void)
{
	local_paca->srr_valid = 0;
	local_paca->hsrr_valid = 0;
}
#else
static inline bool is_implicit_soft_masked(struct pt_regs *regs)
{
	return false;
}

static inline void srr_regs_clobbered(void)
{
}
#endif

static inline void nap_adjust_return(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_970_NAP
	if (unlikely(test_thread_local_flags(_TLF_NAPPING))) {
		/* Can avoid a test-and-clear because NMIs do not call this */
		clear_thread_local_flags(_TLF_NAPPING);
		regs_set_return_ip(regs, (unsigned long)power4_idle_nap_return);
	}
#endif
}

struct interrupt_state {
};

static inline void booke_restore_dbcr0(void)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long dbcr0 = current->thread.debug.dbcr0;

	if (IS_ENABLED(CONFIG_PPC32) && unlikely(dbcr0 & DBCR0_IDM)) {
		mtspr(SPRN_DBSR, -1);
		mtspr(SPRN_DBCR0, global_dbcr0[smp_processor_id()]);
	}
#endif
}

static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
#ifdef CONFIG_PPC32
	if (!arch_irq_disabled_regs(regs))
		trace_hardirqs_off();

	if (user_mode(regs))
		account_cpu_user_entry();
	else
		kuap_save_and_lock(regs);
#endif

#ifdef CONFIG_PPC64
	if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
		trace_hardirqs_off();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	if (user_mode(regs)) {
		CT_WARN_ON(ct_state() != CONTEXT_USER);
		user_exit_irqoff();

		account_cpu_user_entry();
		account_stolen_time();
	} else {
		/*
		 * CT_WARN_ON comes here via program_check_exception,
		 * so avoid recursion.
		 */
		if (TRAP(regs) != INTERRUPT_PROGRAM) {
			CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
			BUG_ON(is_implicit_soft_masked(regs));
		}
#ifdef CONFIG_PPC_BOOK3S
		/* Move this under a debugging check */
		if (arch_irq_disabled_regs(regs))
			BUG_ON(search_kernel_restart_table(regs->nip));
#endif
	}
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE));
#endif

	booke_restore_dbcr0();
}

/*
 * Care should be taken to note that interrupt_exit_prepare and
 * interrupt_async_exit_prepare do not necessarily return immediately to
 * regs context (e.g., if regs is usermode, we don't necessarily return to
 * user mode). Other interrupts might be taken between here and return,
 * context switch / preemption may occur in the exit path after this, or a
 * signal may be delivered, etc.
 *
 * The real interrupt exit code is platform specific, e.g.,
 * interrupt_exit_user_prepare / interrupt_exit_kernel_prepare for 64s.
 *
 * However interrupt_nmi_exit_prepare does return directly to regs, because
 * NMIs do not do "exit work" or replay soft-masked interrupts.
 */
static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
}

static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_CTRL) &&
	    !test_thread_local_flags(_TLF_RUNLATCH))
		__ppc64_runlatch_on();
#endif

	interrupt_enter_prepare(regs, state);
	irq_enter();
}

static inline void interrupt_async_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
	/*
	 * Adjust at exit so the main handler sees the true NIA. This must
	 * come before irq_exit() because irq_exit can enable interrupts, and
	 * if another interrupt is taken before nap_adjust_return has run
	 * here, then that interrupt would return directly to idle nap return.
	 */
	nap_adjust_return(regs);

	irq_exit();
	interrupt_exit_prepare(regs, state);
}

struct interrupt_nmi_state {
#ifdef CONFIG_PPC64
	u8 irq_soft_mask;
	u8 irq_happened;
	u8 ftrace_enabled;
	u64 softe;
#endif
};

static inline bool nmi_disables_ftrace(struct pt_regs *regs)
{
	/* Allow DEC and PMI to be traced when they are soft-NMI */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
		if (TRAP(regs) == INTERRUPT_DECREMENTER)
			return false;
		if (TRAP(regs) == INTERRUPT_PERFMON)
			return false;
	}
	if (IS_ENABLED(CONFIG_PPC_BOOK3E)) {
		if (TRAP(regs) == INTERRUPT_PERFMON)
			return false;
	}

	return true;
}

static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
#ifdef CONFIG_PPC64
	state->irq_soft_mask = local_paca->irq_soft_mask;
	state->irq_happened = local_paca->irq_happened;
	state->softe = regs->softe;

	/*
	 * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
	 * the right thing, and set IRQ_HARD_DIS. We do not want to reconcile
	 * because that goes through irq tracing which we don't want in NMI.
	 */
	local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	if (!(regs->msr & MSR_EE) || is_implicit_soft_masked(regs)) {
		/*
		 * Adjust regs->softe to be soft-masked if it had not been
		 * reconciled (e.g., interrupt entry with MSR[EE]=0 but softe
		 * not yet set to disabled), or if it was in an implicit
		 * soft-masked state. This makes arch_irq_disabled_regs(regs)
		 * behave as expected.
		 */
		regs->softe = IRQS_ALL_DISABLED;
	}

	/* Don't do any per-CPU operations until interrupt state is fixed */

	if (nmi_disables_ftrace(regs)) {
		state->ftrace_enabled = this_cpu_get_ftrace_enabled();
		this_cpu_set_ftrace_enabled(0);
	}
#endif

	/*
	 * Do not use nmi_enter() for pseries hash guest taking a real-mode
	 * NMI because not everything it touches is within the RMA limit.
	 */
	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
	    !firmware_has_feature(FW_FEATURE_LPAR) ||
	    radix_enabled() || (mfmsr() & MSR_DR))
		nmi_enter();
}

static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
	    !firmware_has_feature(FW_FEATURE_LPAR) ||
	    radix_enabled() || (mfmsr() & MSR_DR))
		nmi_exit();

	/*
	 * nmi does not call nap_adjust_return because nmi should not create
	 * new work to do (must use irq_work for that).
	 */

#ifdef CONFIG_PPC64
#ifdef CONFIG_PPC_BOOK3S
	if (arch_irq_disabled_regs(regs)) {
		unsigned long rst = search_kernel_restart_table(regs->nip);
		if (rst)
			regs_set_return_ip(regs, rst);
	}
#endif

	if (nmi_disables_ftrace(regs))
		this_cpu_set_ftrace_enabled(state->ftrace_enabled);

	/* Check we didn't change the pending interrupt mask. */
	WARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened);
	regs->softe = state->softe;
	local_paca->irq_happened = state->irq_happened;
	local_paca->irq_soft_mask = state->irq_soft_mask;
#endif
}

/*
 * Don't use noinstr here like x86, but rather add NOKPROBE_SYMBOL to each
 * function definition. The reason for this is the noinstr section is placed
 * after the main text section, i.e., very far away from the interrupt entry
 * asm. That creates problems with fitting linker stubs when building large
 * kernels.
 */
#define interrupt_handler __visible noinline notrace __no_kcsan __no_sanitize_address

/**
 * DECLARE_INTERRUPT_HANDLER_RAW - Declare raw interrupt handler function
 * @func: Function name of the entry point
 * @returns: Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RAW(func) \
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RAW - Define raw interrupt handler function
 * @func: Function name of the entry point
 * @returns: Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * This is a plain function which does no tracing, reconciling, etc.
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 *
 * Raw interrupt handlers must not enable or disable interrupts, or
 * schedule. Tracing and instrumentation (ftrace, lockdep, etc) are
 * not advisable either; they may be possible in a pinch, but the
 * trace will look odd at least.
 *
 * A raw handler may call one of the other interrupt handler functions
 * to be converted into that interrupt context without these restrictions.
 *
 * On PPC64, _RAW handlers may return with fast_interrupt_return.
 *
 * Specific handlers may have additional restrictions.
 */
#define DEFINE_INTERRUPT_HANDLER_RAW(func) \
static __always_inline long ____##func(struct pt_regs *regs); \
\
interrupt_handler long func(struct pt_regs *regs) \
{ \
	long ret; \
\
	ret = ____##func (regs); \
\
	return ret; \
} \
NOKPROBE_SYMBOL(func); \
\
static __always_inline long ____##func(struct pt_regs *regs)
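
/*
 * Illustrative sketch only (the handler name below is hypothetical, not a
 * real kernel entry point): a raw handler is written by appending a body
 * to the macro, and its return value is passed back to the asm caller:
 *
 *	DEFINE_INTERRUPT_HANDLER_RAW(example_raw_handler)
 *	{
 *		long handled = 0;
 *
 *		// keep the body minimal: no irq reconciling, tracing or
 *		// scheduling is done for (or allowed in) a _RAW handler
 *		return handled;
 *	}
 */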

/**
 * DECLARE_INTERRUPT_HANDLER - Declare synchronous interrupt handler function
 * @func: Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER(func) \
	__visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER - Define synchronous interrupt handler function
 * @func: Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER(func) \
static __always_inline void ____##func(struct pt_regs *regs); \
\
interrupt_handler void func(struct pt_regs *regs) \
{ \
	struct interrupt_state state; \
\
	interrupt_enter_prepare(regs, &state); \
\
	____##func (regs); \
\
	interrupt_exit_prepare(regs, &state); \
} \
NOKPROBE_SYMBOL(func); \
\
static __always_inline void ____##func(struct pt_regs *regs)
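
/*
 * Sketch of typical usage (the handler name is hypothetical, for
 * illustration only): declare the handler once, then define it by
 * appending the body to the macro:
 *
 *	DECLARE_INTERRUPT_HANDLER(example_exception);
 *
 *	DEFINE_INTERRUPT_HANDLER(example_exception)
 *	{
 *		// body runs after interrupt_enter_prepare() has reconciled
 *		// entry state; interrupt_exit_prepare() runs on return
 *	}
 */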

/**
 * DECLARE_INTERRUPT_HANDLER_RET - Declare synchronous interrupt handler function
 * @func: Function name of the entry point
 * @returns: Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RET(func) \
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RET - Define synchronous interrupt handler function
 * @func: Function name of the entry point
 * @returns: Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_RET(func) \
static __always_inline long ____##func(struct pt_regs *regs); \
\
interrupt_handler long func(struct pt_regs *regs) \
{ \
	struct interrupt_state state; \
	long ret; \
\
	interrupt_enter_prepare(regs, &state); \
\
	ret = ____##func (regs); \
\
	interrupt_exit_prepare(regs, &state); \
\
	return ret; \
} \
NOKPROBE_SYMBOL(func); \
\
static __always_inline long ____##func(struct pt_regs *regs)

/**
 * DECLARE_INTERRUPT_HANDLER_ASYNC - Declare asynchronous interrupt handler function
 * @func: Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER_ASYNC(func) \
	__visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_ASYNC - Define asynchronous interrupt handler function
 * @func: Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_ASYNC(func) \
static __always_inline void ____##func(struct pt_regs *regs); \
\
interrupt_handler void func(struct pt_regs *regs) \
{ \
	struct interrupt_state state; \
\
	interrupt_async_enter_prepare(regs, &state); \
\
	____##func (regs); \
\
	interrupt_async_exit_prepare(regs, &state); \
} \
NOKPROBE_SYMBOL(func); \
\
static __always_inline void ____##func(struct pt_regs *regs)

/**
 * DECLARE_INTERRUPT_HANDLER_NMI - Declare NMI interrupt handler function
 * @func: Function name of the entry point
 * @returns: Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_NMI(func) \
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_NMI - Define NMI interrupt handler function
 * @func: Function name of the entry point
 * @returns: Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_NMI(func) \
static __always_inline long ____##func(struct pt_regs *regs); \
\
interrupt_handler long func(struct pt_regs *regs) \
{ \
	struct interrupt_nmi_state state; \
	long ret; \
\
	interrupt_nmi_enter_prepare(regs, &state); \
\
	ret = ____##func (regs); \
\
	interrupt_nmi_exit_prepare(regs, &state); \
\
	return ret; \
} \
NOKPROBE_SYMBOL(func); \
\
static __always_inline long ____##func(struct pt_regs *regs)


/* Interrupt handlers */
/* kernel/traps.c */
DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
#ifdef CONFIG_PPC_BOOK3S_64
DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async);
#endif
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
DECLARE_INTERRUPT_HANDLER(SMIException);
DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
DECLARE_INTERRUPT_HANDLER(unknown_exception);
DECLARE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception);
DECLARE_INTERRUPT_HANDLER_NMI(unknown_nmi_exception);
DECLARE_INTERRUPT_HANDLER(instruction_breakpoint_exception);
DECLARE_INTERRUPT_HANDLER(RunModeException);
DECLARE_INTERRUPT_HANDLER(single_step_exception);
DECLARE_INTERRUPT_HANDLER(program_check_exception);
DECLARE_INTERRUPT_HANDLER(emulation_assist_interrupt);
DECLARE_INTERRUPT_HANDLER(alignment_exception);
DECLARE_INTERRUPT_HANDLER(StackOverflow);
DECLARE_INTERRUPT_HANDLER(stack_overflow_exception);
DECLARE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(facility_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(fp_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_tm);
DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi);
DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async);
DECLARE_INTERRUPT_HANDLER_RAW(performance_monitor_exception);
DECLARE_INTERRUPT_HANDLER(DebugException);
DECLARE_INTERRUPT_HANDLER(altivec_assist_exception);
DECLARE_INTERRUPT_HANDLER(CacheLockingException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointRoundException);
DECLARE_INTERRUPT_HANDLER_NMI(WatchdogException);
DECLARE_INTERRUPT_HANDLER(kernel_bad_stack);

/* slb.c */
DECLARE_INTERRUPT_HANDLER_RAW(do_slb_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_slb_fault);

/* hash_utils.c */
DECLARE_INTERRUPT_HANDLER(do_hash_fault);

/* fault.c */
DECLARE_INTERRUPT_HANDLER(do_page_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_page_fault_segv);

/* process.c */
DECLARE_INTERRUPT_HANDLER(do_break);

/* time.c */
DECLARE_INTERRUPT_HANDLER_ASYNC(timer_interrupt);

/* mce.c */
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_early);
DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);

DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);

/* irq.c */
DECLARE_INTERRUPT_HANDLER_ASYNC(do_IRQ);

void __noreturn unrecoverable_exception(struct pt_regs *regs);

void replay_system_reset(void);
void replay_soft_interrupts(void);

static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
{
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();
}

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_INTERRUPT_H */