/*
 *  linux/include/linux/nmi.h
 */
#ifndef LINUX_NMI_H
#define LINUX_NMI_H

#include <linux/sched.h>
#include <asm/irq.h>

/**
 * touch_nmi_watchdog - restart NMI watchdog timeout.
 *
 * If the architecture supports the NMI watchdog, touch_nmi_watchdog()
 * may be used to reset the timeout - for code which intentionally
 * disables interrupts for a long time. This call is stateless.
 */
#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR_NMI)
#include <asm/nmi.h>
#endif

#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
extern void touch_nmi_watchdog(void);
#else
static inline void touch_nmi_watchdog(void)
{
	touch_softlockup_watchdog();
}
#endif
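
/*
 * Usage sketch (illustrative only, not part of the original header): code
 * that deliberately spins with interrupts disabled for a long time can pet
 * the NMI watchdog on each iteration so the hard-lockup detector does not
 * fire.  device_is_busy() is a hypothetical helper, shown only to frame
 * the loop:
 *
 *	local_irq_disable();
 *	while (device_is_busy()) {
 *		touch_nmi_watchdog();
 *		cpu_relax();
 *	}
 *	local_irq_enable();
 */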

#if defined(CONFIG_HARDLOCKUP_DETECTOR)
extern void watchdog_enable_hardlockup_detector(bool val);
extern bool watchdog_hardlockup_detector_is_enabled(void);
#else
static inline void watchdog_enable_hardlockup_detector(bool val)
{
}
static inline bool watchdog_hardlockup_detector_is_enabled(void)
{
	return true;
}
#endif

/*
 * Create trigger_all_cpu_backtrace() out of the arch-provided
 * base function. Return whether such support was available,
 * to allow calling code to fall back to some other mechanism:
 */
#ifdef arch_trigger_all_cpu_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
	#if defined(CONFIG_ARM)
	arch_trigger_all_cpu_backtrace();
	#else
	arch_trigger_all_cpu_backtrace(true);
	#endif

	return true;
}
static inline bool trigger_allbutself_cpu_backtrace(void)
{
	#if defined(CONFIG_ARM)
	arch_trigger_all_cpu_backtrace();
	#else
	arch_trigger_all_cpu_backtrace(false);
	#endif

	return true;
}
#else
static inline bool trigger_all_cpu_backtrace(void)
{
	return false;
}
static inline bool trigger_allbutself_cpu_backtrace(void)
{
	return false;
}
#endif
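
/*
 * Fallback sketch (illustrative only, not part of the original header):
 * because the stub variants above return false, a caller can test the
 * return value and degrade to a purely local backtrace when no
 * architecture support is available, e.g.:
 *
 *	if (!trigger_all_cpu_backtrace())
 *		dump_stack();	(dumps the current CPU only)
 */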

#ifdef CONFIG_LOCKUP_DETECTOR
int hw_nmi_is_cpu_stuck(struct pt_regs *);
u64 hw_nmi_get_sample_period(int watchdog_thresh);
extern int watchdog_user_enabled;
extern int watchdog_thresh;
extern int sysctl_softlockup_all_cpu_backtrace;
struct ctl_table;
extern int proc_dowatchdog(struct ctl_table *, int,
			   void __user *, size_t *, loff_t *);
#endif

#ifdef CONFIG_HAVE_ACPI_APEI_NMI
#include <asm/nmi.h>
#endif

#endif