1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * linux/include/linux/nmi.h
4 */
5 #ifndef LINUX_NMI_H
6 #define LINUX_NMI_H
7
8 #include <linux/sched.h>
9 #include <asm/irq.h>
10 #if defined(CONFIG_HAVE_NMI_WATCHDOG)
11 #include <asm/nmi.h>
12 #endif
13
14 #ifdef CONFIG_LOCKUP_DETECTOR
15 void lockup_detector_init(void);
16 #ifdef CONFIG_CPU_ISOLATION_OPT
17 extern void watchdog_enable(unsigned int cpu);
18 extern void watchdog_disable(unsigned int cpu);
19 extern bool watchdog_configured(unsigned int cpu);
20 #endif
21 void lockup_detector_soft_poweroff(void);
22 void lockup_detector_cleanup(void);
23 bool is_hardlockup(void);
24
25 extern int watchdog_user_enabled;
26 extern int nmi_watchdog_user_enabled;
27 extern int soft_watchdog_user_enabled;
28 extern int watchdog_thresh;
29 extern unsigned long watchdog_enabled;
30
31 extern struct cpumask watchdog_cpumask;
32 extern unsigned long *watchdog_cpumask_bits;
33 #ifdef CONFIG_SMP
34 extern int sysctl_softlockup_all_cpu_backtrace;
35 extern int sysctl_hardlockup_all_cpu_backtrace;
36 #else
37 #define sysctl_softlockup_all_cpu_backtrace 0
38 #define sysctl_hardlockup_all_cpu_backtrace 0
39 #endif /* !CONFIG_SMP */
40
41 #else /* CONFIG_LOCKUP_DETECTOR */
/* CONFIG_LOCKUP_DETECTOR disabled: lifecycle hooks compile away to no-ops. */
static inline void lockup_detector_init(void) { }
static inline void lockup_detector_soft_poweroff(void) { }
static inline void lockup_detector_cleanup(void) { }
45 #ifdef CONFIG_CPU_ISOLATION_OPT
/*
 * Without the lockup detector there is no per-CPU watchdog to control;
 * enable/disable requests are accepted and ignored.
 */
static inline void watchdog_enable(unsigned int cpu)
{
}
static inline void watchdog_disable(unsigned int cpu)
{
}
static inline bool watchdog_configured(unsigned int cpu)
{
	/*
	 * Pretend the watchdog is always configured.
	 * We will be waiting for the watchdog to be enabled in core isolation
	 * otherwise, so reporting "configured" avoids blocking that path.
	 */
	return true;
}
60 #endif
61 #endif /* !CONFIG_LOCKUP_DETECTOR */
62
63 #ifdef CONFIG_SOFTLOCKUP_DETECTOR
64 extern void touch_softlockup_watchdog_sched(void);
65 extern void touch_softlockup_watchdog(void);
66 extern void touch_softlockup_watchdog_sync(void);
67 extern void touch_all_softlockup_watchdogs(void);
68 extern unsigned int softlockup_panic;
69
70 extern int lockup_detector_online_cpu(unsigned int cpu);
71 extern int lockup_detector_offline_cpu(unsigned int cpu);
72 #else /* CONFIG_SOFTLOCKUP_DETECTOR */
/* CONFIG_SOFTLOCKUP_DETECTOR disabled: touching the watchdog is a no-op. */
static inline void touch_softlockup_watchdog_sched(void) { }
static inline void touch_softlockup_watchdog(void) { }
static inline void touch_softlockup_watchdog_sync(void) { }
static inline void touch_all_softlockup_watchdogs(void) { }
77
78 #define lockup_detector_online_cpu NULL
79 #define lockup_detector_offline_cpu NULL
80 #endif /* CONFIG_SOFTLOCKUP_DETECTOR */
81
82 #ifdef CONFIG_DETECT_HUNG_TASK
83 void reset_hung_task_detector(void);
84 #else
/* CONFIG_DETECT_HUNG_TASK disabled: nothing to reset. */
static inline void reset_hung_task_detector(void) { }
86 #endif
87
88 /*
89 * The run state of the lockup detectors is controlled by the content of the
90 * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
91 * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
92 *
93 * 'watchdog_user_enabled', 'nmi_watchdog_user_enabled' and
94 * 'soft_watchdog_user_enabled' are variables that are only used as an
95 * 'interface' between the parameters in /proc/sys/kernel and the internal
96 * state bits in 'watchdog_enabled'. The 'watchdog_thresh' variable is
97 * handled differently because its value is not boolean, and the lockup
 * detectors are 'suspended' while 'watchdog_thresh' is equal to zero.
99 */
100 #define NMI_WATCHDOG_ENABLED_BIT 0
101 #define SOFT_WATCHDOG_ENABLED_BIT 1
102 #define NMI_WATCHDOG_ENABLED (1 << NMI_WATCHDOG_ENABLED_BIT)
103 #define SOFT_WATCHDOG_ENABLED (1 << SOFT_WATCHDOG_ENABLED_BIT)
104
105 #if defined(CONFIG_HARDLOCKUP_DETECTOR)
106 extern void hardlockup_detector_disable(void);
107 extern unsigned int hardlockup_panic;
108 #else
/* CONFIG_HARDLOCKUP_DETECTOR disabled: nothing to turn off. */
static inline void hardlockup_detector_disable(void) {}
110 #endif
111
112 #if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
113 # define NMI_WATCHDOG_SYSCTL_PERM 0644
114 #else
115 # define NMI_WATCHDOG_SYSCTL_PERM 0444
116 #endif
117
118 #if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
119 extern void arch_touch_nmi_watchdog(void);
120 extern void hardlockup_detector_perf_stop(void);
121 extern void hardlockup_detector_perf_restart(void);
122 extern void hardlockup_detector_perf_disable(void);
123 extern void hardlockup_detector_perf_enable(void);
124 extern void hardlockup_detector_perf_cleanup(void);
125 extern int hardlockup_detector_perf_init(void);
126 #else
/* Perf-based hard lockup detector not built: provide no-op stubs. */
static inline void hardlockup_detector_perf_stop(void) { }
static inline void hardlockup_detector_perf_restart(void) { }
static inline void hardlockup_detector_perf_disable(void) { }
static inline void hardlockup_detector_perf_enable(void) { }
static inline void hardlockup_detector_perf_cleanup(void) { }
# if !defined(CONFIG_HAVE_NMI_WATCHDOG)
/* No arch NMI watchdog either: init reports no device, touch is a no-op. */
static inline int hardlockup_detector_perf_init(void) { return -ENODEV; }
static inline void arch_touch_nmi_watchdog(void) {}
# else
/* Arch supplies its own NMI watchdog (and arch_touch_nmi_watchdog()). */
static inline int hardlockup_detector_perf_init(void) { return 0; }
# endif
138 #endif
139
140 void watchdog_nmi_stop(void);
141 void watchdog_nmi_start(void);
142 int watchdog_nmi_probe(void);
143 int watchdog_nmi_enable(unsigned int cpu);
144 void watchdog_nmi_disable(unsigned int cpu);
145
146 void lockup_detector_reconfigure(void);
147
148 /**
149 * touch_nmi_watchdog - restart NMI watchdog timeout.
150 *
151 * If the architecture supports the NMI watchdog, touch_nmi_watchdog()
152 * may be used to reset the timeout - for code which intentionally
153 * disables interrupts for a long time. This call is stateless.
154 */
static inline void touch_nmi_watchdog(void)
{
	/* Reset the hard lockup (NMI) watchdog timeout, if the arch has one. */
	arch_touch_nmi_watchdog();
	/* The soft lockup watchdog must be reset as well. */
	touch_softlockup_watchdog();
}
160
161 /*
162 * Create trigger_all_cpu_backtrace() out of the arch-provided
163 * base function. Return whether such support was available,
164 * to allow calling code to fall back to some other mechanism:
165 */
166 #ifdef arch_trigger_cpumask_backtrace
/* Backtrace every online CPU; returns true (arch support is available). */
static inline bool trigger_all_cpu_backtrace(void)
{
	arch_trigger_cpumask_backtrace(cpu_online_mask, false);
	return true;
}
172
/* Backtrace all online CPUs except the calling one (exclude_self == true). */
static inline bool trigger_allbutself_cpu_backtrace(void)
{
	arch_trigger_cpumask_backtrace(cpu_online_mask, true);
	return true;
}
178
/* Backtrace exactly the CPUs in @mask; returns true (support available). */
static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
	arch_trigger_cpumask_backtrace(mask, false);
	return true;
}
184
/* Backtrace one CPU, expressed as a single-bit cpumask. */
static inline bool trigger_single_cpu_backtrace(int cpu)
{
	arch_trigger_cpumask_backtrace(cpumask_of(cpu), false);
	return true;
}
190
191 /* generic implementation */
192 void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
193 bool exclude_self,
194 void (*raise)(cpumask_t *mask));
195 bool nmi_cpu_backtrace(struct pt_regs *regs);
196
197 #else
/*
 * No arch_trigger_cpumask_backtrace() implementation: return false so
 * callers know NMI backtracing is unavailable and can fall back to some
 * other mechanism.
 */
static inline bool trigger_all_cpu_backtrace(void)
{
	return false;
}
static inline bool trigger_allbutself_cpu_backtrace(void)
{
	return false;
}
static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
	return false;
}
static inline bool trigger_single_cpu_backtrace(int cpu)
{
	return false;
}
214 #endif
215
216 #ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
217 u64 hw_nmi_get_sample_period(int watchdog_thresh);
218 #endif
219
220 #if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
221 defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
222 void watchdog_update_hrtimer_threshold(u64 period);
223 #else
/* Timestamp-based false-positive check not configured: nothing to update. */
static inline void watchdog_update_hrtimer_threshold(u64 period) { }
225 #endif
226
227 struct ctl_table;
228 int proc_watchdog(struct ctl_table *, int, void *, size_t *, loff_t *);
229 int proc_nmi_watchdog(struct ctl_table *, int , void *, size_t *, loff_t *);
230 int proc_soft_watchdog(struct ctl_table *, int , void *, size_t *, loff_t *);
231 int proc_watchdog_thresh(struct ctl_table *, int , void *, size_t *, loff_t *);
232 int proc_watchdog_cpumask(struct ctl_table *, int, void *, size_t *, loff_t *);
233
234 #ifdef CONFIG_HAVE_ACPI_APEI_NMI
235 #include <asm/nmi.h>
236 #endif
237
238 #endif
239