// SPDX-License-Identifier: GPL-2.0
/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/tick.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#include <linux/sched/isolation.h>
#include <linux/stop_machine.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>

static DEFINE_MUTEX(watchdog_mutex);

#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
# define WATCHDOG_DEFAULT	(SOFT_WATCHDOG_ENABLED | NMI_WATCHDOG_ENABLED)
# define NMI_WATCHDOG_DEFAULT	1
#else
# define WATCHDOG_DEFAULT	(SOFT_WATCHDOG_ENABLED)
# define NMI_WATCHDOG_DEFAULT	0
#endif

unsigned long __read_mostly watchdog_enabled;
int __read_mostly watchdog_user_enabled = 1;
int __read_mostly nmi_watchdog_user_enabled = NMI_WATCHDOG_DEFAULT;
int __read_mostly soft_watchdog_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
static int __read_mostly nmi_watchdog_available;

struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);

#ifdef CONFIG_HARDLOCKUP_DETECTOR

# ifdef CONFIG_SMP
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
# endif /* CONFIG_SMP */

/*
 * Should we panic when a hard lockup occurs:
 */
unsigned int __read_mostly hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void __init hardlockup_detector_disable(void)
{
	nmi_watchdog_user_enabled = 0;
}

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		nmi_watchdog_user_enabled = 0;
	else if (!strncmp(str, "1", 1))
		nmi_watchdog_user_enabled = 1;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
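
/*
 * Example (illustrative): booting with "nmi_watchdog=panic" panics the
 * machine on a detected hard lockup, "nmi_watchdog=nopanic" only warns,
 * and "nmi_watchdog=0" or "nmi_watchdog=1" disables or enables the NMI
 * watchdog, exactly as parsed in hardlockup_panic_setup() above.
 */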

#endif /* CONFIG_HARDLOCKUP_DETECTOR */

/*
 * These functions can be overridden if an architecture implements its
 * own hardlockup detector.
 *
 * watchdog_nmi_enable/disable can be implemented to start and stop the
 * hardlockup detector when the softlockup watchdog starts and stops. The
 * arch must select the SOFTLOCKUP_DETECTOR Kconfig option.
 */
int __weak watchdog_nmi_enable(unsigned int cpu)
{
	hardlockup_detector_perf_enable();
	return 0;
}

void __weak watchdog_nmi_disable(unsigned int cpu)
{
	hardlockup_detector_perf_disable();
}

/* Return 0 if an NMI watchdog is available, an error code otherwise. */
int __weak __init watchdog_nmi_probe(void)
{
	return hardlockup_detector_perf_init();
}

/**
 * watchdog_nmi_stop - Stop the watchdog for reconfiguration
 *
 * The reconfiguration steps are:
 * watchdog_nmi_stop();
 * update_variables();
 * watchdog_nmi_start();
 */
void __weak watchdog_nmi_stop(void) { }

/**
 * watchdog_nmi_start - Start the watchdog after reconfiguration
 *
 * Counterpart to watchdog_nmi_stop().
 *
 * The following variables have been updated in update_variables() and
 * contain the currently valid configuration:
 * - watchdog_enabled
 * - watchdog_thresh
 * - watchdog_cpumask
 */
void __weak watchdog_nmi_start(void) { }

/**
 * lockup_detector_update_enable - Update the sysctl enable bit
 *
 * Caller needs to make sure that the NMI/perf watchdogs are off, so this
 * can't race with watchdog_nmi_disable().
 */
static void lockup_detector_update_enable(void)
{
	watchdog_enabled = 0;
	if (!watchdog_user_enabled)
		return;
	if (nmi_watchdog_available && nmi_watchdog_user_enabled)
		watchdog_enabled |= NMI_WATCHDOG_ENABLED;
	if (soft_watchdog_user_enabled)
		watchdog_enabled |= SOFT_WATCHDOG_ENABLED;
}

#ifdef CONFIG_SOFTLOCKUP_DETECTOR

#define SOFTLOCKUP_RESET	ULONG_MAX

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
#endif

static struct cpumask watchdog_allowed_mask __read_mostly;

/* Global variables, exported for sysctl */
unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static bool softlockup_initialized __read_mostly;
static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(unsigned int, watchdog_en);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static unsigned long soft_lockup_nmi_warn;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);
	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);
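
/*
 * Example (illustrative): booting with "softlockup_panic=1" makes the
 * kernel panic when a soft lockup is detected; "softlockup_panic=0"
 * restricts it to the warning and stack dump.
 */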

static int __init nowatchdog_setup(char *str)
{
	watchdog_user_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
	soft_watchdog_user_enabled = 0;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

static int __init watchdog_thresh_setup(char *str)
{
	get_option(&str, &watchdog_thresh);
	return 1;
}
__setup("watchdog_thresh=", watchdog_thresh_setup);
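
/*
 * Example (illustrative): "watchdog_thresh=30" on the command line raises
 * the hard-lockup threshold to 30 seconds and, via get_softlockup_thresh()
 * below, the soft-lockup threshold to 60 seconds.
 */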

static void __lockup_detector_cleanup(void);

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. We couple
 * the two thresholds with a factor: the soft threshold is twice the hard
 * threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}
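
/*
 * With the default watchdog_thresh of 10 seconds, the soft-lockup
 * threshold above works out to 20 seconds.
 */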

/*
 * Returns seconds, approximately. We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL; /* 2^30 ~= 10^9 */
}

static void set_sample_period(void)
{
	/*
	 * Convert watchdog_thresh from seconds to ns. The divide by 5
	 * gives the hrtimer several chances (two or three with the
	 * current relation between the soft and hard thresholds) to
	 * increment before the hardlockup detector generates a warning.
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
	watchdog_update_hrtimer_threshold(sample_period);
}
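
/*
 * Worked example with the defaults: watchdog_thresh = 10s gives a 20s
 * soft-lockup threshold, so sample_period = 20 * (1e9 / 5) ns = 4e9 ns,
 * i.e. the per-CPU watchdog hrtimer fires every 4 seconds.
 */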

/* Commands for resetting the watchdog */
static void update_touch_ts(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
}

/**
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state. This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */
notrace void touch_softlockup_watchdog_sched(void)
{
	/*
	 * Preemption can be enabled. It doesn't matter which CPU's timestamp
	 * gets zeroed here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_touch_ts, SOFTLOCKUP_RESET);
}

notrace void touch_softlockup_watchdog(void)
{
	touch_softlockup_watchdog_sched();
	wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * watchdog_mutex cannot be taken here, as this might be called
	 * from (soft)interrupt context, so the access to
	 * watchdog_allowed_mask might race with a concurrent update.
	 *
	 * The watchdog time stamp can race against a concurrent real
	 * update as well; the only side effect might be a cycle of delay
	 * for the softlockup check.
	 */
	for_each_cpu(cpu, &watchdog_allowed_mask)
		per_cpu(watchdog_touch_ts, cpu) = SOFTLOCKUP_RESET;
	wq_watchdog_touch(-1);
}

void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_touch_ts, SOFTLOCKUP_RESET);
}

static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp();

	if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh) {
		/* Warn about unreasonable delays. */
		if (time_after(now, touch_ts + get_softlockup_thresh()))
			return now - touch_ts;
	}
	return 0;
}

/* watchdog detector functions */
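/*
 * Illustrative note: is_hardlockup() compares the per-CPU hrtimer-interrupt
 * count against the value saved on the previous check. The count is
 * advanced by watchdog_timer_fn(); if it has not moved between two checks,
 * the hrtimer has been starved, which indicates a hard lockup. In the
 * perf-based detector this check runs from NMI context, which is why it
 * can still execute while interrupts are stuck off.
 */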
bool is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return true;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return false;
}

static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}

static DEFINE_PER_CPU(struct completion, softlockup_completion);
static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work);

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period (4 seconds by default) to
 * reset the softlockup timestamp. If this gets delayed for more than
 * 2*watchdog_thresh seconds then the debug-printout triggers in
 * watchdog_timer_fn().
 */
static int softlockup_fn(void *data)
{
	update_touch_ts();
	complete(this_cpu_ptr(&softlockup_completion));

	return 0;
}
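
/*
 * Note (illustrative): softlockup_fn() is queued with stop_one_cpu_nowait()
 * from watchdog_timer_fn() below, so it runs from the per-CPU stopper task,
 * the highest-priority task on the CPU. If even that task cannot get CPU
 * time within the threshold, some other context is monopolizing the CPU.
 */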

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

	if (!watchdog_enabled)
		return HRTIMER_NORESTART;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	if (completion_done(this_cpu_ptr(&softlockup_completion))) {
		reinit_completion(this_cpu_ptr(&softlockup_completion));
		stop_one_cpu_nowait(smp_processor_id(),
				softlockup_fn, NULL,
				this_cpu_ptr(&softlockup_stop_work));
	}

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == SOFTLOCKUP_RESET) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		update_touch_ts();
		return HRTIMER_RESTART;
	}

	/*
	 * Check for a softlockup. This is done by making sure a
	 * high-priority task is being scheduled. That task touches
	 * the watchdog to indicate it is getting CPU time. If it
	 * hasn't, some task is hogging the CPU.
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host, it can look
		 * to the watchdog like a soft lockup. Check whether the
		 * host stopped the VM before we issue the warning.
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/*
		 * Prevent multiple soft-lockup reports if one CPU is already
		 * engaged in dumping all CPU backtraces.
		 */
		if (softlockup_all_cpu_backtrace) {
			if (test_and_set_bit_lock(0, &soft_lockup_nmi_warn))
				return HRTIMER_RESTART;
		}

		/* Start period for the next softlockup warning. */
		update_touch_ts();

		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_all_cpu_backtrace) {
			trigger_allbutself_cpu_backtrace();
			clear_bit_unlock(0, &soft_lockup_nmi_warn);
		}

		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");
	}

	return HRTIMER_RESTART;
}

void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
	struct completion *done = this_cpu_ptr(&softlockup_completion);
	unsigned int *enabled = this_cpu_ptr(&watchdog_en);

	WARN_ON_ONCE(cpu != smp_processor_id());

	init_completion(done);
	complete(done);

	if (*enabled)
		return;

	/*
	 * Start the timer first to prevent the NMI watchdog triggering
	 * before the timer has a chance to fire.
	 */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	hrtimer->function = watchdog_timer_fn;
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED_HARD);

	/* Initialize timestamp */
	update_touch_ts();
	/* Enable the perf event */
	if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
		watchdog_nmi_enable(cpu);

	/*
	 * Need to ensure above operations are observed by other CPUs before
	 * indicating that timer is enabled. This is to synchronize core
	 * isolation and hotplug. Core isolation will wait for this flag to be
	 * set.
	 */
	mb();
	*enabled = 1;
}

void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = per_cpu_ptr(&watchdog_hrtimer, cpu);
	unsigned int *enabled = per_cpu_ptr(&watchdog_en, cpu);

	if (!*enabled)
		return;

	/*
	 * Disable the perf event first, so that a large delay between
	 * disabling the timer and disabling the perf event cannot cause
	 * the perf NMI to detect a false positive.
	 */
	watchdog_nmi_disable(cpu);
	hrtimer_cancel(hrtimer);
	wait_for_completion(per_cpu_ptr(&softlockup_completion, cpu));

	/*
	 * No need for barrier here since disabling the watchdog is
	 * synchronized with hotplug lock
	 */
	*enabled = 0;
}

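/*
 * Illustrative note: watchdog_en is the per-CPU "watchdog armed" flag set
 * by watchdog_enable() above; watchdog_configured() merely reports it so
 * callers (e.g. hotplug or core-isolation paths) can tell whether the
 * per-CPU watchdog has been set up.
 */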
bool watchdog_configured(unsigned int cpu)
{
	return *per_cpu_ptr(&watchdog_en, cpu);
}

static int softlockup_stop_fn(void *data)
{
	watchdog_disable(smp_processor_id());
	return 0;
}

static void softlockup_stop_all(void)
{
	int cpu;

	if (!softlockup_initialized)
		return;

	for_each_cpu(cpu, &watchdog_allowed_mask)
		smp_call_on_cpu(cpu, softlockup_stop_fn, NULL, false);

	cpumask_clear(&watchdog_allowed_mask);
}

static int softlockup_start_fn(void *data)
{
	watchdog_enable(smp_processor_id());
	return 0;
}

static void softlockup_start_all(void)
{
	int cpu;

	cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
	for_each_cpu(cpu, &watchdog_allowed_mask)
		smp_call_on_cpu(cpu, softlockup_start_fn, NULL, false);
}

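/*
 * CPU hotplug callbacks (illustrative note): the two functions below are
 * invoked as a CPU comes online or goes offline, and arm or disarm the
 * watchdog for that CPU, but only if it is part of watchdog_allowed_mask.
 */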
int lockup_detector_online_cpu(unsigned int cpu)
{
	if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
		watchdog_enable(cpu);
	return 0;
}

int lockup_detector_offline_cpu(unsigned int cpu)
{
	if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
		watchdog_disable(cpu);
	return 0;
}

static void __lockup_detector_reconfigure(void)
{
	cpus_read_lock();
	watchdog_nmi_stop();

	softlockup_stop_all();
	set_sample_period();
	lockup_detector_update_enable();
	if (watchdog_enabled && watchdog_thresh)
		softlockup_start_all();

	watchdog_nmi_start();
	cpus_read_unlock();
	/*
	 * Must be called outside the cpus locked section to prevent
	 * recursive locking in the perf code.
	 */
	__lockup_detector_cleanup();
}

void lockup_detector_reconfigure(void)
{
	mutex_lock(&watchdog_mutex);
	__lockup_detector_reconfigure();
	mutex_unlock(&watchdog_mutex);
}

/*
 * Create the watchdog infrastructure and configure the detector(s).
 *
 * The watchdogs are not started, as watchdog_allowed_mask is empty. Once
 * initialization has succeeded, take the proper locks and start the
 * watchdogs on the CPUs in watchdog_cpumask if the watchdog is enabled.
 */
static __init void lockup_detector_setup(void)
{
	/*
	 * If sysctl is off and watchdog got disabled on the command line,
	 * nothing to do here.
	 */
	lockup_detector_update_enable();

	if (!IS_ENABLED(CONFIG_SYSCTL) &&
	    !(watchdog_enabled && watchdog_thresh))
		return;

	mutex_lock(&watchdog_mutex);
	__lockup_detector_reconfigure();
	softlockup_initialized = true;
	mutex_unlock(&watchdog_mutex);
}

#else /* CONFIG_SOFTLOCKUP_DETECTOR */
static void __lockup_detector_reconfigure(void)
{
	cpus_read_lock();
	watchdog_nmi_stop();
	lockup_detector_update_enable();
	watchdog_nmi_start();
	cpus_read_unlock();
}
void lockup_detector_reconfigure(void)
{
	__lockup_detector_reconfigure();
}
static inline void lockup_detector_setup(void)
{
	__lockup_detector_reconfigure();
}
#endif /* !CONFIG_SOFTLOCKUP_DETECTOR */

static void __lockup_detector_cleanup(void)
{
	lockdep_assert_held(&watchdog_mutex);
	hardlockup_detector_perf_cleanup();
}

/**
 * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
 *
 * Caller must not hold the cpu hotplug rwsem.
 */
void lockup_detector_cleanup(void)
{
	mutex_lock(&watchdog_mutex);
	__lockup_detector_cleanup();
	mutex_unlock(&watchdog_mutex);
}

/**
 * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
 *
 * Special interface for parisc. It prevents lockup detector warnings from
 * the default pm_poweroff() function, which busy-loops forever.
 */
void lockup_detector_soft_poweroff(void)
{
	watchdog_enabled = 0;
}

#ifdef CONFIG_SYSCTL

/* Propagate any changes to the watchdog infrastructure */
static void proc_watchdog_update(void)
{
	/* Remove impossible CPUs to keep sysctl output clean. */
	cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
	__lockup_detector_reconfigure();
}

/*
 * common function for the watchdog, nmi_watchdog and soft_watchdog parameters
 *
 * caller             | table->data points to      | 'which'
 * -------------------|----------------------------|--------------------------
 * proc_watchdog      | watchdog_user_enabled      | NMI_WATCHDOG_ENABLED |
 *                    |                            | SOFT_WATCHDOG_ENABLED
 * -------------------|----------------------------|--------------------------
 * proc_nmi_watchdog  | nmi_watchdog_user_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|----------------------------|--------------------------
 * proc_soft_watchdog | soft_watchdog_user_enabled | SOFT_WATCHDOG_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, *param = table->data;

	mutex_lock(&watchdog_mutex);

	if (!write) {
		/*
		 * On read synchronize the userspace interface. This is a
		 * racy snapshot.
		 */
		*param = (watchdog_enabled & which) != 0;
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	} else {
		old = READ_ONCE(*param);
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		if (!err && old != READ_ONCE(*param))
			proc_watchdog_update();
	}
	mutex_unlock(&watchdog_mutex);
	return err;
}

/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
		  void *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}
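
/*
 * Example (illustrative) of driving these knobs from userspace:
 *   echo 0 > /proc/sys/kernel/watchdog       # disable both detectors
 *   echo 1 > /proc/sys/kernel/watchdog       # re-enable them
 *   echo 0 > /proc/sys/kernel/nmi_watchdog   # disable only the NMI watchdog
 * Each write lands in proc_watchdog_common(), which reconfigures the
 * detectors when the value actually changed.
 */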

/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
		      void *buffer, size_t *lenp, loff_t *ppos)
{
	if (!nmi_watchdog_available && write)
		return -ENOTSUPP;
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
		       void *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
			 void *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old;

	mutex_lock(&watchdog_mutex);

	old = READ_ONCE(watchdog_thresh);
	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (!err && write && old != READ_ONCE(watchdog_thresh))
		proc_watchdog_update();

	mutex_unlock(&watchdog_mutex);
	return err;
}

/*
 * The cpumask is the mask of possible CPUs that the watchdog can run
 * on, not the mask of CPUs it is actually running on. This allows the
 * user to specify a mask that will include CPUs that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
			  void *buffer, size_t *lenp, loff_t *ppos)
{
	int err;

	mutex_lock(&watchdog_mutex);

	err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
	if (!err && write)
		proc_watchdog_update();

	mutex_unlock(&watchdog_mutex);
	return err;
}
#endif /* CONFIG_SYSCTL */

void __init lockup_detector_init(void)
{
	if (tick_nohz_full_enabled())
		pr_info("Disabling watchdog on nohz_full cores by default\n");

	cpumask_copy(&watchdog_cpumask,
		     housekeeping_cpumask(HK_FLAG_TIMER));

	if (!watchdog_nmi_probe())
		nmi_watchdog_available = true;
	lockup_detector_setup();
}