1 /*
2 * watchdog_dev.c
3 *
4 * (c) Copyright 2008-2011 Alan Cox <alan@lxorguk.ukuu.org.uk>,
5 * All Rights Reserved.
6 *
7 * (c) Copyright 2008-2011 Wim Van Sebroeck <wim@iguana.be>.
8 *
9 *
10 * This source code is part of the generic code that can be used
11 * by all the watchdog timer drivers.
12 *
13 * This part of the generic code takes care of the following
14 * misc device: /dev/watchdog.
15 *
16 * Based on source code of the following authors:
17 * Matt Domsch <Matt_Domsch@dell.com>,
18 * Rob Radez <rob@osinvestor.com>,
19 * Rusty Lynch <rusty@linux.co.intel.com>
20 * Satyam Sharma <satyam@infradead.org>
21 * Randy Dunlap <randy.dunlap@oracle.com>
22 *
23 * This program is free software; you can redistribute it and/or
24 * modify it under the terms of the GNU General Public License
25 * as published by the Free Software Foundation; either version
26 * 2 of the License, or (at your option) any later version.
27 *
28 * Neither Alan Cox, CymruNet Ltd., Wim Van Sebroeck nor Iguana vzw.
29 * admit liability nor provide warranty for any of this software.
30 * This material is provided "AS-IS" and at no charge.
31 */
32
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34
35 #include <linux/cdev.h> /* For character device */
36 #include <linux/errno.h> /* For the -ENODEV/... values */
37 #include <linux/fs.h> /* For file operations */
38 #include <linux/init.h> /* For __init/__exit/... */
39 #include <linux/hrtimer.h> /* For hrtimers */
40 #include <linux/kernel.h> /* For printk/panic/... */
41 #include <linux/kthread.h> /* For kthread_work */
42 #include <linux/miscdevice.h> /* For handling misc devices */
43 #include <linux/module.h> /* For module stuff/... */
44 #include <linux/mutex.h> /* For mutexes */
45 #include <linux/slab.h> /* For memory functions */
46 #include <linux/types.h> /* For standard types (like size_t) */
47 #include <linux/watchdog.h> /* For watchdog specific items */
48 #include <linux/uaccess.h> /* For copy_to_user/put_user/... */
49
50 #include <uapi/linux/sched/types.h> /* For struct sched_param */
51
52 #include "watchdog_core.h"
53 #include "watchdog_pretimeout.h"
54
55 /*
56 * struct watchdog_core_data - watchdog core internal data
57 * @dev: The watchdog's internal device
58 * @cdev: The watchdog's Character device.
59 * @wdd: Pointer to watchdog device.
60 * @lock: Lock for watchdog core.
61 * @status: Watchdog core internal status bits.
62 */
63 struct watchdog_core_data {
64 struct device dev;
65 struct cdev cdev;
66 struct watchdog_device *wdd;
67 struct mutex lock;
68 ktime_t last_keepalive;
69 ktime_t last_hw_keepalive;
70 struct hrtimer timer;
71 struct kthread_work work;
72 unsigned long status; /* Internal status bits */
73 #define _WDOG_DEV_OPEN 0 /* Opened ? */
74 #define _WDOG_ALLOW_RELEASE 1 /* Did we receive the magic char ? */
75 #define _WDOG_KEEPALIVE 2 /* Did we receive a keepalive ? */
76 };
77
78 /* the dev_t structure to store the dynamically allocated watchdog devices */
79 static dev_t watchdog_devt;
80 /* Reference to watchdog device behind /dev/watchdog */
81 static struct watchdog_core_data *old_wd_data;
82
83 static struct kthread_worker *watchdog_kworker;
84
85 static bool handle_boot_enabled =
86 IS_ENABLED(CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED);
87
88 static inline bool watchdog_need_worker(struct watchdog_device *wdd)
89 {
90 /* All variables in milli-seconds */
91 unsigned int hm = wdd->max_hw_heartbeat_ms;
92 unsigned int t = wdd->timeout * 1000;
93
94 /*
95 * A worker to generate heartbeat requests is needed if all of the
96 * following conditions are true.
97 * - Userspace activated the watchdog.
98 * - The driver provided a value for the maximum hardware timeout, and
99 * thus is aware that the framework supports generating heartbeat
100 * requests.
101 * - Userspace requests a longer timeout than the hardware can handle.
102 *
103 * Alternatively, if userspace has not opened the watchdog
104 * device, we take care of feeding the watchdog if it is
105 * running.
106 */
107 return (hm && watchdog_active(wdd) && t > hm) ||
108 (t && !watchdog_active(wdd) && watchdog_hw_running(wdd));
109 }
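
/*
 * Worked example (added for illustration, not part of the original
 * driver): with wdd->timeout = 60 (so t = 60000 ms) and
 * wdd->max_hw_heartbeat_ms = 8000, an active watchdog needs the worker
 * because 60000 > 8000 and the core must ping the hardware several
 * times per userspace-visible timeout. A watchdog that is hw-running
 * but not opened by userspace is likewise fed by the worker so the
 * system is not reset before userspace attaches.
 */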
110
111 static ktime_t watchdog_next_keepalive(struct watchdog_device *wdd)
112 {
113 struct watchdog_core_data *wd_data = wdd->wd_data;
114 unsigned int timeout_ms = wdd->timeout * 1000;
115 ktime_t keepalive_interval;
116 ktime_t last_heartbeat, latest_heartbeat;
117 ktime_t virt_timeout;
118 unsigned int hw_heartbeat_ms;
119
120 virt_timeout = ktime_add(wd_data->last_keepalive,
121 ms_to_ktime(timeout_ms));
122 hw_heartbeat_ms = min_not_zero(timeout_ms, wdd->max_hw_heartbeat_ms);
123 keepalive_interval = ms_to_ktime(hw_heartbeat_ms / 2);
124
125 if (!watchdog_active(wdd))
126 return keepalive_interval;
127
128 /*
129 * To ensure that the watchdog times out wdd->timeout seconds
130 * after the most recent ping from userspace, the last
131 * worker ping has to come in hw_heartbeat_ms before this timeout.
132 */
133 last_heartbeat = ktime_sub(virt_timeout, ms_to_ktime(hw_heartbeat_ms));
134 latest_heartbeat = ktime_sub(last_heartbeat, ktime_get());
135 if (ktime_before(latest_heartbeat, keepalive_interval))
136 return latest_heartbeat;
137 return keepalive_interval;
138 }
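
/*
 * Worked example with assumed values (not from the original source):
 * for timeout = 60 s and max_hw_heartbeat_ms = 8000, hw_heartbeat_ms is
 * 8000 and keepalive_interval is 4 s. The worker normally pings every
 * 4 s, but the last ping before the virtual timeout is scheduled so it
 * lands no later than 8 s before last_keepalive + 60 s, preserving the
 * timeout that userspace expects.
 */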
139
140 static inline void watchdog_update_worker(struct watchdog_device *wdd)
141 {
142 struct watchdog_core_data *wd_data = wdd->wd_data;
143
144 if (watchdog_need_worker(wdd)) {
145 ktime_t t = watchdog_next_keepalive(wdd);
146
147 if (t > 0)
148 hrtimer_start(&wd_data->timer, t, HRTIMER_MODE_REL);
149 } else {
150 hrtimer_cancel(&wd_data->timer);
151 }
152 }
153
154 static int __watchdog_ping(struct watchdog_device *wdd)
155 {
156 struct watchdog_core_data *wd_data = wdd->wd_data;
157 ktime_t earliest_keepalive, now;
158 int err;
159
160 earliest_keepalive = ktime_add(wd_data->last_hw_keepalive,
161 ms_to_ktime(wdd->min_hw_heartbeat_ms));
162 now = ktime_get();
163
164 if (ktime_after(earliest_keepalive, now)) {
165 hrtimer_start(&wd_data->timer,
166 ktime_sub(earliest_keepalive, now),
167 HRTIMER_MODE_REL);
168 return 0;
169 }
170
171 wd_data->last_hw_keepalive = now;
172
173 if (wdd->ops->ping)
174 err = wdd->ops->ping(wdd); /* ping the watchdog */
175 else
176 err = wdd->ops->start(wdd); /* restart watchdog */
177
178 watchdog_update_worker(wdd);
179
180 return err;
181 }
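
/*
 * Example of the min_hw_heartbeat_ms throttle above (hypothetical
 * values): with min_hw_heartbeat_ms = 1000 and the last hardware ping
 * issued 300 ms ago, the ping is deferred and the hrtimer is armed to
 * retry in roughly 700 ms, so the hardware is never pinged faster than
 * it supports.
 */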
182
183 /*
184 * watchdog_ping: ping the watchdog.
185 * @wdd: the watchdog device to ping
186 *
187 * The caller must hold wd_data->lock.
188 *
189 * If the watchdog does not provide its own ping operation, it is
190 * restarted via the start operation instead. This wrapper function does
191 * exactly that.
192 * We only ping when the watchdog device is running.
193 */
194
195 static int watchdog_ping(struct watchdog_device *wdd)
196 {
197 struct watchdog_core_data *wd_data = wdd->wd_data;
198
199 if (!watchdog_active(wdd) && !watchdog_hw_running(wdd))
200 return 0;
201
202 set_bit(_WDOG_KEEPALIVE, &wd_data->status);
203
204 wd_data->last_keepalive = ktime_get();
205 return __watchdog_ping(wdd);
206 }
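
/*
 * Note (added comment): watchdog_ping() records the userspace-visible
 * keepalive time in last_keepalive, while __watchdog_ping() records the
 * time the hardware was actually pinged in last_hw_keepalive; the two
 * differ whenever a ping is deferred by min_hw_heartbeat_ms.
 */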
207
208 static bool watchdog_worker_should_ping(struct watchdog_core_data *wd_data)
209 {
210 struct watchdog_device *wdd = wd_data->wdd;
211
212 return wdd && (watchdog_active(wdd) || watchdog_hw_running(wdd));
213 }
214
215 static void watchdog_ping_work(struct kthread_work *work)
216 {
217 struct watchdog_core_data *wd_data;
218
219 wd_data = container_of(work, struct watchdog_core_data, work);
220
221 mutex_lock(&wd_data->lock);
222 if (watchdog_worker_should_ping(wd_data))
223 __watchdog_ping(wd_data->wdd);
224 mutex_unlock(&wd_data->lock);
225 }
226
227 static enum hrtimer_restart watchdog_timer_expired(struct hrtimer *timer)
228 {
229 struct watchdog_core_data *wd_data;
230
231 wd_data = container_of(timer, struct watchdog_core_data, timer);
232
233 kthread_queue_work(watchdog_kworker, &wd_data->work);
234 return HRTIMER_NORESTART;
235 }
236
237 /*
238 * watchdog_start: wrapper to start the watchdog.
239 * @wdd: the watchdog device to start
240 *
241 * The caller must hold wd_data->lock.
242 *
243 * Start the watchdog if it is not active and mark it active.
244 * This function returns zero on success or a negative errno code for
245 * failure.
246 */
247
248 static int watchdog_start(struct watchdog_device *wdd)
249 {
250 struct watchdog_core_data *wd_data = wdd->wd_data;
251 ktime_t started_at;
252 int err;
253
254 if (watchdog_active(wdd))
255 return 0;
256
257 set_bit(_WDOG_KEEPALIVE, &wd_data->status);
258
259 started_at = ktime_get();
260 if (watchdog_hw_running(wdd) && wdd->ops->ping)
261 err = wdd->ops->ping(wdd);
262 else
263 err = wdd->ops->start(wdd);
264 if (err == 0) {
265 set_bit(WDOG_ACTIVE, &wdd->status);
266 wd_data->last_keepalive = started_at;
267 wd_data->last_hw_keepalive = started_at;
268 watchdog_update_worker(wdd);
269 }
270
271 return err;
272 }
273
274 /*
275 * watchdog_stop: wrapper to stop the watchdog.
276 * @wdd: the watchdog device to stop
277 *
278 * The caller must hold wd_data->lock.
279 *
280 * Stop the watchdog if it is still active and unmark it active.
281 * This function returns zero on success or a negative errno code for
282 * failure.
283 * If the 'nowayout' feature was set, the watchdog cannot be stopped.
284 */
285
286 static int watchdog_stop(struct watchdog_device *wdd)
287 {
288 int err = 0;
289
290 if (!watchdog_active(wdd))
291 return 0;
292
293 if (test_bit(WDOG_NO_WAY_OUT, &wdd->status)) {
294 pr_info("watchdog%d: nowayout prevents watchdog being stopped!\n",
295 wdd->id);
296 return -EBUSY;
297 }
298
299 if (wdd->ops->stop) {
300 clear_bit(WDOG_HW_RUNNING, &wdd->status);
301 err = wdd->ops->stop(wdd);
302 } else {
303 set_bit(WDOG_HW_RUNNING, &wdd->status);
304 }
305
306 if (err == 0) {
307 clear_bit(WDOG_ACTIVE, &wdd->status);
308 watchdog_update_worker(wdd);
309 }
310
311 return err;
312 }
313
314 /*
315 * watchdog_get_status: wrapper to get the watchdog status
316 * @wdd: the watchdog device to get the status from
317 *
318 * The caller must hold wd_data->lock.
319 *
320 * Get the watchdog's status flags.
321 */
322
323 static unsigned int watchdog_get_status(struct watchdog_device *wdd)
324 {
325 struct watchdog_core_data *wd_data = wdd->wd_data;
326 unsigned int status;
327
328 if (wdd->ops->status)
329 status = wdd->ops->status(wdd);
330 else
331 status = wdd->bootstatus & (WDIOF_CARDRESET |
332 WDIOF_OVERHEAT |
333 WDIOF_FANFAULT |
334 WDIOF_EXTERN1 |
335 WDIOF_EXTERN2 |
336 WDIOF_POWERUNDER |
337 WDIOF_POWEROVER);
338
339 if (test_bit(_WDOG_ALLOW_RELEASE, &wd_data->status))
340 status |= WDIOF_MAGICCLOSE;
341
342 if (test_and_clear_bit(_WDOG_KEEPALIVE, &wd_data->status))
343 status |= WDIOF_KEEPALIVEPING;
344
345 return status;
346 }
347
348 /*
349 * watchdog_set_timeout: set the watchdog timer timeout
350 * @wdd: the watchdog device to set the timeout for
351 * @timeout: timeout to set in seconds
352 *
353 * The caller must hold wd_data->lock.
354 */
355
356 static int watchdog_set_timeout(struct watchdog_device *wdd,
357 unsigned int timeout)
358 {
359 int err = 0;
360
361 if (!(wdd->info->options & WDIOF_SETTIMEOUT))
362 return -EOPNOTSUPP;
363
364 if (watchdog_timeout_invalid(wdd, timeout))
365 return -EINVAL;
366
367 if (wdd->ops->set_timeout) {
368 err = wdd->ops->set_timeout(wdd, timeout);
369 } else {
370 wdd->timeout = timeout;
371 /* Disable pretimeout if it doesn't fit the new timeout */
372 if (wdd->pretimeout >= wdd->timeout)
373 wdd->pretimeout = 0;
374 }
375
376 watchdog_update_worker(wdd);
377
378 return err;
379 }
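
/*
 * Note (added comment): when a driver reports max_hw_heartbeat_ms
 * rather than max_timeout, timeouts larger than the hardware maximum
 * pass watchdog_timeout_invalid() and are accepted here; the keepalive
 * worker above bridges the difference.
 */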
380
381 /*
382 * watchdog_set_pretimeout: set the watchdog timer pretimeout
383 * @wdd: the watchdog device to set the timeout for
384 * @timeout: pretimeout to set in seconds
385 */
386
387 static int watchdog_set_pretimeout(struct watchdog_device *wdd,
388 unsigned int timeout)
389 {
390 int err = 0;
391
392 if (!(wdd->info->options & WDIOF_PRETIMEOUT))
393 return -EOPNOTSUPP;
394
395 if (watchdog_pretimeout_invalid(wdd, timeout))
396 return -EINVAL;
397
398 if (wdd->ops->set_pretimeout)
399 err = wdd->ops->set_pretimeout(wdd, timeout);
400 else
401 wdd->pretimeout = timeout;
402
403 return err;
404 }
405
406 /*
407 * watchdog_get_timeleft: wrapper to get the time left before a reboot
408 * @wdd: the watchdog device to get the remaining time from
409 * @timeleft: the time that's left
410 *
411 * The caller must hold wd_data->lock.
412 *
413 * Get the time before a watchdog will reboot (if not pinged).
414 */
415
416 static int watchdog_get_timeleft(struct watchdog_device *wdd,
417 unsigned int *timeleft)
418 {
419 *timeleft = 0;
420
421 if (!wdd->ops->get_timeleft)
422 return -EOPNOTSUPP;
423
424 *timeleft = wdd->ops->get_timeleft(wdd);
425
426 return 0;
427 }
428
429 #ifdef CONFIG_WATCHDOG_SYSFS
430 static ssize_t nowayout_show(struct device *dev, struct device_attribute *attr,
431 char *buf)
432 {
433 struct watchdog_device *wdd = dev_get_drvdata(dev);
434
435 return sprintf(buf, "%d\n", !!test_bit(WDOG_NO_WAY_OUT, &wdd->status));
436 }
437 static DEVICE_ATTR_RO(nowayout);
438
439 static ssize_t status_show(struct device *dev, struct device_attribute *attr,
440 char *buf)
441 {
442 struct watchdog_device *wdd = dev_get_drvdata(dev);
443 struct watchdog_core_data *wd_data = wdd->wd_data;
444 unsigned int status;
445
446 mutex_lock(&wd_data->lock);
447 status = watchdog_get_status(wdd);
448 mutex_unlock(&wd_data->lock);
449
450 return sprintf(buf, "0x%x\n", status);
451 }
452 static DEVICE_ATTR_RO(status);
453
454 static ssize_t bootstatus_show(struct device *dev,
455 struct device_attribute *attr, char *buf)
456 {
457 struct watchdog_device *wdd = dev_get_drvdata(dev);
458
459 return sprintf(buf, "%u\n", wdd->bootstatus);
460 }
461 static DEVICE_ATTR_RO(bootstatus);
462
463 static ssize_t timeleft_show(struct device *dev, struct device_attribute *attr,
464 char *buf)
465 {
466 struct watchdog_device *wdd = dev_get_drvdata(dev);
467 struct watchdog_core_data *wd_data = wdd->wd_data;
468 ssize_t status;
469 unsigned int val;
470
471 mutex_lock(&wd_data->lock);
472 status = watchdog_get_timeleft(wdd, &val);
473 mutex_unlock(&wd_data->lock);
474 if (!status)
475 status = sprintf(buf, "%u\n", val);
476
477 return status;
478 }
479 static DEVICE_ATTR_RO(timeleft);
480
481 static ssize_t timeout_show(struct device *dev, struct device_attribute *attr,
482 char *buf)
483 {
484 struct watchdog_device *wdd = dev_get_drvdata(dev);
485
486 return sprintf(buf, "%u\n", wdd->timeout);
487 }
488 static DEVICE_ATTR_RO(timeout);
489
490 static ssize_t pretimeout_show(struct device *dev,
491 struct device_attribute *attr, char *buf)
492 {
493 struct watchdog_device *wdd = dev_get_drvdata(dev);
494
495 return sprintf(buf, "%u\n", wdd->pretimeout);
496 }
497 static DEVICE_ATTR_RO(pretimeout);
498
499 static ssize_t identity_show(struct device *dev, struct device_attribute *attr,
500 char *buf)
501 {
502 struct watchdog_device *wdd = dev_get_drvdata(dev);
503
504 return sprintf(buf, "%s\n", wdd->info->identity);
505 }
506 static DEVICE_ATTR_RO(identity);
507
508 static ssize_t state_show(struct device *dev, struct device_attribute *attr,
509 char *buf)
510 {
511 struct watchdog_device *wdd = dev_get_drvdata(dev);
512
513 if (watchdog_active(wdd))
514 return sprintf(buf, "active\n");
515
516 return sprintf(buf, "inactive\n");
517 }
518 static DEVICE_ATTR_RO(state);
519
520 static ssize_t pretimeout_available_governors_show(struct device *dev,
521 struct device_attribute *attr, char *buf)
522 {
523 return watchdog_pretimeout_available_governors_get(buf);
524 }
525 static DEVICE_ATTR_RO(pretimeout_available_governors);
526
527 static ssize_t pretimeout_governor_show(struct device *dev,
528 struct device_attribute *attr,
529 char *buf)
530 {
531 struct watchdog_device *wdd = dev_get_drvdata(dev);
532
533 return watchdog_pretimeout_governor_get(wdd, buf);
534 }
535
536 static ssize_t pretimeout_governor_store(struct device *dev,
537 struct device_attribute *attr,
538 const char *buf, size_t count)
539 {
540 struct watchdog_device *wdd = dev_get_drvdata(dev);
541 int ret = watchdog_pretimeout_governor_set(wdd, buf);
542
543 if (!ret)
544 ret = count;
545
546 return ret;
547 }
548 static DEVICE_ATTR_RW(pretimeout_governor);
549
550 static umode_t wdt_is_visible(struct kobject *kobj, struct attribute *attr,
551 int n)
552 {
553 struct device *dev = container_of(kobj, struct device, kobj);
554 struct watchdog_device *wdd = dev_get_drvdata(dev);
555 umode_t mode = attr->mode;
556
557 if (attr == &dev_attr_timeleft.attr && !wdd->ops->get_timeleft)
558 mode = 0;
559 else if (attr == &dev_attr_pretimeout.attr &&
560 !(wdd->info->options & WDIOF_PRETIMEOUT))
561 mode = 0;
562 else if ((attr == &dev_attr_pretimeout_governor.attr ||
563 attr == &dev_attr_pretimeout_available_governors.attr) &&
564 (!(wdd->info->options & WDIOF_PRETIMEOUT) ||
565 !IS_ENABLED(CONFIG_WATCHDOG_PRETIMEOUT_GOV)))
566 mode = 0;
567
568 return mode;
569 }
570 static struct attribute *wdt_attrs[] = {
571 &dev_attr_state.attr,
572 &dev_attr_identity.attr,
573 &dev_attr_timeout.attr,
574 &dev_attr_pretimeout.attr,
575 &dev_attr_timeleft.attr,
576 &dev_attr_bootstatus.attr,
577 &dev_attr_status.attr,
578 &dev_attr_nowayout.attr,
579 &dev_attr_pretimeout_governor.attr,
580 &dev_attr_pretimeout_available_governors.attr,
581 NULL,
582 };
583
584 static const struct attribute_group wdt_group = {
585 .attrs = wdt_attrs,
586 .is_visible = wdt_is_visible,
587 };
588 __ATTRIBUTE_GROUPS(wdt);
589 #else
590 #define wdt_groups NULL
591 #endif
592
593 /*
594 * watchdog_ioctl_op: call the watchdog driver's ioctl op if defined
595 * @wdd: the watchdog device to do the ioctl on
596 * @cmd: watchdog command
597 * @arg: argument pointer
598 *
599 * The caller must hold wd_data->lock.
600 */
601
602 static int watchdog_ioctl_op(struct watchdog_device *wdd, unsigned int cmd,
603 unsigned long arg)
604 {
605 if (!wdd->ops->ioctl)
606 return -ENOIOCTLCMD;
607
608 return wdd->ops->ioctl(wdd, cmd, arg);
609 }
610
611 /*
612 * watchdog_write: writes to the watchdog.
613 * @file: file from VFS
614 * @data: user address of data
615 * @len: length of data
616 * @ppos: pointer to the file offset
617 *
618 * A write to a watchdog device is defined as a keepalive ping.
619 * Writing the magic 'V' sequence allows the next close to turn
620 * off the watchdog (if 'nowayout' is not set).
621 */
622
623 static ssize_t watchdog_write(struct file *file, const char __user *data,
624 size_t len, loff_t *ppos)
625 {
626 struct watchdog_core_data *wd_data = file->private_data;
627 struct watchdog_device *wdd;
628 int err;
629 size_t i;
630 char c;
631
632 if (len == 0)
633 return 0;
634
635 /*
636 * Note: just in case someone wrote the magic character
637 * five months ago...
638 */
639 clear_bit(_WDOG_ALLOW_RELEASE, &wd_data->status);
640
641 /* scan to see whether or not we got the magic character */
642 for (i = 0; i != len; i++) {
643 if (get_user(c, data + i))
644 return -EFAULT;
645 if (c == 'V')
646 set_bit(_WDOG_ALLOW_RELEASE, &wd_data->status);
647 }
648
649 /* someone wrote to us, so we send the watchdog a keepalive ping */
650
651 err = -ENODEV;
652 mutex_lock(&wd_data->lock);
653 wdd = wd_data->wdd;
654 if (wdd)
655 err = watchdog_ping(wdd);
656 mutex_unlock(&wd_data->lock);
657
658 if (err < 0)
659 return err;
660
661 return len;
662 }
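
/*
 * Minimal userspace sketch of the write/magic-close protocol described
 * above (illustrative only; error handling and headers omitted):
 *
 *	int fd = open("/dev/watchdog", O_WRONLY);
 *	write(fd, "k", 1);	// any write is a keepalive ping
 *	write(fd, "V", 1);	// arm the magic close
 *	close(fd);		// stops the watchdog unless nowayout is set
 */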
663
664 /*
665 * watchdog_ioctl: handle the different ioctl's for the watchdog device.
666 * @file: file handle to the device
667 * @cmd: watchdog command
668 * @arg: argument pointer
669 *
670 * The watchdog API defines a common set of functions for all watchdogs
671 * according to their available features.
672 */
673
674 static long watchdog_ioctl(struct file *file, unsigned int cmd,
675 unsigned long arg)
676 {
677 struct watchdog_core_data *wd_data = file->private_data;
678 void __user *argp = (void __user *)arg;
679 struct watchdog_device *wdd;
680 int __user *p = argp;
681 unsigned int val;
682 int err;
683
684 mutex_lock(&wd_data->lock);
685
686 wdd = wd_data->wdd;
687 if (!wdd) {
688 err = -ENODEV;
689 goto out_ioctl;
690 }
691
692 err = watchdog_ioctl_op(wdd, cmd, arg);
693 if (err != -ENOIOCTLCMD)
694 goto out_ioctl;
695
696 switch (cmd) {
697 case WDIOC_GETSUPPORT:
698 err = copy_to_user(argp, wdd->info,
699 sizeof(struct watchdog_info)) ? -EFAULT : 0;
700 break;
701 case WDIOC_GETSTATUS:
702 val = watchdog_get_status(wdd);
703 err = put_user(val, p);
704 break;
705 case WDIOC_GETBOOTSTATUS:
706 err = put_user(wdd->bootstatus, p);
707 break;
708 case WDIOC_SETOPTIONS:
709 if (get_user(val, p)) {
710 err = -EFAULT;
711 break;
712 }
713 if (val & WDIOS_DISABLECARD) {
714 err = watchdog_stop(wdd);
715 if (err < 0)
716 break;
717 }
718 if (val & WDIOS_ENABLECARD)
719 err = watchdog_start(wdd);
720 break;
721 case WDIOC_KEEPALIVE:
722 if (!(wdd->info->options & WDIOF_KEEPALIVEPING)) {
723 err = -EOPNOTSUPP;
724 break;
725 }
726 err = watchdog_ping(wdd);
727 break;
728 case WDIOC_SETTIMEOUT:
729 if (get_user(val, p)) {
730 err = -EFAULT;
731 break;
732 }
733 err = watchdog_set_timeout(wdd, val);
734 if (err < 0)
735 break;
736 /* If the watchdog is active then we send a keepalive ping
737 * to make sure that the watchdog keeps running (and if
738 * possible that it takes the new timeout) */
739 err = watchdog_ping(wdd);
740 if (err < 0)
741 break;
742 /* fall through */
743 case WDIOC_GETTIMEOUT:
744 /* timeout == 0 means that we don't know the timeout */
745 if (wdd->timeout == 0) {
746 err = -EOPNOTSUPP;
747 break;
748 }
749 err = put_user(wdd->timeout, p);
750 break;
751 case WDIOC_GETTIMELEFT:
752 err = watchdog_get_timeleft(wdd, &val);
753 if (err < 0)
754 break;
755 err = put_user(val, p);
756 break;
757 case WDIOC_SETPRETIMEOUT:
758 if (get_user(val, p)) {
759 err = -EFAULT;
760 break;
761 }
762 err = watchdog_set_pretimeout(wdd, val);
763 break;
764 case WDIOC_GETPRETIMEOUT:
765 err = put_user(wdd->pretimeout, p);
766 break;
767 default:
768 err = -ENOTTY;
769 break;
770 }
771
772 out_ioctl:
773 mutex_unlock(&wd_data->lock);
774 return err;
775 }
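
/*
 * Userspace sketch of the ioctl interface handled above (illustrative
 * only; the values used are assumptions):
 *
 *	int timeout = 30;
 *	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	// timeout in use is written back
 *	ioctl(fd, WDIOC_KEEPALIVE, 0);		// explicit keepalive ping
 *	ioctl(fd, WDIOC_GETTIMELEFT, &timeout);	// seconds until reset
 */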
776
777 /*
778 * watchdog_open: open the /dev/watchdog* devices.
779 * @inode: inode of device
780 * @file: file handle to device
781 *
782 * When the /dev/watchdog* device gets opened, we start the watchdog.
783 * Watch out: the /dev/watchdog device is single open, so we make sure
784 * it can only be opened once.
785 */
786
787 static int watchdog_open(struct inode *inode, struct file *file)
788 {
789 struct watchdog_core_data *wd_data;
790 struct watchdog_device *wdd;
791 bool hw_running;
792 int err;
793
794 /* Get the corresponding watchdog device */
795 if (imajor(inode) == MISC_MAJOR)
796 wd_data = old_wd_data;
797 else
798 wd_data = container_of(inode->i_cdev, struct watchdog_core_data,
799 cdev);
800
801 /* the watchdog is single open! */
802 if (test_and_set_bit(_WDOG_DEV_OPEN, &wd_data->status))
803 return -EBUSY;
804
805 wdd = wd_data->wdd;
806
807 /*
808 * If the /dev/watchdog device is open, we don't want the module
809 * to be unloaded.
810 */
811 hw_running = watchdog_hw_running(wdd);
812 if (!hw_running && !try_module_get(wdd->ops->owner)) {
813 err = -EBUSY;
814 goto out_clear;
815 }
816
817 err = watchdog_start(wdd);
818 if (err < 0)
819 goto out_mod;
820
821 file->private_data = wd_data;
822
823 if (!hw_running)
824 get_device(&wd_data->dev);
825
826 /* /dev/watchdog is a virtual (and thus non-seekable) device */
827 return nonseekable_open(inode, file);
828
829 out_mod:
830 module_put(wd_data->wdd->ops->owner);
831 out_clear:
832 clear_bit(_WDOG_DEV_OPEN, &wd_data->status);
833 return err;
834 }
835
836 static void watchdog_core_data_release(struct device *dev)
837 {
838 struct watchdog_core_data *wd_data;
839
840 wd_data = container_of(dev, struct watchdog_core_data, dev);
841
842 kfree(wd_data);
843 }
844
845 /*
846 * watchdog_release: release the watchdog device.
847 * @inode: inode of device
848 * @file: file handle to device
849 *
850 * This is the code for when /dev/watchdog gets closed. We will only
851 * stop the watchdog when we have received the magic char (and nowayout
852 * was not set), else the watchdog will keep running.
853 */
854
855 static int watchdog_release(struct inode *inode, struct file *file)
856 {
857 struct watchdog_core_data *wd_data = file->private_data;
858 struct watchdog_device *wdd;
859 int err = -EBUSY;
860 bool running;
861
862 mutex_lock(&wd_data->lock);
863
864 wdd = wd_data->wdd;
865 if (!wdd)
866 goto done;
867
868 /*
869 * We only stop the watchdog if we received the magic character
870 * or if WDIOF_MAGICCLOSE is not set. If nowayout was set then
871 * watchdog_stop will fail.
872 */
873 if (!test_bit(WDOG_ACTIVE, &wdd->status))
874 err = 0;
875 else if (test_and_clear_bit(_WDOG_ALLOW_RELEASE, &wd_data->status) ||
876 !(wdd->info->options & WDIOF_MAGICCLOSE))
877 err = watchdog_stop(wdd);
878
879 /* If the watchdog was not stopped, send a keepalive ping */
880 if (err < 0) {
881 pr_crit("watchdog%d: watchdog did not stop!\n", wdd->id);
882 watchdog_ping(wdd);
883 }
884
885 watchdog_update_worker(wdd);
886
887 /* make sure that /dev/watchdog can be re-opened */
888 clear_bit(_WDOG_DEV_OPEN, &wd_data->status);
889
890 done:
891 running = wdd && watchdog_hw_running(wdd);
892 mutex_unlock(&wd_data->lock);
893 /*
894 * Allow the owner module to be unloaded again unless the watchdog
895 * is still running. If the watchdog is still running, it can not
896 * be stopped, and its driver must not be unloaded.
897 */
898 if (!running) {
899 module_put(wd_data->cdev.owner);
900 put_device(&wd_data->dev);
901 }
902 return 0;
903 }
904
905 static const struct file_operations watchdog_fops = {
906 .owner = THIS_MODULE,
907 .write = watchdog_write,
908 .unlocked_ioctl = watchdog_ioctl,
909 .open = watchdog_open,
910 .release = watchdog_release,
911 };
912
913 static struct miscdevice watchdog_miscdev = {
914 .minor = WATCHDOG_MINOR,
915 .name = "watchdog",
916 .fops = &watchdog_fops,
917 };
918
919 static struct class watchdog_class = {
920 .name = "watchdog",
921 .owner = THIS_MODULE,
922 .dev_groups = wdt_groups,
923 };
924
925 /*
926 * watchdog_cdev_register: register watchdog character device
927 * @wdd: watchdog device
928 *
929 * Register a watchdog character device including handling the legacy
930 * /dev/watchdog node. /dev/watchdog is actually a miscdevice and
931 * thus we set it up like that.
932 */
933
934 static int watchdog_cdev_register(struct watchdog_device *wdd)
935 {
936 struct watchdog_core_data *wd_data;
937 int err;
938
939 wd_data = kzalloc(sizeof(struct watchdog_core_data), GFP_KERNEL);
940 if (!wd_data)
941 return -ENOMEM;
942 mutex_init(&wd_data->lock);
943
944 wd_data->wdd = wdd;
945 wdd->wd_data = wd_data;
946
947 if (IS_ERR_OR_NULL(watchdog_kworker)) {
948 kfree(wd_data);
949 return -ENODEV;
950 }
951
952 device_initialize(&wd_data->dev);
953 wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id);
954 wd_data->dev.class = &watchdog_class;
955 wd_data->dev.parent = wdd->parent;
956 wd_data->dev.groups = wdd->groups;
957 wd_data->dev.release = watchdog_core_data_release;
958 dev_set_drvdata(&wd_data->dev, wdd);
959 dev_set_name(&wd_data->dev, "watchdog%d", wdd->id);
960
961 kthread_init_work(&wd_data->work, watchdog_ping_work);
962 hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
963 wd_data->timer.function = watchdog_timer_expired;
964
965 if (wdd->id == 0) {
966 old_wd_data = wd_data;
967 watchdog_miscdev.parent = wdd->parent;
968 err = misc_register(&watchdog_miscdev);
969 if (err != 0) {
970 pr_err("%s: cannot register miscdev on minor=%d (err=%d).\n",
971 wdd->info->identity, WATCHDOG_MINOR, err);
972 if (err == -EBUSY)
973 pr_err("%s: a legacy watchdog module is probably present.\n",
974 wdd->info->identity);
975 old_wd_data = NULL;
976 put_device(&wd_data->dev);
977 return err;
978 }
979 }
980
981 /* Fill in the data structures */
982 cdev_init(&wd_data->cdev, &watchdog_fops);
983
984 /* Add the device */
985 err = cdev_device_add(&wd_data->cdev, &wd_data->dev);
986 if (err) {
987 pr_err("watchdog%d unable to add device %d:%d\n",
988 wdd->id, MAJOR(watchdog_devt), wdd->id);
989 if (wdd->id == 0) {
990 misc_deregister(&watchdog_miscdev);
991 old_wd_data = NULL;
992 put_device(&wd_data->dev);
993 }
994 return err;
995 }
996
997 wd_data->cdev.owner = wdd->ops->owner;
998
999 /* Record time of most recent heartbeat as 'just before now'. */
1000 wd_data->last_hw_keepalive = ktime_sub(ktime_get(), 1);
1001
1002 /*
1003 * If the watchdog is running, prevent its driver from being unloaded,
1004 * and schedule an immediate ping.
1005 */
1006 if (watchdog_hw_running(wdd)) {
1007 __module_get(wdd->ops->owner);
1008 get_device(&wd_data->dev);
1009 if (handle_boot_enabled)
1010 hrtimer_start(&wd_data->timer, 0, HRTIMER_MODE_REL);
1011 else
1012 pr_info("watchdog%d running and kernel based pre-userspace handler disabled\n",
1013 wdd->id);
1014 }
1015
1016 return 0;
1017 }
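
/*
 * Example (assumed kernel command line spelling): booting with
 * "watchdog.handle_boot_enabled=0" leaves an already-running hardware
 * watchdog unfed by the kernel, so userspace must open /dev/watchdog
 * before the hardware timeout expires or the system will reset.
 */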
1018
1019 /*
1020 * watchdog_cdev_unregister: unregister watchdog character device
1021 * @watchdog: watchdog device
1022 *
1023 * Unregister watchdog character device and if needed the legacy
1024 * /dev/watchdog device.
1025 */
1026
1027 static void watchdog_cdev_unregister(struct watchdog_device *wdd)
1028 {
1029 struct watchdog_core_data *wd_data = wdd->wd_data;
1030
1031 cdev_device_del(&wd_data->cdev, &wd_data->dev);
1032 if (wdd->id == 0) {
1033 misc_deregister(&watchdog_miscdev);
1034 old_wd_data = NULL;
1035 }
1036
1037 if (watchdog_active(wdd) &&
1038 test_bit(WDOG_STOP_ON_UNREGISTER, &wdd->status)) {
1039 watchdog_stop(wdd);
1040 }
1041
1042 mutex_lock(&wd_data->lock);
1043 wd_data->wdd = NULL;
1044 wdd->wd_data = NULL;
1045 mutex_unlock(&wd_data->lock);
1046
1047 hrtimer_cancel(&wd_data->timer);
1048 kthread_cancel_work_sync(&wd_data->work);
1049
1050 put_device(&wd_data->dev);
1051 }
1052
1053 /*
1054 * watchdog_dev_register: register a watchdog device
1055 * @wdd: watchdog device
1056 *
1057 * Register a watchdog device including handling the legacy
1058 * /dev/watchdog node. /dev/watchdog is actually a miscdevice and
1059 * thus we set it up like that.
1060 */
1061
1062 int watchdog_dev_register(struct watchdog_device *wdd)
1063 {
1064 int ret;
1065
1066 ret = watchdog_cdev_register(wdd);
1067 if (ret)
1068 return ret;
1069
1070 ret = watchdog_register_pretimeout(wdd);
1071 if (ret)
1072 watchdog_cdev_unregister(wdd);
1073
1074 return ret;
1075 }
1076
1077 /*
1078 * watchdog_dev_unregister: unregister a watchdog device
1079 * @watchdog: watchdog device
1080 *
1081 * Unregister watchdog device and if needed the legacy
1082 * /dev/watchdog device.
1083 */
1084
1085 void watchdog_dev_unregister(struct watchdog_device *wdd)
1086 {
1087 watchdog_unregister_pretimeout(wdd);
1088 watchdog_cdev_unregister(wdd);
1089 }
1090
1091 /*
1092 * watchdog_dev_init: init dev part of watchdog core
1093 *
1094 * Allocate a range of chardev nodes to use for watchdog devices
1095 */
1096
1097 int __init watchdog_dev_init(void)
1098 {
1099 int err;
1100 struct sched_param param = {.sched_priority = MAX_RT_PRIO - 1,};
1101
1102 watchdog_kworker = kthread_create_worker(0, "watchdogd");
1103 if (IS_ERR(watchdog_kworker)) {
1104 pr_err("Failed to create watchdog kworker\n");
1105 return PTR_ERR(watchdog_kworker);
1106 }
1107 sched_setscheduler(watchdog_kworker->task, SCHED_FIFO, &param);
1108
1109 err = class_register(&watchdog_class);
1110 if (err < 0) {
1111 pr_err("couldn't register class\n");
1112 goto err_register;
1113 }
1114
1115 err = alloc_chrdev_region(&watchdog_devt, 0, MAX_DOGS, "watchdog");
1116 if (err < 0) {
1117 pr_err("watchdog: unable to allocate char dev region\n");
1118 goto err_alloc;
1119 }
1120
1121 return 0;
1122
1123 err_alloc:
1124 class_unregister(&watchdog_class);
1125 err_register:
1126 kthread_destroy_worker(watchdog_kworker);
1127 return err;
1128 }
1129
1130 /*
1131 * watchdog_dev_exit: exit dev part of watchdog core
1132 *
1133 * Release the range of chardev nodes used for watchdog devices
1134 */
1135
1136 void __exit watchdog_dev_exit(void)
1137 {
1138 unregister_chrdev_region(watchdog_devt, MAX_DOGS);
1139 class_unregister(&watchdog_class);
1140 kthread_destroy_worker(watchdog_kworker);
1141 }
1142
1143 module_param(handle_boot_enabled, bool, 0444);
1144 MODULE_PARM_DESC(handle_boot_enabled,
1145 "Watchdog core auto-updates boot enabled watchdogs before userspace takes over (default="
1146 __MODULE_STRING(IS_ENABLED(CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED)) ")");
1147